From 835ddbacf414e20faaba36a55a4b9e1add89c8b4 Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Mon, 11 Jul 2022 21:14:37 +0800 Subject: [PATCH 01/56] fix: fix retry issue --- source/libs/scheduler/inc/schInt.h | 2 ++ source/libs/scheduler/src/schJob.c | 38 ++++++++++++++++++++++++--- source/libs/scheduler/src/schRemote.c | 2 +- source/libs/scheduler/src/schTask.c | 3 ++- 4 files changed, 39 insertions(+), 6 deletions(-) diff --git a/source/libs/scheduler/inc/schInt.h b/source/libs/scheduler/inc/schInt.h index e5c7e37479..4cae547077 100644 --- a/source/libs/scheduler/inc/schInt.h +++ b/source/libs/scheduler/inc/schInt.h @@ -223,6 +223,7 @@ typedef struct SSchJobAttr { typedef struct { int32_t op; + SRWLatch lock; bool syncReq; } SSchOpStatus; @@ -473,6 +474,7 @@ int32_t schGetTaskFromList(SHashObj *pTaskList, uint64_t taskId, SSchTask **pTas int32_t schInitTask(SSchJob *pJob, SSchTask *pTask, SSubplan *pPlan, SSchLevel *pLevel, int32_t levelNum); int32_t schSwitchTaskCandidateAddr(SSchJob *pJob, SSchTask *pTask); void schDirectPostJobRes(SSchedulerReq* pReq, int32_t errCode); +bool schChkCurrentOp(SSchJob *pJob, int32_t op, bool sync); extern SSchDebug gSCHDebug; diff --git a/source/libs/scheduler/src/schJob.c b/source/libs/scheduler/src/schJob.c index e482814ee7..19bb93249f 100644 --- a/source/libs/scheduler/src/schJob.c +++ b/source/libs/scheduler/src/schJob.c @@ -443,25 +443,37 @@ int32_t schNotifyUserFetchRes(SSchJob* pJob) { } void schPostJobRes(SSchJob *pJob, SCH_OP_TYPE op) { + SCH_LOCK(SCH_WRITE, &pJob->opStatus.lock); + if (SCH_OP_NULL == pJob->opStatus.op) { SCH_JOB_DLOG("job not in any operation, no need to post job res, status:%s", jobTaskStatusStr(pJob->status)); - return; + goto _return; } if (op && pJob->opStatus.op != op) { SCH_JOB_ELOG("job in operation %s mis-match with expected %s", schGetOpStr(pJob->opStatus.op), schGetOpStr(op)); - return; + goto _return; } if (SCH_JOB_IN_SYNC_OP(pJob)) { + SCH_UNLOCK(SCH_WRITE, &pJob->opStatus.lock); tsem_post(&pJob->rspSem); } else if (SCH_JOB_IN_ASYNC_EXEC_OP(pJob)) { + SCH_UNLOCK(SCH_WRITE, &pJob->opStatus.lock); schNotifyUserExecRes(pJob); } else if (SCH_JOB_IN_ASYNC_FETCH_OP(pJob)) { + SCH_UNLOCK(SCH_WRITE, &pJob->opStatus.lock); schNotifyUserFetchRes(pJob); } else { + SCH_UNLOCK(SCH_WRITE, &pJob->opStatus.lock); SCH_JOB_ELOG("job not in any operation, status:%s", jobTaskStatusStr(pJob->status)); } + + return; + +_return: + + SCH_UNLOCK(SCH_WRITE, &pJob->opStatus.lock); } int32_t schProcessOnJobFailureImpl(SSchJob *pJob, int32_t status, int32_t errCode) { @@ -658,13 +670,13 @@ int32_t schJobFetchRows(SSchJob *pJob) { if (!(pJob->attr.explainMode == EXPLAIN_MODE_STATIC)) { SCH_ERR_RET(schLaunchFetchTask(pJob)); - if (pJob->opStatus.syncReq) { + if (schChkCurrentOp(pJob, SCH_OP_FETCH, true)) { SCH_JOB_DLOG("sync wait for rsp now, job status:%s", SCH_GET_JOB_STATUS_STR(pJob)); tsem_wait(&pJob->rspSem); SCH_RET(schDumpJobFetchRes(pJob, pJob->userRes.fetchRes)); } } else { - if (pJob->opStatus.syncReq) { + if (schChkCurrentOp(pJob, SCH_OP_FETCH, true)) { SCH_RET(schDumpJobFetchRes(pJob, pJob->userRes.fetchRes)); } else { schPostJobRes(pJob, SCH_OP_FETCH); @@ -775,25 +787,37 @@ void schDirectPostJobRes(SSchedulerReq* pReq, int32_t errCode) { } } +bool schChkCurrentOp(SSchJob *pJob, int32_t op, bool sync) { + SCH_LOCK(SCH_READ, &pJob->opStatus.lock); + bool r = (pJob->opStatus.op == op) && (pJob->opStatus.syncReq == sync); + SCH_UNLOCK(SCH_READ, &pJob->opStatus.lock); + + return r; +} + void schProcessOnOpEnd(SSchJob *pJob, SCH_OP_TYPE 
type, SSchedulerReq* pReq, int32_t errCode) { int32_t op = 0; switch (type) { case SCH_OP_EXEC: if (pReq && pReq->syncReq) { + SCH_LOCK(SCH_WRITE, &pJob->opStatus.lock); op = atomic_val_compare_exchange_32(&pJob->opStatus.op, type, SCH_OP_NULL); if (SCH_OP_NULL == op || op != type) { SCH_JOB_ELOG("job not in %s operation, op:%s, status:%s", schGetOpStr(type), schGetOpStr(op), jobTaskStatusStr(pJob->status)); } + SCH_UNLOCK(SCH_WRITE, &pJob->opStatus.lock); schDumpJobExecRes(pJob, pReq->pExecRes); } break; case SCH_OP_FETCH: if (pReq && pReq->syncReq) { + SCH_LOCK(SCH_WRITE, &pJob->opStatus.lock); op = atomic_val_compare_exchange_32(&pJob->opStatus.op, type, SCH_OP_NULL); if (SCH_OP_NULL == op || op != type) { SCH_JOB_ELOG("job not in %s operation, op:%s, status:%s", schGetOpStr(type), schGetOpStr(op), jobTaskStatusStr(pJob->status)); } + SCH_UNLOCK(SCH_WRITE, &pJob->opStatus.lock); } break; case SCH_OP_GET_STATUS: @@ -816,8 +840,10 @@ int32_t schProcessOnOpBegin(SSchJob* pJob, SCH_OP_TYPE type, SSchedulerReq* pReq switch (type) { case SCH_OP_EXEC: + SCH_LOCK(SCH_WRITE, &pJob->opStatus.lock); if (SCH_OP_NULL != atomic_val_compare_exchange_32(&pJob->opStatus.op, SCH_OP_NULL, type)) { SCH_JOB_ELOG("job already in %s operation", schGetOpStr(pJob->opStatus.op)); + SCH_UNLOCK(SCH_WRITE, &pJob->opStatus.lock); schDirectPostJobRes(pReq, TSDB_CODE_TSC_APP_ERROR); SCH_ERR_RET(TSDB_CODE_TSC_APP_ERROR); } @@ -825,10 +851,13 @@ int32_t schProcessOnOpBegin(SSchJob* pJob, SCH_OP_TYPE type, SSchedulerReq* pReq SCH_JOB_DLOG("job start %s operation", schGetOpStr(pJob->opStatus.op)); pJob->opStatus.syncReq = pReq->syncReq; + SCH_UNLOCK(SCH_WRITE, &pJob->opStatus.lock); break; case SCH_OP_FETCH: + SCH_LOCK(SCH_WRITE, &pJob->opStatus.lock); if (SCH_OP_NULL != atomic_val_compare_exchange_32(&pJob->opStatus.op, SCH_OP_NULL, type)) { SCH_JOB_ELOG("job already in %s operation", schGetOpStr(pJob->opStatus.op)); + SCH_UNLOCK(SCH_WRITE, &pJob->opStatus.lock); schDirectPostJobRes(pReq, TSDB_CODE_TSC_APP_ERROR); SCH_ERR_RET(TSDB_CODE_TSC_APP_ERROR); } @@ -840,6 +869,7 @@ int32_t schProcessOnOpBegin(SSchJob* pJob, SCH_OP_TYPE type, SSchedulerReq* pReq pJob->userRes.cbParam = pReq->cbParam; pJob->opStatus.syncReq = pReq->syncReq; + SCH_UNLOCK(SCH_WRITE, &pJob->opStatus.lock); if (!SCH_JOB_NEED_FETCH(pJob)) { SCH_JOB_ELOG("no need to fetch data, status:%s", SCH_GET_JOB_STATUS_STR(pJob)); diff --git a/source/libs/scheduler/src/schRemote.c b/source/libs/scheduler/src/schRemote.c index 2257ba8328..3db1ba7be8 100644 --- a/source/libs/scheduler/src/schRemote.c +++ b/source/libs/scheduler/src/schRemote.c @@ -940,7 +940,7 @@ int32_t schBuildAndSendMsg(SSchJob *pJob, SSchTask *pTask, SQueryNodeAddr *addr, if (NULL == addr) { addr = taosArrayGet(pTask->candidateAddrs, pTask->candidateIdx); isCandidateAddr = true; - SCH_TASK_DLOG("target candidateIdx %d", pTask->candidateIdx); + SCH_TASK_DLOG("target candidateIdx %d, epInUse %d/%d", pTask->candidateIdx, addr->epSet.inUse, addr->epSet.numOfEps); } switch (msgType) { diff --git a/source/libs/scheduler/src/schTask.c b/source/libs/scheduler/src/schTask.c index a6621d279d..d77fbc33fd 100644 --- a/source/libs/scheduler/src/schTask.c +++ b/source/libs/scheduler/src/schTask.c @@ -545,7 +545,8 @@ int32_t schHandleTaskRetry(SSchJob *pJob, SSchTask *pTask) { schDeregisterTaskHb(pJob, pTask); if (SCH_IS_DATA_BIND_TASK(pTask)) { - SCH_SWITCH_EPSET(&pTask->plan->execNode); + SQueryNodeAddr *addr = taosArrayGet(pTask->candidateAddrs, pTask->candidateIdx); + SCH_SWITCH_EPSET(addr); } else { 
SCH_ERR_RET(schSwitchTaskCandidateAddr(pJob, pTask)); } From 7fcf80a8f92515f9981290943434558225748f9c Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Tue, 12 Jul 2022 09:24:01 +0800 Subject: [PATCH 02/56] enh: add more retry times for data src task --- source/libs/scheduler/src/schTask.c | 2 ++ tests/script/api/stopquery.c | 10 +++++----- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/source/libs/scheduler/src/schTask.c b/source/libs/scheduler/src/schTask.c index d77fbc33fd..b83e24d6df 100644 --- a/source/libs/scheduler/src/schTask.c +++ b/source/libs/scheduler/src/schTask.c @@ -508,6 +508,7 @@ int32_t schTaskCheckSetRetry(SSchJob *pJob, SSchTask *pTask, int32_t errCode, bo return TSDB_CODE_SUCCESS; } +/* if (SCH_IS_DATA_BIND_TASK(pTask)) { if ((pTask->execId + 1) >= SCH_TASK_NUM_OF_EPS(&pTask->plan->execNode)) { *needRetry = false; @@ -525,6 +526,7 @@ int32_t schTaskCheckSetRetry(SSchJob *pJob, SSchTask *pTask, int32_t errCode, bo return TSDB_CODE_SUCCESS; } } +*/ *needRetry = true; SCH_TASK_DLOG("task need the %dth retry, errCode:%x - %s", pTask->execId + 1, errCode, tstrerror(errCode)); diff --git a/tests/script/api/stopquery.c b/tests/script/api/stopquery.c index 0f27fdf9f9..eeaf9295d9 100644 --- a/tests/script/api/stopquery.c +++ b/tests/script/api/stopquery.c @@ -578,6 +578,8 @@ int sqConKillSyncQuery(bool fetch) { pthread_join(qid, NULL); pthread_join(cid, NULL); + + taos_close(param.taos); } CASE_LEAVE(); } @@ -593,6 +595,8 @@ int sqConKillAsyncQuery(bool fetch) { pthread_join(qid, NULL); pthread_join(cid, NULL); + + taos_close(param.taos); } CASE_LEAVE(); } @@ -600,7 +604,6 @@ int sqConKillAsyncQuery(bool fetch) { void sqRunAllCase(void) { -/* sqStopSyncQuery(false); sqStopSyncQuery(true); sqStopAsyncQuery(false); @@ -620,17 +623,14 @@ void sqRunAllCase(void) { sqConCloseSyncQuery(true); sqConCloseAsyncQuery(false); sqConCloseAsyncQuery(true); -*/ -#if 0 sqKillSyncQuery(false); sqKillSyncQuery(true); sqKillAsyncQuery(false); sqKillAsyncQuery(true); -#endif - //sqConKillSyncQuery(false); + sqConKillSyncQuery(false); sqConKillSyncQuery(true); #if 0 sqConKillAsyncQuery(false); From 9f0152239d9bacda1168e1de5568ef1af5362a9d Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Tue, 12 Jul 2022 14:56:12 +0800 Subject: [PATCH 03/56] fix: fix stop query --- source/libs/catalog/src/catalog.c | 1 + source/util/src/tref.c | 2 +- tests/script/api/stopquery.c | 14 ++++++++++---- 3 files changed, 12 insertions(+), 5 deletions(-) diff --git a/source/libs/catalog/src/catalog.c b/source/libs/catalog/src/catalog.c index 3a7ad4a2d6..1b7f53ae67 100644 --- a/source/libs/catalog/src/catalog.c +++ b/source/libs/catalog/src/catalog.c @@ -1293,6 +1293,7 @@ void catalogDestroy(void) { if (!taosCheckCurrentInDll()) { ctgClearCacheEnqueue(NULL, true, true, true); + taosThreadJoin(gCtgMgmt.updateThread, NULL); } taosHashCleanup(gCtgMgmt.pCluster); diff --git a/source/util/src/tref.c b/source/util/src/tref.c index 2e4c33bc87..9cd849b9be 100644 --- a/source/util/src/tref.c +++ b/source/util/src/tref.c @@ -431,7 +431,7 @@ static int32_t taosDecRefCount(int32_t rsetId, int64_t rid, int32_t remove) { } released = 1; } else { - uTrace("rsetId:%d p:%p rid:%" PRId64 " is released", rsetId, pNode->p, rid); + uTrace("rsetId:%d p:%p rid:%" PRId64 " is released, remain count %d", rsetId, pNode->p, rid, pNode->count); } } else { uTrace("rsetId:%d rid:%" PRId64 " is not there, failed to release/remove", rsetId, rid); diff --git a/tests/script/api/stopquery.c b/tests/script/api/stopquery.c index 
eeaf9295d9..a42e9e2d44 100644 --- a/tests/script/api/stopquery.c +++ b/tests/script/api/stopquery.c @@ -36,7 +36,7 @@ int64_t st, et; char hostName[128]; char dbName[128]; char tbName[128]; -int32_t runTimes = 10000; +int32_t runTimes = 1000; typedef struct { int id; @@ -88,6 +88,7 @@ static void sqExecSQLE(TAOS *taos, char *command) { void sqExit(char* prefix, const char* errMsg) { fprintf(stderr, "%s error: %s\n", prefix, errMsg); + sleep(10000); exit(1); } @@ -141,16 +142,20 @@ void sqCloseFetchCb(void *param, TAOS_RES *pRes, int numOfRows) { taos_close(qParam->taos); *qParam->end = 1; + + taos_free_result(pRes); } void sqCloseQueryCb(void *param, TAOS_RES *pRes, int code) { SSP_CB_PARAM *qParam = (SSP_CB_PARAM *)param; if (code == 0 && pRes) { if (qParam->fetch) { - taos_fetch_rows_a(pRes, sqFreeFetchCb, param); + taos_fetch_rows_a(pRes, sqCloseFetchCb, param); } else { taos_close(qParam->taos); *qParam->end = 1; + + taos_free_result(pRes); } } else { sqExit("select", taos_errstr(pRes)); @@ -358,6 +363,7 @@ int sqCloseSyncQuery(bool fetch) { } taos_close(taos); + taos_free_result(pRes); } CASE_LEAVE(); } @@ -382,7 +388,7 @@ int sqCloseAsyncQuery(bool fetch) { SSP_CB_PARAM param = {0}; param.fetch = fetch; param.end = &qEnd; - taos_query_a(taos, sql, sqFreeQueryCb, ¶m); + taos_query_a(taos, sql, sqCloseQueryCb, ¶m); while (0 == qEnd) { usleep(5000); } @@ -640,7 +646,7 @@ void sqRunAllCase(void) { int32_t l = 5; while (l) { printf("%d\n", l--); - sleep(1); + sleep(1000); } } From 473e134f310c699249f319ff773eb4830f32eb2a Mon Sep 17 00:00:00 2001 From: Minghao Li Date: Tue, 12 Jul 2022 16:57:19 +0800 Subject: [PATCH 04/56] refactor(sync): add resp ttl clean --- include/libs/sync/sync.h | 2 + source/dnode/vnode/src/vnd/vnodeSync.c | 23 ++++++++---- source/libs/sync/src/syncMain.c | 9 +---- source/libs/sync/src/syncRespMgr.c | 51 ++++++++++---------------- source/libs/sync/src/syncTimeout.c | 2 + 5 files changed, 41 insertions(+), 46 deletions(-) diff --git a/include/libs/sync/sync.h b/include/libs/sync/sync.h index bef26cb310..c226d7c8cc 100644 --- a/include/libs/sync/sync.h +++ b/include/libs/sync/sync.h @@ -26,6 +26,8 @@ extern "C" { extern bool gRaftDetailLog; +#define SYNC_RESP_TTL_MS 5000 + #define SYNC_MAX_BATCH_SIZE 500 #define SYNC_INDEX_BEGIN 0 #define SYNC_INDEX_INVALID -1 diff --git a/source/dnode/vnode/src/vnd/vnodeSync.c b/source/dnode/vnode/src/vnd/vnodeSync.c index bdcfe208d6..87148a8450 100644 --- a/source/dnode/vnode/src/vnd/vnodeSync.c +++ b/source/dnode/vnode/src/vnd/vnodeSync.c @@ -427,13 +427,22 @@ static void vnodeSyncCommitMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta c syncGetVgId(pVnode->sync), pFsm, cbMeta.index, cbMeta.isWeak, cbMeta.code, cbMeta.state, syncUtilState2String(cbMeta.state), pMsg->msgType, TMSG_INFO(pMsg->msgType)); - SRpcMsg rpcMsg = {.msgType = pMsg->msgType, .contLen = pMsg->contLen}; - rpcMsg.pCont = rpcMallocCont(rpcMsg.contLen); - memcpy(rpcMsg.pCont, pMsg->pCont, pMsg->contLen); - syncGetAndDelRespRpc(pVnode->sync, cbMeta.seqNum, &rpcMsg.info); - rpcMsg.info.conn.applyIndex = cbMeta.index; - rpcMsg.info.conn.applyTerm = cbMeta.term; - tmsgPutToQueue(&pVnode->msgCb, APPLY_QUEUE, &rpcMsg); + if (cbMeta.code == 0) { + SRpcMsg rpcMsg = {.msgType = pMsg->msgType, .contLen = pMsg->contLen}; + rpcMsg.pCont = rpcMallocCont(rpcMsg.contLen); + memcpy(rpcMsg.pCont, pMsg->pCont, pMsg->contLen); + syncGetAndDelRespRpc(pVnode->sync, cbMeta.seqNum, &rpcMsg.info); + rpcMsg.info.conn.applyIndex = cbMeta.index; + rpcMsg.info.conn.applyTerm = cbMeta.term; 
+ tmsgPutToQueue(&pVnode->msgCb, APPLY_QUEUE, &rpcMsg); + } else { + SRpcMsg rsp = {.code = cbMeta.code, .info = pMsg->info}; + vError("vgId:%d, sync commit error, msgtype:%d,%s, error:0x%X, errmsg:%s", syncGetVgId(pVnode->sync), pMsg->msgType, + TMSG_INFO(pMsg->msgType), cbMeta.code, tstrerror(cbMeta.code)); + if (rsp.info.handle != NULL) { + tmsgSendRsp(&rsp); + } + } } static void vnodeSyncPreCommitMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta) { diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c index abc0f53611..918496c894 100644 --- a/source/libs/sync/src/syncMain.c +++ b/source/libs/sync/src/syncMain.c @@ -1055,19 +1055,12 @@ SSyncNode* syncNodeOpen(const SSyncInfo* pOldSyncInfo) { } // tools - pSyncNode->pSyncRespMgr = syncRespMgrCreate(pSyncNode, 0); + pSyncNode->pSyncRespMgr = syncRespMgrCreate(pSyncNode, SYNC_RESP_TTL_MS); ASSERT(pSyncNode->pSyncRespMgr != NULL); // restore state pSyncNode->restoreFinish = false; - // pSyncNode->pSnapshot = NULL; - // if (pSyncNode->pFsm->FpGetSnapshotInfo != NULL) { - // pSyncNode->pSnapshot = taosMemoryMalloc(sizeof(SSnapshot)); - // pSyncNode->pFsm->FpGetSnapshotInfo(pSyncNode->pFsm, pSyncNode->pSnapshot); - // } - // tsem_init(&(pSyncNode->restoreSem), 0, 0); - // snapshot senders for (int i = 0; i < TSDB_MAX_REPLICA; ++i) { SSyncSnapshotSender* pSender = snapshotSenderCreate(pSyncNode, i); diff --git a/source/libs/sync/src/syncRespMgr.c b/source/libs/sync/src/syncRespMgr.c index eaeadd3991..2aaa98b299 100644 --- a/source/libs/sync/src/syncRespMgr.c +++ b/source/libs/sync/src/syncRespMgr.c @@ -122,54 +122,43 @@ void syncRespCleanByTTL(SSyncRespMgr *pObj, int64_t ttl) { int cnt = 0; SSyncNode *pSyncNode = pObj->data; - SArray *delIndexArray = taosArrayInit(0, sizeof(SyncIndex)); + SArray *delIndexArray = taosArrayInit(0, sizeof(uint64_t)); ASSERT(delIndexArray != NULL); while (pStub) { - size_t len; - void * key = taosHashGetKey(pStub, &len); - SyncIndex *pIndex = (SyncIndex *)key; + size_t len; + void *key = taosHashGetKey(pStub, &len); + uint64_t *pSeqNum = (uint64_t *)key; int64_t nowMS = taosGetTimestampMs(); if (nowMS - pStub->createTime > ttl) { - taosArrayPush(delIndexArray, pIndex); + taosArrayPush(delIndexArray, pSeqNum); cnt++; - SSyncRaftEntry *pEntry = NULL; - int32_t code = 0; - if (pSyncNode->pLogStore != NULL) { - code = pSyncNode->pLogStore->syncLogGetEntry(pSyncNode->pLogStore, *pIndex, &pEntry); - if (code == 0 && pEntry != NULL) { - SFsmCbMeta cbMeta = {0}; - cbMeta.index = pEntry->index; - cbMeta.lastConfigIndex = syncNodeGetSnapshotConfigIndex(pSyncNode, cbMeta.index); - cbMeta.isWeak = pEntry->isWeak; - cbMeta.code = TSDB_CODE_SYN_TIMEOUT; - cbMeta.state = pSyncNode->state; - cbMeta.seqNum = pEntry->seqNum; - cbMeta.term = pEntry->term; - cbMeta.currentTerm = pSyncNode->pRaftStore->currentTerm; - cbMeta.flag = 0; + SFsmCbMeta cbMeta = {0}; + cbMeta.index = SYNC_INDEX_INVALID; + cbMeta.lastConfigIndex = SYNC_INDEX_INVALID; + cbMeta.isWeak = false; + cbMeta.code = TSDB_CODE_SYN_TIMEOUT; + cbMeta.state = pSyncNode->state; + cbMeta.seqNum = *pSeqNum; + cbMeta.term = SYNC_TERM_INVALID; + cbMeta.currentTerm = pSyncNode->pRaftStore->currentTerm; + cbMeta.flag = 0; - SRpcMsg rpcMsg = pStub->rpcMsg; - rpcMsg.pCont = rpcMallocCont(pEntry->dataLen); - memcpy(rpcMsg.pCont, pEntry->data, pEntry->dataLen); - pSyncNode->pFsm->FpCommitCb(pSyncNode->pFsm, &rpcMsg, cbMeta); - - syncEntryDestory(pEntry); - } - } + pSyncNode->pFsm->FpCommitCb(pSyncNode->pFsm, &(pStub->rpcMsg), cbMeta); } pStub = 
(SRespStub *)taosHashIterate(pObj->pRespHash, pStub); } int32_t arraySize = taosArrayGetSize(delIndexArray); - sDebug("vgId:%d, resp clean by ttl, cnt:%d, array-size:%d", pSyncNode->vgId, cnt, arraySize); + sDebug("vgId:%d, resp mgr clean by ttl, cnt:%d, array-size:%d", pSyncNode->vgId, cnt, arraySize); for (int32_t i = 0; i < arraySize; ++i) { - SyncIndex *pIndex = taosArrayGet(delIndexArray, i); - taosHashRemove(pObj->pRespHash, pIndex, sizeof(SyncIndex)); + uint64_t *pSeqNum = taosArrayGet(delIndexArray, i); + taosHashRemove(pObj->pRespHash, pSeqNum, sizeof(uint64_t)); + sDebug("vgId:%d, resp mgr clean by ttl, seq:%d", pSyncNode->vgId, *pSeqNum); } taosArrayDestroy(delIndexArray); } diff --git a/source/libs/sync/src/syncTimeout.c b/source/libs/sync/src/syncTimeout.c index 52181a3da8..97de75c108 100644 --- a/source/libs/sync/src/syncTimeout.c +++ b/source/libs/sync/src/syncTimeout.c @@ -16,9 +16,11 @@ #include "syncTimeout.h" #include "syncElection.h" #include "syncReplication.h" +#include "syncRespMgr.h" int32_t syncNodeTimerRoutine(SSyncNode* ths) { syncNodeEventLog(ths, "timer routines ... "); + syncRespClean(ths->pSyncRespMgr); return 0; } From e91ca30b19a21589cc78393350c957623fa56c91 Mon Sep 17 00:00:00 2001 From: Minghao Li Date: Tue, 12 Jul 2022 20:23:31 +0800 Subject: [PATCH 05/56] refactor(sync): add resp ttl clean --- source/libs/sync/src/syncRespMgr.c | 2 ++ source/libs/sync/src/syncTimeout.c | 5 ++++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/source/libs/sync/src/syncRespMgr.c b/source/libs/sync/src/syncRespMgr.c index 2aaa98b299..8e477a5159 100644 --- a/source/libs/sync/src/syncRespMgr.c +++ b/source/libs/sync/src/syncRespMgr.c @@ -146,6 +146,8 @@ void syncRespCleanByTTL(SSyncRespMgr *pObj, int64_t ttl) { cbMeta.currentTerm = pSyncNode->pRaftStore->currentTerm; cbMeta.flag = 0; + pStub->rpcMsg.pCont = NULL; + pStub->rpcMsg.contLen = 0; pSyncNode->pFsm->FpCommitCb(pSyncNode->pFsm, &(pStub->rpcMsg), cbMeta); } diff --git a/source/libs/sync/src/syncTimeout.c b/source/libs/sync/src/syncTimeout.c index 97de75c108..ad5f82900c 100644 --- a/source/libs/sync/src/syncTimeout.c +++ b/source/libs/sync/src/syncTimeout.c @@ -20,7 +20,10 @@ int32_t syncNodeTimerRoutine(SSyncNode* ths) { syncNodeEventLog(ths, "timer routines ... "); - syncRespClean(ths->pSyncRespMgr); + + if (ths->vgId != 1) { + syncRespClean(ths->pSyncRespMgr); + } return 0; } From 8aa09ffee84bd4223c25f278fb7c228312991dcf Mon Sep 17 00:00:00 2001 From: jiacy-jcy Date: Wed, 13 Jul 2022 08:42:10 +0800 Subject: [PATCH 06/56] add sysinfo test case --- tests/system-test/0-others/sysinfo.py | 59 +++++++++++++++++++++++++++ 1 file changed, 59 insertions(+) create mode 100644 tests/system-test/0-others/sysinfo.py diff --git a/tests/system-test/0-others/sysinfo.py b/tests/system-test/0-others/sysinfo.py new file mode 100644 index 0000000000..d74c4f6db9 --- /dev/null +++ b/tests/system-test/0-others/sysinfo.py @@ -0,0 +1,59 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + + +from util.log import * +from util.cases import * +from util.sql import * +import subprocess +from util.common import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + self.dbname = 'db' + def get_database_info(self): + tdSql.query('select database()') + tdSql.checkData(0,0,None) + tdSql.execute(f'create database if not exists {self.dbname}') + tdSql.execute(f'use {self.dbname}') + tdSql.query('select database()') + tdSql.checkData(0,0,self.dbname) + tdSql.execute(f'drop database {self.dbname}') + + def check_version(self): + taos_list = ['server','client'] + for i in taos_list: + tdSql.query(f'select {i}_version()') + version_info = str(subprocess.run('cat ../../source/util/src/version.c |grep "char version"', shell=True,capture_output=True).stdout.decode('utf8')).split('"')[1] + tdSql.checkData(0,0,version_info) + + def get_server_status(self): + tdSql.query('select server_status()') + tdSql.checkData(0,0,1) + tdDnodes.stoptaosd(1) + + tdSql.query('select server_status()') + print(tdSql.queryResult) + def run(self): + self.get_database_info() + self.check_version() + # self.get_server_status() + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file From 589d5b5999dde9ac0aeb1499c8b098fb8a5850b0 Mon Sep 17 00:00:00 2001 From: jiacy-jcy Date: Wed, 13 Jul 2022 09:50:38 +0800 Subject: [PATCH 07/56] update count.py --- tests/system-test/2-query/count.py | 376 ++++++++++++++++------------- 1 file changed, 213 insertions(+), 163 deletions(-) diff --git a/tests/system-test/2-query/count.py b/tests/system-test/2-query/count.py index c4c1d19898..70966e387a 100644 --- a/tests/system-test/2-query/count.py +++ b/tests/system-test/2-query/count.py @@ -1,188 +1,238 @@ from util.log import * from util.sql import * from util.cases import * - +from util.sqlset import * class TDTestCase: def init(self, conn, logSql): tdLog.debug("start to execute %s" % __file__) - tdSql.init(conn.cursor()) - + tdSql.init(conn.cursor(),logSql) + self.setsql = TDSetSql() self.rowNum = 10 self.ts = 1537146000000 - def run(self): + self.ntbname = 'ntb' + self.stbname = 'stb' + self.column_dict = { + 'ts':'timestamp', + 'c1':'int', + 'c2':'float', + 'c3':'double', + 'c4':'timestamp' + } + self.tag_dict = { + 't0':'int' + } + # The number of tag_values should be same as tbnum + self.tbnum = 2 + self.tag_values = [ + f'10', + f'100' + ] + self.values_list = [ + f'now,1,2,3,4,5,6,7,8,9.9,10.1,true,"abcd","涛思数据"' + ] + # def count_query_stb(self,stbname,tbnum): + # tdSql.query(f'select count(tbname) from {stbname}') + # print(tdSql.queryResult) + # tdSql.checkEqual(tdSql.queryResult[0],tbnum) + + def check_ntb(self): tdSql.prepare() + tdSql.execute(self.setsql.set_create_normaltable_sql(self.ntbname,self.column_dict)) - tdSql.execute('''create table stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, - col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''') - 
tdSql.execute("create table stb_1 using stb tags('beijing')") - tdSql.execute("create table stb_2 using stb tags('shanghai')") + def check_stb(self): + tdSql.prepare() + tdSql.execute(self.setsql.set_create_stable_sql(self.stbname,self.column_dict,self.tag_dict)) + for i in range(self.tbnum): + tdSql.execute(f'create table {self.stbname}_{i} using {self.stbname} tags({self.tag_values[i]})') + tdSql.query(f'select count(tbname) from {self.stbname}') + tdSql.checkRows(0) + tdSql.execute('flush database db') + tdSql.query(f'select count(tbname) from {self.stbname}') + tdSql.checkRows(0) + for i in range(self.tbnum): + for j in self.values_list: + tdSql.execute(f'insert into {self.stbname}_{i} values({j})') + tdSql.query(f'select count(tbname) from {self.stbname}') + tdSql.checkRows(0) + tdSql.execute('flush database db') + tdSql.query(f'select count(tbname) from {self.stbname}') + tdSql.checkRows(0) + + def run(self): + self.check_stb() + # tdSql.prepare() - tdSql.execute('''create table ntb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, - col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned)''') + # tdSql.execute('''create table stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, + # col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''') + # tdSql.execute("create table stb_1 using stb tags('beijing')") + # tdSql.execute("create table stb_2 using stb tags('shanghai')") - for i in range(self.rowNum): - tdSql.execute("insert into stb_1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" - % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) - tdSql.execute("insert into stb_2 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" - % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) + # tdSql.execute('''create table ntb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, + # col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned)''') - for i in range(self.rowNum): - tdSql.execute("insert into ntb values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" - % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) + # for i in range(self.rowNum): + # tdSql.execute("insert into stb_1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + # % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) + # tdSql.execute("insert into stb_2 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + # % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) - tdSql.query("select count(*) from stb") - tdSql.checkData(0,0,20) - tdSql.query("select count(*) from db.stb") - tdSql.checkData(0,0,20) - tdSql.query("select count(ts) from stb") - tdSql.checkData(0,0,20) - tdSql.query("select count(ts) from db.stb") - tdSql.checkData(0,0,20) - tdSql.query("select count(col1) from stb") - tdSql.checkData(0,0,20) - tdSql.query("select 
count(col1) from db.stb") - tdSql.checkData(0,0,20) - tdSql.query("select count(col2) from stb") - tdSql.checkData(0,0,20) - tdSql.query("select count(col2) from db.stb") - tdSql.checkData(0,0,20) - tdSql.query("select count(col3) from stb") - tdSql.checkData(0,0,20) - tdSql.query("select count(col3) from db.stb") - tdSql.checkData(0,0,20) - tdSql.query("select count(col4) from stb") - tdSql.checkData(0,0,20) - tdSql.query("select count(col4) from db.stb") - tdSql.checkData(0,0,20) - tdSql.query("select count(col5) from stb") - tdSql.checkData(0,0,20) - tdSql.query("select count(col5) from db.stb") - tdSql.checkData(0,0,20) - tdSql.query("select count(col6) from stb") - tdSql.checkData(0,0,20) - tdSql.query("select count(col6) from db.stb") - tdSql.checkData(0,0,20) - tdSql.query("select count(col7) from stb") - tdSql.checkData(0,0,20) - tdSql.query("select count(col7) from db.stb") - tdSql.checkData(0,0,20) - tdSql.query("select count(col8) from stb") - tdSql.checkData(0,0,20) - tdSql.query("select count(col8) from db.stb") - tdSql.checkData(0,0,20) - tdSql.query("select count(col9) from stb") - tdSql.checkData(0,0,20) - tdSql.query("select count(col9) from db.stb") - tdSql.checkData(0,0,20) - tdSql.query("select count(col11) from stb") - tdSql.checkData(0,0,20) - tdSql.query("select count(col11) from db.stb") - tdSql.checkData(0,0,20) - tdSql.query("select count(col12) from stb") - tdSql.checkData(0,0,20) - tdSql.query("select count(col12) from db.stb") - tdSql.checkData(0,0,20) - tdSql.query("select count(col13) from stb") - tdSql.checkData(0,0,20) - tdSql.query("select count(col13) from db.stb") - tdSql.checkData(0,0,20) - tdSql.query("select count(col14) from stb") - tdSql.checkData(0,0,20) - tdSql.query("select count(col14) from db.stb") - tdSql.checkData(0,0,20) + # for i in range(self.rowNum): + # tdSql.execute("insert into ntb values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + # % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) + + # tdSql.query("select count(*) from stb") + # tdSql.checkData(0,0,20) + # tdSql.query("select count(*) from db.stb") + # tdSql.checkData(0,0,20) + # tdSql.query("select count(ts) from stb") + # tdSql.checkData(0,0,20) + # tdSql.query("select count(ts) from db.stb") + # tdSql.checkData(0,0,20) + # tdSql.query("select count(col1) from stb") + # tdSql.checkData(0,0,20) + # tdSql.query("select count(col1) from db.stb") + # tdSql.checkData(0,0,20) + # tdSql.query("select count(col2) from stb") + # tdSql.checkData(0,0,20) + # tdSql.query("select count(col2) from db.stb") + # tdSql.checkData(0,0,20) + # tdSql.query("select count(col3) from stb") + # tdSql.checkData(0,0,20) + # tdSql.query("select count(col3) from db.stb") + # tdSql.checkData(0,0,20) + # tdSql.query("select count(col4) from stb") + # tdSql.checkData(0,0,20) + # tdSql.query("select count(col4) from db.stb") + # tdSql.checkData(0,0,20) + # tdSql.query("select count(col5) from stb") + # tdSql.checkData(0,0,20) + # tdSql.query("select count(col5) from db.stb") + # tdSql.checkData(0,0,20) + # tdSql.query("select count(col6) from stb") + # tdSql.checkData(0,0,20) + # tdSql.query("select count(col6) from db.stb") + # tdSql.checkData(0,0,20) + # tdSql.query("select count(col7) from stb") + # tdSql.checkData(0,0,20) + # tdSql.query("select count(col7) from db.stb") + # tdSql.checkData(0,0,20) + # tdSql.query("select count(col8) from stb") + # tdSql.checkData(0,0,20) + # tdSql.query("select count(col8) from 
db.stb") + # tdSql.checkData(0,0,20) + # tdSql.query("select count(col9) from stb") + # tdSql.checkData(0,0,20) + # tdSql.query("select count(col9) from db.stb") + # tdSql.checkData(0,0,20) + # tdSql.query("select count(col11) from stb") + # tdSql.checkData(0,0,20) + # tdSql.query("select count(col11) from db.stb") + # tdSql.checkData(0,0,20) + # tdSql.query("select count(col12) from stb") + # tdSql.checkData(0,0,20) + # tdSql.query("select count(col12) from db.stb") + # tdSql.checkData(0,0,20) + # tdSql.query("select count(col13) from stb") + # tdSql.checkData(0,0,20) + # tdSql.query("select count(col13) from db.stb") + # tdSql.checkData(0,0,20) + # tdSql.query("select count(col14) from stb") + # tdSql.checkData(0,0,20) + # tdSql.query("select count(col14) from db.stb") + # tdSql.checkData(0,0,20) - tdSql.query("select count(ts) from db.stb_1") - tdSql.checkData(0,0,10) - tdSql.query("select count(ts) from db.stb_1") - tdSql.checkData(0,0,10) - tdSql.query("select count(col1) from stb_1") - tdSql.checkData(0,0,10) - tdSql.query("select count(col1) from db.stb_1") - tdSql.checkData(0,0,10) - tdSql.query("select count(col2) from stb_1") - tdSql.checkData(0,0,10) - tdSql.query("select count(col2) from db.stb_1") - tdSql.checkData(0,0,10) - tdSql.query("select count(col3) from stb_1") - tdSql.checkData(0,0,10) - tdSql.query("select count(col3) from db.stb_1") - tdSql.checkData(0,0,10) - tdSql.query("select count(col4) from stb_1") - tdSql.checkData(0,0,10) - tdSql.query("select count(col4) from db.stb_1") - tdSql.checkData(0,0,10) - tdSql.query("select count(col5) from stb_1") - tdSql.checkData(0,0,10) - tdSql.query("select count(col5) from db.stb_1") - tdSql.checkData(0,0,10) - tdSql.query("select count(col6) from stb_1") - tdSql.checkData(0,0,10) - tdSql.query("select count(col6) from db.stb_1") - tdSql.checkData(0,0,10) - tdSql.query("select count(col7) from stb_1") - tdSql.checkData(0,0,10) - tdSql.query("select count(col7) from db.stb_1") - tdSql.checkData(0,0,10) - tdSql.query("select count(col8) from stb_1") - tdSql.checkData(0,0,10) - tdSql.query("select count(col8) from db.stb_1") - tdSql.checkData(0,0,10) - tdSql.query("select count(col9) from stb_1") - tdSql.checkData(0,0,10) - tdSql.query("select count(col9) from db.stb_1") - tdSql.checkData(0,0,10) - tdSql.query("select count(col11) from stb_1") - tdSql.checkData(0,0,10) - tdSql.query("select count(col11) from db.stb_1") - tdSql.checkData(0,0,10) - tdSql.query("select count(col12) from stb_1") - tdSql.checkData(0,0,10) - tdSql.query("select count(col12) from db.stb_1") - tdSql.checkData(0,0,10) - tdSql.query("select count(col13) from stb_1") - tdSql.checkData(0,0,10) - tdSql.query("select count(col13) from db.stb_1") - tdSql.checkData(0,0,10) - tdSql.query("select count(col14) from stb_1") - tdSql.checkData(0,0,10) - tdSql.query("select count(col14) from db.stb_1") - tdSql.checkData(0,0,10) - tdSql.query("select count(col1) from stb_1 group by col7") - tdSql.checkRows(2) + # tdSql.query("select count(ts) from db.stb_1") + # tdSql.checkData(0,0,10) + # tdSql.query("select count(ts) from db.stb_1") + # tdSql.checkData(0,0,10) + # tdSql.query("select count(col1) from stb_1") + # tdSql.checkData(0,0,10) + # tdSql.query("select count(col1) from db.stb_1") + # tdSql.checkData(0,0,10) + # tdSql.query("select count(col2) from stb_1") + # tdSql.checkData(0,0,10) + # tdSql.query("select count(col2) from db.stb_1") + # tdSql.checkData(0,0,10) + # tdSql.query("select count(col3) from stb_1") + # tdSql.checkData(0,0,10) + # 
tdSql.query("select count(col3) from db.stb_1") + # tdSql.checkData(0,0,10) + # tdSql.query("select count(col4) from stb_1") + # tdSql.checkData(0,0,10) + # tdSql.query("select count(col4) from db.stb_1") + # tdSql.checkData(0,0,10) + # tdSql.query("select count(col5) from stb_1") + # tdSql.checkData(0,0,10) + # tdSql.query("select count(col5) from db.stb_1") + # tdSql.checkData(0,0,10) + # tdSql.query("select count(col6) from stb_1") + # tdSql.checkData(0,0,10) + # tdSql.query("select count(col6) from db.stb_1") + # tdSql.checkData(0,0,10) + # tdSql.query("select count(col7) from stb_1") + # tdSql.checkData(0,0,10) + # tdSql.query("select count(col7) from db.stb_1") + # tdSql.checkData(0,0,10) + # tdSql.query("select count(col8) from stb_1") + # tdSql.checkData(0,0,10) + # tdSql.query("select count(col8) from db.stb_1") + # tdSql.checkData(0,0,10) + # tdSql.query("select count(col9) from stb_1") + # tdSql.checkData(0,0,10) + # tdSql.query("select count(col9) from db.stb_1") + # tdSql.checkData(0,0,10) + # tdSql.query("select count(col11) from stb_1") + # tdSql.checkData(0,0,10) + # tdSql.query("select count(col11) from db.stb_1") + # tdSql.checkData(0,0,10) + # tdSql.query("select count(col12) from stb_1") + # tdSql.checkData(0,0,10) + # tdSql.query("select count(col12) from db.stb_1") + # tdSql.checkData(0,0,10) + # tdSql.query("select count(col13) from stb_1") + # tdSql.checkData(0,0,10) + # tdSql.query("select count(col13) from db.stb_1") + # tdSql.checkData(0,0,10) + # tdSql.query("select count(col14) from stb_1") + # tdSql.checkData(0,0,10) + # tdSql.query("select count(col14) from db.stb_1") + # tdSql.checkData(0,0,10) + # tdSql.query("select count(col1) from stb_1 group by col7") + # tdSql.checkRows(2) - tdSql.execute("insert into stb_1 values(now,null,null,null,null,null,null,null,null,null,null,null,null,null)") - tdSql.query("select count(col1) from stb_1") - tdSql.checkData(0,0,10) - tdSql.query("select count(col1),count(ts) from stb_1") - tdSql.checkData(0,0,10) - tdSql.checkData(0,1,11) + # tdSql.execute("insert into stb_1 values(now,null,null,null,null,null,null,null,null,null,null,null,null,null)") + # tdSql.query("select count(col1) from stb_1") + # tdSql.checkData(0,0,10) + # tdSql.query("select count(col1),count(ts) from stb_1") + # tdSql.checkData(0,0,10) + # tdSql.checkData(0,1,11) - tdSql.query("select count(col1) from db.stb_1") - tdSql.checkData(0,0,10) - tdSql.query("select count(col1),count(ts) from db.stb_1") - tdSql.checkData(0,0,10) - tdSql.checkData(0,1,11) + # tdSql.query("select count(col1) from db.stb_1") + # tdSql.checkData(0,0,10) + # tdSql.query("select count(col1),count(ts) from db.stb_1") + # tdSql.checkData(0,0,10) + # tdSql.checkData(0,1,11) - tdSql.query("select count(col1) from stb") - tdSql.checkData(0,0,20) - tdSql.query("select count(col1),count(ts) from stb") - tdSql.checkData(0,0,20) - tdSql.checkData(0,1,21) + # tdSql.query("select count(col1) from stb") + # tdSql.checkData(0,0,20) + # tdSql.query("select count(col1),count(ts) from stb") + # tdSql.checkData(0,0,20) + # tdSql.checkData(0,1,21) - tdSql.query("select count(col1) from db.stb") - tdSql.checkData(0,0,20) - tdSql.query("select count(col1),count(ts) from db.stb") - tdSql.checkData(0,0,20) - tdSql.checkData(0,1,21) - tdSql.query("select count(col1) from stb_1 group by col7") - tdSql.checkRows(3) - tdSql.query("select count(col1) from stb_2 group by col7") - tdSql.checkRows(2) - tdSql.query("select count(col1) from stb group by col7") - tdSql.checkRows(3) + # tdSql.query("select 
count(col1) from db.stb") + # tdSql.checkData(0,0,20) + # tdSql.query("select count(col1),count(ts) from db.stb") + # tdSql.checkData(0,0,20) + # tdSql.checkData(0,1,21) + # tdSql.query("select count(col1) from stb_1 group by col7") + # tdSql.checkRows(3) + # tdSql.query("select count(col1) from stb_2 group by col7") + # tdSql.checkRows(2) + # tdSql.query("select count(col1) from stb group by col7") + # tdSql.checkRows(3) From 15ea5e84ed195af0094f04ce1be06fa603bdd0f1 Mon Sep 17 00:00:00 2001 From: jiacy-jcy Date: Wed, 13 Jul 2022 11:17:35 +0800 Subject: [PATCH 08/56] update --- tests/pytest/util/sqlset.py | 2 +- tests/system-test/2-query/count.py | 262 ++++++++--------------------- 2 files changed, 71 insertions(+), 193 deletions(-) diff --git a/tests/pytest/util/sqlset.py b/tests/pytest/util/sqlset.py index bccd49a209..3c1b6cd7f7 100644 --- a/tests/pytest/util/sqlset.py +++ b/tests/pytest/util/sqlset.py @@ -41,7 +41,7 @@ class TDSetSql: create_stb_sql = f'create table {stbname} ({column_sql[:-1]}) tags({tag_sql[:-1]})' return create_stb_sql - def set_insertsql(self,column_dict,tbname,binary_str,nchar_str): + def set_insertsql(self,column_dict,tbname,binary_str=None,nchar_str=None): sql = '' for k, v in column_dict.items(): if v.lower() == 'timestamp' or v.lower() == 'tinyint' or v.lower() == 'smallint' or v.lower() == 'int' or v.lower() == 'bigint' or \ diff --git a/tests/system-test/2-query/count.py b/tests/system-test/2-query/count.py index 70966e387a..fef5a38e8f 100644 --- a/tests/system-test/2-query/count.py +++ b/tests/system-test/2-query/count.py @@ -28,215 +28,93 @@ class TDTestCase: f'10', f'100' ] - self.values_list = [ - f'now,1,2,3,4,5,6,7,8,9.9,10.1,true,"abcd","涛思数据"' - ] - # def count_query_stb(self,stbname,tbnum): - # tdSql.query(f'select count(tbname) from {stbname}') - # print(tdSql.queryResult) - # tdSql.checkEqual(tdSql.queryResult[0],tbnum) - + def query_stb(self,k,stbname,tbnum,rownum): + tdSql.query(f'select count({k}) from {stbname}') + tdSql.checkEqual(tdSql.queryResult[0][0],tbnum*rownum) + tdSql.query(f'select count({k}) from {stbname} where ts <={self.ts+self.rowNum-1}') + tdSql.checkEqual(tdSql.queryResult[0][0],tbnum*rownum) + tdSql.query(f'select count({k}) from {stbname} where ts <={self.ts+self.rowNum-2}') + tdSql.checkEqual(tdSql.queryResult[0][0],tbnum*(rownum-1)) + def query_ctb(self,k,i,stbname,rownum): + tdSql.query(f'select count({k}) from {stbname}_{i}') + tdSql.checkEqual(tdSql.queryResult[0][0],rownum) + tdSql.query(f'select count({k}) from {stbname}_{i} where ts <={self.ts+self.rowNum-1}') + tdSql.checkEqual(tdSql.queryResult[0][0],rownum) + tdSql.query(f'select count({k}) from {stbname}_{i} where ts <={self.ts+self.rowNum-2}') + tdSql.checkEqual(tdSql.queryResult[0][0],rownum-1) + def query_ntb(self,k,ntbname,rownum): + tdSql.query(f'select count({k}) from {ntbname}') + tdSql.checkEqual(tdSql.queryResult[0][0],rownum) + tdSql.query(f'select count({k}) from {ntbname} where ts <={self.ts+self.rowNum-1}') + tdSql.checkEqual(tdSql.queryResult[0][0],rownum) + tdSql.query(f'select count({k}) from {ntbname} where ts <={self.ts+self.rowNum-2}') + tdSql.checkEqual(tdSql.queryResult[0][0],rownum-1) + def count_query_stb(self,column_dict,tag_dict,stbname,tbnum,rownum): + tdSql.query(f'select count(tbname) from {stbname}') + tdSql.checkEqual(tdSql.queryResult[0][0],tbnum*rownum) + tdSql.query(f'SELECT count(*) from (select distinct tbname from {stbname})') + tdSql.checkEqual(tdSql.queryResult[0][0],tbnum) + for k in column_dict.keys(): + 
self.query_stb(k,stbname,tbnum,rownum) + for k in tag_dict.keys(): + self.query_stb(k,stbname,tbnum,rownum) + def count_query_ctb(self,column_dict,tag_dict,stbname,tbnum,rownum): + for i in range(tbnum): + tdSql.query(f'select count(tbname) from {stbname}_{i}') + tdSql.checkEqual(tdSql.queryResult[0][0],rownum) + for k in column_dict.keys(): + self.query_ctb(k,i,stbname,rownum) + for k in tag_dict.keys(): + self.query_ctb(k,i,stbname,rownum) + def count_query_ntb(self,column_dict,ntbname,rownum): + tdSql.query(f'select count(tbname) from {ntbname}') + tdSql.checkEqual(tdSql.queryResult[0][0],rownum) + for k in column_dict.keys(): + self.query_ntb(k,ntbname,rownum) + def insert_data(self,column_dict,tbname,row_num): + insert_sql = self.setsql.set_insertsql(column_dict,tbname) + for i in range(row_num): + insert_list = [] + self.setsql.insert_values(column_dict,i,insert_sql,insert_list,self.ts) def check_ntb(self): tdSql.prepare() tdSql.execute(self.setsql.set_create_normaltable_sql(self.ntbname,self.column_dict)) - + tdSql.query(f'select count(tbname) from {self.ntbname}') + tdSql.checkRows(0) + tdSql.execute('flush database db') + tdSql.query(f'select count(tbname) from {self.ntbname}') + tdSql.checkRows(0) + self.insert_data(self.column_dict,self.ntbname,self.rowNum) + self.count_query_ntb(self.column_dict,self.ntbname,self.rowNum) + tdSql.execute('flush database db') + self.count_query_ntb(self.column_dict,self.ntbname,self.rowNum) + tdSql.execute('drop database db') def check_stb(self): tdSql.prepare() tdSql.execute(self.setsql.set_create_stable_sql(self.stbname,self.column_dict,self.tag_dict)) for i in range(self.tbnum): tdSql.execute(f'create table {self.stbname}_{i} using {self.stbname} tags({self.tag_values[i]})') + #!TODO + # tdSql.query(f'SELECT count(*) from (select distinct tbname from {self.stbname})') + # tdSql.checkEqual(tdSql.queryResult[0][0],self.tbnum) tdSql.query(f'select count(tbname) from {self.stbname}') tdSql.checkRows(0) tdSql.execute('flush database db') tdSql.query(f'select count(tbname) from {self.stbname}') tdSql.checkRows(0) + #!TODO + # tdSql.query(f'SELECT count(*) from (select distinct tbname from {self.stbname})') + # tdSql.checkEqual(tdSql.queryResult[0][0],self.tbnum) for i in range(self.tbnum): - for j in self.values_list: - tdSql.execute(f'insert into {self.stbname}_{i} values({j})') - tdSql.query(f'select count(tbname) from {self.stbname}') - tdSql.checkRows(0) + self.insert_data(self.column_dict,f'{self.stbname}_{i}',self.rowNum) + self.count_query_stb(self.column_dict,self.tag_dict,self.stbname,self.tbnum,self.rowNum) tdSql.execute('flush database db') - tdSql.query(f'select count(tbname) from {self.stbname}') - tdSql.checkRows(0) - + self.count_query_stb(self.column_dict,self.tag_dict,self.stbname,self.tbnum,self.rowNum) + self.count_query_ctb(self.column_dict,self.tag_dict,self.stbname,self.tbnum,self.rowNum) + tdSql.execute('drop database db') def run(self): self.check_stb() - # tdSql.prepare() - - # tdSql.execute('''create table stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, - # col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''') - # tdSql.execute("create table stb_1 using stb tags('beijing')") - # tdSql.execute("create table stb_2 using stb tags('shanghai')") - - # tdSql.execute('''create table ntb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, - # 
col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned)''') - - # for i in range(self.rowNum): - # tdSql.execute("insert into stb_1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" - # % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) - # tdSql.execute("insert into stb_2 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" - # % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) - - # for i in range(self.rowNum): - # tdSql.execute("insert into ntb values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" - # % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) - - # tdSql.query("select count(*) from stb") - # tdSql.checkData(0,0,20) - # tdSql.query("select count(*) from db.stb") - # tdSql.checkData(0,0,20) - # tdSql.query("select count(ts) from stb") - # tdSql.checkData(0,0,20) - # tdSql.query("select count(ts) from db.stb") - # tdSql.checkData(0,0,20) - # tdSql.query("select count(col1) from stb") - # tdSql.checkData(0,0,20) - # tdSql.query("select count(col1) from db.stb") - # tdSql.checkData(0,0,20) - # tdSql.query("select count(col2) from stb") - # tdSql.checkData(0,0,20) - # tdSql.query("select count(col2) from db.stb") - # tdSql.checkData(0,0,20) - # tdSql.query("select count(col3) from stb") - # tdSql.checkData(0,0,20) - # tdSql.query("select count(col3) from db.stb") - # tdSql.checkData(0,0,20) - # tdSql.query("select count(col4) from stb") - # tdSql.checkData(0,0,20) - # tdSql.query("select count(col4) from db.stb") - # tdSql.checkData(0,0,20) - # tdSql.query("select count(col5) from stb") - # tdSql.checkData(0,0,20) - # tdSql.query("select count(col5) from db.stb") - # tdSql.checkData(0,0,20) - # tdSql.query("select count(col6) from stb") - # tdSql.checkData(0,0,20) - # tdSql.query("select count(col6) from db.stb") - # tdSql.checkData(0,0,20) - # tdSql.query("select count(col7) from stb") - # tdSql.checkData(0,0,20) - # tdSql.query("select count(col7) from db.stb") - # tdSql.checkData(0,0,20) - # tdSql.query("select count(col8) from stb") - # tdSql.checkData(0,0,20) - # tdSql.query("select count(col8) from db.stb") - # tdSql.checkData(0,0,20) - # tdSql.query("select count(col9) from stb") - # tdSql.checkData(0,0,20) - # tdSql.query("select count(col9) from db.stb") - # tdSql.checkData(0,0,20) - # tdSql.query("select count(col11) from stb") - # tdSql.checkData(0,0,20) - # tdSql.query("select count(col11) from db.stb") - # tdSql.checkData(0,0,20) - # tdSql.query("select count(col12) from stb") - # tdSql.checkData(0,0,20) - # tdSql.query("select count(col12) from db.stb") - # tdSql.checkData(0,0,20) - # tdSql.query("select count(col13) from stb") - # tdSql.checkData(0,0,20) - # tdSql.query("select count(col13) from db.stb") - # tdSql.checkData(0,0,20) - # tdSql.query("select count(col14) from stb") - # tdSql.checkData(0,0,20) - # tdSql.query("select count(col14) from db.stb") - # tdSql.checkData(0,0,20) - - - - # tdSql.query("select count(ts) from db.stb_1") - # tdSql.checkData(0,0,10) - # tdSql.query("select count(ts) from db.stb_1") - # tdSql.checkData(0,0,10) - # tdSql.query("select count(col1) from stb_1") - # tdSql.checkData(0,0,10) - # tdSql.query("select count(col1) from db.stb_1") - # tdSql.checkData(0,0,10) - # tdSql.query("select 
count(col2) from stb_1") - # tdSql.checkData(0,0,10) - # tdSql.query("select count(col2) from db.stb_1") - # tdSql.checkData(0,0,10) - # tdSql.query("select count(col3) from stb_1") - # tdSql.checkData(0,0,10) - # tdSql.query("select count(col3) from db.stb_1") - # tdSql.checkData(0,0,10) - # tdSql.query("select count(col4) from stb_1") - # tdSql.checkData(0,0,10) - # tdSql.query("select count(col4) from db.stb_1") - # tdSql.checkData(0,0,10) - # tdSql.query("select count(col5) from stb_1") - # tdSql.checkData(0,0,10) - # tdSql.query("select count(col5) from db.stb_1") - # tdSql.checkData(0,0,10) - # tdSql.query("select count(col6) from stb_1") - # tdSql.checkData(0,0,10) - # tdSql.query("select count(col6) from db.stb_1") - # tdSql.checkData(0,0,10) - # tdSql.query("select count(col7) from stb_1") - # tdSql.checkData(0,0,10) - # tdSql.query("select count(col7) from db.stb_1") - # tdSql.checkData(0,0,10) - # tdSql.query("select count(col8) from stb_1") - # tdSql.checkData(0,0,10) - # tdSql.query("select count(col8) from db.stb_1") - # tdSql.checkData(0,0,10) - # tdSql.query("select count(col9) from stb_1") - # tdSql.checkData(0,0,10) - # tdSql.query("select count(col9) from db.stb_1") - # tdSql.checkData(0,0,10) - # tdSql.query("select count(col11) from stb_1") - # tdSql.checkData(0,0,10) - # tdSql.query("select count(col11) from db.stb_1") - # tdSql.checkData(0,0,10) - # tdSql.query("select count(col12) from stb_1") - # tdSql.checkData(0,0,10) - # tdSql.query("select count(col12) from db.stb_1") - # tdSql.checkData(0,0,10) - # tdSql.query("select count(col13) from stb_1") - # tdSql.checkData(0,0,10) - # tdSql.query("select count(col13) from db.stb_1") - # tdSql.checkData(0,0,10) - # tdSql.query("select count(col14) from stb_1") - # tdSql.checkData(0,0,10) - # tdSql.query("select count(col14) from db.stb_1") - # tdSql.checkData(0,0,10) - # tdSql.query("select count(col1) from stb_1 group by col7") - # tdSql.checkRows(2) - - # tdSql.execute("insert into stb_1 values(now,null,null,null,null,null,null,null,null,null,null,null,null,null)") - # tdSql.query("select count(col1) from stb_1") - # tdSql.checkData(0,0,10) - # tdSql.query("select count(col1),count(ts) from stb_1") - # tdSql.checkData(0,0,10) - # tdSql.checkData(0,1,11) - - # tdSql.query("select count(col1) from db.stb_1") - # tdSql.checkData(0,0,10) - # tdSql.query("select count(col1),count(ts) from db.stb_1") - # tdSql.checkData(0,0,10) - # tdSql.checkData(0,1,11) - - # tdSql.query("select count(col1) from stb") - # tdSql.checkData(0,0,20) - # tdSql.query("select count(col1),count(ts) from stb") - # tdSql.checkData(0,0,20) - # tdSql.checkData(0,1,21) - - # tdSql.query("select count(col1) from db.stb") - # tdSql.checkData(0,0,20) - # tdSql.query("select count(col1),count(ts) from db.stb") - # tdSql.checkData(0,0,20) - # tdSql.checkData(0,1,21) - # tdSql.query("select count(col1) from stb_1 group by col7") - # tdSql.checkRows(3) - # tdSql.query("select count(col1) from stb_2 group by col7") - # tdSql.checkRows(2) - # tdSql.query("select count(col1) from stb group by col7") - # tdSql.checkRows(3) - - - - + self.check_ntb() def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) From 62041beb97221a21dd8d3578ab40bf90634608e0 Mon Sep 17 00:00:00 2001 From: jiacy-jcy Date: Wed, 13 Jul 2022 11:20:15 +0800 Subject: [PATCH 09/56] add test case info ci --- tests/system-test/fulltest.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh index 
269f83a139..9d153caa14 100755 --- a/tests/system-test/fulltest.sh +++ b/tests/system-test/fulltest.sh @@ -11,7 +11,7 @@ python3 ./test.py -f 0-others/udfTest.py python3 ./test.py -f 0-others/udf_create.py python3 ./test.py -f 0-others/udf_restart_taosd.py python3 ./test.py -f 0-others/cachelast.py - +python3 ./test.py -f 0-others/sysinfo.py python3 ./test.py -f 0-others/user_control.py python3 ./test.py -f 0-others/fsync.py From 2ddd1ba7d3685095e6c15390823b19c4b953451c Mon Sep 17 00:00:00 2001 From: jiacy-jcy Date: Wed, 13 Jul 2022 11:31:20 +0800 Subject: [PATCH 10/56] update --- tests/system-test/2-query/count.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/system-test/2-query/count.py b/tests/system-test/2-query/count.py index fef5a38e8f..c83ff43c51 100644 --- a/tests/system-test/2-query/count.py +++ b/tests/system-test/2-query/count.py @@ -108,6 +108,7 @@ class TDTestCase: for i in range(self.tbnum): self.insert_data(self.column_dict,f'{self.stbname}_{i}',self.rowNum) self.count_query_stb(self.column_dict,self.tag_dict,self.stbname,self.tbnum,self.rowNum) + self.count_query_ctb(self.column_dict,self.tag_dict,self.stbname,self.tbnum,self.rowNum) tdSql.execute('flush database db') self.count_query_stb(self.column_dict,self.tag_dict,self.stbname,self.tbnum,self.rowNum) self.count_query_ctb(self.column_dict,self.tag_dict,self.stbname,self.tbnum,self.rowNum) From 7891ff0f03e2e7cecd9d1615e53676d844853866 Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Wed, 13 Jul 2022 11:31:28 +0800 Subject: [PATCH 11/56] fix: fix stop query issue --- source/client/src/clientEnv.c | 2 +- source/client/src/clientImpl.c | 2 +- source/util/src/tlockfree.c | 2 +- source/util/src/tsched.c | 18 +++++----- tests/script/api/stopquery.c | 63 ++++++++++++++++++++++++++++++---- 5 files changed, 70 insertions(+), 17 deletions(-) diff --git a/source/client/src/clientEnv.c b/source/client/src/clientEnv.c index 6805e4d501..2173df5ead 100644 --- a/source/client/src/clientEnv.c +++ b/source/client/src/clientEnv.c @@ -42,7 +42,7 @@ volatile int32_t tscInitRes = 0; void initTscQhandle() { // init handle - tscQhandle = taosInitScheduler(4096, 5, "tsc"); + tscQhandle = taosInitScheduler(4096, 5, "tscQ"); } void cleanupTscQhandle() { diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index 25ba63fd34..df1268858a 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -153,7 +153,7 @@ int32_t buildRequest(uint64_t connId, const char* sql, int sqlLen, void* param, *pRequest = createRequest(connId, TSDB_SQL_SELECT); if (*pRequest == NULL) { tscError("failed to malloc sqlObj, %s", sql); - return TSDB_CODE_TSC_OUT_OF_MEMORY; + return terrno; } (*pRequest)->sqlstr = taosMemoryMalloc(sqlLen + 1); diff --git a/source/util/src/tlockfree.c b/source/util/src/tlockfree.c index 69ab6c1a52..6f7b6f6901 100644 --- a/source/util/src/tlockfree.c +++ b/source/util/src/tlockfree.c @@ -44,7 +44,7 @@ void taosWLockLatch(SRWLatch *pLatch) { nLoops = 0; while (1) { oLatch = atomic_load_32(pLatch); - if (0 == oLatch) break; + if (oLatch == TD_RWLATCH_WRITE_FLAG) break; nLoops++; if (nLoops > 1000) { sched_yield(); diff --git a/source/util/src/tsched.c b/source/util/src/tsched.c index 691a0d34d4..9abce966f5 100644 --- a/source/util/src/tsched.c +++ b/source/util/src/tsched.c @@ -129,7 +129,7 @@ void *taosProcessSchedQueue(void *scheduler) { while (1) { if ((ret = tsem_wait(&pSched->fullSem)) != 0) { uFatal("wait %s fullSem failed(%s)", pSched->label, strerror(errno)); - exit(ret); + ASSERT(0); 
} if (pSched->stop) { break; @@ -137,7 +137,7 @@ void *taosProcessSchedQueue(void *scheduler) { if ((ret = taosThreadMutexLock(&pSched->queueMutex)) != 0) { uFatal("lock %s queueMutex failed(%s)", pSched->label, strerror(errno)); - exit(ret); + ASSERT(0); } msg = pSched->queue[pSched->fullSlot]; @@ -146,12 +146,12 @@ void *taosProcessSchedQueue(void *scheduler) { if ((ret = taosThreadMutexUnlock(&pSched->queueMutex)) != 0) { uFatal("unlock %s queueMutex failed(%s)", pSched->label, strerror(errno)); - exit(ret); + ASSERT(0); } if ((ret = tsem_post(&pSched->emptySem)) != 0) { uFatal("post %s emptySem failed(%s)", pSched->label, strerror(errno)); - exit(ret); + ASSERT(0); } if (msg.fp) @@ -174,12 +174,12 @@ void taosScheduleTask(void *queueScheduler, SSchedMsg *pMsg) { if ((ret = tsem_wait(&pSched->emptySem)) != 0) { uFatal("wait %s emptySem failed(%s)", pSched->label, strerror(errno)); - exit(ret); + ASSERT(0); } if ((ret = taosThreadMutexLock(&pSched->queueMutex)) != 0) { uFatal("lock %s queueMutex failed(%s)", pSched->label, strerror(errno)); - exit(ret); + ASSERT(0); } pSched->queue[pSched->emptySlot] = *pMsg; @@ -187,12 +187,12 @@ void taosScheduleTask(void *queueScheduler, SSchedMsg *pMsg) { if ((ret = taosThreadMutexUnlock(&pSched->queueMutex)) != 0) { uFatal("unlock %s queueMutex failed(%s)", pSched->label, strerror(errno)); - exit(ret); + ASSERT(0); } if ((ret = tsem_post(&pSched->fullSem)) != 0) { uFatal("post %s fullSem failed(%s)", pSched->label, strerror(errno)); - exit(ret); + ASSERT(0); } } @@ -200,6 +200,8 @@ void taosCleanUpScheduler(void *param) { SSchedQueue *pSched = (SSchedQueue *)param; if (pSched == NULL) return; + uDebug("start to cleanup %s schedQsueue", pSched->label); + pSched->stop = true; for (int32_t i = 0; i < pSched->numOfThreads; ++i) { if (taosCheckPthreadValid(pSched->qthread[i])) { diff --git a/tests/script/api/stopquery.c b/tests/script/api/stopquery.c index a42e9e2d44..92baf43d85 100644 --- a/tests/script/api/stopquery.c +++ b/tests/script/api/stopquery.c @@ -85,10 +85,12 @@ static void sqExecSQLE(TAOS *taos, char *command) { taos_free_result(pSql); } +void sqError(char* prefix, const char* errMsg) { + fprintf(stderr, "%s error: %s\n", prefix, errMsg); +} void sqExit(char* prefix, const char* errMsg) { - fprintf(stderr, "%s error: %s\n", prefix, errMsg); - sleep(10000); + sqError(prefix, errMsg); exit(1); } @@ -208,7 +210,9 @@ void sqAsyncQueryCb(void *param, TAOS_RES *pRes, int code) { *qParam->end = 1; } } else { - sqExit("select", taos_errstr(pRes)); + sqError("select", taos_errstr(pRes)); + *qParam->end = 1; + taos_free_result(pRes); } } @@ -463,8 +467,6 @@ void *closeThreadFp(void *arg) { } } - - void *killThreadFp(void *arg) { SSP_CB_PARAM* qParam = (SSP_CB_PARAM*)arg; while (true) { @@ -477,6 +479,19 @@ void *killThreadFp(void *arg) { } } +void *cleanupThreadFp(void *arg) { + SSP_CB_PARAM* qParam = (SSP_CB_PARAM*)arg; + while (true) { + if (qParam->taos) { + usleep(rand() % 10000); + taos_cleanup(); + break; + } + usleep(1); + } +} + + int sqConCloseSyncQuery(bool fetch) { @@ -607,9 +622,40 @@ int sqConKillAsyncQuery(bool fetch) { CASE_LEAVE(); } +int sqConCleanupSyncQuery(bool fetch) { + CASE_ENTER(); + pthread_t qid, cid; + for (int32_t i = 0; i < runTimes; ++i) { + SSP_CB_PARAM param = {0}; + param.fetch = fetch; + pthread_create(&qid, NULL, syncQueryThreadFp, (void*)¶m); + pthread_create(&cid, NULL, cleanupThreadFp, (void*)¶m); + + pthread_join(qid, NULL); + pthread_join(cid, NULL); + } + CASE_LEAVE(); +} + +int sqConCleanupAsyncQuery(bool 
fetch) { + CASE_ENTER(); + pthread_t qid, cid; + for (int32_t i = 0; i < runTimes; ++i) { + SSP_CB_PARAM param = {0}; + param.fetch = fetch; + pthread_create(&qid, NULL, asyncQueryThreadFp, (void*)¶m); + pthread_create(&cid, NULL, cleanupThreadFp, (void*)¶m); + + pthread_join(qid, NULL); + pthread_join(cid, NULL); + } + CASE_LEAVE(); +} + void sqRunAllCase(void) { +#if 0 sqStopSyncQuery(false); sqStopSyncQuery(true); sqStopAsyncQuery(false); @@ -638,11 +684,16 @@ void sqRunAllCase(void) { sqConKillSyncQuery(false); sqConKillSyncQuery(true); -#if 0 sqConKillAsyncQuery(false); sqConKillAsyncQuery(true); #endif + sqConCleanupSyncQuery(false); + sqConCleanupSyncQuery(true); + sqConCleanupAsyncQuery(false); + sqConCleanupAsyncQuery(true); + + int32_t l = 5; while (l) { printf("%d\n", l--); From 65e653835209dd7e0cbaeb8ada6590586d5e1c57 Mon Sep 17 00:00:00 2001 From: "slzhou@taodata.com" Date: Wed, 13 Jul 2022 13:07:10 +0800 Subject: [PATCH 12/56] fix: optimize the scan only when interval window --- source/libs/planner/src/planOptimizer.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/source/libs/planner/src/planOptimizer.c b/source/libs/planner/src/planOptimizer.c index 1548ab0ee3..45f947e128 100644 --- a/source/libs/planner/src/planOptimizer.c +++ b/source/libs/planner/src/planOptimizer.c @@ -107,12 +107,15 @@ static bool scanPathOptMayBeOptimized(SLogicNode* pNode) { QUERY_NODE_LOGIC_PLAN_PARTITION != nodeType(pNode->pParent))) { return false; } - if (QUERY_NODE_LOGIC_PLAN_WINDOW == nodeType(pNode->pParent) || + if ((QUERY_NODE_LOGIC_PLAN_WINDOW == nodeType(pNode->pParent) && WINDOW_TYPE_INTERVAL == ((SWindowLogicNode*)pNode->pParent)->winType) || (QUERY_NODE_LOGIC_PLAN_PARTITION == nodeType(pNode->pParent) && pNode->pParent->pParent && - QUERY_NODE_LOGIC_PLAN_WINDOW == nodeType(pNode->pParent->pParent))) { + QUERY_NODE_LOGIC_PLAN_WINDOW == nodeType(pNode->pParent->pParent) && WINDOW_TYPE_INTERVAL == ((SWindowLogicNode*)pNode->pParent)->winType)) { return true; } - return !scanPathOptHaveNormalCol(((SAggLogicNode*)pNode->pParent)->pGroupKeys); + if (QUERY_NODE_LOGIC_PLAN_AGG == nodeType(pNode->pParent)) { + return !scanPathOptHaveNormalCol(((SAggLogicNode*)pNode->pParent)->pGroupKeys); + } + return false; } static SNodeList* scanPathOptGetAllFuncs(SLogicNode* pNode) { From 480de34a0ed41f002fa7efee95c94ab6a6f4c055 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Wed, 13 Jul 2022 14:13:12 +0800 Subject: [PATCH 13/56] enh: support alter debugflag in dnode --- source/common/src/tglobal.c | 34 +++++++++++++++ source/dnode/mnode/impl/src/mndDnode.c | 58 ++++++++++++++++++++------ source/libs/sync/src/syncMain.c | 4 +- 3 files changed, 81 insertions(+), 15 deletions(-) diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 24bf4e5c2d..974146302c 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -1117,10 +1117,44 @@ void taosCfgDynamicOptions(const char *option, const char *value) { if (strncasecmp(option, "debugFlag", 9) == 0) { int32_t flag = atoi(value); taosSetAllDebugFlag(flag); + return; } if (strcasecmp(option, "resetlog") == 0) { taosResetLog(); cfgDumpCfg(tsCfg, 0, false); + return; } + + if (strcasecmp(option, "monitor") == 0) { + int32_t monitor = atoi(value); + uInfo("monitor set from %d to %d", tsEnableMonitor, monitor); + tsEnableMonitor = monitor; + return; + } + + const char *options[] = { + "dDebugFlag", "vDebugFlag", "mDebugFlag", "wDebugFlag", "sDebugFlag", "tsdbDebugFlag", + "tqDebugFlag", 
"fsDebugFlag", "udfDebugFlag", "smaDebugFlag", "idxDebugFlag", "tmrDebugFlag", + "uDebugFlag", "smaDebugFlag", "rpcDebugFlag", "qDebugFlag", + }; + int32_t *optionVars[] = { + &dDebugFlag, &vDebugFlag, &mDebugFlag, &wDebugFlag, &sDebugFlag, &tsdbDebugFlag, + &tqDebugFlag, &fsDebugFlag, &udfDebugFlag, &smaDebugFlag, &idxDebugFlag, &tmrDebugFlag, + &uDebugFlag, &smaDebugFlag, &rpcDebugFlag, &qDebugFlag, + }; + + int32_t optionSize = tListLen(options); + for (int32_t d = 0; d < optionSize; ++d) { + const char *optName = options[d]; + int32_t optLen = strlen(optName); + if (strncasecmp(option, optName, optLen) != 0) continue; + + int32_t flag = atoi(value); + uInfo("%s set from %d to %d", optName, *optionVars[d], flag); + *optionVars[d] = flag; + return; + } + + uError("failed to cfg dynamic option:%s value:%s", option, value); } diff --git a/source/dnode/mnode/impl/src/mndDnode.c b/source/dnode/mnode/impl/src/mndDnode.c index 9ce108b789..3e5d378bb1 100644 --- a/source/dnode/mnode/impl/src/mndDnode.c +++ b/source/dnode/mnode/impl/src/mndDnode.c @@ -781,7 +781,13 @@ _OVER: } static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) { - SMnode *pMnode = pReq->info.node; + SMnode *pMnode = pReq->info.node; + const char *options[] = { + "debugFlag", "dDebugFlag", "vDebugFlag", "mDebugFlag", "wDebugFlag", "sDebugFlag", + "tsdbDebugFlag", "tqDebugFlag", "fsDebugFlag", "udfDebugFlag", "smaDebugFlag", "idxDebugFlag", + "tmrDebugFlag", "uDebugFlag", "smaDebugFlag", "rpcDebugFlag", "qDebugFlag", + }; + int32_t optionSize = tListLen(options); SMCfgDnodeReq cfgReq = {0}; if (tDeserializeSMCfgDnodeReq(pReq->pCont, pReq->contLen, &cfgReq) != 0) { @@ -802,27 +808,52 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) { SEpSet epSet = mndGetDnodeEpset(pDnode); mndReleaseDnode(pMnode, pDnode); + SDCfgDnodeReq dcfgReq = {0}; - if (strncasecmp(cfgReq.config, "debugFlag", 9) == 0) { + if (strcasecmp(cfgReq.config, "resetlog") == 0) { + strcpy(dcfgReq.config, "resetlog"); + } else if (strncasecmp(cfgReq.config, "monitor", 7) == 0) { const char *value = cfgReq.value; int32_t flag = atoi(value); if (flag <= 0) { - flag = atoi(cfgReq.config + 10); + flag = atoi(cfgReq.config + 8); } - if (flag <= 0 || flag > 255) { - mError("dnode:%d, failed to config debugFlag since value:%d", cfgReq.dnodeId, flag); + if (flag < 0 || flag > 2) { + mError("dnode:%d, failed to config monitor since value:%d", cfgReq.dnodeId, flag); terrno = TSDB_CODE_INVALID_CFG; return -1; } - strcpy(dcfgReq.config, "debugFlag"); + strcpy(dcfgReq.config, "monitor"); snprintf(dcfgReq.value, TSDB_DNODE_VALUE_LEN, "%d", flag); - } else if (strcasecmp(cfgReq.config, "resetlog") == 0) { - strcpy(dcfgReq.config, "resetlog"); } else { - terrno = TSDB_CODE_INVALID_CFG; - mError("dnode:%d, failed to config since %s", cfgReq.dnodeId, terrstr()); - return -1; + bool findOpt = false; + for (int32_t d = 0; d < optionSize; ++d) { + const char *optName = options[d]; + int32_t optLen = strlen(optName); + if (strncasecmp(cfgReq.config, optName, optLen) != 0) continue; + + const char *value = cfgReq.value; + int32_t flag = atoi(value); + if (flag <= 0) { + flag = atoi(cfgReq.config + optLen + 1); + } + if (flag <= 0 || flag > 255) { + mError("dnode:%d, failed to config %s since value:%d", cfgReq.dnodeId, optName, flag); + terrno = TSDB_CODE_INVALID_CFG; + return -1; + } + + tstrncpy(dcfgReq.config, optName, optLen + 1); + snprintf(dcfgReq.value, TSDB_DNODE_VALUE_LEN, "%d", flag); + findOpt = true; + } + + if (!findOpt) { + terrno = TSDB_CODE_INVALID_CFG; + 
mError("dnode:%d, failed to config since %s", cfgReq.dnodeId, terrstr()); + return -1; + } } int32_t bufLen = tSerializeSDCfgDnodeReq(NULL, 0, &dcfgReq); @@ -831,13 +862,14 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) { if (pBuf == NULL) return -1; tSerializeSDCfgDnodeReq(pBuf, bufLen, &dcfgReq); - mDebug("dnode:%d, send config req to dnode, app:%p", cfgReq.dnodeId, pReq->info.ahandle); + mInfo("dnode:%d, send config req to dnode, app:%p config:%s value:%s", cfgReq.dnodeId, pReq->info.ahandle, + dcfgReq.config, dcfgReq.value); SRpcMsg rpcMsg = {.msgType = TDMT_DND_CONFIG_DNODE, .pCont = pBuf, .contLen = bufLen}; return tmsgSendReq(&epSet, &rpcMsg); } static int32_t mndProcessConfigDnodeRsp(SRpcMsg *pRsp) { - mDebug("config rsp from dnode, app:%p", pRsp->info.ahandle); + mInfo("config rsp from dnode, app:%p", pRsp->info.ahandle); return 0; } diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c index abc0f53611..bab64b6f11 100644 --- a/source/libs/sync/src/syncMain.c +++ b/source/libs/sync/src/syncMain.c @@ -293,7 +293,7 @@ int32_t syncLeaderTransferTo(int64_t rid, SNodeInfo newLeader) { int32_t syncNodeLeaderTransfer(SSyncNode* pSyncNode) { if (pSyncNode->peersNum == 0) { - sError("only one replica, cannot leader transfer"); + sDebug("only one replica, cannot leader transfer"); terrno = TSDB_CODE_SYN_ONE_REPLICA; return -1; } @@ -307,7 +307,7 @@ int32_t syncNodeLeaderTransferTo(SSyncNode* pSyncNode, SNodeInfo newLeader) { int32_t ret = 0; if (pSyncNode->replicaNum == 1) { - sError("only one replica, cannot leader transfer"); + sDebug("only one replica, cannot leader transfer"); terrno = TSDB_CODE_SYN_ONE_REPLICA; return -1; } From 339967a3579d6f2ac64a2145f3333d2c9bf96e67 Mon Sep 17 00:00:00 2001 From: plum-lihui Date: Wed, 13 Jul 2022 14:19:02 +0800 Subject: [PATCH 14/56] test: add test case into ci --- tests/system-test/fulltest.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh index ec31235241..5672571bb5 100755 --- a/tests/system-test/fulltest.sh +++ b/tests/system-test/fulltest.sh @@ -179,7 +179,7 @@ python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb.py python3 ./test.py -f 7-tmq/tmqAutoCreateTbl.py #python3 ./test.py -f 7-tmq/tmqDnodeRestart.py #python3 ./test.py -f 7-tmq/tmqUpdate-1ctb.py -#python3 ./test.py -f 7-tmq/tmqUpdate-multiCtb.py +python3 ./test.py -f 7-tmq/tmqUpdate-multiCtb.py #------------querPolicy 2----------- From 649cf7e55dbe7182e914d377c6889f3ef49e7e81 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 13 Jul 2022 14:21:44 +0800 Subject: [PATCH 15/56] fix(query): support last_row(tags) for super table query. 
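
The cache reader now reports, for every cached row it returns, the uid of the table that
produced it, so the cache scan operator can resolve tag pseudo-columns and last_row(tags)
works on a super table. A minimal sketch of the intended call pattern, with placeholder
local variable names (the real caller is in cachescanoperator.c further down in this patch):

    // sketch only, not a standalone program; functions and fields are the ones touched below
    SArray *uidList = taosArrayInit(1, sizeof(tb_uid_t));   // receives one uid per returned row
    int32_t code = tsdbRetrieveLastRow(pReader, pResBlock, slotIds, uidList);
    if (TSDB_CODE_SUCCESS == code && pResBlock->info.rows > 0) {
      // remember which table the row came from, then fill tag pseudo-columns from its metadata
      pResBlock->info.uid = *(tb_uid_t *)taosArrayGet(uidList, 0);
    }
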
--- source/dnode/vnode/inc/vnode.h | 2 +- source/dnode/vnode/src/tsdb/tsdbCacheRead.c | 6 ++-- source/libs/executor/inc/executorimpl.h | 3 ++ source/libs/executor/src/cachescanoperator.c | 31 ++++++++++++++------ source/libs/executor/src/scanoperator.c | 4 --- source/libs/function/src/builtinsimpl.c | 13 ++++---- 6 files changed, 37 insertions(+), 22 deletions(-) diff --git a/source/dnode/vnode/inc/vnode.h b/source/dnode/vnode/inc/vnode.h index 9f32964fa9..f0a6b6505d 100644 --- a/source/dnode/vnode/inc/vnode.h +++ b/source/dnode/vnode/inc/vnode.h @@ -138,7 +138,7 @@ void *tsdbGetIdx(SMeta *pMeta); void *tsdbGetIvtIdx(SMeta *pMeta); int32_t tsdbLastRowReaderOpen(void *pVnode, int32_t type, SArray *pTableIdList, int32_t numOfCols, void **pReader); -int32_t tsdbRetrieveLastRow(void *pReader, SSDataBlock *pResBlock, const int32_t *slotIds); +int32_t tsdbRetrieveLastRow(void *pReader, SSDataBlock *pResBlock, const int32_t *slotIds, SArray* pTableUids); int32_t tsdbLastrowReaderClose(void *pReader); int32_t tsdbGetTableSchema(SVnode *pVnode, int64_t uid, STSchema **pSchema, int64_t *suid); diff --git a/source/dnode/vnode/src/tsdb/tsdbCacheRead.c b/source/dnode/vnode/src/tsdb/tsdbCacheRead.c index 5c09c7663f..5855468f31 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCacheRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbCacheRead.c @@ -104,7 +104,7 @@ int32_t tsdbLastrowReaderClose(void* pReader) { return TSDB_CODE_SUCCESS; } -int32_t tsdbRetrieveLastRow(void* pReader, SSDataBlock* pResBlock, const int32_t* slotIds) { +int32_t tsdbRetrieveLastRow(void* pReader, SSDataBlock* pResBlock, const int32_t* slotIds, SArray* pTableUidList) { if (pReader == NULL || pResBlock == NULL) { return TSDB_CODE_INVALID_PARA; } @@ -141,14 +141,15 @@ int32_t tsdbRetrieveLastRow(void* pReader, SSDataBlock* pResBlock, const int32_t // appended or not. 
if (internalResult) { pResBlock->info.rows -= 1; + taosArrayClear(pTableUidList); } saveOneRow(pRow, pResBlock, pr, slotIds); + taosArrayPush(pTableUidList, &pKeyInfo->uid); internalResult = true; lastKey = pRow->ts; } - // taosMemoryFree(pRow); tsdbCacheRelease(lruCache, h); } } else if (pr->type == LASTROW_RETRIEVE_TYPE_ALL) { @@ -171,6 +172,7 @@ int32_t tsdbRetrieveLastRow(void* pReader, SSDataBlock* pResBlock, const int32_t // tsdbCacheLastArray2Row(pLast, &pRow, pr->pSchema); saveOneRow(pRow, pResBlock, pr, slotIds); + taosArrayPush(pTableUidList, &pKeyInfo->uid); // taosMemoryFree(pRow); tsdbCacheRelease(lruCache, h); diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h index aab2f51421..3da8e298a6 100644 --- a/source/libs/executor/inc/executorimpl.h +++ b/source/libs/executor/inc/executorimpl.h @@ -319,6 +319,7 @@ typedef struct SLastrowScanInfo { void *pLastrowReader; SArray *pColMatchInfo; int32_t *pSlotIds; + SExprSupp pseudoExprSup; } SLastrowScanInfo; typedef enum EStreamScanMode { @@ -787,6 +788,8 @@ int32_t getBufferPgSize(int32_t rowSize, uint32_t* defaultPgsz, uint32_t* defaul void doSetOperatorCompleted(SOperatorInfo* pOperator); void doFilter(const SNode* pFilterNode, SSDataBlock* pBlock); +int32_t addTagPseudoColumnData(SReadHandle* pHandle, SExprInfo* pPseudoExpr, int32_t numOfPseudoExpr, + SSDataBlock* pBlock, const char* idStr); void cleanupAggSup(SAggSupporter* pAggSup); void destroyBasicOperatorInfo(void* param, int32_t numOfOutput); diff --git a/source/libs/executor/src/cachescanoperator.c b/source/libs/executor/src/cachescanoperator.c index 7b1351a024..0f6817cd6b 100644 --- a/source/libs/executor/src/cachescanoperator.c +++ b/source/libs/executor/src/cachescanoperator.c @@ -45,20 +45,20 @@ SOperatorInfo* createLastrowScanOperator(SLastRowScanPhysiNode* pScanNode, SRead int32_t numOfCols = 0; pInfo->pColMatchInfo = extractColMatchInfo(pScanNode->pScanCols, pScanNode->node.pOutputDataBlockDesc, &numOfCols, COL_MATCH_FROM_COL_ID); - int32_t* pCols = taosMemoryMalloc(numOfCols * sizeof(int32_t)); - for (int32_t i = 0; i < taosArrayGetSize(pInfo->pColMatchInfo); ++i) { - SColMatchInfo* pColMatch = taosArrayGet(pInfo->pColMatchInfo, i); - pCols[i] = pColMatch->colId; - } - int32_t code = extractTargetSlotId(pInfo->pColMatchInfo, pTaskInfo, &pInfo->pSlotIds); if (code != TSDB_CODE_SUCCESS) { goto _error; } - tsdbLastRowReaderOpen(readHandle->vnode, LASTROW_RETRIEVE_TYPE_ALL, pTableList, taosArrayGetSize(pInfo->pColMatchInfo), + tsdbLastRowReaderOpen(readHandle->vnode, LASTROW_RETRIEVE_TYPE_SINGLE, pTableList, taosArrayGetSize(pInfo->pColMatchInfo), &pInfo->pLastrowReader); - taosMemoryFree(pCols); + + if (pScanNode->pScanPseudoCols != NULL) { + SExprSupp* pPseudoExpr = &pInfo->pseudoExprSup; + + pPseudoExpr->pExprInfo = createExprInfo(pScanNode->pScanPseudoCols, NULL, &pPseudoExpr->numOfExprs); + pPseudoExpr->pCtx = createSqlFunctionCtx(pPseudoExpr->pExprInfo, pPseudoExpr->numOfExprs, &pPseudoExpr->rowEntryInfoOffset); + } pOperator->name = "LastrowScanOperator"; pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_LAST_ROW_SCAN; @@ -100,7 +100,20 @@ SSDataBlock* doScanLastrow(SOperatorInfo* pOperator) { // check if it is a group by tbname if (size == taosArrayGetSize(pInfo->pTableList)) { blockDataCleanup(pInfo->pRes); - tsdbRetrieveLastRow(pInfo->pLastrowReader, pInfo->pRes, pInfo->pSlotIds); + SArray* pUidList = taosArrayInit(1, sizeof(tb_uid_t)); + int32_t code = tsdbRetrieveLastRow(pInfo->pLastrowReader, pInfo->pRes, 
pInfo->pSlotIds, pUidList); + if (code != TSDB_CODE_SUCCESS) { + longjmp(pTaskInfo->env, code); + } + + // check for tag values + if (pInfo->pRes->info.rows > 0 && pInfo->pseudoExprSup.numOfExprs > 0) { + SExprSupp* pSup = &pInfo->pseudoExprSup; + pInfo->pRes->info.uid = *(tb_uid_t*) taosArrayGet(pUidList, 0); + addTagPseudoColumnData(&pInfo->readHandle, pSup->pExprInfo, pSup->numOfExprs, pInfo->pRes, GET_TASKID(pTaskInfo)); + } + + doSetOperatorCompleted(pOperator); return (pInfo->pRes->info.rows == 0) ? NULL : pInfo->pRes; } else { // todo fetch the result for each group diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 64c740decf..66703502eb 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -39,8 +39,6 @@ static int32_t buildSysDbTableInfo(const SSysTableScanInfo* pInfo, int32_t capac static int32_t buildDbTableInfoBlock(const SSDataBlock* p, const SSysTableMeta* pSysDbTableMeta, size_t size, const char* dbName); -static int32_t addTagPseudoColumnData(SReadHandle* pHandle, SExprInfo* pPseudoExpr, int32_t numOfPseudoExpr, - SSDataBlock* pBlock, const char* idStr); static bool processBlockWithProbability(const SSampleExecInfo* pInfo); bool processBlockWithProbability(const SSampleExecInfo* pInfo) { @@ -320,8 +318,6 @@ int32_t addTagPseudoColumnData(SReadHandle* pHandle, SExprInfo* pPseudoExpr, int int32_t dstSlotId = pExpr->base.resSchema.slotId; SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, dstSlotId); - - colInfoDataEnsureCapacity(pColInfoData, pBlock->info.rows); colInfoDataCleanup(pColInfoData, pBlock->info.rows); int32_t functionId = pExpr->pExpr->_function.functionId; diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index ccf28bfd78..c1143020f0 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -80,11 +80,12 @@ typedef struct STopBotRes { } STopBotRes; typedef struct SFirstLastRes { - bool hasResult; + bool hasResult; // used for last_row function only, isNullRes in SResultRowEntry can not be passed to downstream.So, // this attribute is required - bool isNull; + bool isNull; int32_t bytes; + int64_t ts; char buf[]; } SFirstLastRes; @@ -2951,6 +2952,7 @@ int32_t firstLastFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { SFirstLastRes* pRes = GET_ROWCELL_INTERBUF(pResInfo); colDataAppend(pCol, pBlock->info.rows, pRes->buf, pRes->isNull||pResInfo->isNullRes); + // handle selectivity STuplePos* pTuplePos = (STuplePos*)(pRes->buf + pRes->bytes + sizeof(TSKEY)); setSelectivityValue(pCtx, pBlock, pTuplePos, pBlock->info.rows); @@ -5988,7 +5990,7 @@ int32_t lastrowFunction(SqlFunctionCtx* pCtx) { SInputColumnInfoData* pInput = &pCtx->input; SColumnInfoData* pInputCol = pInput->pData[0]; - int32_t type = pInputCol->info.type; + int32_t type = pInputCol->info.type; int32_t bytes = pInputCol->info.bytes; pInfo->bytes = bytes; @@ -5999,7 +6001,7 @@ int32_t lastrowFunction(SqlFunctionCtx* pCtx) { char* data = colDataGetData(pInputCol, i); TSKEY cts = getRowPTs(pInput->pPTS, i); - if (pResInfo->numOfRes == 0 || *(TSKEY*)(pInfo->buf + bytes) < cts) { + if (pResInfo->numOfRes == 0 || pInfo->ts < cts) { if (colDataIsNull_s(pInputCol, i)) { pInfo->isNull = true; @@ -6012,8 +6014,7 @@ int32_t lastrowFunction(SqlFunctionCtx* pCtx) { memcpy(pInfo->buf, data, bytes); } - *(TSKEY*)(pInfo->buf + bytes) = cts; - + pInfo->ts = cts; pInfo->hasResult = true; pResInfo->numOfRes = 1; From 
5ba8dbfba45f279fe843e6dd47029b8e6482731e Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Wed, 13 Jul 2022 14:32:52 +0800 Subject: [PATCH 16/56] enh: support alter debugflag in dnode --- tests/script/general/alter/dnode.sim | 67 ------------------- tests/script/jenkins/basic.txt | 5 ++ .../alter/cached_schema_after_alter.sim | 0 tests/script/tsim/alter/dnode.sim | 62 +++++++++++++++++ .../script/{general => tsim}/alter/table.sim | 2 - 5 files changed, 67 insertions(+), 69 deletions(-) delete mode 100644 tests/script/general/alter/dnode.sim rename tests/script/{general => tsim}/alter/cached_schema_after_alter.sim (100%) create mode 100644 tests/script/tsim/alter/dnode.sim rename tests/script/{general => tsim}/alter/table.sim (99%) diff --git a/tests/script/general/alter/dnode.sim b/tests/script/general/alter/dnode.sim deleted file mode 100644 index 64e8a17de0..0000000000 --- a/tests/script/general/alter/dnode.sim +++ /dev/null @@ -1,67 +0,0 @@ -system sh/stop_dnodes.sh -system sh/deploy.sh -n dnode1 -i 1 -system sh/exec.sh -n dnode1 -s start -sql connect - -print ======== step1 -sql alter dnode 1 resetlog -sql alter dnode 1 monitor 1 - -sleep 3000 -sql select * from log.dn -if $rows <= 0 then - return -1 -endi - -print ======== step2 - -sql alter dnode 1 resetquerycache -sql alter dnode 1 debugFlag 135 -sql alter dnode 1 debugFlag 131 -sql alter dnode 1 monitor 0 -sql alter dnode 1 debugFlag 135 -sql alter dnode 1 monDebugFlag 135 -sql alter dnode 1 vDebugFlag 135 -sql alter dnode 1 mDebugFlag 135 -sql alter dnode 1 cDebugFlag 135 -sql alter dnode 1 httpDebugFlag 135 -sql alter dnode 1 qDebugflag 135 -sql alter dnode 1 sdbDebugFlag 135 -sql alter dnode 1 uDebugFlag 135 -sql alter dnode 1 tsdbDebugFlag 135 -sql alter dnode 1 sDebugflag 135 -sql alter dnode 1 rpcDebugFlag 135 -sql alter dnode 1 dDebugFlag 135 -sql alter dnode 1 mqttDebugFlag 135 -sql alter dnode 1 wDebugFlag 135 -sql alter dnode 1 tmrDebugFlag 135 -sql_error alter dnode 2 wDebugFlag 135 -sql_error alter dnode 2 tmrDebugFlag 135 - -print ======== step3 -sql_error alter $hostname1 debugFlag 135 -sql_error alter $hostname1 monDebugFlag 135 -sql_error alter $hostname1 vDebugFlag 135 -sql_error alter $hostname1 mDebugFlag 135 -sql_error alter dnode $hostname2 debugFlag 135 -sql_error alter dnode $hostname2 monDebugFlag 135 -sql_error alter dnode $hostname2 vDebugFlag 135 -sql_error alter dnode $hostname2 mDebugFlag 135 -sql alter dnode $hostname1 debugFlag 135 -sql alter dnode $hostname1 monDebugFlag 135 -sql alter dnode $hostname1 vDebugFlag 135 -sql alter dnode $hostname1 tmrDebugFlag 131 - -print ======== step4 -sql_error sql alter dnode 1 balance 0 -sql_error sql alter dnode 1 balance vnode:1-dnode:1 -sql_error sql alter dnode 1 balance "vnode:1" -sql_error sql alter dnode 1 balance "vnode:1-dnode:1" -sql_error sql alter dnode 1 balance "dnode:1-vnode:1" -sql_error sql alter dnode 1 balance "dnode:1-" -sql_error sql alter dnode 1 balance "vnode:2-dnod" -sql alter dnode 1 balance "vnode:2-dnode:1" -x step4 -step4: - -print ======= over -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt index 50d4c04a93..13a28aa527 100644 --- a/tests/script/jenkins/basic.txt +++ b/tests/script/jenkins/basic.txt @@ -1,6 +1,11 @@ #======================b1-start=============== +# ---- alter +./test.sh -f tsim/alter/cached_schema_after_alter.sim +./test.sh -f tsim/alter/dnode.sim +#./test.sh -f tsim/alter/table.sim + # ---- user ./test.sh -f 
tsim/user/basic.sim ./test.sh -f tsim/user/password.sim diff --git a/tests/script/general/alter/cached_schema_after_alter.sim b/tests/script/tsim/alter/cached_schema_after_alter.sim similarity index 100% rename from tests/script/general/alter/cached_schema_after_alter.sim rename to tests/script/tsim/alter/cached_schema_after_alter.sim diff --git a/tests/script/tsim/alter/dnode.sim b/tests/script/tsim/alter/dnode.sim new file mode 100644 index 0000000000..35620f17aa --- /dev/null +++ b/tests/script/tsim/alter/dnode.sim @@ -0,0 +1,62 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sql connect + +print ======== step1 +sql alter dnode 1 'resetlog' +sql alter dnode 1 'monitor' '1' +sql alter dnode 1 'monitor' '0' +sql alter dnode 1 'monitor 1' +sql alter dnode 1 'monitor 0' + +print ======== step2 +sql_error alter dnode 1 'resetquerycache' +sql alter dnode 1 'debugFlag 135' +sql alter dnode 1 'dDebugFlag 131' +sql alter dnode 1 'vDebugFlag 131' +sql alter dnode 1 'mDebugFlag 131' +sql alter dnode 1 'wDebugFlag 131' +sql alter dnode 1 'sDebugFlag 131' +sql alter dnode 1 'tsdbDebugFlag 131' +sql alter dnode 1 'tqDebugFlag 131' +sql alter dnode 1 'fsDebugFlag 131' +sql alter dnode 1 'udfDebugFlag 131' +sql alter dnode 1 'smaDebugFlag 131' +sql alter dnode 1 'idxDebugFlag 131' +sql alter dnode 1 'tmrDebugFlag 131' +sql alter dnode 1 'uDebugFlag 131' +sql alter dnode 1 'smaDebugFlag 131' +sql alter dnode 1 'rpcDebugFlag 131' +sql alter dnode 1 'qDebugFlag 131' + +sql_error alter dnode 2 'wDebugFlag 135' +sql_error alter dnode 2 'tmrDebugFlag 135' + +print ======== step3 +sql_error alter $hostname1 debugFlag 135 +sql_error alter $hostname1 monDebugFlag 135 +sql_error alter $hostname1 vDebugFlag 135 +sql_error alter $hostname1 mDebugFlag 135 +sql_error alter dnode $hostname2 debugFlag 135 +sql_error alter dnode $hostname2 monDebugFlag 135 +sql_error alter dnode $hostname2 vDebugFlag 135 +sql_error alter dnode $hostname2 mDebugFlag 135 +sql_error alter dnode $hostname1 debugFlag 135 +sql_error alter dnode $hostname1 monDebugFlag 135 +sql_error alter dnode $hostname1 vDebugFlag 135 +sql_error alter dnode $hostname1 tmrDebugFlag 131 + +print ======== step4 +sql_error sql alter dnode 1 balance 0 +sql_error sql alter dnode 1 balance vnode:1-dnode:1 +sql_error sql alter dnode 1 balance "vnode:1" +sql_error sql alter dnode 1 balance "vnode:1-dnode:1" +sql_error sql alter dnode 1 balance "dnode:1-vnode:1" +sql_error sql alter dnode 1 balance "dnode:1-" +sql_error sql alter dnode 1 balance "vnode:2-dnod" +sql alter dnode 1 balance "vnode:2-dnode:1" -x step4 +step4: + +print ======= over +system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/general/alter/table.sim b/tests/script/tsim/alter/table.sim similarity index 99% rename from tests/script/general/alter/table.sim rename to tests/script/tsim/alter/table.sim index 9ca2f60bdc..cc995d171f 100644 --- a/tests/script/general/alter/table.sim +++ b/tests/script/tsim/alter/table.sim @@ -315,9 +315,7 @@ endi print ======== step9 print ======== step10 system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 3000 system sh/exec.sh -n dnode1 -s start -sleep 3000 sql use d1 sql describe tb From db6eca9bef283b7148f6d31dcce3d19ce5a9e665 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Wed, 13 Jul 2022 14:41:50 +0800 Subject: [PATCH 17/56] fix test cases --- tests/system-test/2-query/arccos.py | 92 ++++++++++++++--------------- 1 file changed, 46 insertions(+), 46 deletions(-) 
diff --git a/tests/system-test/2-query/arccos.py b/tests/system-test/2-query/arccos.py index e7e5ecb114..d5656d9104 100644 --- a/tests/system-test/2-query/arccos.py +++ b/tests/system-test/2-query/arccos.py @@ -9,14 +9,14 @@ from util.cases import * class TDTestCase: - updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , + updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143, "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143} def init(self, conn, powSql): tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor()) self.PI =3.1415926 - + def prepare_datas(self): tdSql.execute( '''create table stb1 @@ -24,7 +24,7 @@ class TDTestCase: tags (t1 int) ''' ) - + tdSql.execute( ''' create table t1 @@ -66,14 +66,14 @@ class TDTestCase: ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ''' ) - + def check_result_auto_acos(self ,origin_query , pow_query): pow_result = tdSql.getResult(pow_query) origin_result = tdSql.getResult(origin_query) auto_result =[] - + for row in origin_result: row_check = [] for elem in row: @@ -93,7 +93,7 @@ class TDTestCase: if auto_result[row_index][col_index] == None and not (auto_result[row_index][col_index] == None and elem == None): check_status = False elif auto_result[row_index][col_index] != None and (auto_result[row_index][col_index] - elem > 0.00000001): - check_status = False + check_status = False else: pass if not check_status: @@ -101,7 +101,7 @@ class TDTestCase: sys.exit(1) else: tdLog.info("acos value check pass , it work as expected ,sql is \"%s\" "%pow_query ) - + def test_errors(self): error_sql_lists = [ "select acos from t1", @@ -135,42 +135,42 @@ class TDTestCase: ] for error_sql in error_sql_lists: tdSql.error(error_sql) - + def support_types(self): type_error_sql_lists = [ - "select acos(ts) from t1" , + "select acos(ts) from t1" , "select acos(c7) from t1", "select acos(c8) from t1", "select acos(c9) from t1", - "select acos(ts) from ct1" , + "select acos(ts) from ct1" , "select acos(c7) from ct1", "select acos(c8) from ct1", "select acos(c9) from ct1", - "select acos(ts) from ct3" , + "select acos(ts) from ct3" , "select acos(c7) from ct3", "select acos(c8) from ct3", "select acos(c9) from ct3", - "select acos(ts) from ct4" , + "select acos(ts) from ct4" , "select acos(c7) from ct4", "select acos(c8) from ct4", "select acos(c9) from ct4", - "select acos(ts) from stb1" , + "select acos(ts) from stb1" , "select acos(c7) from stb1", "select acos(c8) from stb1", "select acos(c9) from stb1" , - "select acos(ts) from stbbb1" , + "select acos(ts) from stbbb1" , "select acos(c7) from stbbb1", "select acos(ts) from tbname", "select acos(c9) from tbname" ] - + for type_sql in type_error_sql_lists: tdSql.error(type_sql) - - + + type_sql_lists = [ "select acos(c1) from t1", "select acos(c2) from t1", @@ -200,16 +200,16 @@ class TDTestCase: "select acos(c5) from stb1", "select acos(c6) from stb1", - "select acos(c6) as alisb from stb1", - "select acos(c6) alisb from stb1", + "select acos(c6) as alisb from stb1", + "select acos(c6) alisb from stb1", ] for type_sql in type_sql_lists: tdSql.query(type_sql) - + def basic_acos_function(self): - # basic query + # basic query tdSql.query("select c1 from ct3") 
tdSql.checkRows(0) tdSql.query("select c1 from t1") @@ -250,7 +250,7 @@ class TDTestCase: tdSql.checkData(5, 5, None) self.check_result_auto_acos( "select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from t1", "select acos(abs(c1)), acos(abs(c2)) ,acos(abs(c3)), acos(abs(c4)), acos(abs(c5)) from t1") - + # used for sub table tdSql.query("select c2 ,acos(c2) from ct1") tdSql.checkData(0, 1, None) @@ -266,7 +266,7 @@ class TDTestCase: tdSql.checkData(5 , 2, None) self.check_result_auto_acos( "select c1, c2, c3 , c4, c5 from ct1", "select acos(c1), acos(c2) ,acos(c3), acos(c4), acos(c5) from ct1") - + # nest query for acos functions tdSql.query("select c4 , acos(c4) ,acos(acos(c4)) , acos(acos(acos(c4))) from ct1;") tdSql.checkData(0 , 0 , 88) @@ -284,21 +284,21 @@ class TDTestCase: tdSql.checkData(11 , 2 , None) tdSql.checkData(11 , 3 , None) - # used for stable table - + # used for stable table + tdSql.query("select acos(c1) from stb1") tdSql.checkRows(25) - + # used for not exists table tdSql.error("select acos(c1) from stbbb1") tdSql.error("select acos(c1) from tbname") tdSql.error("select acos(c1) from ct5") - # mix with common col + # mix with common col tdSql.query("select c1, acos(c1) from ct1") tdSql.query("select c2, acos(c2) from ct4") - + # mix with common functions tdSql.query("select c1, acos(c1),acos(c1), acos(acos(c1)) from ct4 ") @@ -306,7 +306,7 @@ class TDTestCase: tdSql.checkData(0 , 1 ,None) tdSql.checkData(0 , 2 ,None) tdSql.checkData(0 , 3 ,None) - + tdSql.checkData(3 , 0 , 6) tdSql.checkData(3 , 1 ,None) tdSql.checkData(3 , 2 ,None) @@ -327,8 +327,8 @@ class TDTestCase: tdSql.query("select max(c5), count(c5) from stb1") tdSql.query("select max(c5), count(c5) from ct1") - - # # bug fix for compute + + # # bug fix for compute tdSql.query("select c1, acos(c1) -0 ,acos(c1-4)-0 from ct4 ") tdSql.checkData(0, 0, None) tdSql.checkData(0, 1, None) @@ -397,10 +397,10 @@ class TDTestCase: tdSql.checkData(0,3,0.000000000) tdSql.checkData(0,4,-0.100000000) tdSql.checkData(0,5,2) - + def pow_Arithmetic(self): pass - + def check_boundary_values(self): PI=3.1415926 @@ -429,11 +429,11 @@ class TDTestCase: f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) self.check_result_auto_acos( "select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from sub1_bound ", "select acos(abs(c1)), acos(abs(c2)) ,acos(abs(c3)), acos(abs(c4)), acos(abs(c5)) from sub1_bound") - + self.check_result_auto_acos( "select c1, c2, c3 , c3, c2 ,c1 from sub1_bound ", "select acos(c1), acos(c2) ,acos(c3), acos(c3), acos(c2) ,acos(c1) from sub1_bound") self.check_result_auto_acos("select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from sub1_bound" , "select acos(abs(c1)) from sub1_bound" ) - + # check basic elem for table per row tdSql.query("select acos(abs(c1)) ,acos(abs(c2)) , acos(abs(c3)) , acos(abs(c4)), acos(abs(c5)), acos(abs(c6)) from sub1_bound ") tdSql.checkData(0,0,None) @@ -492,41 +492,41 @@ class TDTestCase: self.check_result_auto_acos( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select acos(t1) ,acos(c5) from stb1 where c1 > 0 order by tbname" ) self.check_result_auto_acos( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select acos(t1) , acos(c5) from stb1 where c1 > 0 order by tbname" ) pass - - + + def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring tdSql.prepare() tdLog.printNoPrefix("==========step1:create table 
==============") - + self.prepare_datas() - tdLog.printNoPrefix("==========step2:test errors ==============") + tdLog.printNoPrefix("==========step2:test errors ==============") self.test_errors() - - tdLog.printNoPrefix("==========step3:support types ============") + + tdLog.printNoPrefix("==========step3:support types ============") self.support_types() - tdLog.printNoPrefix("==========step4: acos basic query ============") + tdLog.printNoPrefix("==========step4: acos basic query ============") self.basic_acos_function() - tdLog.printNoPrefix("==========step5: big number acos query ============") + tdLog.printNoPrefix("==========step5: big number acos query ============") self.test_big_number() - tdLog.printNoPrefix("==========step6: acos boundary query ============") + tdLog.printNoPrefix("==========step6: acos boundary query ============") self.check_boundary_values() - tdLog.printNoPrefix("==========step7: acos filter query ============") + tdLog.printNoPrefix("==========step7: acos filter query ============") self.abs_func_filter() - tdLog.printNoPrefix("==========step7: acos filter query ============") + tdLog.printNoPrefix("==========step7: acos filter query ============") self.abs_func_filter() From adbc50140335c793b14bde5ca14fad899d55a0fd Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Wed, 13 Jul 2022 14:41:50 +0800 Subject: [PATCH 18/56] fix test cases --- tests/system-test/2-query/queryQnode.py | 54 ++++++++++++------------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/tests/system-test/2-query/queryQnode.py b/tests/system-test/2-query/queryQnode.py index 3fdc09478d..8bdb99deec 100644 --- a/tests/system-test/2-query/queryQnode.py +++ b/tests/system-test/2-query/queryQnode.py @@ -45,7 +45,7 @@ class TDTestCase: case1: limit offset base function test case2: offset return valid ''' - return + return def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -71,7 +71,7 @@ class TDTestCase: # self.create_tables(); self.ts = 1500000000000 - # stop + # stop def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) @@ -82,7 +82,7 @@ class TDTestCase: def newcur(self,host,cfg): user = "root" password = "taosdata" - port =6030 + port =6030 con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port) cur=con.cursor() print(cur) @@ -92,7 +92,7 @@ class TDTestCase: def create_tables(self,host,dbname,stbname,count): buildPath = self.getBuildPath() config = buildPath+ "../sim/dnode1/cfg/" - + tsql=self.newcur(host,config) tsql.execute("use %s" %dbname) @@ -111,7 +111,7 @@ class TDTestCase: tsql.execute(sql) sql = pre_create # print(time.time()) - # end sql + # end sql if sql != pre_create: # print(sql) tsql.execute(sql) @@ -124,7 +124,7 @@ class TDTestCase: def mutiThread_create_tables(self,host,dbname,stbname,vgroups,threadNumbers,childcount): buildPath = self.getBuildPath() config = buildPath+ "../sim/dnode1/cfg/" - + tsql=self.newcur(host,config) tdLog.debug("create database %s"%dbname) tsql.execute("drop database if exists %s"%dbname) @@ -134,7 +134,7 @@ class TDTestCase: threads = [] for i in range(threadNumbers): tsql.execute("create stable %s%d(ts timestamp, c1 int, c2 binary(10)) tags(t1 int)"%(stbname,i)) - threads.append(thd.Thread(target=self.create_tables, args=(host, dbname, stbname+"%d"%i, count,))) + threads.append(thd.Thread(target=self.create_tables, args=(host, dbname, stbname+"%d"%i, count,))) start_time = time.time() for tr in threads: tr.start() @@ -144,7 +144,7 @@ class TDTestCase: 
spendTime=end_time-start_time speedCreate=threadNumbers*count/spendTime tdLog.debug("spent %.2fs to create %d stable and %d table, create speed is %.2f table/s... [OK]"% (spendTime,threadNumbers,threadNumbers*count,speedCreate)) - + return # def create_tables(self,host,dbname,stbname,vgroups,tcountStart,tcountStop): @@ -171,7 +171,7 @@ class TDTestCase: # print(sql) tsql.execute(sql) sql = "insert into %s_%d values " %(stbname,i) - # end sql + # end sql if sql != pre_insert: # print(sql) print(len(sql)) @@ -186,7 +186,7 @@ class TDTestCase: def mutiThread_insert_data(self, host, dbname, stbname, threadNumbers, chilCount, ts_start, childrowcount): buildPath = self.getBuildPath() config = buildPath+ "../sim/dnode1/cfg/" - + tsql=self.newcur(host,config) tdLog.debug("ready to inser data") @@ -195,7 +195,7 @@ class TDTestCase: threads = [] for i in range(threadNumbers): # tsql.execute("create stable %s%d(ts timestamp, c1 int, c2 binary(10)) tags(t1 int)"%(stbname,i)) - threads.append(thd.Thread(target=self.insert_data, args=(host, dbname, stbname+"%d"%i, chilCount, ts_start, childrowcount,))) + threads.append(thd.Thread(target=self.insert_data, args=(host, dbname, stbname+"%d"%i, chilCount, ts_start, childrowcount,))) start_time = time.time() for tr in threads: tr.start() @@ -226,10 +226,10 @@ class TDTestCase: tdLog.info("taosd found in %s" % buildPath) taosBenchbin = buildPath+ "/build/bin/taosBenchmark" os.system("%s -f %s -y " %(taosBenchbin,jsonFile)) - + return def taosBenchCreate(self,host,dropdb,dbname,stbname,vgroups,processNumbers,count): - + # count=50000 buildPath = self.getBuildPath() config = buildPath+ "../sim/dnode1/cfg/" @@ -243,7 +243,7 @@ class TDTestCase: # tsql.getResult("show databases") # print(tdSql.queryResult) tsql.execute("use %s" %dbname) - + threads = [] for i in range(processNumbers): jsonfile="1-insert/Vgroups%d%d.json"%(vgroups,i) @@ -254,7 +254,7 @@ class TDTestCase: os.system("sed -i 's/\"childtable_count\": 10000,/\"childtable_count\": %d,/g' %s "%(count,jsonfile)) os.system("sed -i 's/\"name\": \"stb1\",/\"name\": \"%s%d\",/g' %s "%(stbname,i,jsonfile)) os.system("sed -i 's/\"childtable_prefix\": \"stb1_\",/\"childtable_prefix\": \"%s%d_\",/g' %s "%(stbname,i,jsonfile)) - threads.append(mp.Process(target=self.taosBench, args=("%s"%jsonfile,))) + threads.append(mp.Process(target=self.taosBench, args=("%s"%jsonfile,))) start_time = time.time() for tr in threads: tr.start() @@ -276,8 +276,8 @@ class TDTestCase: for i in range(stableCount): tdSql.query("select count(*) from %s%d"%(stbname,i)) tdSql.checkData(0,0,rowsPerSTable) - return - + return + # test case : Switch back and forth among the three queryPolicy(1\2\3) def test_case1(self): self.taosBenchCreate("127.0.0.1","no","db1", "stb1", 1, 2, 1*10) @@ -303,7 +303,7 @@ class TDTestCase: tdSql.execute("reset query cache") tdSql.query("select max(c1) from stb10;") tdSql.checkData(0, 0, "%s"%maxQnode) - tdSql.query("select min(c1) from stb11;") + tdSql.query("select min(c1) from stb11;") tdSql.checkData(0, 0, "%s"%minQnode) tdSql.query("select c0,c1 from stb11_1 where (c0>1000) union select c0,c1 from stb11_1 where c0>2000;") unionVnode=tdSql.queryResult @@ -352,8 +352,8 @@ class TDTestCase: tdSql.execute("reset query cache") tdSql.query("select max(c1) from stb10;") assert maxQnode==tdSql.getData(0,0) - tdSql.query("select min(c1) from stb11;") - assert minQnode==tdSql.getData(0,0) + tdSql.query("select min(c1) from stb11;") + assert minQnode==tdSql.getData(0,0) tdSql.error("select c0,c1 from stb11_1 where 
(c0>1000) union select c0,c1 from stb11_1 where c0>2000;") tdSql.error("select c0,c1 from stb11_1 where (c0>1000) union all select c0,c1 from stb11_1 where c0>2000;") @@ -417,7 +417,7 @@ class TDTestCase: tdLog.exit("alter queryPolicy to %d failed"%queryPolicy) tdSql.execute("use db1;") tdSql.error("select max(c1) from stb10;") - tdSql.error("select min(c1) from stb11;") + tdSql.error("select min(c1) from stb11;") tdSql.error("select c0,c1 from stb11_1 where (c0>1000) union select c0,c1 from stb11_1 where c0>2000;") tdSql.error("select c0,c1 from stb11_1 where (c0>1000) union all select c0,c1 from stb11_1 where c0>2000;") @@ -452,20 +452,20 @@ class TDTestCase: tdSql.execute("reset query cache") tdSql.error("select max(c1) from stb10;") - tdSql.error("select min(c1) from stb11;") + tdSql.error("select min(c1) from stb11;") tdSql.error("select c0,c1 from stb11_1 where (c0>1000) union select c0,c1 from stb11_1 where c0>2000;") tdSql.error("select c0,c1 from stb11_1 where (c0>1000) union all select c0,c1 from stb11_1 where c0>2000;") - # run case + # run case def run(self): # test qnode - tdLog.debug(" test_case1 ............ [start]") + tdLog.debug(" test_case1 ............ [start]") self.test_case1() tdLog.debug(" test_case1 ............ [OK]") - tdLog.debug(" test_case2 ............ [start]") + tdLog.debug(" test_case2 ............ [start]") self.test_case2() tdLog.debug(" test_case2 ............ [OK]") - tdLog.debug(" test_case3 ............ [start]") + tdLog.debug(" test_case3 ............ [start]") self.test_case3() tdLog.debug(" test_case3 ............ [OK]") @@ -475,7 +475,7 @@ class TDTestCase: tdSql.close() tdLog.success(f"{__file__} successfully executed") - return + return # # add case with filename # From dbe21e748ab503faa856d41949ce7321e7ccad75 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Wed, 13 Jul 2022 14:41:50 +0800 Subject: [PATCH 19/56] fix test cases --- tests/system-test/2-query/arcsin.py | 90 ++++++++++++++--------------- 1 file changed, 45 insertions(+), 45 deletions(-) diff --git a/tests/system-test/2-query/arcsin.py b/tests/system-test/2-query/arcsin.py index 80c89a47ab..31185ffcaa 100644 --- a/tests/system-test/2-query/arcsin.py +++ b/tests/system-test/2-query/arcsin.py @@ -9,14 +9,14 @@ from util.cases import * class TDTestCase: - updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , + updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143, "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143} def init(self, conn, powSql): tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor()) self.PI =3.1415926 - + def prepare_datas(self): tdSql.execute( '''create table stb1 @@ -24,7 +24,7 @@ class TDTestCase: tags (t1 int) ''' ) - + tdSql.execute( ''' create table t1 @@ -66,14 +66,14 @@ class TDTestCase: ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ''' ) - + def check_result_auto_asin(self ,origin_query , pow_query): pow_result = tdSql.getResult(pow_query) origin_result = tdSql.getResult(origin_query) auto_result =[] - + for row in origin_result: row_check = [] for elem in row: @@ -93,7 +93,7 @@ class TDTestCase: if auto_result[row_index][col_index] == None and not (auto_result[row_index][col_index] == None and elem == None): 
check_status = False elif auto_result[row_index][col_index] != None and (auto_result[row_index][col_index] - elem > 0.00000001): - check_status = False + check_status = False else: pass if not check_status: @@ -101,7 +101,7 @@ class TDTestCase: sys.exit(1) else: tdLog.info("asin value check pass , it work as expected ,sql is \"%s\" "%pow_query ) - + def test_errors(self): error_sql_lists = [ "select asin from t1", @@ -135,42 +135,42 @@ class TDTestCase: ] for error_sql in error_sql_lists: tdSql.error(error_sql) - + def support_types(self): type_error_sql_lists = [ - "select asin(ts) from t1" , + "select asin(ts) from t1" , "select asin(c7) from t1", "select asin(c8) from t1", "select asin(c9) from t1", - "select asin(ts) from ct1" , + "select asin(ts) from ct1" , "select asin(c7) from ct1", "select asin(c8) from ct1", "select asin(c9) from ct1", - "select asin(ts) from ct3" , + "select asin(ts) from ct3" , "select asin(c7) from ct3", "select asin(c8) from ct3", "select asin(c9) from ct3", - "select asin(ts) from ct4" , + "select asin(ts) from ct4" , "select asin(c7) from ct4", "select asin(c8) from ct4", "select asin(c9) from ct4", - "select asin(ts) from stb1" , + "select asin(ts) from stb1" , "select asin(c7) from stb1", "select asin(c8) from stb1", "select asin(c9) from stb1" , - "select asin(ts) from stbbb1" , + "select asin(ts) from stbbb1" , "select asin(c7) from stbbb1", "select asin(ts) from tbname", "select asin(c9) from tbname" ] - + for type_sql in type_error_sql_lists: tdSql.error(type_sql) - - + + type_sql_lists = [ "select asin(c1) from t1", "select asin(c2) from t1", @@ -200,16 +200,16 @@ class TDTestCase: "select asin(c5) from stb1", "select asin(c6) from stb1", - "select asin(c6) as alisb from stb1", - "select asin(c6) alisb from stb1", + "select asin(c6) as alisb from stb1", + "select asin(c6) alisb from stb1", ] for type_sql in type_sql_lists: tdSql.query(type_sql) - + def basic_asin_function(self): - # basic query + # basic query tdSql.query("select c1 from ct3") tdSql.checkRows(0) tdSql.query("select c1 from t1") @@ -250,7 +250,7 @@ class TDTestCase: tdSql.checkData(5, 5, None) self.check_result_auto_asin( "select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from t1", "select asin(abs(c1)), asin(abs(c2)) ,asin(abs(c3)), asin(abs(c4)), asin(abs(c5)) from t1") - + # used for sub table tdSql.query("select c2 ,asin(c2) from ct1") tdSql.checkData(0, 1, None) @@ -266,7 +266,7 @@ class TDTestCase: tdSql.checkData(5 , 2, None) self.check_result_auto_asin( "select c1, c2, c3 , c4, c5 from ct1", "select asin(c1), asin(c2) ,asin(c3), asin(c4), asin(c5) from ct1") - + # nest query for asin functions tdSql.query("select c4 , asin(c4) ,asin(asin(c4)) , asin(asin(asin(c4))) from ct1;") tdSql.checkData(0 , 0 , 88) @@ -284,21 +284,21 @@ class TDTestCase: tdSql.checkData(11 , 2 , None) tdSql.checkData(11 , 3 , None) - # used for stable table - + # used for stable table + tdSql.query("select asin(c1) from stb1") tdSql.checkRows(25) - + # used for not exists table tdSql.error("select asin(c1) from stbbb1") tdSql.error("select asin(c1) from tbname") tdSql.error("select asin(c1) from ct5") - # mix with common col + # mix with common col tdSql.query("select c1, asin(c1) from ct1") tdSql.query("select c2, asin(c2) from ct4") - + # mix with common functions tdSql.query("select c1, asin(c1),asin(c1), asin(asin(c1)) from ct4 ") @@ -306,7 +306,7 @@ class TDTestCase: tdSql.checkData(0 , 1 ,None) tdSql.checkData(0 , 2 ,None) tdSql.checkData(0 , 3 ,None) - + tdSql.checkData(3 , 0 , 6) 
tdSql.checkData(3 , 1 ,None) tdSql.checkData(3 , 2 ,None) @@ -327,8 +327,8 @@ class TDTestCase: tdSql.query("select max(c5), count(c5) from stb1") tdSql.query("select max(c5), count(c5) from ct1") - - # # bug fix for compute + + # # bug fix for compute tdSql.query("select c1, asin(c1) -0 ,asin(c1-4)-0 from ct4 ") tdSql.checkData(0, 0, None) tdSql.checkData(0, 1, None) @@ -397,10 +397,10 @@ class TDTestCase: tdSql.checkData(0,3,1.000000000) tdSql.checkData(0,4,0.900000000) tdSql.checkData(0,5,2) - + def pow_Arithmetic(self): pass - + def check_boundary_values(self): PI=3.1415926 @@ -429,11 +429,11 @@ class TDTestCase: f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) self.check_result_auto_asin( "select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from sub1_bound ", "select asin(abs(c1)), asin(abs(c2)) ,asin(abs(c3)), asin(abs(c4)), asin(abs(c5)) from sub1_bound") - + self.check_result_auto_asin( "select c1, c2, c3 , c3, c2 ,c1 from sub1_bound ", "select asin(c1), asin(c2) ,asin(c3), asin(c3), asin(c2) ,asin(c1) from sub1_bound") self.check_result_auto_asin("select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from sub1_bound" , "select asin(abs(c1)) from sub1_bound" ) - + # check basic elem for table per row tdSql.query("select asin(abs(c1)) ,asin(abs(c2)) , asin(abs(c3)) , asin(abs(c4)), asin(abs(c5)), asin(abs(c6)) from sub1_bound ") tdSql.checkData(0,0,None) @@ -492,37 +492,37 @@ class TDTestCase: self.check_result_auto_asin( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select asin(t1) ,asin(c5) from stb1 where c1 > 0 order by tbname" ) self.check_result_auto_asin( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select asin(t1) , asin(c5) from stb1 where c1 > 0 order by tbname" ) pass - - + + def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring tdSql.prepare() tdLog.printNoPrefix("==========step1:create table ==============") - + self.prepare_datas() - tdLog.printNoPrefix("==========step2:test errors ==============") + tdLog.printNoPrefix("==========step2:test errors ==============") self.test_errors() - - tdLog.printNoPrefix("==========step3:support types ============") + + tdLog.printNoPrefix("==========step3:support types ============") self.support_types() - tdLog.printNoPrefix("==========step4: asin basic query ============") + tdLog.printNoPrefix("==========step4: asin basic query ============") self.basic_asin_function() - tdLog.printNoPrefix("==========step5: big number asin query ============") + tdLog.printNoPrefix("==========step5: big number asin query ============") self.test_big_number() - tdLog.printNoPrefix("==========step6: asin boundary query ============") + tdLog.printNoPrefix("==========step6: asin boundary query ============") self.check_boundary_values() - tdLog.printNoPrefix("==========step7: asin filter query ============") + tdLog.printNoPrefix("==========step7: asin filter query ============") self.abs_func_filter() From 860091f4841c84a6f82e248fab2bdfddc219b64d Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Wed, 13 Jul 2022 14:41:50 +0800 Subject: [PATCH 20/56] fix test cases --- tests/system-test/2-query/diff.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/system-test/2-query/diff.py b/tests/system-test/2-query/diff.py index 0d8b0de3dc..30b588fa97 100644 --- a/tests/system-test/2-query/diff.py +++ b/tests/system-test/2-query/diff.py @@ -50,7 +50,7 @@ class 
TDTestCase: tdSql.checkData(0, 0, None) tdSql.checkData(1, 0, None) - tdSql.execute('''create table stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, + tdSql.execute('''create table stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''') tdSql.execute("create table stb_1 using stb tags('beijing')") tdSql.execute( @@ -115,7 +115,7 @@ class TDTestCase: tdSql.query("select diff(col6) from stb_1") tdSql.checkRows(10) - tdSql.execute('''create table stb1(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, + tdSql.execute('''create table stb1(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''') tdSql.execute("create table stb1_1 using stb tags('shanghai')") From b877cbbec97d81ed033ee6275388eef266455181 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Wed, 13 Jul 2022 14:41:50 +0800 Subject: [PATCH 21/56] fix test cases --- tests/system-test/2-query/arctan.py | 90 ++++++++++++++--------------- 1 file changed, 45 insertions(+), 45 deletions(-) diff --git a/tests/system-test/2-query/arctan.py b/tests/system-test/2-query/arctan.py index db59693425..4c729bd521 100644 --- a/tests/system-test/2-query/arctan.py +++ b/tests/system-test/2-query/arctan.py @@ -9,13 +9,13 @@ from util.cases import * class TDTestCase: - updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , + updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143, "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143} def init(self, conn, powSql): tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor()) - + def prepare_datas(self): tdSql.execute( '''create table stb1 @@ -23,7 +23,7 @@ class TDTestCase: tags (t1 int) ''' ) - + tdSql.execute( ''' create table t1 @@ -65,14 +65,14 @@ class TDTestCase: ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ''' ) - + def check_result_auto_atan(self ,origin_query , pow_query): pow_result = tdSql.getResult(pow_query) origin_result = tdSql.getResult(origin_query) auto_result =[] - + for row in origin_result: row_check = [] for elem in row: @@ -90,7 +90,7 @@ class TDTestCase: if auto_result[row_index][col_index] == None and not (auto_result[row_index][col_index] == None and elem == None): check_status = False elif auto_result[row_index][col_index] != None and (auto_result[row_index][col_index] - elem > 0.00000001): - check_status = False + check_status = False else: pass if not check_status: @@ -98,7 +98,7 @@ class TDTestCase: sys.exit(1) else: tdLog.info("atan value check pass , it work as expected ,sql is \"%s\" "%pow_query ) - + def test_errors(self): error_sql_lists = [ "select atan from t1", @@ -132,42 +132,42 @@ class TDTestCase: ] for error_sql in error_sql_lists: tdSql.error(error_sql) - + def support_types(self): type_error_sql_lists = [ - "select atan(ts) from t1" , + "select atan(ts) 
from t1" , "select atan(c7) from t1", "select atan(c8) from t1", "select atan(c9) from t1", - "select atan(ts) from ct1" , + "select atan(ts) from ct1" , "select atan(c7) from ct1", "select atan(c8) from ct1", "select atan(c9) from ct1", - "select atan(ts) from ct3" , + "select atan(ts) from ct3" , "select atan(c7) from ct3", "select atan(c8) from ct3", "select atan(c9) from ct3", - "select atan(ts) from ct4" , + "select atan(ts) from ct4" , "select atan(c7) from ct4", "select atan(c8) from ct4", "select atan(c9) from ct4", - "select atan(ts) from stb1" , + "select atan(ts) from stb1" , "select atan(c7) from stb1", "select atan(c8) from stb1", "select atan(c9) from stb1" , - "select atan(ts) from stbbb1" , + "select atan(ts) from stbbb1" , "select atan(c7) from stbbb1", "select atan(ts) from tbname", "select atan(c9) from tbname" ] - + for type_sql in type_error_sql_lists: tdSql.error(type_sql) - - + + type_sql_lists = [ "select atan(c1) from t1", "select atan(c2) from t1", @@ -197,16 +197,16 @@ class TDTestCase: "select atan(c5) from stb1", "select atan(c6) from stb1", - "select atan(c6) as alisb from stb1", - "select atan(c6) alisb from stb1", + "select atan(c6) as alisb from stb1", + "select atan(c6) alisb from stb1", ] for type_sql in type_sql_lists: tdSql.query(type_sql) - + def basic_atan_function(self): - # basic query + # basic query tdSql.query("select c1 from ct3") tdSql.checkRows(0) tdSql.query("select c1 from t1") @@ -247,7 +247,7 @@ class TDTestCase: tdSql.checkData(5, 5, None) self.check_result_auto_atan( "select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from t1", "select atan(abs(c1)), atan(abs(c2)) ,atan(abs(c3)), atan(abs(c4)), atan(abs(c5)) from t1") - + # used for sub table tdSql.query("select c2 ,atan(c2) from ct1") tdSql.checkData(0, 1, 1.570785077) @@ -263,7 +263,7 @@ class TDTestCase: tdSql.checkData(5 , 2, None) self.check_result_auto_atan( "select c1, c2, c3 , c4, c5 from ct1", "select atan(c1), atan(c2) ,atan(c3), atan(c4), atan(c5) from ct1") - + # nest query for atan functions tdSql.query("select c4 , atan(c4) ,atan(atan(c4)) , atan(atan(atan(c4))) from ct1;") tdSql.checkData(0 , 0 , 88) @@ -281,21 +281,21 @@ class TDTestCase: tdSql.checkData(11 , 2 , -1.000958403) tdSql.checkData(11 , 3 , -0.785877135) - # used for stable table - + # used for stable table + tdSql.query("select atan(c1) from stb1") tdSql.checkRows(25) - + # used for not exists table tdSql.error("select atan(c1) from stbbb1") tdSql.error("select atan(c1) from tbname") tdSql.error("select atan(c1) from ct5") - # mix with common col + # mix with common col tdSql.query("select c1, atan(c1) from ct1") tdSql.query("select c2, atan(c2) from ct4") - + # mix with common functions tdSql.query("select c1, atan(c1),atan(c1), atan(atan(c1)) from ct4 ") @@ -303,7 +303,7 @@ class TDTestCase: tdSql.checkData(0 , 1 ,None) tdSql.checkData(0 , 2 ,None) tdSql.checkData(0 , 3 ,None) - + tdSql.checkData(3 , 0 , 6) tdSql.checkData(3 , 1 ,1.405647649) tdSql.checkData(3 , 2 ,1.405647649) @@ -324,8 +324,8 @@ class TDTestCase: tdSql.query("select max(c5), count(c5) from stb1") tdSql.query("select max(c5), count(c5) from ct1") - - # # bug fix for compute + + # # bug fix for compute tdSql.query("select c1, atan(c1) -0 ,atan(c1-4)-0 from ct4 ") tdSql.checkData(0, 0, None) tdSql.checkData(0, 1, None) @@ -394,10 +394,10 @@ class TDTestCase: tdSql.checkData(0,3,0.000000000) tdSql.checkData(0,4,-0.100000000) tdSql.checkData(0,5,0.000000000) - + def pow_Arithmetic(self): pass - + def check_boundary_values(self): PI=3.1415926 
@@ -426,11 +426,11 @@ class TDTestCase: f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) self.check_result_auto_atan( "select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from sub1_bound ", "select atan(abs(c1)), atan(abs(c2)) ,atan(abs(c3)), atan(abs(c4)), atan(abs(c5)) from sub1_bound") - + self.check_result_auto_atan( "select c1, c2, c3 , c3, c2 ,c1 from sub1_bound ", "select atan(c1), atan(c2) ,atan(c3), atan(c3), atan(c2) ,atan(c1) from sub1_bound") self.check_result_auto_atan("select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from sub1_bound" , "select atan(abs(c1)) from sub1_bound" ) - + # check basic elem for table per row tdSql.query("select atan(abs(c1)) ,atan(abs(c2)) , atan(abs(c3)) , atan(abs(c4)), atan(abs(c5)), atan(abs(c6)) from sub1_bound ") tdSql.checkData(0,0,math.atan(2147483647)) @@ -490,36 +490,36 @@ class TDTestCase: self.check_result_auto_atan( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select atan(t1) ,atan(c5) from stb1 where c1 > 0 order by tbname" ) self.check_result_auto_atan( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select atan(t1) , atan(c5) from stb1 where c1 > 0 order by tbname" ) pass - + def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring tdSql.prepare() tdLog.printNoPrefix("==========step1:create table ==============") - + self.prepare_datas() - tdLog.printNoPrefix("==========step2:test errors ==============") + tdLog.printNoPrefix("==========step2:test errors ==============") self.test_errors() - - tdLog.printNoPrefix("==========step3:support types ============") + + tdLog.printNoPrefix("==========step3:support types ============") self.support_types() - tdLog.printNoPrefix("==========step4: atan basic query ============") + tdLog.printNoPrefix("==========step4: atan basic query ============") self.basic_atan_function() - tdLog.printNoPrefix("==========step5: big number atan query ============") + tdLog.printNoPrefix("==========step5: big number atan query ============") self.test_big_number() - tdLog.printNoPrefix("==========step6: atan boundary query ============") + tdLog.printNoPrefix("==========step6: atan boundary query ============") self.check_boundary_values() - tdLog.printNoPrefix("==========step7: atan filter query ============") + tdLog.printNoPrefix("==========step7: atan filter query ============") self.abs_func_filter() @@ -527,7 +527,7 @@ class TDTestCase: self.support_super_table_test() - + def stop(self): tdSql.close() From 5917c740368efc9add7d9b248a3f7db5c47c5893 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Wed, 13 Jul 2022 14:41:50 +0800 Subject: [PATCH 22/56] fix test cases --- tests/system-test/2-query/elapsed.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/system-test/2-query/elapsed.py b/tests/system-test/2-query/elapsed.py index d2f1331e00..333c60286e 100644 --- a/tests/system-test/2-query/elapsed.py +++ b/tests/system-test/2-query/elapsed.py @@ -894,7 +894,7 @@ class TDTestCase: tdSql.query(sql_common) results= query_datas[0] if operator == "+": - for data in query_datas[1:]: + for data in query_datas[1:]: results += data tdSql.checkData(0,0,results) From 2bc95eeb5f0b3426dabfe6dc7bb8a015552e64ad Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Wed, 13 Jul 2022 14:41:50 +0800 Subject: [PATCH 23/56] fix test cases --- tests/system-test/2-query/nestedQuery_str.py | 3502 +++++++++--------- 1 file changed, 1751 
insertions(+), 1751 deletions(-) diff --git a/tests/system-test/2-query/nestedQuery_str.py b/tests/system-test/2-query/nestedQuery_str.py index 6244b37ba4..0d40ef8147 100755 --- a/tests/system-test/2-query/nestedQuery_str.py +++ b/tests/system-test/2-query/nestedQuery_str.py @@ -24,10 +24,10 @@ from util.dnodes import tdDnodes from util.dnodes import * class TDTestCase: - updatecfgDict = {'maxSQLLength':1048576,'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , + updatecfgDict = {'maxSQLLength':1048576,'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143, "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143} - + def init(self, conn, logSql): tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor(), logSql) @@ -35,13 +35,13 @@ class TDTestCase: self.testcasePath = os.path.split(__file__)[0] self.testcaseFilename = os.path.split(__file__)[-1] os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename)) - + self.num = 10 self.fornum = 5 - + self.db_nest = "nest" self.dropandcreateDB_random("%s" %self.db_nest, 1) - + # regular column select #q_select= ['ts' , '*' , 'q_int', 'q_bigint' , 'q_bigint' , 'q_smallint' , 'q_tinyint' , 'q_bool' , 'q_binary' , 'q_nchar' ,'q_float' , 'q_double' ,'q_ts '] self.q_select= ['ts' , 'q_int', 'q_bigint' , 'q_bigint' , 'q_smallint' , 'q_tinyint' , 'q_bool' , 'q_binary' , 'q_nchar' ,'q_float' , 'q_double' ,'q_ts ', 'q_int_null ', 'q_bigint_null ' , 'q_bigint_null ' , 'q_smallint_null ' , 'q_tinyint_null ' , 'q_bool_null ' , 'q_binary_null ' , 'q_nchar_null ' ,'q_float_null ' , 'q_double_null ' ,'q_ts_null '] @@ -54,11 +54,11 @@ class TDTestCase: self.qt_select= self.q_select + self.t_select # distinct regular column select - self.dq_select= ['distinct q_int', 'distinct q_bigint' , 'distinct q_smallint' , 'distinct q_tinyint' , + self.dq_select= ['distinct q_int', 'distinct q_bigint' , 'distinct q_smallint' , 'distinct q_tinyint' , 'distinct q_bool' , 'distinct q_binary' , 'distinct q_nchar' ,'distinct q_float' , 'distinct q_double' ,'distinct q_ts '] # distinct tag column select - self.dt_select= ['distinct loc', 'distinct t_int', 'distinct t_bigint' , 'distinct t_smallint' , 'distinct t_tinyint' , + self.dt_select= ['distinct loc', 'distinct t_int', 'distinct t_bigint' , 'distinct t_smallint' , 'distinct t_tinyint' , 'distinct t_bool' , 'distinct t_binary' , 'distinct t_nchar' ,'distinct t_float' , 'distinct t_double' ,'distinct t_ts '] # distinct regular and tag column select @@ -69,13 +69,13 @@ class TDTestCase: self.s_s_select= ['tbname' , '_rowts' , '_c0', '_C0' ] self.unionall_or_union= [ ' union ' , ' union all ' ] - # regular column where + # regular column where self.q_where = ['ts < now +1s','q_bigint >= -9223372036854775807 and q_bigint <= 9223372036854775807', 'q_int <= 2147483647 and q_int >= -2147483647', - 'q_smallint >= -32767 and q_smallint <= 32767','q_tinyint >= -127 and q_tinyint <= 127','q_float >= -1.7E308 and q_float <= 1.7E308', - 'q_double >= -1.7E308 and q_double <= 1.7E308', 'q_binary like \'binary%\' or q_binary = \'0\' ' , 'q_nchar like \'nchar%\' or q_nchar = \'0\' ' , + 'q_smallint >= -32767 and q_smallint <= 32767','q_tinyint >= -127 and q_tinyint <= 127','q_float >= -1.7E308 and q_float <= 1.7E308', + 'q_double >= -1.7E308 and q_double <= 
1.7E308', 'q_binary like \'binary%\' or q_binary = \'0\' ' , 'q_nchar like \'nchar%\' or q_nchar = \'0\' ' , 'q_bool = true or q_bool = false' , 'q_bool in (0 , 1)' , 'q_bool in ( true , false)' , 'q_bool = 0 or q_bool = 1', - 'q_bigint between -9223372036854775807 and 9223372036854775807',' q_int between -2147483647 and 2147483647','q_smallint between -32767 and 32767', - 'q_bigint not between 9223372036854775807 and -9223372036854775807','q_int not between 2147483647 and -2147483647','q_smallint not between 32767 and -32767', + 'q_bigint between -9223372036854775807 and 9223372036854775807',' q_int between -2147483647 and 2147483647','q_smallint between -32767 and 32767', + 'q_bigint not between 9223372036854775807 and -9223372036854775807','q_int not between 2147483647 and -2147483647','q_smallint not between 32767 and -32767', 'q_tinyint between -127 and 127 ','q_float >= -3.4E38 ','q_float <= 3.4E38 ','q_double >= -1.7E308 ', 'q_double <= 1.7E308 ','q_float between -3.4E38 and 3.4E38 ','q_double between -1.7E308 and 1.7E308 ' ,'q_float not between 3.4E38 and -3.4E38 ','q_double not between 1.7E308 and -1.7E308 ', 'q_float is not null ' ,'q_double is not null ' ,'q_binary match \'binary\' ','q_binary nmatch \'binarynchar\' ','q_nchar match \'nchar\' ','q_nchar nmatch \'binarynchar\' ', @@ -88,38 +88,38 @@ class TDTestCase: 't1.q_smallint >= -32767 and t1.q_smallint <= 32767 and t2.q_smallint >= -32767 and t2.q_smallint <= 32767', 't1.q_tinyint >= -127 and t1.q_tinyint <= 127 and t2.q_tinyint >= -127 and t2.q_tinyint <= 127', 't1.q_float >= - 1.7E308 and t1.q_float <= 1.7E308 and t2.q_float >= - 1.7E308 and t2.q_float <= 1.7E308', - 't1.q_double >= - 1.7E308 and t1.q_double <= 1.7E308 and t2.q_double >= - 1.7E308 and t2.q_double <= 1.7E308', - 't1.q_binary like \'binary%\' and t2.q_binary like \'binary%\' ' , - 't1.q_nchar like \'nchar%\' and t2.q_nchar like \'nchar%\' ' , - 't1.q_bool in (0 , 1) and t2.q_bool in (0 , 1)' , 't1.q_bool in ( true , false) and t2.q_bool in ( true , false)' , + 't1.q_double >= - 1.7E308 and t1.q_double <= 1.7E308 and t2.q_double >= - 1.7E308 and t2.q_double <= 1.7E308', + 't1.q_binary like \'binary%\' and t2.q_binary like \'binary%\' ' , + 't1.q_nchar like \'nchar%\' and t2.q_nchar like \'nchar%\' ' , + 't1.q_bool in (0 , 1) and t2.q_bool in (0 , 1)' , 't1.q_bool in ( true , false) and t2.q_bool in ( true , false)' , 't1.q_bigint between -9223372036854775807 and 9223372036854775807 and t2.q_bigint between -9223372036854775807 and 9223372036854775807', 't1.q_int between -2147483647 and 2147483647 and t2.q_int between -2147483647 and 2147483647', - 't1.q_smallint between -32767 and 32767 and t2.q_smallint between -32767 and 32767', + 't1.q_smallint between -32767 and 32767 and t2.q_smallint between -32767 and 32767', 't1.q_tinyint between -127 and 127 and t2.q_tinyint between -127 and 127 ','t1.q_float between -1.7E308 and 1.7E308 and t2.q_float between -1.7E308 and 1.7E308', 't1.q_double between -1.7E308 and 1.7E308 and t2.q_double between -1.7E308 and 1.7E308', 't1.q_bigint not between 9223372036854775807 and -9223372036854775807 and t2.q_bigint not between 9223372036854775807 and -9223372036854775807', 't1.q_int not between 2147483647 and -2147483647 and t2.q_int not between 2147483647 and -2147483647', - 't1.q_smallint not between 32767 and -32767 and t2.q_smallint not between 32767 and -32767', + 't1.q_smallint not between 32767 and -32767 and t2.q_smallint not between 32767 and -32767', 't1.q_tinyint not between 127 and -127 and t2.q_tinyint not between 
127 and -127 ','t1.q_float not between -1.7E308 and -1.7E308 and t2.q_float not between 1.7E308 and -1.7E308', 't1.q_double not between 1.7E308 and -1.7E308 and t2.q_double not between 1.7E308 and -1.7E308'] #TD-6201 ,'t1.q_bool between 0 and 1 or t2.q_bool between 0 and 1'] #'t1.q_bool = true and t1.q_bool = false and t2.q_bool = true and t2.q_bool = false' , 't1.q_bool = 0 and t1.q_bool = 1 and t2.q_bool = 0 and t2.q_bool = 1' , - self.q_u_or_where = ['(t1.q_binary like \'binary%\' or t1.q_binary = \'0\' or t2.q_binary like \'binary%\' or t2.q_binary = \'0\' )' , - '(t1.q_nchar like \'nchar%\' or t1.q_nchar = \'0\' or t2.q_nchar like \'nchar%\' or t2.q_nchar = \'0\' )' , '(t1.q_bool = true or t1.q_bool = false or t2.q_bool = true or t2.q_bool = false)' , + self.q_u_or_where = ['(t1.q_binary like \'binary%\' or t1.q_binary = \'0\' or t2.q_binary like \'binary%\' or t2.q_binary = \'0\' )' , + '(t1.q_nchar like \'nchar%\' or t1.q_nchar = \'0\' or t2.q_nchar like \'nchar%\' or t2.q_nchar = \'0\' )' , '(t1.q_bool = true or t1.q_bool = false or t2.q_bool = true or t2.q_bool = false)' , '(t1.q_bool in (0 , 1) or t2.q_bool in (0 , 1))' , '(t1.q_bool in ( true , false) or t2.q_bool in ( true , false))' , '(t1.q_bool = 0 or t1.q_bool = 1 or t2.q_bool = 0 or t2.q_bool = 1)' , '(t1.q_bigint between -9223372036854775807 and 9223372036854775807 or t2.q_bigint between -9223372036854775807 and 9223372036854775807)', '(t1.q_int between -2147483647 and 2147483647 or t2.q_int between -2147483647 and 2147483647)', - '(t1.q_smallint between -32767 and 32767 or t2.q_smallint between -32767 and 32767)', + '(t1.q_smallint between -32767 and 32767 or t2.q_smallint between -32767 and 32767)', '(t1.q_tinyint between -127 and 127 or t2.q_tinyint between -127 and 127 )','(t1.q_float between -1.7E308 and 1.7E308 or t2.q_float between -1.7E308 and 1.7E308)', '(t1.q_double between -1.7E308 and 1.7E308 or t2.q_double between -1.7E308 and 1.7E308)'] # tag column where self.t_where = ['ts < now +1s','t_bigint >= -9223372036854775807 and t_bigint <= 9223372036854775807','t_int <= 2147483647 and t_int >= -2147483647', 't_smallint >= -32767 and t_smallint <= 32767','q_tinyint >= -127 and t_tinyint <= 127','t_float >= -1.7E308 and t_float <= 1.7E308', - 't_double >= -1.7E308 and t_double <= 1.7E308', 't_binary like \'binary%\' or t_binary = \'0\' ' , 't_nchar like \'nchar%\' or t_nchar = \'0\'' , + 't_double >= -1.7E308 and t_double <= 1.7E308', 't_binary like \'binary%\' or t_binary = \'0\' ' , 't_nchar like \'nchar%\' or t_nchar = \'0\'' , 't_bool = true or t_bool = false' , 't_bool in (0 , 1)' , 't_bool in ( true , false)' , 't_bool = 0 or t_bool = 1', - 't_bigint between -9223372036854775807 and 9223372036854775807',' t_int between -2147483647 and 2147483647','t_smallint between -32767 and 32767', + 't_bigint between -9223372036854775807 and 9223372036854775807',' t_int between -2147483647 and 2147483647','t_smallint between -32767 and 32767', 't_tinyint between -127 and 127 ','t_float between -1.7E308 and 1.7E308','t_double between -1.7E308 and 1.7E308', 't_binary match \'binary\' ','t_binary nmatch \'binarynchar\' ','t_nchar match \'nchar\' ','t_nchar nmatch \'binarynchar\' ', 't_binary like \'binary%\' ','t_nchar like \'nchar%\' ','(t_binary like \'binary%\' or t_nchar = \'0\' ) ','(t_nchar like \'nchar%\' or t_binary = \'0\' ) ',] @@ -131,27 +131,27 @@ class TDTestCase: 't1.t_smallint >= -32767 and t1.t_smallint <= 32767 and t2.t_smallint >= -32767 and t2.t_smallint <= 32767', 't1.t_tinyint >= -127 and t1.t_tinyint <= 
127 and t2.t_tinyint >= -127 and t2.t_tinyint <= 127', 't1.t_float >= -1.7E308 and t1.t_float <= 1.7E308 and t2.t_float >= -1.7E308 and t2.t_float <= 1.7E308', - 't1.t_double >= -1.7E308 and t1.t_double <= 1.7E308 and t2.t_double >= -1.7E308 and t2.t_double <= 1.7E308', - '(t1.t_binary like \'binary%\' or t1.t_binary = \'0\' or t2.t_binary like \'binary%\' or t2.t_binary = \'0\') ' , - '(t1.t_nchar like \'nchar%\' or t1.t_nchar = \'0\' or t2.t_nchar like \'nchar%\' or t2.t_nchar = \'0\' )' , '(t1.t_bool = true or t1.t_bool = false or t2.t_bool = true or t2.t_bool = false)' , + 't1.t_double >= -1.7E308 and t1.t_double <= 1.7E308 and t2.t_double >= -1.7E308 and t2.t_double <= 1.7E308', + '(t1.t_binary like \'binary%\' or t1.t_binary = \'0\' or t2.t_binary like \'binary%\' or t2.t_binary = \'0\') ' , + '(t1.t_nchar like \'nchar%\' or t1.t_nchar = \'0\' or t2.t_nchar like \'nchar%\' or t2.t_nchar = \'0\' )' , '(t1.t_bool = true or t1.t_bool = false or t2.t_bool = true or t2.t_bool = false)' , 't1.t_bool in (0 , 1) and t2.t_bool in (0 , 1)' , 't1.t_bool in ( true , false) and t2.t_bool in ( true , false)' , '(t1.t_bool = 0 or t1.t_bool = 1 or t2.t_bool = 0 or t2.t_bool = 1)', 't1.t_bigint between -9223372036854775807 and 9223372036854775807 and t2.t_bigint between -9223372036854775807 and 9223372036854775807', 't1.t_int between -2147483647 and 2147483647 and t2.t_int between -2147483647 and 2147483647', - 't1.t_smallint between -32767 and 32767 and t2.t_smallint between -32767 and 32767', + 't1.t_smallint between -32767 and 32767 and t2.t_smallint between -32767 and 32767', '(t1.t_tinyint between -127 and 127 and t2.t_tinyint between -127 and 127) ','t1.t_float between -1.7E308 and 1.7E308 and t2.t_float between -1.7E308 and 1.7E308', '(t1.t_double between -1.7E308 and 1.7E308 and t2.t_double between -1.7E308 and 1.7E308)'] #TD-6201,'t1.t_bool between 0 and 1 or t2.q_bool between 0 and 1'] - self.t_u_or_where = ['(t1.t_binary like \'binary%\' or t1.t_binary = \'0\' or t2.t_binary like \'binary%\' or t2.t_binary = \'0\' )' , - '(t1.t_nchar like \'nchar%\' or t1.t_nchar = \'0\' or t2.t_nchar like \'nchar%\' or t2.t_nchar = \'0\' )' , '(t1.t_bool = true or t1.t_bool = false or t2.t_bool = true or t2.t_bool = false)' , + self.t_u_or_where = ['(t1.t_binary like \'binary%\' or t1.t_binary = \'0\' or t2.t_binary like \'binary%\' or t2.t_binary = \'0\' )' , + '(t1.t_nchar like \'nchar%\' or t1.t_nchar = \'0\' or t2.t_nchar like \'nchar%\' or t2.t_nchar = \'0\' )' , '(t1.t_bool = true or t1.t_bool = false or t2.t_bool = true or t2.t_bool = false)' , '(t1.t_bool in (0 , 1) or t2.t_bool in (0 , 1))' , '(t1.t_bool in ( true , false) or t2.t_bool in ( true , false))' , '(t1.t_bool = 0 or t1.t_bool = 1 or t2.t_bool = 0 or t2.t_bool = 1)', '(t1.t_bigint between -9223372036854775807 and 9223372036854775807 or t2.t_bigint between -9223372036854775807 and 9223372036854775807)', '(t1.t_int between -2147483647 and 2147483647 or t2.t_int between -2147483647 and 2147483647)', - '(t1.t_smallint between -32767 and 32767 or t2.t_smallint between -32767 and 32767)', + '(t1.t_smallint between -32767 and 32767 or t2.t_smallint between -32767 and 32767)', '(t1.t_tinyint between -127 and 127 or t2.t_tinyint between -127 and 127 )','(t1.t_float between -1.7E308 and 1.7E308 or t2.t_float between -1.7E308 and 1.7E308)', '(t1.t_double between -1.7E308 and 1.7E308 or t2.t_double between -1.7E308 and 1.7E308)'] - # regular and tag column where + # regular and tag column where self.qt_where = self.q_where + self.t_where 
self.qt_u_where = self.q_u_where + self.t_u_where # now,qt_u_or_where is not support @@ -165,78 +165,78 @@ class TDTestCase: self.session_where = ['session(ts,10a)' , 'session(ts,10s)', 'session(ts,10m)' , 'session(ts,10h)','session(ts,10d)' , 'session(ts,10w)'] self.session_u_where = ['session(t1.ts,10a)' , 'session(t1.ts,10s)', 'session(t1.ts,10m)' , 'session(t1.ts,10h)','session(t1.ts,10d)' , 'session(t1.ts,10w)', 'session(t2.ts,10a)' , 'session(t2.ts,10s)', 'session(t2.ts,10m)' , 'session(t2.ts,10h)','session(t2.ts,10d)' , 'session(t2.ts,10w)'] - + self.fill_where = ['FILL(NONE)','FILL(PREV)','FILL(NULL)','FILL(LINEAR)','FILL(NEXT)','FILL(VALUE, 1.23)'] - + self.state_window = ['STATE_WINDOW(q_tinyint)','STATE_WINDOW(q_bigint)','STATE_WINDOW(q_int)','STATE_WINDOW(q_bool)','STATE_WINDOW(q_smallint)'] self.state_u_window = ['STATE_WINDOW(t1.q_tinyint)','STATE_WINDOW(t1.q_bigint)','STATE_WINDOW(t1.q_int)','STATE_WINDOW(t1.q_bool)','STATE_WINDOW(t1.q_smallint)', 'STATE_WINDOW(t2.q_tinyint)','STATE_WINDOW(t2.q_bigint)','STATE_WINDOW(t2.q_int)','STATE_WINDOW(t2.q_bool)','STATE_WINDOW(t2.q_smallint)'] - # order by where + # order by where self.order_where = ['order by ts' , 'order by ts asc'] self.order_u_where = ['order by t1.ts' , 'order by t1.ts asc' , 'order by t2.ts' , 'order by t2.ts asc'] self.order_desc_where = ['order by ts' , 'order by ts asc' , 'order by ts desc' ] self.orders_desc_where = ['order by ts' , 'order by ts asc' , 'order by ts desc' , 'order by loc' , 'order by loc asc' , 'order by loc desc'] - - self.group_where = ['group by tbname , loc' , 'group by tbname', 'group by tbname, t_bigint', 'group by tbname,t_int', 'group by tbname, t_smallint', 'group by tbname,t_tinyint', + + self.group_where = ['group by tbname , loc' , 'group by tbname', 'group by tbname, t_bigint', 'group by tbname,t_int', 'group by tbname, t_smallint', 'group by tbname,t_tinyint', 'group by tbname,t_float', 'group by tbname,t_double' , 'group by tbname,t_binary', 'group by tbname,t_nchar', 'group by tbname,t_bool' ,'group by tbname ,loc ,t_bigint', 'group by tbname,t_binary ,t_nchar ,t_bool' , 'group by tbname,t_int ,t_smallint ,t_tinyint' , 'group by tbname,t_float ,t_double ' , - 'PARTITION BY tbname , loc' , 'PARTITION BY tbname', 'PARTITION BY tbname, t_bigint', 'PARTITION BY tbname,t_int', 'PARTITION BY tbname, t_smallint', 'PARTITION BY tbname,t_tinyint', + 'PARTITION BY tbname , loc' , 'PARTITION BY tbname', 'PARTITION BY tbname, t_bigint', 'PARTITION BY tbname,t_int', 'PARTITION BY tbname, t_smallint', 'PARTITION BY tbname,t_tinyint', 'PARTITION BY tbname,t_float', 'PARTITION BY tbname,t_double' , 'PARTITION BY tbname,t_binary', 'PARTITION BY tbname,t_nchar', 'PARTITION BY tbname,t_bool' ,'PARTITION BY tbname ,loc ,t_bigint', 'PARTITION BY tbname,t_binary ,t_nchar ,t_bool' , 'PARTITION BY tbname,t_int ,t_smallint ,t_tinyint' , 'PARTITION BY tbname,t_float ,t_double '] - self.group_where_j = ['group by t1.loc' , 'group by t1.t_bigint', 'group by t1.t_int', 'group by t1.t_smallint', 'group by t1.t_tinyint', + self.group_where_j = ['group by t1.loc' , 'group by t1.t_bigint', 'group by t1.t_int', 'group by t1.t_smallint', 'group by t1.t_tinyint', 'group by t1.t_float', 'group by t1.t_double' , 'group by t1.t_binary', 'group by t1.t_nchar', 'group by t1.t_bool' ,'group by t1.loc ,t1.t_bigint', 'group by t1.t_binary ,t1.t_nchar ,t1.t_bool' , 'group by t1.t_int ,t1.t_smallint ,t1.t_tinyint' , 'group by t1.t_float ,t1.t_double ' , - 'PARTITION BY t1.loc' , 'PARTITION by t1.t_bigint', 'PARTITION by 
t1.t_int', 'PARTITION by t1.t_smallint', 'PARTITION by t1.t_tinyint', + 'PARTITION BY t1.loc' , 'PARTITION by t1.t_bigint', 'PARTITION by t1.t_int', 'PARTITION by t1.t_smallint', 'PARTITION by t1.t_tinyint', 'PARTITION by t1.t_float', 'PARTITION by t1.t_double' , 'PARTITION by t1.t_binary', 'PARTITION by t1.t_nchar', 'PARTITION by t1.t_bool' ,'PARTITION BY t1.loc ,t1.t_bigint', 'PARTITION by t1.t_binary ,t1.t_nchar ,t1.t_bool' , 'PARTITION by t1.t_int ,t1.t_smallint ,t1.t_tinyint' , 'PARTITION by t1.t_float ,t1.t_double ', - 'group by t2.loc' , 'group by t2.t_bigint', 'group by t2.t_int', 'group by t2.t_smallint', 'group by t2.t_tinyint', + 'group by t2.loc' , 'group by t2.t_bigint', 'group by t2.t_int', 'group by t2.t_smallint', 'group by t2.t_tinyint', 'group by t2.t_float', 'group by t2.t_double' , 'group by t2.t_binary', 'group by t2.t_nchar', 'group by t2.t_bool' ,'group by t2.loc ,t2.t_bigint', 'group by t2.t_binary ,t2.t_nchar ,t2.t_bool' , 'group by t2.t_int ,t2.t_smallint ,t2.t_tinyint' , 'group by t2.t_float ,t2.t_double ' , - 'PARTITION BY t2.loc' , 'PARTITION by t2.t_bigint', 'PARTITION by t2.t_int', 'PARTITION by t2.t_smallint', 'PARTITION by t2.t_tinyint', + 'PARTITION BY t2.loc' , 'PARTITION by t2.t_bigint', 'PARTITION by t2.t_int', 'PARTITION by t2.t_smallint', 'PARTITION by t2.t_tinyint', 'PARTITION by t2.t_float', 'PARTITION by t2.t_double' , 'PARTITION by t2.t_binary', 'PARTITION by t2.t_nchar', 'PARTITION by t2.t_bool' ,'PARTITION BY t2.loc ,t2.t_bigint', - 'PARTITION by t2.t_binary ,t2.t_nchar ,t2.t_bool' , 'PARTITION by t2.t_int ,t2.t_smallint ,t2.t_tinyint' , 'PARTITION by t2.t_float ,t2.t_double '] - - self.partiton_where = ['PARTITION BY tbname , loc' , 'PARTITION BY tbname', 'PARTITION BY tbname, t_bigint', 'PARTITION BY tbname,t_int', 'PARTITION BY tbname, t_smallint', 'PARTITION BY tbname,t_tinyint', + 'PARTITION by t2.t_binary ,t2.t_nchar ,t2.t_bool' , 'PARTITION by t2.t_int ,t2.t_smallint ,t2.t_tinyint' , 'PARTITION by t2.t_float ,t2.t_double '] + + self.partiton_where = ['PARTITION BY tbname , loc' , 'PARTITION BY tbname', 'PARTITION BY tbname, t_bigint', 'PARTITION BY tbname,t_int', 'PARTITION BY tbname, t_smallint', 'PARTITION BY tbname,t_tinyint', 'PARTITION BY tbname,t_float', 'PARTITION BY tbname,t_double' , 'PARTITION BY tbname,t_binary', 'PARTITION BY tbname,t_nchar', 'PARTITION BY tbname,t_bool' ,'PARTITION BY tbname ,loc ,t_bigint', 'PARTITION BY tbname,t_binary ,t_nchar ,t_bool' , 'PARTITION BY tbname,t_int ,t_smallint ,t_tinyint' , 'PARTITION BY tbname,t_float ,t_double '] - self.partiton_where_j = ['PARTITION BY t1.loc' , 'PARTITION by t1.t_bigint', 'PARTITION by t1.t_int', 'PARTITION by t1.t_smallint', 'PARTITION by t1.t_tinyint', + self.partiton_where_j = ['PARTITION BY t1.loc' , 'PARTITION by t1.t_bigint', 'PARTITION by t1.t_int', 'PARTITION by t1.t_smallint', 'PARTITION by t1.t_tinyint', 'PARTITION by t1.t_float', 'PARTITION by t1.t_double' , 'PARTITION by t1.t_binary', 'PARTITION by t1.t_nchar', 'PARTITION by t1.t_bool' ,'PARTITION BY t1.loc ,t1.t_bigint', 'PARTITION by t1.t_binary ,t1.t_nchar ,t1.t_bool' , 'PARTITION by t1.t_int ,t1.t_smallint ,t1.t_tinyint' , 'PARTITION by t1.t_float ,t1.t_double ', - 'PARTITION BY t2.loc' , 'PARTITION by t2.t_bigint', 'PARTITION by t2.t_int', 'PARTITION by t2.t_smallint', 'PARTITION by t2.t_tinyint', + 'PARTITION BY t2.loc' , 'PARTITION by t2.t_bigint', 'PARTITION by t2.t_int', 'PARTITION by t2.t_smallint', 'PARTITION by t2.t_tinyint', 'PARTITION by t2.t_float', 'PARTITION by t2.t_double' , 'PARTITION by 
t2.t_binary', 'PARTITION by t2.t_nchar', 'PARTITION by t2.t_bool' ,'PARTITION BY t2.loc ,t2.t_bigint', - 'PARTITION by t2.t_binary ,t2.t_nchar ,t2.t_bool' , 'PARTITION by t2.t_int ,t2.t_smallint ,t2.t_tinyint' , 'PARTITION by t2.t_float ,t2.t_double '] + 'PARTITION by t2.t_binary ,t2.t_nchar ,t2.t_bool' , 'PARTITION by t2.t_int ,t2.t_smallint ,t2.t_tinyint' , 'PARTITION by t2.t_float ,t2.t_double '] - - self.group_where_regular = ['group by tbname ' , 'group by tbname', 'group by tbname, q_bigint', 'group by tbname,q_int', 'group by tbname, q_smallint', 'group by tbname,q_tinyint', + + self.group_where_regular = ['group by tbname ' , 'group by tbname', 'group by tbname, q_bigint', 'group by tbname,q_int', 'group by tbname, q_smallint', 'group by tbname,q_tinyint', 'group by tbname,q_float', 'group by tbname,q_double' , 'group by tbname,q_binary', 'group by tbname,q_nchar', 'group by tbname,q_bool' ,'group by tbname ,q_bigint', 'group by tbname,q_binary ,q_nchar ,q_bool' , 'group by tbname,q_int ,q_smallint ,q_tinyint' , 'group by tbname,q_float ,q_double ' , - 'PARTITION BY tbname ' , 'PARTITION BY tbname', 'PARTITION BY tbname, q_bigint', 'PARTITION BY tbname,q_int', 'PARTITION BY tbname, q_smallint', 'PARTITION BY tbname,q_tinyint', + 'PARTITION BY tbname ' , 'PARTITION BY tbname', 'PARTITION BY tbname, q_bigint', 'PARTITION BY tbname,q_int', 'PARTITION BY tbname, q_smallint', 'PARTITION BY tbname,q_tinyint', 'PARTITION BY tbname,q_float', 'PARTITION BY tbname,q_double' , 'PARTITION BY tbname,q_binary', 'PARTITION BY tbname,q_nchar', 'PARTITION BY tbname,q_bool' ,'PARTITION BY tbname ,q_bigint', 'PARTITION BY tbname,q_binary ,q_nchar ,q_bool' , 'PARTITION BY tbname,q_int ,q_smallint ,q_tinyint' , 'PARTITION BY tbname,q_float ,q_double '] - self.group_where_regular_j = ['group by t1.q_bigint', 'group by t1.q_int', 'group by t1.q_smallint', 'group by t1.q_tinyint', + self.group_where_regular_j = ['group by t1.q_bigint', 'group by t1.q_int', 'group by t1.q_smallint', 'group by t1.q_tinyint', 'group by t1.q_float', 'group by t1.q_double' , 'group by t1.q_binary', 'group by t1.q_nchar', 'group by t1.q_bool' ,'group by t1.q_bigint', 'group by t1.q_binary ,t1.q_nchar ,t1.q_bool' , 'group by t1.q_int ,t1.q_smallint ,t1.q_tinyint' , 'group by t1.q_float ,t1.q_double ' , - 'PARTITION by t1.q_bigint', 'PARTITION by t1.q_int', 'PARTITION by t1.q_smallint', 'PARTITION by t1.q_tinyint', + 'PARTITION by t1.q_bigint', 'PARTITION by t1.q_int', 'PARTITION by t1.q_smallint', 'PARTITION by t1.q_tinyint', 'PARTITION by t1.q_float', 'PARTITION by t1.q_double' , 'PARTITION by t1.q_binary', 'PARTITION by t1.q_nchar', 'PARTITION by t1.q_bool' ,'PARTITION BY t1.q_bigint', 'PARTITION by t1.q_binary ,t1.q_nchar ,t1.q_bool' , 'PARTITION by t1.q_int ,t1.q_smallint ,t1.q_tinyint' , 'PARTITION by t1.q_float ,t1.q_double ', - 'group by t2.q_bigint', 'group by t2.q_int', 'group by t2.q_smallint', 'group by t2.q_tinyint', + 'group by t2.q_bigint', 'group by t2.q_int', 'group by t2.q_smallint', 'group by t2.q_tinyint', 'group by t2.q_float', 'group by t2.q_double' , 'group by t2.q_binary', 'group by t2.q_nchar', 'group by t2.q_bool' ,'group by t2.q_bigint', 'group by t2.q_binary ,t2.q_nchar ,t2.q_bool' , 'group by t2.q_int ,t2.q_smallint ,t2.q_tinyint' , 'group by t2.q_float ,t2.q_double ' , - 'PARTITION by t2.q_bigint', 'PARTITION by t2.q_int', 'PARTITION by t2.q_smallint', 'PARTITION by t2.q_tinyint', + 'PARTITION by t2.q_bigint', 'PARTITION by t2.q_int', 'PARTITION by t2.q_smallint', 'PARTITION by t2.q_tinyint', 
'PARTITION by t2.q_float', 'PARTITION by t2.q_double' , 'PARTITION by t2.q_binary', 'PARTITION by t2.q_nchar', 'PARTITION by t2.q_bool' ,'PARTITION BY t2.q_bigint', - 'PARTITION by t2.q_binary ,t2.q_nchar ,t2.q_bool' , 'PARTITION by t2.q_int ,t2.q_smallint ,t2.q_tinyint' , 'PARTITION by t2.q_float ,t2.q_double '] - - self.partiton_where_regular = ['PARTITION BY tbname ' , 'PARTITION BY tbname', 'PARTITION BY tbname, q_bigint', 'PARTITION BY tbname,q_int', 'PARTITION BY tbname, q_smallint', 'PARTITION BY tbname,q_tinyint', + 'PARTITION by t2.q_binary ,t2.q_nchar ,t2.q_bool' , 'PARTITION by t2.q_int ,t2.q_smallint ,t2.q_tinyint' , 'PARTITION by t2.q_float ,t2.q_double '] + + self.partiton_where_regular = ['PARTITION BY tbname ' , 'PARTITION BY tbname', 'PARTITION BY tbname, q_bigint', 'PARTITION BY tbname,q_int', 'PARTITION BY tbname, q_smallint', 'PARTITION BY tbname,q_tinyint', 'PARTITION BY tbname,q_float', 'PARTITION BY tbname,q_double' , 'PARTITION BY tbname,q_binary', 'PARTITION BY tbname,q_nchar', 'PARTITION BY tbname,q_bool' ,'PARTITION BY tbname ,q_bigint', 'PARTITION BY tbname,q_binary ,q_nchar ,q_bool' , 'PARTITION BY tbname,q_int ,q_smallint ,q_tinyint' , 'PARTITION BY tbname,q_float ,q_double '] - self.partiton_where_regular_j = ['PARTITION by t1.q_bigint', 'PARTITION by t1.q_int', 'PARTITION by t1.q_smallint', 'PARTITION by t1.q_tinyint', + self.partiton_where_regular_j = ['PARTITION by t1.q_bigint', 'PARTITION by t1.q_int', 'PARTITION by t1.q_smallint', 'PARTITION by t1.q_tinyint', 'PARTITION by t1.q_float', 'PARTITION by t1.q_double' , 'PARTITION by t1.q_binary', 'PARTITION by t1.q_nchar', 'PARTITION by t1.q_bool' ,'PARTITION BY t1.q_bigint', 'PARTITION by t1.q_binary ,t1.q_nchar ,t1.q_bool' , 'PARTITION by t1.q_int ,t1.q_smallint ,t1.q_tinyint' , 'PARTITION by t1.q_float ,t1.q_double ', - 'PARTITION by t2.q_bigint', 'PARTITION by t2.q_int', 'PARTITION by t2.q_smallint', 'PARTITION by t2.q_tinyint', + 'PARTITION by t2.q_bigint', 'PARTITION by t2.q_int', 'PARTITION by t2.q_smallint', 'PARTITION by t2.q_tinyint', 'PARTITION by t2.q_float', 'PARTITION by t2.q_double' , 'PARTITION by t2.q_binary', 'PARTITION by t2.q_nchar', 'PARTITION by t2.q_bool' ,'PARTITION BY t2.q_bigint', - 'PARTITION by t2.q_binary ,t2.q_nchar ,t2.q_bool' , 'PARTITION by t2.q_int ,t2.q_smallint ,t2.q_tinyint' , 'PARTITION by t2.q_float ,t2.q_double '] - + 'PARTITION by t2.q_binary ,t2.q_nchar ,t2.q_bool' , 'PARTITION by t2.q_int ,t2.q_smallint ,t2.q_tinyint' , 'PARTITION by t2.q_float ,t2.q_double '] + self.having_support = ['having count(q_int) > 0','having count(q_bigint) > 0','having count(q_smallint) > 0','having count(q_tinyint) > 0','having count(q_float) > 0','having count(q_double) > 0','having count(q_bool) > 0', 'having avg(q_int) > 0','having avg(q_bigint) > 0','having avg(q_smallint) > 0','having avg(q_tinyint) > 0','having avg(q_float) > 0','having avg(q_double) > 0', 'having sum(q_int) > 0','having sum(q_bigint) > 0','having sum(q_smallint) > 0','having sum(q_tinyint) > 0','having sum(q_float) > 0','having sum(q_double) > 0', @@ -251,9 +251,9 @@ class TDTestCase: self.having_not_support = ['having TOP(q_int,10) > 0','having TOP(q_bigint,10) > 0','having TOP(q_smallint,10) > 0','having TOP(q_tinyint,10) > 0','having TOP(q_float,10) > 0','having TOP(q_double,10) > 0','having TOP(q_bool,10) > 0', 'having BOTTOM(q_int,10) > 0','having BOTTOM(q_bigint,10) > 0','having BOTTOM(q_smallint,10) > 0','having BOTTOM(q_tinyint,10) > 0','having BOTTOM(q_float,10) > 0','having BOTTOM(q_double,10) > 
0','having BOTTOM(q_bool,10) > 0', 'having LEASTSQUARES(q_int) > 0','having LEASTSQUARES(q_bigint) > 0','having LEASTSQUARES(q_smallint) > 0','having LEASTSQUARES(q_tinyint) > 0','having LEASTSQUARES(q_float) > 0','having LEASTSQUARES(q_double) > 0','having LEASTSQUARES(q_bool) > 0', - 'having FIRST(q_bool) > 0','having IRATE(q_bool) > 0','having PERCENTILE(q_bool,10) > 0','having avg(q_bool) > 0','having LAST_ROW(q_bool) > 0','having sum(q_bool) > 0','having STDDEV(q_bool) > 0','having APERCENTILE(q_bool,10) > 0','having TWA(q_bool) > 0','having LAST(q_bool) > 0', + 'having FIRST(q_bool) > 0','having IRATE(q_bool) > 0','having PERCENTILE(q_bool,10) > 0','having avg(q_bool) > 0','having LAST_ROW(q_bool) > 0','having sum(q_bool) > 0','having STDDEV(q_bool) > 0','having APERCENTILE(q_bool,10) > 0','having TWA(q_bool) > 0','having LAST(q_bool) > 0', 'having PERCENTILE(q_int,10) > 0','having PERCENTILE(q_bigint,10) > 0','having PERCENTILE(q_smallint,10) > 0','having PERCENTILE(q_tinyint,10) > 0','having PERCENTILE(q_float,10) > 0','having PERCENTILE(q_double,10) > 0'] - self.having_tagnot_support = ['having LAST_ROW(q_int) > 0','having LAST_ROW(q_bigint) > 0','having LAST_ROW(q_smallint) > 0','having LAST_ROW(q_tinyint) > 0','having LAST_ROW(q_float) > 0','having LAST_ROW(q_double) > 0'] + self.having_tagnot_support = ['having LAST_ROW(q_int) > 0','having LAST_ROW(q_bigint) > 0','having LAST_ROW(q_smallint) > 0','having LAST_ROW(q_tinyint) > 0','having LAST_ROW(q_float) > 0','having LAST_ROW(q_double) > 0'] self.having_support_j = ['having count(t1.q_int) > 0','having count(t1.q_bigint) > 0','having count(t1.q_smallint) > 0','having count(t1.q_tinyint) > 0','having count(t1.q_float) > 0','having count(t1.q_double) > 0','having count(t1.q_bool) > 0', 'having avg(t1.q_int) > 0','having avg(t1.q_bigint) > 0','having avg(t1.q_smallint) > 0','having avg(t1.q_tinyint) > 0','having avg(t1.q_float) > 0','having avg(t1.q_double) > 0', @@ -266,7 +266,7 @@ class TDTestCase: 'having FIRST(t1.q_int) > 0','having FIRST(t1.q_bigint) > 0','having FIRST(t1.q_smallint) > 0','having FIRST(t1.q_tinyint) > 0','having FIRST(t1.q_float) > 0','having FIRST(t1.q_double) > 0', 'having LAST(t1.q_int) > 0','having LAST(t1.q_bigint) > 0','having LAST(t1.q_smallint) > 0','having LAST(t1.q_tinyint) > 0','having LAST(t1.q_float) > 0','having LAST(t1.q_double) > 0', 'having APERCENTILE(t1.q_int,10) > 0','having APERCENTILE(t1.q_bigint,10) > 0','having APERCENTILE(t1.q_smallint,10) > 0','having APERCENTILE(t1.q_tinyint,10) > 0','having APERCENTILE(t1.q_float,10) > 0','having APERCENTILE(t1.q_double,10) > 0'] - + # limit offset where self.limit_where = ['limit 1 offset 1' , 'limit 1' , 'limit 2 offset 1' , 'limit 2', 'limit 12 offset 1' , 'limit 20', 'limit 20 offset 10' , 'limit 200'] self.limit1_where = ['limit 1 offset 1' , 'limit 1' ] @@ -275,8 +275,8 @@ class TDTestCase: # slimit soffset where self.slimit_where = ['slimit 1 soffset 1' , 'slimit 1' , 'slimit 2 soffset 1' , 'slimit 2'] self.slimit1_where = ['slimit 2 soffset 1' , 'slimit 1' ] - - # aggregate function include [all:count(*)\avg\sum\stddev ||regualr:twa\irate\leastsquares ||group by tbname:twa\irate\] + + # aggregate function include [all:count(*)\avg\sum\stddev ||regualr:twa\irate\leastsquares ||group by tbname:twa\irate\] # select function include [all: min\max\first(*)\last(*)\top\bottom\apercentile\last_row(*)(not with interval)\interp(*)(FILL) ||regualr: percentile] # calculation function include [all:spread\+-*/ ||regualr:diff\derivative ||group by 
tbname:diff\derivative\] # **_ns_** express is not support stable, therefore, separated from regular tables @@ -286,7 +286,7 @@ class TDTestCase: # calc_select_all calc_select_regular calc_select_in_ts calc_select_fill calc_select_not_interval # select function include [all: min\max\first(*)\last(*)\top\bottom\apercentile\last_row(*)(not with interval)\interp(*)(FILL) ||regualr: percentile] - + self.calc_select_all = ['bottom(q_int,20)' , 'bottom(q_bigint,20)' , 'bottom(q_smallint,20)' , 'bottom(q_tinyint,20)' ,'bottom(q_float,20)' , 'bottom(q_double,20)' , 'top(q_int,20)' , 'top(q_bigint,20)' , 'top(q_smallint,20)' ,'top(q_tinyint,20)' ,'top(q_float,20)' ,'top(q_double,20)' , 'first(q_int)' , 'first(q_bigint)' , 'first(q_smallint)' , 'first(q_tinyint)' , 'first(q_float)' ,'first(q_double)' ,'first(q_binary)' ,'first(q_nchar)' ,'first(q_bool)' ,'first(q_ts)' , @@ -294,7 +294,7 @@ class TDTestCase: 'min(q_int)' , 'min(q_bigint)' , 'min(q_smallint)' , 'min(q_tinyint)' , 'min(q_float)' ,'min(q_double)' , 'max(q_int)' , 'max(q_bigint)' , 'max(q_smallint)' , 'max(q_tinyint)' ,'max(q_float)' ,'max(q_double)' , 'apercentile(q_int,20)' , 'apercentile(q_bigint,20)' ,'apercentile(q_smallint,20)' ,'apercentile(q_tinyint,20)' ,'apercentile(q_float,20)' ,'apercentile(q_double,20)' , - 'last_row(q_int)' , 'last_row(q_bigint)' , 'last_row(q_smallint)' , 'last_row(q_tinyint)' , 'last_row(q_float)' , + 'last_row(q_int)' , 'last_row(q_bigint)' , 'last_row(q_smallint)' , 'last_row(q_tinyint)' , 'last_row(q_float)' , 'last_row(q_double)' , 'last_row(q_bool)' ,'last_row(q_binary)' ,'last_row(q_nchar)' ,'last_row(q_ts)'] self.calc_select_in_ts = ['bottom(q_int,20)' , 'bottom(q_bigint,20)' , 'bottom(q_smallint,20)' , 'bottom(q_tinyint,20)' ,'bottom(q_float,20)' , 'bottom(q_double,20)' , @@ -305,23 +305,23 @@ class TDTestCase: self.calc_select_in = ['min(q_int)' , 'min(q_bigint)' , 'min(q_smallint)' , 'min(q_tinyint)' , 'min(q_float)' ,'min(q_double)' , 'max(q_int)' , 'max(q_bigint)' , 'max(q_smallint)' , 'max(q_tinyint)' ,'max(q_float)' ,'max(q_double)' , 'apercentile(q_int,20)' , 'apercentile(q_bigint,20)' ,'apercentile(q_smallint,20)' ,'apercentile(q_tinyint,20)' ,'apercentile(q_float,20)' ,'apercentile(q_double,20)' , - 'last_row(q_int)' , 'last_row(q_bigint)' , 'last_row(q_smallint)' , 'last_row(q_tinyint)' , 'last_row(q_float)' , + 'last_row(q_int)' , 'last_row(q_bigint)' , 'last_row(q_smallint)' , 'last_row(q_tinyint)' , 'last_row(q_float)' , 'last_row(q_double)' , 'last_row(q_bool)' ,'last_row(q_binary)' ,'last_row(q_nchar)' ,'last_row(q_ts)'] - + self.calc_select_not_support_ts = ['first(q_int)' , 'first(q_bigint)' , 'first(q_smallint)' , 'first(q_tinyint)' , 'first(q_float)' ,'first(q_double)' ,'first(q_binary)' ,'first(q_nchar)' ,'first(q_bool)' ,'first(q_ts)' , 'last(q_int)' , 'last(q_bigint)' , 'last(q_smallint)' , 'last(q_tinyint)' , 'last(q_float)' ,'last(q_double)' , 'last(q_binary)' ,'last(q_nchar)' ,'last(q_bool)' ,'last(q_ts)' , - 'last_row(q_int)' , 'last_row(q_bigint)' , 'last_row(q_smallint)' , 'last_row(q_tinyint)' , 'last_row(q_float)' , + 'last_row(q_int)' , 'last_row(q_bigint)' , 'last_row(q_smallint)' , 'last_row(q_tinyint)' , 'last_row(q_float)' , 'last_row(q_double)' , 'last_row(q_bool)' ,'last_row(q_binary)' ,'last_row(q_nchar)' ,'last_row(q_ts)', - 'apercentile(q_int,20)' , 'apercentile(q_bigint,20)' ,'apercentile(q_smallint,20)' ,'apercentile(q_tinyint,20)' ,'apercentile(q_float,20)' ,'apercentile(q_double,20)'] - + 'apercentile(q_int,20)' , 'apercentile(q_bigint,20)' 
,'apercentile(q_smallint,20)' ,'apercentile(q_tinyint,20)' ,'apercentile(q_float,20)' ,'apercentile(q_double,20)'] + self.calc_select_support_ts = ['bottom(q_int,20)' , 'bottom(q_bigint,20)' , 'bottom(q_smallint,20)' , 'bottom(q_tinyint,20)' ,'bottom(q_float,20)' , 'bottom(q_double,20)' , 'top(q_int,20)' , 'top(q_bigint,20)' , 'top(q_smallint,20)' ,'top(q_tinyint,20)' ,'top(q_float,20)' ,'top(q_double,20)' , 'min(q_int)' , 'min(q_bigint)' , 'min(q_smallint)' , 'min(q_tinyint)' , 'min(q_float)' ,'min(q_double)' , 'max(q_int)' , 'max(q_bigint)' , 'max(q_smallint)' , 'max(q_tinyint)' ,'max(q_float)' ,'max(q_double)' ] - + self.calc_select_regular = [ 'PERCENTILE(q_int,10)' ,'PERCENTILE(q_bigint,20)' , 'PERCENTILE(q_smallint,30)' ,'PERCENTILE(q_tinyint,40)' ,'PERCENTILE(q_float,50)' ,'PERCENTILE(q_double,60)'] - + self.calc_select_fill = ['INTERP(q_int)' ,'INTERP(q_bigint)' ,'INTERP(q_smallint)' ,'INTERP(q_tinyint)', 'INTERP(q_float)' ,'INTERP(q_double)'] self.interp_where = ['ts = now' , 'ts = \'2020-09-13 20:26:40.000\'' , 'ts = \'2020-09-13 20:26:40.009\'' ,'tbname in (\'table_1\') and ts = now' ,'tbname in (\'table_0\' ,\'table_1\',\'table_2\',\'table_3\',\'table_4\',\'table_5\') and ts = \'2020-09-13 20:26:40.000\'','tbname like \'table%\' and ts = \'2020-09-13 20:26:40.002\''] @@ -346,28 +346,28 @@ class TDTestCase: ] self.calc_select_in_not_support_ts_j = ['apercentile(t1.q_int,20)' , 'apercentile(t1.q_bigint,20)' ,'apercentile(t1.q_smallint,20)' ,'apercentile(t1.q_tinyint,20)' ,'apercentile(t1.q_float,20)' ,'apercentile(t1.q_double,20)' , - 'last_row(t1.q_int)' , 'last_row(t1.q_bigint)' , 'last_row(t1.q_smallint)' , 'last_row(t1.q_tinyint)' , 'last_row(t1.q_float)' , + 'last_row(t1.q_int)' , 'last_row(t1.q_bigint)' , 'last_row(t1.q_smallint)' , 'last_row(t1.q_tinyint)' , 'last_row(t1.q_float)' , 'last_row(t1.q_double)' , 'last_row(t1.q_bool)' ,'last_row(t1.q_binary)' ,'last_row(t1.q_nchar)' ,'last_row(t1.q_ts)' , 'apercentile(t2.q_int,20)' , 'apercentile(t2.q_bigint,20)' ,'apercentile(t2.q_smallint,20)' ,'apercentile(t2.q_tinyint,20)' ,'apercentile(t2.q_float,20)' ,'apercentile(t2.q_double,20)' , - 'last_row(t2.q_int)' , 'last_row(t2.q_bigint)' , 'last_row(t2.q_smallint)' , 'last_row(t2.q_tinyint)' , 'last_row(t2.q_float)' , + 'last_row(t2.q_int)' , 'last_row(t2.q_bigint)' , 'last_row(t2.q_smallint)' , 'last_row(t2.q_tinyint)' , 'last_row(t2.q_float)' , 'last_row(t2.q_double)' , 'last_row(t2.q_bool)' ,'last_row(t2.q_binary)' ,'last_row(t2.q_nchar)' ,'last_row(t2.q_ts)'] self.calc_select_in_j = ['min(t1.q_int)' , 'min(t1.q_bigint)' , 'min(t1.q_smallint)' , 'min(t1.q_tinyint)' , 'min(t1.q_float)' ,'min(t1.q_double)' , 'max(t1.q_int)' , 'max(t1.q_bigint)' , 'max(t1.q_smallint)' , 'max(t1.q_tinyint)' ,'max(t1.q_float)' ,'max(t1.q_double)' , 'apercentile(t1.q_int,20)' , 'apercentile(t1.q_bigint,20)' ,'apercentile(t1.q_smallint,20)' ,'apercentile(t1.q_tinyint,20)' ,'apercentile(t1.q_float,20)' ,'apercentile(t1.q_double,20)' , - 'last_row(t1.q_int)' , 'last_row(t1.q_bigint)' , 'last_row(t1.q_smallint)' , 'last_row(t1.q_tinyint)' , 'last_row(t1.q_float)' , + 'last_row(t1.q_int)' , 'last_row(t1.q_bigint)' , 'last_row(t1.q_smallint)' , 'last_row(t1.q_tinyint)' , 'last_row(t1.q_float)' , 'last_row(t1.q_double)' , 'last_row(t1.q_bool)' ,'last_row(t1.q_binary)' ,'last_row(t1.q_nchar)' ,'last_row(t1.q_ts)' , 'min(t2.q_int)' , 'min(t2.q_bigint)' , 'min(t2.q_smallint)' , 'min(t2.q_tinyint)' , 'min(t2.q_float)' ,'min(t2.q_double)' , 'max(t2.q_int)' , 'max(t2.q_bigint)' , 'max(t2.q_smallint)' , 
'max(t2.q_tinyint)' ,'max(t2.q_float)' ,'max(t2.q_double)' , 'apercentile(t2.q_int,20)' , 'apercentile(t2.q_bigint,20)' ,'apercentile(t2.q_smallint,20)' ,'apercentile(t2.q_tinyint,20)' ,'apercentile(t2.q_float,20)' ,'apercentile(t2.q_double,20)' , - 'last_row(t2.q_int)' , 'last_row(t2.q_bigint)' , 'last_row(t2.q_smallint)' , 'last_row(t2.q_tinyint)' , 'last_row(t2.q_float)' , - 'last_row(t2.q_double)' , 'last_row(t2.q_bool)' ,'last_row(t2.q_binary)' ,'last_row(t2.q_nchar)' ,'last_row(t2.q_ts)'] + 'last_row(t2.q_int)' , 'last_row(t2.q_bigint)' , 'last_row(t2.q_smallint)' , 'last_row(t2.q_tinyint)' , 'last_row(t2.q_float)' , + 'last_row(t2.q_double)' , 'last_row(t2.q_bool)' ,'last_row(t2.q_binary)' ,'last_row(t2.q_nchar)' ,'last_row(t2.q_ts)'] self.calc_select_all_j = self.calc_select_in_ts_j + self.calc_select_in_j self.calc_select_regular_j = [ 'PERCENTILE(t1.q_int,10)' ,'PERCENTILE(t1.q_bigint,20)' , 'PERCENTILE(t1.q_smallint,30)' ,'PERCENTILE(t1.q_tinyint,40)' ,'PERCENTILE(t1.q_float,50)' ,'PERCENTILE(t1.q_double,60)' , 'PERCENTILE(t2.q_int,10)' ,'PERCENTILE(t2.q_bigint,20)' , 'PERCENTILE(t2.q_smallint,30)' ,'PERCENTILE(t2.q_tinyint,40)' ,'PERCENTILE(t2.q_float,50)' ,'PERCENTILE(t2.q_double,60)'] - + self.calc_select_fill_j = ['INTERP(t1.q_int)' ,'INTERP(t1.q_bigint)' ,'INTERP(t1.q_smallint)' ,'INTERP(t1.q_tinyint)', 'INTERP(t1.q_float)' ,'INTERP(t1.q_double)' , 'INTERP(t2.q_int)' ,'INTERP(t2.q_bigint)' ,'INTERP(t2.q_smallint)' ,'INTERP(t2.q_tinyint)', 'INTERP(t2.q_float)' ,'INTERP(t2.q_double)'] self.interp_where_j = ['t1.ts = now' , 't1.ts = \'2020-09-13 20:26:40.000\'' , 't1.ts = \'2020-09-13 20:26:40.009\'' ,'t2.ts = now' , 't2.ts = \'2020-09-13 20:26:40.000\'' , 't2.ts = \'2020-09-13 20:26:40.009\'' , @@ -389,7 +389,7 @@ class TDTestCase: 'PERCENTILE(q_int,10)' ,'PERCENTILE(q_bigint,20)' , 'PERCENTILE(q_smallint,30)' ,'PERCENTILE(q_tinyint,40)' ,'PERCENTILE(q_float,50)' ,'PERCENTILE(q_double,60)'] self.calc_aggregate_groupbytbname = ['twa(q_int)' ,'twa(q_bigint)' , 'twa(q_smallint)' ,'twa(q_tinyint)' ,'twa (q_float)' ,'twa(q_double)' , - 'IRATE(q_int)' ,'IRATE(q_bigint)' , 'IRATE(q_smallint)' ,'IRATE(q_tinyint)' ,'IRATE (q_float)' ,'IRATE(q_double)' ] + 'IRATE(q_int)' ,'IRATE(q_bigint)' , 'IRATE(q_smallint)' ,'IRATE(q_tinyint)' ,'IRATE (q_float)' ,'IRATE(q_double)' ] #two table join self.calc_aggregate_all_j = ['count(t1.*)' , 'count(t1.q_int)' ,'count(t1.q_bigint)' , 'count(t1.q_smallint)' ,'count(t1.q_tinyint)' ,'count(t1.q_float)' , @@ -417,18 +417,18 @@ class TDTestCase: self.calc_aggregate_groupbytbname_j = ['twa(t1.q_int)' ,'twa(t1.q_bigint)' , 'twa(t1.q_smallint)' ,'twa(t1.q_tinyint)' ,'twa (t1.q_float)' ,'twa(t1.q_double)' , 'IRATE(t1.q_int)' ,'IRATE(t1.q_bigint)' , 'IRATE(t1.q_smallint)' ,'IRATE(t1.q_tinyint)' ,'IRATE (t1.q_float)' ,'IRATE(t1.q_double)' , 'twa(t2.q_int)' ,'twa(t2.q_bigint)' , 'twa(t2.q_smallint)' ,'twa(t2.q_tinyint)' ,'twa (t2.q_float)' ,'twa(t2.q_double)' , - 'IRATE(t2.q_int)' ,'IRATE(t2.q_bigint)' , 'IRATE(t2.q_smallint)' ,'IRATE(t2.q_tinyint)' ,'IRATE (t2.q_float)' ,'IRATE(t2.q_double)' ] - + 'IRATE(t2.q_int)' ,'IRATE(t2.q_bigint)' , 'IRATE(t2.q_smallint)' ,'IRATE(t2.q_tinyint)' ,'IRATE (t2.q_float)' ,'IRATE(t2.q_double)' ] + # calc_calculate_all calc_calculate_regular calc_calculate_groupbytbname # calculation function include [all:spread\+-*/ ||regualr:diff\derivative ||group by tbname:diff\derivative\] - self.calc_calculate_all = ['SPREAD(ts)' , 'SPREAD(q_ts)' , 'SPREAD(q_int)' ,'SPREAD(q_bigint)' , 'SPREAD(q_smallint)' ,'SPREAD(q_tinyint)' 
,'SPREAD(q_float)' ,'SPREAD(q_double)' , + self.calc_calculate_all = ['SPREAD(ts)' , 'SPREAD(q_ts)' , 'SPREAD(q_int)' ,'SPREAD(q_bigint)' , 'SPREAD(q_smallint)' ,'SPREAD(q_tinyint)' ,'SPREAD(q_float)' ,'SPREAD(q_double)' , '(SPREAD(q_int) + SPREAD(q_bigint))' , '(SPREAD(q_smallint) - SPREAD(q_float))', '(SPREAD(q_double) * SPREAD(q_tinyint))' , '(SPREAD(q_double) / SPREAD(q_float))'] self.calc_calculate_regular = ['DIFF(q_int)' ,'DIFF(q_bigint)' , 'DIFF(q_smallint)' ,'DIFF(q_tinyint)' ,'DIFF(q_float)' ,'DIFF(q_double)' , 'DIFF(q_int,0)' ,'DIFF(q_bigint,0)' , 'DIFF(q_smallint,0)' ,'DIFF(q_tinyint,0)' ,'DIFF(q_float,0)' ,'DIFF(q_double,0)' , 'DIFF(q_int,1)' ,'DIFF(q_bigint,1)' , 'DIFF(q_smallint,1)' ,'DIFF(q_tinyint,1)' ,'DIFF(q_float,1)' ,'DIFF(q_double,1)' , 'DERIVATIVE(q_int,15s,0)' , 'DERIVATIVE(q_bigint,10s,1)' , 'DERIVATIVE(q_smallint,20s,0)' ,'DERIVATIVE(q_tinyint,10s,1)' ,'DERIVATIVE(q_float,6s,0)' ,'DERIVATIVE(q_double,3s,1)' ] self.calc_calculate_groupbytbname = self.calc_calculate_regular - + #two table join self.calc_calculate_all_j = ['SPREAD(t1.ts)' , 'SPREAD(t1.q_ts)' , 'SPREAD(t1.q_int)' ,'SPREAD(t1.q_bigint)' , 'SPREAD(t1.q_smallint)' ,'SPREAD(t1.q_tinyint)' ,'SPREAD(t1.q_float)' ,'SPREAD(t1.q_double)' , 'SPREAD(t2.ts)' , 'SPREAD(t2.q_ts)' , 'SPREAD(t2.q_int)' ,'SPREAD(t2.q_bigint)' , 'SPREAD(t2.q_smallint)' ,'SPREAD(t2.q_tinyint)' ,'SPREAD(t2.q_float)' ,'SPREAD(t2.q_double)' , @@ -452,17 +452,17 @@ class TDTestCase: 'interval(1y,1n) ','interval(1n,1w) ','interval(1w,1d) ','interval(1d,1h) ','interval(1h,1m) ','interval(1m,1s) ','interval(1s,10a) ' ,'interval(100a,30a)'] self.conn1 = taos.connect(host="127.0.0.1", user="root", password="taosdata", config="/etc/taos/") - self.cur1 = self.conn1.cursor() - print(self.cur1) + self.cur1 = self.conn1.cursor() + print(self.cur1) self.cur1.execute("use %s ;" %self.db_nest) sql = 'select * from stable_1 limit 5;' self.cur1.execute(sql) - + def data_matrix_equal(self, sql1,row1_s,row1_e,col1_s,col1_e, sql2,row2_s,row2_e,col2_s,col2_e): # ----row1_start----col1_start---- - # - - - - 是一个矩阵内的数据相等- - - - # - - - - - - - - - - - - - - - - + # - - - - 是一个矩阵内的数据相等- - - + # - - - - - - - - - - - - - - - - # ----row1_end------col1_end------ self.sql1 = sql1 list1 =[] @@ -474,9 +474,9 @@ class TDTestCase: #print("data=%s" %(tdSql.getData(i1,j1))) list1.append(tdSql.getData(i1,j1)) print("=====list1-------list1---=%s" %set(list1)) - + tdSql.execute("reset query cache;") - self.sql2 = sql2 + self.sql2 = sql2 list2 =[] tdSql.query(sql2) for i2 in range(row2_s-1,row2_e): @@ -485,8 +485,8 @@ class TDTestCase: #print("jjjj222=%d"%j2) #print("data=%s" %(tdSql.getData(i2,j2))) list2.append(tdSql.getData(i2,j2)) - print("=====list2-------list2---=%s" %set(list2)) - + print("=====list2-------list2---=%s" %set(list2)) + if (list1 == list2) and len(list2)>0: # print(("=====matrix===sql1.list1:'%s',sql2.list2:'%s'") %(list1,list2)) tdLog.info(("===matrix===sql1:'%s' matrix_result = sql2:'%s' matrix_result") %(sql1,sql2)) @@ -512,7 +512,7 @@ class TDTestCase: print(("=====matrix_error===sql1.list1:'%s',sql2.list2:'%s'") %(list1,list2)) tdLog.info(("sql1:'%s' matrix_result != sql2:'%s' matrix_result") %(sql1,sql2)) return tdSql.checkEqual(list1,list2) - + def restartDnodes(self): pass # tdDnodes.stop(1) @@ -536,7 +536,7 @@ class TDTestCase: q_binary5 binary(100) , q_nchar5 nchar(100) ,q_binary6 binary(100) , q_nchar6 nchar(100) ,q_binary7 binary(100) , q_nchar7 nchar(100) ,q_binary8 binary(100) , q_nchar8 nchar(100) ,\ q_int_null int , q_bigint_null bigint , 
q_smallint_null smallint , q_tinyint_null tinyint, q_float_null float , q_double_null double , q_bool_null bool , q_binary_null binary(20) , q_nchar_null nchar(20) , q_ts_null timestamp) \ tags(loc nchar(100) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, t_bool bool , t_binary binary(100) , t_nchar nchar(100) ,t_float float , t_double double , t_ts timestamp);''') - + tdSql.execute('''create stable stable_null_data (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp , \ q_binary1 binary(100) , q_nchar1 nchar(100) ,q_binary2 binary(100) , q_nchar2 nchar(100) ,q_binary3 binary(100) , q_nchar3 nchar(100) ,q_binary4 binary(100) , q_nchar4 nchar(100) ,\ q_binary5 binary(100) , q_nchar5 nchar(100) ,q_binary6 binary(100) , q_nchar6 nchar(100) ,q_binary7 binary(100) , q_nchar7 nchar(100) ,q_binary8 binary(100) , q_nchar8 nchar(100) ,\ @@ -548,35 +548,35 @@ class TDTestCase: q_binary5 binary(100) , q_nchar5 nchar(100) ,q_binary6 binary(100) , q_nchar6 nchar(100) ,q_binary7 binary(100) , q_nchar7 nchar(100) ,q_binary8 binary(100) , q_nchar8 nchar(100) ,\ q_int_null int , q_bigint_null bigint , q_smallint_null smallint , q_tinyint_null tinyint, q_float_null float , q_double_null double , q_bool_null bool , q_binary_null binary(20) , q_nchar_null nchar(20) , q_ts_null timestamp) \ tags(loc nchar(100) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, t_bool bool , t_binary binary(100) , t_nchar nchar(100) ,t_float float , t_double double , t_ts timestamp);''') - + #tdSql.execute('''create table stable_1_1 using stable_1 tags('stable_1_1', '0' , '0' , '0' , '0' , 0 , 'binary1' , 'nchar1' , '0' , '0' ,'0') ;''') - tdSql.execute('''create table stable_1_1 using stable_1 tags('stable_1_1', '%d' , '%d', '%d' , '%d' , 0 , 'binary1.%s' , 'nchar1.%s' , '%f', '%f' ,'%d') ;''' - %(fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1), - fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) , - fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1))) + tdSql.execute('''create table stable_1_1 using stable_1 tags('stable_1_1', '%d' , '%d', '%d' , '%d' , 0 , 'binary1.%s' , 'nchar1.%s' , '%f', '%f' ,'%d') ;''' + %(fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1), + fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) , + fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1))) tdSql.execute('''create table stable_1_2 using stable_1 tags('stable_1_2', '2147483647' , '9223372036854775807' , '32767' , '127' , 1 , 'binary2' , 'nchar2' , '2' , '22' , \'1999-09-09 09:09:09.090\') ;''') tdSql.execute('''create table stable_1_3 using stable_1 tags('stable_1_3', '-2147483647' , '-9223372036854775807' , '-32767' , '-127' , false , 'binary3' , 'nchar3nchar3' , '-3.3' , '-33.33' , \'2099-09-09 09:09:09.090\') ;''') #tdSql.execute('''create table stable_1_4 using stable_1 tags('stable_1_4', '0' , '0' , '0' , '0' , 0 , '0' , '0' , '0' , '0' ,'0') ;''') - tdSql.execute('''create table stable_1_4 using stable_1 tags('stable_1_4', '%d' , '%d', '%d' , '%d' , 1 , 'binary1.%s' , 'nchar1.%s' , 
'%f', '%f' ,'%d') ;''' - %(fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1), - fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) , - fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1))) + tdSql.execute('''create table stable_1_4 using stable_1 tags('stable_1_4', '%d' , '%d', '%d' , '%d' , 1 , 'binary1.%s' , 'nchar1.%s' , '%f', '%f' ,'%d') ;''' + %(fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1), + fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) , + fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1))) # tdSql.execute('''create table stable_2_1 using stable_2 tags('stable_2_1' , '0' , '0' , '0' , '0' , 0 , 'binary21' , 'nchar21' , '0' , '0' ,'0') ;''') # tdSql.execute('''create table stable_2_2 using stable_2 tags('stable_2_2' , '0' , '0' , '0' , '0' , 0 , '0' , '0' , '0' , '0' ,'0') ;''') # tdSql.execute('''create table stable_null_data_1 using stable_null_data tags('stable_null_data_1', '0' , '0' , '0' , '0' , 0 , '0' , '0' , '0' , '0' ,'0') ;''') - - tdSql.execute('''create table stable_2_1 using stable_2 tags('stable_2_1' , '0' , '0' , '0' , '0' , 0 , 'binary21' , 'nchar21' , '0' , '0' ,\'2099-09-09 09:09:09.090\') ;''') - tdSql.execute('''create table stable_2_2 using stable_2 tags('stable_2_2' , '%d' , '%d', '%d' , '%d' , 0 , 'binary2.%s' , 'nchar2.%s' , '%f', '%f' ,'%d') ;''' - %(fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1), - fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) , - fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1))) - tdSql.execute('''create table stable_null_data_1 using stable_null_data tags('stable_null_data_1', '%d' , '%d', '%d' , '%d' , 1 , 'binary1.%s' , 'nchar1.%s' , '%f', '%f' ,'%d') ;''' - %(fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1), - fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) , - fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1))) + tdSql.execute('''create table stable_2_1 using stable_2 tags('stable_2_1' , '0' , '0' , '0' , '0' , 0 , 'binary21' , 'nchar21' , '0' , '0' ,\'2099-09-09 09:09:09.090\') ;''') + tdSql.execute('''create table stable_2_2 using stable_2 tags('stable_2_2' , '%d' , '%d', '%d' , '%d' , 0 , 'binary2.%s' , 'nchar2.%s' , '%f', '%f' ,'%d') ;''' + %(fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1), + fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) , + fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1))) + + tdSql.execute('''create table stable_null_data_1 using stable_null_data tags('stable_null_data_1', '%d' , '%d', '%d' , '%d' , 1 , 'binary1.%s' , 'nchar1.%s' , '%f', '%f' ,'%d') ;''' + %(fake.random_int(min=-2147483647, max=2147483647, step=1), 
fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1), + fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) , + fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1))) #regular table tdSql.execute('''create table regular_table_1 \ @@ -602,92 +602,92 @@ class TDTestCase: q_int_null int , q_bigint_null bigint , q_smallint_null smallint , q_tinyint_null tinyint, q_float_null float , q_double_null double , q_bool_null bool , q_binary_null binary(20) , q_nchar_null nchar(20) , q_ts_null timestamp) ;''') - for i in range(num_random*n): + for i in range(num_random*n): tdSql.execute('''insert into stable_1_1 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double , q_bool , q_binary , q_nchar, q_ts,\ q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \ values(%d, %d, %d, %d, %d, %f, %f, 0, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \ - 'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;''' - % (ts + i*1000, fake.random_int(min=-2147483647, max=2147483647, step=1), - fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1), - fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) , - fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , ts + i , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , + 'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;''' + % (ts + i*1000, fake.random_int(min=-2147483647, max=2147483647, step=1), + fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1), + fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) , + fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , ts + i , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address())) tdSql.execute('''insert into regular_table_1 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts,\ q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \ values(%d, %d, %d, %d, %d, %f, %f, 0, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \ - 'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;''' - % (ts + i*1000, fake.random_int(min=-2147483647, max=2147483647, step=1) , - fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1) , - fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) , - fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , ts + i, fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , + 'binary4.%s', 'nchar4.%s', 
'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;''' + % (ts + i*1000, fake.random_int(min=-2147483647, max=2147483647, step=1) , + fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1) , + fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) , + fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , ts + i, fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address())) tdSql.execute('''insert into stable_1_2 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts,\ q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8)\ values(%d, %d, %d, %d, %d, %f, %f, 1, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \ - 'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;''' - % (ts + i*1000, fake.random_int(min=0, max=2147483647, step=1), - fake.random_int(min=0, max=9223372036854775807, step=1), - fake.random_int(min=0, max=32767, step=1) , fake.random_int(min=0, max=127, step=1) , - fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , ts + i, fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , + 'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;''' + % (ts + i*1000, fake.random_int(min=0, max=2147483647, step=1), + fake.random_int(min=0, max=9223372036854775807, step=1), + fake.random_int(min=0, max=32767, step=1) , fake.random_int(min=0, max=127, step=1) , + fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , ts + i, fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address())) tdSql.execute('''insert into regular_table_2 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts,\ q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \ values(%d, %d, %d, %d, %d, %f, %f, 1, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \ - 'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;''' - % (ts + i*1000, fake.random_int(min=0, max=2147483647, step=1), - fake.random_int(min=0, max=9223372036854775807, step=1), - fake.random_int(min=0, max=32767, step=1) , fake.random_int(min=0, max=127, step=1) , - fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , ts + i, fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , + 'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;''' + % (ts + 
i*1000, fake.random_int(min=0, max=2147483647, step=1), + fake.random_int(min=0, max=9223372036854775807, step=1), + fake.random_int(min=0, max=32767, step=1) , fake.random_int(min=0, max=127, step=1) , + fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , ts + i, fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address())) - + tdSql.execute('''insert into stable_1_2 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts,\ q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \ values(%d, %d, %d, %d, %d, %f, %f, 1, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \ - 'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;''' - % (ts + i*1000 +1, fake.random_int(min=-2147483647, max=0, step=1), - fake.random_int(min=-9223372036854775807, max=0, step=1), - fake.random_int(min=-32767, max=0, step=1) , fake.random_int(min=-127, max=0, step=1) , - fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , ts + i +1, fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , + 'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;''' + % (ts + i*1000 +1, fake.random_int(min=-2147483647, max=0, step=1), + fake.random_int(min=-9223372036854775807, max=0, step=1), + fake.random_int(min=-32767, max=0, step=1) , fake.random_int(min=-127, max=0, step=1) , + fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , ts + i +1, fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address())) tdSql.execute('''insert into regular_table_2 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts,\ q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \ values(%d, %d, %d, %d, %d, %f, %f, 1, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \ - 'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;''' - % (ts + i*1000 +1, fake.random_int(min=-2147483647, max=0, step=1), - fake.random_int(min=-9223372036854775807, max=0, step=1), - fake.random_int(min=-32767, max=0, step=1) , fake.random_int(min=-127, max=0, step=1) , - fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , ts + i +1, fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , + 'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;''' + % (ts + i*1000 +1, fake.random_int(min=-2147483647, max=0, step=1), + fake.random_int(min=-9223372036854775807, max=0, step=1), + 
fake.random_int(min=-32767, max=0, step=1) , fake.random_int(min=-127, max=0, step=1) , + fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , ts + i +1, fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address())) tdSql.execute('''insert into stable_2_1 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts,\ q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \ values(%d, %d, %d, %d, %d, %f, %f, 0, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \ - 'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;''' - % (ts + i*1000, fake.random_int(min=-0, max=2147483647, step=1), - fake.random_int(min=-0, max=9223372036854775807, step=1), - fake.random_int(min=-0, max=32767, step=1) , fake.random_int(min=-0, max=127, step=1) , - fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , ts + i, fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , + 'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;''' + % (ts + i*1000, fake.random_int(min=-0, max=2147483647, step=1), + fake.random_int(min=-0, max=9223372036854775807, step=1), + fake.random_int(min=-0, max=32767, step=1) , fake.random_int(min=-0, max=127, step=1) , + fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , ts + i, fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address())) tdSql.execute('''insert into stable_2_1 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts,\ q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \ values(%d, %d, %d, %d, %d, %f, %f, 0, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \ - 'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;''' - % (ts + i*1000 +1, fake.random_int(min=-0, max=2147483647, step=1), - fake.random_int(min=-0, max=9223372036854775807, step=1), - fake.random_int(min=-0, max=32767, step=1) , fake.random_int(min=-0, max=127, step=1) , - fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , ts + i, fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , + 'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;''' + % (ts + i*1000 +1, fake.random_int(min=-0, max=2147483647, step=1), + fake.random_int(min=-0, max=9223372036854775807, step=1), + fake.random_int(min=-0, max=32767, step=1) , fake.random_int(min=-0, max=127, step=1) , + fake.pyfloat() , fake.pyfloat() , fake.pystr() , 
fake.address() , ts + i, fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address())) tdSql.execute('''insert into stable_2_1 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts,\ q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \ values(%d, %d, %d, %d, %d, %f, %f, 0, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \ - 'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;''' - % (ts + i*1000 +10, fake.random_int(min=-0, max=2147483647, step=1), - fake.random_int(min=-0, max=9223372036854775807, step=1), - fake.random_int(min=-0, max=32767, step=1) , fake.random_int(min=-0, max=127, step=1) , - fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , ts + i, fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , + 'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;''' + % (ts + i*1000 +10, fake.random_int(min=-0, max=2147483647, step=1), + fake.random_int(min=-0, max=9223372036854775807, step=1), + fake.random_int(min=-0, max=32767, step=1) , fake.random_int(min=-0, max=127, step=1) , + fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , ts + i, fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address())) tdSql.query("select count(*) from stable_1;") @@ -696,149 +696,149 @@ class TDTestCase: tdSql.checkData(0,0,num_random*n) def math_nest(self,mathlist): - - print("==========%s===start=============" %mathlist) - os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename)) - + + print("==========%s===start=============" %mathlist) + os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename)) + self.dropandcreateDB_random("%s" %self.db_nest, 1) - + if (mathlist == ['ABS','SQRT']) or (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['FLOOR','CEIL','ROUND']) \ or (mathlist == ['CSUM']) or (mathlist == ['']): - math_functions = mathlist - fun_fix_column = ['(q_bigint)','(q_smallint)','(q_tinyint)','(q_int)','(q_float)','(q_double)','(q_bigint_null)','(q_smallint_null)','(q_tinyint_null)','(q_int_null)','(q_float_null)','(q_double_null)'] + math_functions = mathlist + fun_fix_column = ['(q_bigint)','(q_smallint)','(q_tinyint)','(q_int)','(q_float)','(q_double)','(q_bigint_null)','(q_smallint_null)','(q_tinyint_null)','(q_int_null)','(q_float_null)','(q_double_null)'] fun_column_1 = random.sample(math_functions,1)+random.sample(fun_fix_column,1) math_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","") fun_column_2 = random.sample(math_functions,1)+random.sample(fun_fix_column,1) math_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","") - + fun_fix_column_j = 
['(t1.q_bigint)','(t1.q_smallint)','(t1.q_tinyint)','(t1.q_int)','(t1.q_float)','(t1.q_double)','(t1.q_bigint_null)','(t1.q_smallint_null)','(t1.q_tinyint_null)','(t1.q_int_null)','(t1.q_float_null)','(t1.q_double_null)', - '(t2.q_bigint)','(t2.q_smallint)','(t2.q_tinyint)','(t2.q_int)','(t2.q_float)','(t2.q_double)','(t2.q_bigint_null)','(t2.q_smallint_null)','(t2.q_tinyint_null)','(t2.q_int_null)','(t2.q_float_null)','(t2.q_double_null)'] + '(t2.q_bigint)','(t2.q_smallint)','(t2.q_tinyint)','(t2.q_int)','(t2.q_float)','(t2.q_double)','(t2.q_bigint_null)','(t2.q_smallint_null)','(t2.q_tinyint_null)','(t2.q_int_null)','(t2.q_float_null)','(t2.q_double_null)'] fun_column_join_1 = random.sample(math_functions,1)+random.sample(fun_fix_column_j,1) math_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","") fun_column_join_2 = random.sample(math_functions,1)+random.sample(fun_fix_column_j,1) math_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","") - + elif (mathlist == ['UNIQUE']) or (mathlist == ['HYPERLOGLOG']): - math_functions = mathlist + math_functions = mathlist fun_fix_column = ['(q_bigint)','(q_smallint)','(q_tinyint)','(q_int)','(q_float)','(q_double)','(q_binary)','(q_nchar)','(q_bool)','(q_ts)', - '(q_bigint_null)','(q_smallint_null)','(q_tinyint_null)','(q_int_null)','(q_float_null)','(q_double_null)','(q_binary_null)','(q_nchar_null)','(q_bool_null)','(q_ts_null)'] + '(q_bigint_null)','(q_smallint_null)','(q_tinyint_null)','(q_int_null)','(q_float_null)','(q_double_null)','(q_binary_null)','(q_nchar_null)','(q_bool_null)','(q_ts_null)'] fun_column_1 = random.sample(math_functions,1)+random.sample(fun_fix_column,1) math_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","") fun_column_2 = random.sample(math_functions,1)+random.sample(fun_fix_column,1) math_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","") - + fun_fix_column_j = ['(t1.q_bigint)','(t1.q_smallint)','(t1.q_tinyint)','(t1.q_int)','(t1.q_float)','(t1.q_double)','(t1.q_bigint_null)','(t1.q_smallint_null)','(t1.q_tinyint_null)','(t1.q_int_null)','(t1.q_float_null)','(t1.q_double_null)', - '(t2.q_bigint)','(t2.q_smallint)','(t2.q_tinyint)','(t2.q_int)','(t2.q_float)','(t2.q_double)','(t2.q_bigint_null)','(t2.q_smallint_null)','(t2.q_tinyint_null)','(t2.q_int_null)','(t2.q_float_null)','(t2.q_double_null)'] + '(t2.q_bigint)','(t2.q_smallint)','(t2.q_tinyint)','(t2.q_int)','(t2.q_float)','(t2.q_double)','(t2.q_bigint_null)','(t2.q_smallint_null)','(t2.q_tinyint_null)','(t2.q_int_null)','(t2.q_float_null)','(t2.q_double_null)'] fun_column_join_1 = random.sample(math_functions,1)+random.sample(fun_fix_column_j,1) math_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","") fun_column_join_2 = random.sample(math_functions,1)+random.sample(fun_fix_column_j,1) math_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","") elif (mathlist == ['POW','LOG']) or (mathlist == ['MAVG']) or (mathlist == ['SAMPLE']) or (mathlist == ['TAIL']): - math_functions = mathlist - num = random.randint(0, 1000) + math_functions = mathlist + num = random.randint(0, 1000) fun_fix_column = ['(q_bigint,num)','(q_smallint,num)','(q_tinyint,num)','(q_int,num)','(q_float,num)','(q_double,num)', - 
'(q_bigint_null,num)','(q_smallint_null,num)','(q_tinyint_null,num)','(q_int_null,num)','(q_float_null,num)','(q_double_null,num)'] + '(q_bigint_null,num)','(q_smallint_null,num)','(q_tinyint_null,num)','(q_int_null,num)','(q_float_null,num)','(q_double_null,num)'] fun_column_1 = random.sample(math_functions,1)+random.sample(fun_fix_column,1) math_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("num",str(num)) fun_column_2 = random.sample(math_functions,1)+random.sample(fun_fix_column,1) math_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("num",str(num)) - + fun_fix_column_j = ['(t1.q_bigint,num)','(t1.q_smallint,num)','(t1.q_tinyint,num)','(t1.q_int,num)','(t1.q_float,num)','(t1.q_double,num)', '(t1.q_bigint_null,num)','(t1.q_smallint_null,num)','(t1.q_tinyint_null,num)','(t1.q_int_null,num)','(t1.q_float_null,num)','(t1.q_double_null,num)', '(t2.q_bigint,num)','(t2.q_smallint,num)','(t2.q_tinyint,num)','(t2.q_int,num)','(t2.q_float,num)','(t2.q_double,num)', - '(t2.q_bigint_null,num)','(t2.q_smallint_null,num)','(t2.q_tinyint_null,num)','(t2.q_int_null,num)','(t2.q_float_null,num)','(t2.q_double_null,num)'] + '(t2.q_bigint_null,num)','(t2.q_smallint_null,num)','(t2.q_tinyint_null,num)','(t2.q_int_null,num)','(t2.q_float_null,num)','(t2.q_double_null,num)'] fun_column_join_1 = random.sample(math_functions,1)+random.sample(fun_fix_column_j,1) math_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("num",str(num)) fun_column_join_2 = random.sample(math_functions,1)+random.sample(fun_fix_column_j,1) math_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("num",str(num)) - + tdSql.query("select 1-1 as math_nest from stable_1 limit 1;") for i in range(self.fornum): if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \ or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) : - sql = "select ts , floor(asct1) from ( select " - sql += "%s as asct1, " % math_fun_1 - sql += "%s as asct2, " % math_fun_2 + sql = "select ts , floor(asct1) from ( select " + sql += "%s as asct1, " % math_fun_1 + sql += "%s as asct2, " % math_fun_2 sql += "%s, " % random.choice(self.s_s_select) - sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) sql += "ts from regular_table_1 where " sql += "%s " % random.choice(self.q_where) sql += "%s " % random.choice(self.order_where) sql += ");" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) tdSql.checkRows(100) self.cur1.execute(sql) elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM']) \ or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']): - sql = "select floor(asct1) from ( select " - sql += "%s as asct1 " % math_fun_1 - # sql += "%s as asct2, " % math_fun_2 + sql = "select floor(asct1) from ( select " + sql += "%s as asct1 " % math_fun_1 + # sql += "%s as asct2, " % math_fun_2 # sql += "%s, " % random.choice(self.s_s_select) - # sql += "%s, " % random.choice(self.q_select) + # sql += "%s, " % random.choice(self.q_select) sql += " from regular_table_1 where " sql += "%s " % random.choice(self.q_where) #sql += "%s " % random.choice(self.order_where) sql += ");" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) 
tdSql.query(sql) #tdSql.checkRows(100) self.cur1.execute(sql) - + tdSql.query("select 1-2 as math_nest from stable_1 limit 1;") for i in range(self.fornum): if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \ or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) : sql = "select ts , abs(asct1) from ( select " - sql += "%s as asct1, " % math_fun_1 + sql += "%s as asct1, " % math_fun_1 sql += "%s, " % random.choice(self.s_s_select) - sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) sql += "ts ts from regular_table_1 where " sql += "%s " % random.choice(self.q_where) sql += "%s )" % random.choice(self.order_where) - sql += "%s " % random.choice(self.unionall_or_union) + sql += "%s " % random.choice(self.unionall_or_union) sql += "select ts , asct2 from ( select " - sql += "%s as asct2, " % math_fun_2 + sql += "%s as asct2, " % math_fun_2 sql += "%s, " % random.choice(self.s_s_select) - sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) sql += "ts ts from regular_table_1 where " - sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.q_where) #sql += "%s " % random.choice(having_support) sql += "%s " % random.choice(self.order_where) sql += "%s " % random.choice(self.limit1_where) sql += ")" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #TD-15437 tdSql.query(sql) - #TD-15437 self.cur1.execute(sql) + #TD-15437 self.cur1.execute(sql) elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM']) \ - or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']): + or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']): sql = "select abs(asct1) from ( select " - sql += "%s as asct1 " % math_fun_1 + sql += "%s as asct1 " % math_fun_1 # sql += "%s, " % random.choice(self.s_s_select) - # sql += "%s, " % random.choice(self.q_select) + # sql += "%s, " % random.choice(self.q_select) sql += "from regular_table_1 where " sql += "%s )" % random.choice(self.q_where) #sql += "%s )" % random.choice(self.order_where) - sql += "%s " % random.choice(self.unionall_or_union) + sql += "%s " % random.choice(self.unionall_or_union) sql += "select floor(asct2) from ( select " - sql += "%s as asct2 " % math_fun_2 + sql += "%s as asct2 " % math_fun_2 # sql += "%s, " % random.choice(self.s_s_select) - # sql += "%s, " % random.choice(self.q_select) + # sql += "%s, " % random.choice(self.q_select) sql += " from regular_table_1 where " - sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.q_where) #sql += "%s " % random.choice(having_support) #sql += "%s " % random.choice(self.order_where) sql += "%s " % random.choice(self.limit1_where) sql += ")" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #TD-15473 tdSql.query(sql) - #TD-15473 self.cur1.execute(sql) - + #TD-15473 self.cur1.execute(sql) + tdSql.query("select 1-3 as math_nest from stable_1 limit 1;") for i in range(self.fornum): if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \ @@ -847,20 +847,20 @@ class TDTestCase: sql += "%s as asct1, ts ," % math_fun_1 sql += "%s as asct2, " % math_fun_2 sql += "%s, " % random.choice(self.s_s_select) - sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) sql += "ts from regular_table_1 where " sql += "%s " % random.choice(self.q_where) - sql += 
"%s select " % random.choice(self.unionall_or_union) + sql += "%s select " % random.choice(self.unionall_or_union) sql += "%s as asct2, ts ," % math_fun_2 sql += "%s as asct1, " % math_fun_1 sql += "%s, " % random.choice(self.s_s_select) - sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) sql += "ts from regular_table_2 where " sql += "%s " % random.choice(self.q_where) sql += "%s " % random.choice(self.order_where) sql += ")" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #TD-15473 tdSql.query(sql) #self.cur1.execute(sql) elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM']) \ @@ -869,86 +869,86 @@ class TDTestCase: sql += "%s as asct1, ts ," % math_fun_1 sql += "%s as asct2, " % math_fun_2 sql += "%s, " % random.choice(self.s_s_select) - sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) sql += "ts from regular_table_1 where " sql += "%s " % random.choice(self.q_where) - sql += "%s select " % random.choice(self.unionall_or_union) + sql += "%s select " % random.choice(self.unionall_or_union) sql += "%s as asct2, ts ," % math_fun_2 sql += "%s as asct1, " % math_fun_1 sql += "%s, " % random.choice(self.s_s_select) - sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) sql += "ts from regular_table_2 where " sql += "%s " % random.choice(self.q_where) sql += "%s " % random.choice(self.order_where) sql += ")" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #TD-15473 tdSql.query(sql) #self.cur1.execute(sql) - + tdSql.query("select 1-4 as math_nest from stable_1 limit 1;") for i in range(self.fornum): if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \ or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) : sql = "select ts , asct1 from ( select t1.ts as ts," - sql += "%s, " % math_fun_join_1 - sql += "%s as asct1, " % math_fun_join_2 - sql += "%s, " % math_fun_join_1 - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t2.%s, " % random.choice(self.q_select) + sql += "%s, " % math_fun_join_1 + sql += "%s as asct1, " % math_fun_join_2 + sql += "%s, " % math_fun_join_1 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.q_u_where) sql += "and %s " % random.choice(self.q_u_or_where) sql += "%s " % random.choice(self.order_u_where) sql += ");" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) tdSql.checkRows(100) self.cur1.execute(sql) elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM'])\ or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']): sql = "select count(asct1) from ( select " - sql += "%s as asct1 " % math_fun_join_2 + sql += "%s as asct1 " % math_fun_join_2 sql += "from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.q_u_where) sql += "and %s " % random.choice(self.q_u_or_where) sql += ");" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) self.cur1.execute(sql) - + tdSql.query("select 1-5 as math_nest from stable_1 limit 1;") for i in range(self.fornum): if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) 
or (mathlist == ['ABS','SQRT']) \ or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) : sql = "select ts ," - sql += "%s, " % math_fun_1 + sql += "%s, " % math_fun_1 + sql += "%s, " % random.choice(self.q_select) sql += "%s, " % random.choice(self.q_select) - sql += "%s, " % random.choice(self.q_select) sql += "%s " % math_fun_2 sql += " from ( select * from regular_table_1 ) where " sql += "%s " % random.choice(self.q_where) sql += "%s " % random.choice(self.order_where) sql += " ;" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) tdSql.checkRows(100) self.cur1.execute(sql) elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM'])\ or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']): sql = "select " - # sql += "%s, " % math_fun_1 + # sql += "%s, " % math_fun_1 + # sql += "%s, " % random.choice(self.q_select) # sql += "%s, " % random.choice(self.q_select) - # sql += "%s, " % random.choice(self.q_select) sql += "%s " % math_fun_2 sql += " from ( select * from regular_table_1 ) where " sql += "%s " % random.choice(self.q_where) sql += " ;" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #TD-15973 tdSql.query(sql) #TD-15973 self.cur1.execute(sql) @@ -957,35 +957,35 @@ class TDTestCase: if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \ or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) : sql = "select ts , max(asct1) from ( select t1.ts as ts," - sql += "%s, " % math_fun_join_1 - sql += "%s as asct1, " % math_fun_join_2 - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t2.%s, " % random.choice(self.q_select) - sql += "%s, " % math_fun_join_1 + sql += "%s, " % math_fun_join_1 + sql += "%s as asct1, " % math_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "%s, " % math_fun_join_1 sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.q_u_where) sql += "and %s )" % random.choice(self.q_u_or_where) sql += "%s " % random.choice(self.order_where) sql += "%s " % random.choice(self.limit1_where) - tdLog.info(sql) - tdLog.info(len(sql)) - tdSql.query(sql) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) self.cur1.execute(sql) elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM']) \ or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']): sql = "select max(asct1) from ( select " - #sql += "%s, " % math_fun_join_1 - sql += "%s as asct1 " % math_fun_join_2 - # sql += "t1.%s, " % random.choice(self.q_select) - # sql += "t2.%s, " % random.choice(self.q_select) - # sql += "%s, " % math_fun_join_1 + #sql += "%s, " % math_fun_join_1 + sql += "%s as asct1 " % math_fun_join_2 + # sql += "t1.%s, " % random.choice(self.q_select) + # sql += "t2.%s, " % random.choice(self.q_select) + # sql += "%s, " % math_fun_join_1 sql += "from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.q_u_where) sql += "and %s )" % random.choice(self.q_u_or_where) sql += "%s " % random.choice(self.limit1_where) - tdLog.info(sql) - tdLog.info(len(sql)) - tdSql.query(sql) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) self.cur1.execute(sql) tdSql.query("select 1-7 as math_nest from stable_1 limit 1;") @@ -995,13 +995,13 @@ class TDTestCase: sql = 
"select ts , abs(asct1) from ( select " sql += "%s as asct1, ts ," % math_fun_1 sql += "%s as asct2, " % math_fun_2 - sql += "%s, " % random.choice(self.q_select) - sql += "%s, " % random.choice(self.t_select) + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.t_select) sql += "ts from stable_1 where " sql += "%s " % random.choice(self.qt_where) sql += "%s " % random.choice(self.order_where) sql += ") ;" - tdLog.info(sql) + tdLog.info(sql) tdLog.info(len(sql)) # tdSql.query(sql) # tdSql.checkRows(300) @@ -1013,11 +1013,11 @@ class TDTestCase: sql += "from stable_1 where " sql += "%s " % random.choice(self.qt_where) sql += ") ;" - tdLog.info(sql) + tdLog.info(sql) tdLog.info(len(sql)) # tdSql.query(sql) # self.cur1.execute(sql)# TD-16039 - + tdSql.query("select 1-8 as math_nest from stable_1 limit 1;") for i in range(self.fornum): if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \ @@ -1027,13 +1027,13 @@ class TDTestCase: sql += "%s, " % random.choice(self.s_s_select) sql += "%s as asct1, ts ," % math_fun_1 sql += "%s as asct2, " % math_fun_2 - sql += "%s, " % random.choice(self.q_select) - sql += "%s, " % random.choice(self.t_select) + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.t_select) sql += "ts from stable_1 where " sql += "%s " % random.choice(self.qt_where) sql += "%s " % random.choice(self.order_where) sql += ") ;" - tdLog.info(sql) + tdLog.info(sql) tdLog.info(len(sql)) #tdSql.query(sql) # tdSql.checkRows(300) @@ -1046,7 +1046,7 @@ class TDTestCase: sql += " from stable_1 where " sql += "%s " % random.choice(self.qt_where) sql += ") ;" - tdLog.info(sql) + tdLog.info(sql) tdLog.info(len(sql)) # tdSql.query(sql) # self.cur1.execute(sql)# TD-16039 @@ -1056,12 +1056,12 @@ class TDTestCase: if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \ or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) : sql = "select ts , max(asct1) from ( select t1.ts as ts," - sql += "%s, " % math_fun_join_1 - sql += "%s as asct1, " % math_fun_join_2 - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t2.%s, " % random.choice(self.q_select) - sql += "t2.%s, " % random.choice(self.q_select) + sql += "%s, " % math_fun_join_1 + sql += "%s as asct1, " % math_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.t_join_where) sql += "and %s " % random.choice(self.t_u_where) @@ -1069,29 +1069,29 @@ class TDTestCase: sql += "%s " % random.choice(self.order_u_where) sql += "%s " % random.choice(self.limit1_where) sql += ") ;" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) # tdSql.query(sql) # self.cur1.execute(sql)# TD-16039 elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM'])\ or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']): - sql = "select max(asct1) from ( select " - sql += "%s as asct1 " % math_fun_join_2 - # sql += "t1.%s, " % random.choice(self.q_select) - # sql += "t1.%s, " % random.choice(self.q_select) - # sql += "t2.%s, " % random.choice(self.q_select) - # sql += "t2.%s, " % random.choice(self.q_select) + sql = "select 
max(asct1) from ( select " + sql += "%s as asct1 " % math_fun_join_2 + # sql += "t1.%s, " % random.choice(self.q_select) + # sql += "t1.%s, " % random.choice(self.q_select) + # sql += "t2.%s, " % random.choice(self.q_select) + # sql += "t2.%s, " % random.choice(self.q_select) sql += "from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.t_join_where) sql += "and %s " % random.choice(self.t_u_where) sql += "and %s " % random.choice(self.t_u_or_where) sql += "%s " % random.choice(self.limit1_where) sql += ") ;" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) # tdSql.query(sql)# TD-16039 # self.cur1.execute(sql) - + self.restartDnodes() tdSql.query("select 1-10 as math_nest from stable_1 limit 1;") for i in range(self.fornum): @@ -1116,8 +1116,8 @@ class TDTestCase: sql += "%s " % random.choice(self.order_where) sql += "%s " % random.choice(self.limit1_where) sql += ")" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #TD-15437 tdSql.query(sql) #TD-15437 self.cur1.execute(sql) elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM']) \ @@ -1133,11 +1133,11 @@ class TDTestCase: sql += "%s " % random.choice(self.q_where) sql += "%s " % random.choice(self.limit1_where) sql += ")" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #TD-15437 tdSql.query(sql) #TD-15437 self.cur1.execute(sql) - + #3 inter union not support tdSql.query("select 1-11 as math_nest from stable_1 limit 1;") for i in range(self.fornum): @@ -1163,8 +1163,8 @@ class TDTestCase: sql += "%s " % random.choice(self.order_where) sql += "%s " % random.choice(self.limit1_where) sql += ")" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #TD-15837 tdSql.query(sql) # self.cur1.execute(sql) elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM']) \ @@ -1180,8 +1180,8 @@ class TDTestCase: sql += "%s " % random.choice(self.q_where) sql += "%s " % random.choice(self.limit1_where) sql += ")" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #TD-15837 tdSql.query(sql) #self.cur1.execute(sql) @@ -1190,52 +1190,52 @@ class TDTestCase: if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \ or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) : sql = "select ts , max(asct1) from ( select t1.ts as ts," - sql += "%s, " % math_fun_join_1 - sql += "%s as asct1, " % math_fun_join_2 - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t2.%s, " % random.choice(self.q_select) - sql += "t2.%s, " % random.choice(self.q_select) + sql += "%s, " % math_fun_join_1 + sql += "%s as asct1, " % math_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.t_u_where) sql += "and %s " % random.choice(self.t_u_or_where) sql += "%s " % random.choice(self.order_u_where) sql += "%s " % random.choice(self.limit1_where) sql += ");" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) # tdSql.query(sql) # self.cur1.execute(sql)# TD-16039 elif (mathlist == ['MAVG']) or (mathlist == 
['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM'])\ or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']): - sql = "select max(asct1) from ( select " - sql += "%s as asct1 " % math_fun_join_2 + sql = "select max(asct1) from ( select " + sql += "%s as asct1 " % math_fun_join_2 sql += " from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.t_u_where) sql += "and %s " % random.choice(self.t_u_or_where) sql += "%s " % random.choice(self.limit1_where) sql += ");" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) # tdSql.query(sql) # self.cur1.execute(sql)# TD-16039 - + tdSql.query("select 1-13 as math_nest from stable_1 limit 1;") for i in range(self.fornum): if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \ or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) : sql = "select ts ," - sql += "%s, " % math_fun_1 + sql += "%s, " % math_fun_1 + sql += "%s, " % random.choice(self.q_select) sql += "%s, " % random.choice(self.q_select) - sql += "%s, " % random.choice(self.q_select) sql += "%s " % math_fun_2 - sql += "%s " % random.choice(self.t_select) + sql += "%s " % random.choice(self.t_select) sql += " from ( select * from stable_1 where " sql += "%s " % random.choice(self.qt_where) sql += "%s " % random.choice(self.order_where) sql += ") ;" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) # tdSql.query(sql) # tdSql.checkRows(300) # self.cur1.execute(sql) # TD-16039 @@ -1243,21 +1243,21 @@ class TDTestCase: or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']): sql = "select " sql += "%s " % math_fun_2 - sql += "%s " % random.choice(self.t_select) + sql += "%s " % random.choice(self.t_select) sql += " from ( select * from stable_1 where " sql += "%s " % random.choice(self.qt_where) sql += ") ;" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #TD15973 tdSql.query(sql) #TD15973 self.cur1.execute(sql) - + tdSql.query("select 1-14 as math_nest from stable_1 limit 1;") for i in range(self.fornum): if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \ or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) : sql = "select avg(asct1),count(asct2) from ( select " - sql += "%s as asct1, " % math_fun_1 + sql += "%s as asct1, " % math_fun_1 sql += "%s as asct2" % math_fun_2 sql += " from stable_1 where " sql += "%s " % random.choice(self.q_where) @@ -1265,63 +1265,63 @@ class TDTestCase: sql += "%s " % random.choice(self.order_desc_where) sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] ) sql += " ) ;" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) # tdSql.query(sql) # self.cur1.execute(sql) # TD-16039 elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM'])\ or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']): sql = "select avg(asct1) from ( select " - sql += "%s as asct1 " % math_fun_1 + sql += "%s as asct1 " % math_fun_1 sql += " from stable_1 where " sql += "%s " % random.choice(self.q_where) sql += "%s " % random.choice(self.partiton_where) sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] ) sql += " ) ;" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) # tdSql.query(sql) # self.cur1.execute(sql) # TD-16039 - + tdSql.query("select 1-15 as math_nest from 
stable_1 limit 1;") for i in range(self.fornum): if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \ or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) : sql = "select ts , max(asct1) from ( select t1.ts as ts," - sql += "%s, " % math_fun_join_1 - sql += "%s as asct1, " % math_fun_join_2 - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t2.%s, " % random.choice(self.q_select) - sql += "t2.%s " % random.choice(self.q_select) + sql += "%s, " % math_fun_join_1 + sql += "%s as asct1, " % math_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s " % random.choice(self.q_select) sql += "from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.t_join_where) sql += " and %s " % random.choice(self.qt_u_or_where) sql += "%s " % random.choice(self.partiton_where_j) sql += "%s " % random.choice(self.slimit1_where) - sql += ") " + sql += ") " sql += "%s " % random.choice(self.order_desc_where) sql += "%s ;" % random.choice(self.limit_u_where) - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) # tdSql.query(sql) # self.cur1.execute(sql) # TD-16039 elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE']) or (mathlist == ['TAIL']) or (mathlist == ['CSUM'])\ - or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']): - sql = "select max(asct1) from ( select " - sql += "%s as asct1 " % math_fun_join_2 + or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']): + sql = "select max(asct1) from ( select " + sql += "%s as asct1 " % math_fun_join_2 sql += "from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.t_join_where) sql += " and %s " % random.choice(self.qt_u_or_where) sql += "%s " % random.choice(self.partiton_where_j) sql += "%s " % random.choice(self.slimit1_where) - sql += ") " + sql += ") " sql += "%s ;" % random.choice(self.limit_u_where) - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) # tdSql.query(sql) # self.cur1.execute(sql) # TD-16039 - + #taos -f sql startTime_taos_f = time.time() print("taos -f %s sql start!" %mathlist) @@ -1329,271 +1329,271 @@ class TDTestCase: _ = subprocess.check_output(taos_cmd1, shell=True).decode("utf-8") print("taos -f %s sql over!" 
%mathlist) endTime_taos_f = time.time() - print("taos_f total time %ds" % (endTime_taos_f - startTime_taos_f)) - - print("=========%s====over=============" %mathlist) + print("taos_f total time %ds" % (endTime_taos_f - startTime_taos_f)) + + print("=========%s====over=============" %mathlist) def str_nest(self,strlist): - - print("==========%s===start=============" %strlist) - os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename)) - + + print("==========%s===start=============" %strlist) + os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename)) + self.dropandcreateDB_random("%s" %self.db_nest, 1) - + if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['LENGTH','CHAR_LENGTH']) \ or (strlist == ['']): - str_functions = strlist - fun_fix_column = ['(q_nchar)','(q_binary)','(q_nchar_null)','(q_binary_null)'] + str_functions = strlist + fun_fix_column = ['(q_nchar)','(q_binary)','(q_nchar_null)','(q_binary_null)'] fun_column_1 = random.sample(str_functions,1)+random.sample(fun_fix_column,1) str_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","") fun_column_2 = random.sample(str_functions,1)+random.sample(fun_fix_column,1) str_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","") - + fun_fix_column_j = ['(t1.q_nchar)','(t1.q_binary)','(t1.q_nchar_null)','(t1.q_binary_null)', - '(t2.q_nchar)','(t2.q_binary)','(t2.q_nchar_null)','(t2.q_binary_null)'] + '(t2.q_nchar)','(t2.q_binary)','(t2.q_nchar_null)','(t2.q_binary_null)'] fun_column_join_1 = random.sample(str_functions,1)+random.sample(fun_fix_column_j,1) str_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","") fun_column_join_2 = random.sample(str_functions,1)+random.sample(fun_fix_column_j,1) str_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","") - - fun_fix_column_s = ['(q_nchar)','(q_binary)','(q_nchar_null)','(q_binary_null)','(loc)','(tbname)'] + + fun_fix_column_s = ['(q_nchar)','(q_binary)','(q_nchar_null)','(q_binary_null)','(loc)','(tbname)'] fun_column_s_1 = random.sample(str_functions,1)+random.sample(fun_fix_column_s,1) str_fun_s_1 = str(fun_column_s_1).replace("[","").replace("]","").replace("'","").replace(", ","") fun_column_s_2 = random.sample(str_functions,1)+random.sample(fun_fix_column_s,1) str_fun_s_2 = str(fun_column_s_2).replace("[","").replace("]","").replace("'","").replace(", ","") - + fun_fix_column_s_j = ['(t1.q_nchar)','(t1.q_binary)','(t1.q_nchar_null)','(t1.q_binary_null)','(t1.loc)','(t1.tbname)', - '(t2.q_nchar)','(t2.q_binary)','(t2.q_nchar_null)','(t2.q_binary_null)','(t2.loc)','(t2.tbname)'] + '(t2.q_nchar)','(t2.q_binary)','(t2.q_nchar_null)','(t2.q_binary_null)','(t2.loc)','(t2.tbname)'] fun_column_join_s_1 = random.sample(str_functions,1)+random.sample(fun_fix_column_j,1) str_fun_join_s_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","") fun_column_join_s_2 = random.sample(str_functions,1)+random.sample(fun_fix_column_j,1) str_fun_join_s_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","") - + elif (strlist == ['SUBSTR']) : - str_functions = strlist - pos = random.randint(1, 20) - sub_len = random.randint(1, 10) + str_functions = strlist + pos = random.randint(1, 20) + sub_len = random.randint(1, 10) fun_fix_column = ['(q_nchar,pos)','(q_binary,pos)','(q_nchar_null,pos)','(q_binary_null,pos)', - 
'(q_nchar,pos,sub_len)','(q_binary,pos,sub_len)','(q_nchar_null,pos,sub_len)','(q_binary_null,pos,sub_len)',] + '(q_nchar,pos,sub_len)','(q_binary,pos,sub_len)','(q_nchar_null,pos,sub_len)','(q_binary_null,pos,sub_len)',] fun_column_1 = random.sample(str_functions,1)+random.sample(fun_fix_column,1) str_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("pos",str(pos)).replace("sub_len",str(sub_len)) fun_column_2 = random.sample(str_functions,1)+random.sample(fun_fix_column,1) str_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("pos",str(pos)).replace("sub_len",str(sub_len)) - + fun_fix_column_j = ['(t1.q_nchar,pos)','(t1.q_binary,pos)','(t1.q_nchar_null,pos)','(t1.q_binary_null,pos)', '(t1.q_nchar,pos,sub_len)','(t1.q_binary,pos,sub_len)','(t1.q_nchar_null,pos,sub_len)','(t1.q_binary_null,pos,sub_len)', '(t2.q_nchar,pos)','(t2.q_binary,pos)','(t2.q_nchar_null,pos)','(t2.q_binary_null,pos)', - '(t2.q_nchar,pos,sub_len)','(t2.q_binary,pos,sub_len)','(t2.q_nchar_null,pos,sub_len)','(t2.q_binary_null,pos,sub_len)'] + '(t2.q_nchar,pos,sub_len)','(t2.q_binary,pos,sub_len)','(t2.q_nchar_null,pos,sub_len)','(t2.q_binary_null,pos,sub_len)'] fun_column_join_1 = random.sample(str_functions,1)+random.sample(fun_fix_column_j,1) str_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("pos",str(pos)).replace("sub_len",str(sub_len)) fun_column_join_2 = random.sample(str_functions,1)+random.sample(fun_fix_column_j,1) str_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("pos",str(pos)).replace("sub_len",str(sub_len)) - + fun_fix_column_s = ['(q_nchar,pos)','(q_binary,pos)','(q_nchar_null,pos)','(q_binary_null,pos)','(loc,pos)', - '(q_nchar,pos,sub_len)','(q_binary,pos,sub_len)','(q_nchar_null,pos,sub_len)','(q_binary_null,pos,sub_len)','(loc,pos,sub_len)',] + '(q_nchar,pos,sub_len)','(q_binary,pos,sub_len)','(q_nchar_null,pos,sub_len)','(q_binary_null,pos,sub_len)','(loc,pos,sub_len)',] fun_column_s_1 = random.sample(str_functions,1)+random.sample(fun_fix_column_s,1) str_fun_s_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("pos",str(pos)).replace("sub_len",str(sub_len)) fun_column_s_2 = random.sample(str_functions,1)+random.sample(fun_fix_column_s,1) str_fun_s_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("pos",str(pos)).replace("sub_len",str(sub_len)) - + fun_fix_column_s_j = ['(t1.q_nchar,pos)','(t1.q_binary,pos)','(t1.q_nchar_null,pos)','(t1.q_binary_null,pos)','(t1.loc,pos)', '(t1.q_nchar,pos,sub_len)','(t1.q_binary,pos,sub_len)','(t1.q_nchar_null,pos,sub_len)','(t1.q_binary_null,pos,sub_len)','(t1.loc,pos,sub_len)', '(t2.q_nchar,pos)','(t2.q_binary,pos)','(t2.q_nchar_null,pos)','(t2.q_binary_null,pos)','(t2.loc,pos)', - '(t2.q_nchar,pos,sub_len)','(t2.q_binary,pos,sub_len)','(t2.q_nchar_null,pos,sub_len)','(t2.q_binary_null,pos,sub_len)','(t2.loc,pos,sub_len)'] + '(t2.q_nchar,pos,sub_len)','(t2.q_binary,pos,sub_len)','(t2.q_nchar_null,pos,sub_len)','(t2.q_binary_null,pos,sub_len)','(t2.loc,pos,sub_len)'] fun_column_join_s_1 = random.sample(str_functions,1)+random.sample(fun_fix_column_s_j,1) str_fun_join_s_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("pos",str(pos)).replace("sub_len",str(sub_len)) fun_column_join_s_2 = 
random.sample(str_functions,1)+random.sample(fun_fix_column_s_j,1) str_fun_join_s_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("pos",str(pos)).replace("sub_len",str(sub_len)) - + elif (strlist == ['CONCAT']) : - str_functions = strlist - i = random.randint(2,8) + str_functions = strlist + i = random.randint(2,8) fun_fix_column = ['q_nchar','q_nchar1','q_nchar2','q_nchar3','q_nchar4','q_nchar5','q_nchar6','q_nchar7','q_nchar8','q_nchar_null', - 'q_binary','q_binary1','q_binary2','q_binary3','q_binary4','q_binary5','q_binary6','q_binary7','q_binary8','q_binary_null'] - - column1 = str(random.sample(fun_fix_column,i)).replace("[","").replace("]","").replace("'","") + 'q_binary','q_binary1','q_binary2','q_binary3','q_binary4','q_binary5','q_binary6','q_binary7','q_binary8','q_binary_null'] + + column1 = str(random.sample(fun_fix_column,i)).replace("[","").replace("]","").replace("'","") fun_column_1 = str(random.sample(str_functions,1))+'('+column1+')' str_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","") - + column2 = str(random.sample(fun_fix_column,i)).replace("[","").replace("]","").replace("'","") fun_column_2 = str(random.sample(str_functions,1))+'('+column2+')' str_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","") - + fun_fix_column_j = ['(t1.q_nchar)','(t1.q_nchar1)','(t1.q_nchar2)','(t1.q_nchar3)','(t1.q_nchar4)','(t1.q_nchar5)','(t1.q_nchar6)','(t1.q_nchar7)','(t1.q_nchar8)','(t1.q_nchar_null)', '(t2.q_nchar)','(t2.q_nchar1)','(t2.q_nchar2)','(t2.q_nchar3)','(t2.q_nchar4)','(t2.q_nchar5)','(t2.q_nchar6)','(t2.q_nchar7)','(t2.q_nchar8)','(t2.q_nchar_null)', '(t1.q_binary)','(t1.q_binary1)','(t1.q_binary2)','(t1.q_binary3)','(t1.q_binary4)','(t1.q_binary5)','(t1.q_binary6)','(t1.q_binary7)','(t1.q_binary8)','(t1.q_binary_null)', - '(t2.q_binary)','(t2.q_binary1)','(t2.q_binary2)','(t2.q_binary3)','(t2.q_binary4)','(t2.q_binary5)','(t2.q_binary6)','(t2.q_binary7)','(t2.q_binary8)','(t2.q_binary_null)'] - - column_j1 = str(random.sample(fun_fix_column_j,i)).replace("[","").replace("]","").replace("'","") + '(t2.q_binary)','(t2.q_binary1)','(t2.q_binary2)','(t2.q_binary3)','(t2.q_binary4)','(t2.q_binary5)','(t2.q_binary6)','(t2.q_binary7)','(t2.q_binary8)','(t2.q_binary_null)'] + + column_j1 = str(random.sample(fun_fix_column_j,i)).replace("[","").replace("]","").replace("'","") fun_column_join_1 = str(random.sample(str_functions,1))+'('+column_j1+')' str_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","") - - column_j2 = str(random.sample(fun_fix_column_j,i)).replace("[","").replace("]","").replace("'","") + + column_j2 = str(random.sample(fun_fix_column_j,i)).replace("[","").replace("]","").replace("'","") fun_column_join_2 = str(random.sample(str_functions,1))+'('+column_j2+')' str_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","") - + fun_fix_column_s = ['q_nchar','q_nchar1','q_nchar2','q_nchar3','q_nchar4','q_nchar5','q_nchar6','q_nchar7','q_nchar8','loc','q_nchar_null', - 'q_binary','q_binary1','q_binary2','q_binary3','q_binary4','q_binary5','q_binary6','q_binary7','q_binary8','q_binary_null'] - - column_s1 = str(random.sample(fun_fix_column,i)).replace("[","").replace("]","").replace("'","") + 'q_binary','q_binary1','q_binary2','q_binary3','q_binary4','q_binary5','q_binary6','q_binary7','q_binary8','q_binary_null'] + + column_s1 = str(random.sample(fun_fix_column,i)).replace("[","").replace("]","").replace("'","") 
fun_column_s_1 = str(random.sample(str_functions,1))+'('+column_s1+')' str_fun_s_1 = str(fun_column_s_1).replace("[","").replace("]","").replace("'","") - - column_s2 = str(random.sample(fun_fix_column,i)).replace("[","").replace("]","").replace("'","") + + column_s2 = str(random.sample(fun_fix_column,i)).replace("[","").replace("]","").replace("'","") fun_column_s_2 = str(random.sample(str_functions,1))+'('+column_s2+')' str_fun_s_2 = str(fun_column_s_2).replace("[","").replace("]","").replace("'","") - + fun_fix_column_s_j = ['(t1.q_nchar)','(t1.q_nchar1)','(t1.q_nchar2)','(t1.q_nchar3)','(t1.q_nchar4)','(t1.q_nchar5)','(t1.q_nchar6)','(t1.q_nchar7)','(t1.q_nchar8)','(t1.q_nchar_null)','(t1.loc)', '(t2.q_nchar)','(t2.q_nchar1)','(t2.q_nchar2)','(t2.q_nchar3)','(t2.q_nchar4)','(t2.q_nchar5)','(t2.q_nchar6)','(t2.q_nchar7)','(t2.q_nchar8)','(t2.q_nchar_null)','(t2.loc)', '(t1.q_binary)','(t1.q_binary1)','(t1.q_binary2)','(t1.q_binary3)','(t1.q_binary4)','(t1.q_binary5)','(t1.q_binary6)','(t1.q_binary7)','(t1.q_binary8)','(t1.q_binary_null)', '(t2.q_binary)','(t2.q_binary1)','(t2.q_binary2)','(t2.q_binary3)','(t2.q_binary4)','(t2.q_binary5)','(t2.q_binary6)','(t2.q_binary7)','(t2.q_binary8)','(t2.q_binary_null)'] - - column_j_s1 = str(random.sample(fun_fix_column_s_j,i)).replace("[","").replace("]","").replace("'","") + + column_j_s1 = str(random.sample(fun_fix_column_s_j,i)).replace("[","").replace("]","").replace("'","") fun_column_join_s_1 = str(random.sample(str_functions,1))+'('+column_j_s1+')' str_fun_join_s_1 = str(fun_column_join_s_1).replace("[","").replace("]","").replace("'","") - - column_j_s2 = str(random.sample(fun_fix_column_s_j,i)).replace("[","").replace("]","").replace("'","") + + column_j_s2 = str(random.sample(fun_fix_column_s_j,i)).replace("[","").replace("]","").replace("'","") fun_column_join_s_2 = str(random.sample(str_functions,1))+'('+column_j_s2+')' str_fun_join_s_2 = str(fun_column_join_s_2).replace("[","").replace("]","").replace("'","") - + elif (strlist == ['CONCAT_WS']): - str_functions = strlist - i = random.randint(2,8) + str_functions = strlist + i = random.randint(2,8) fun_fix_column = ['q_nchar','q_nchar1','q_nchar2','q_nchar3','q_nchar4','q_nchar5','q_nchar6','q_nchar7','q_nchar8','q_nchar_null', - 'q_binary','q_binary1','q_binary2','q_binary3','q_binary4','q_binary5','q_binary6','q_binary7','q_binary8','q_binary_null'] - + 'q_binary','q_binary1','q_binary2','q_binary3','q_binary4','q_binary5','q_binary6','q_binary7','q_binary8','q_binary_null'] + separators = ['',' ','abc','123','!','@','#','$','%','^','&','*','(',')','-','_','+','=','{', '[','}',']','|',';',':',',','.','<','>','?','/','~','`','taos','涛思'] - separator = str(random.sample(separators,i)).replace("[","").replace("]","") - - column1 = str(random.sample(fun_fix_column,i)).replace("[","").replace("]","").replace("'","") + separator = str(random.sample(separators,i)).replace("[","").replace("]","") + + column1 = str(random.sample(fun_fix_column,i)).replace("[","").replace("]","").replace("'","") fun_column_1 = str(random.sample(str_functions,1))+'('+'\"'+separator+'\",'+column1+')' str_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","") - + column2 = str(random.sample(fun_fix_column,i)).replace("[","").replace("]","").replace("'","") fun_column_2 = str(random.sample(str_functions,1))+'('+'\"'+separator+'\",'+column2+')' str_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","") - + fun_fix_column_j = 
['(t1.q_nchar)','(t1.q_nchar1)','(t1.q_nchar2)','(t1.q_nchar3)','(t1.q_nchar4)','(t1.q_nchar5)','(t1.q_nchar6)','(t1.q_nchar7)','(t1.q_nchar8)','(t1.q_nchar_null)', '(t2.q_nchar)','(t2.q_nchar1)','(t2.q_nchar2)','(t2.q_nchar3)','(t2.q_nchar4)','(t2.q_nchar5)','(t2.q_nchar6)','(t2.q_nchar7)','(t2.q_nchar8)','(t2.q_nchar_null)', '(t1.q_binary)','(t1.q_binary1)','(t1.q_binary2)','(t1.q_binary3)','(t1.q_binary4)','(t1.q_binary5)','(t1.q_binary6)','(t1.q_binary7)','(t1.q_binary8)','(t1.q_binary_null)', - '(t2.q_binary)','(t2.q_binary1)','(t2.q_binary2)','(t2.q_binary3)','(t2.q_binary4)','(t2.q_binary5)','(t2.q_binary6)','(t2.q_binary7)','(t2.q_binary8)','(t2.q_binary_null)'] - - column_j1 = str(random.sample(fun_fix_column_j,i)).replace("[","").replace("]","").replace("'","") + '(t2.q_binary)','(t2.q_binary1)','(t2.q_binary2)','(t2.q_binary3)','(t2.q_binary4)','(t2.q_binary5)','(t2.q_binary6)','(t2.q_binary7)','(t2.q_binary8)','(t2.q_binary_null)'] + + column_j1 = str(random.sample(fun_fix_column_j,i)).replace("[","").replace("]","").replace("'","") fun_column_join_1 = str(random.sample(str_functions,1))+'('+'\"'+separator+'\",'+column_j1+')' str_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","") - - column_j2 = str(random.sample(fun_fix_column_j,i)).replace("[","").replace("]","").replace("'","") + + column_j2 = str(random.sample(fun_fix_column_j,i)).replace("[","").replace("]","").replace("'","") fun_column_join_2 = str(random.sample(str_functions,1))+'('+'\"'+separator+'\",'+column_j2+')' str_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","") - + fun_fix_column_s = ['q_nchar','q_nchar1','q_nchar2','q_nchar3','q_nchar4','q_nchar5','q_nchar6','q_nchar7','q_nchar8','loc','q_nchar_null', - 'q_binary','q_binary1','q_binary2','q_binary3','q_binary4','q_binary5','q_binary6','q_binary7','q_binary8','q_binary_null'] - - column_s1 = str(random.sample(fun_fix_column,i)).replace("[","").replace("]","").replace("'","") + 'q_binary','q_binary1','q_binary2','q_binary3','q_binary4','q_binary5','q_binary6','q_binary7','q_binary8','q_binary_null'] + + column_s1 = str(random.sample(fun_fix_column,i)).replace("[","").replace("]","").replace("'","") fun_column_s_1 = str(random.sample(str_functions,1))+'('+'\"'+separator+'\",'+column_s1+')' str_fun_s_1 = str(fun_column_s_1).replace("[","").replace("]","").replace("'","") - - column_s2 = str(random.sample(fun_fix_column,i)).replace("[","").replace("]","").replace("'","") + + column_s2 = str(random.sample(fun_fix_column,i)).replace("[","").replace("]","").replace("'","") fun_column_s_2 = str(random.sample(str_functions,1))+'('+'\"'+separator+'\",'+column_s2+')' str_fun_s_2 = str(fun_column_s_2).replace("[","").replace("]","").replace("'","") - + fun_fix_column_s_j = ['(t1.q_nchar)','(t1.q_nchar1)','(t1.q_nchar2)','(t1.q_nchar3)','(t1.q_nchar4)','(t1.q_nchar5)','(t1.q_nchar6)','(t1.q_nchar7)','(t1.q_nchar8)','(t1.q_nchar_null)','(t1.loc)', '(t2.q_nchar)','(t2.q_nchar1)','(t2.q_nchar2)','(t2.q_nchar3)','(t2.q_nchar4)','(t2.q_nchar5)','(t2.q_nchar6)','(t2.q_nchar7)','(t2.q_nchar8)','(t2.q_nchar_null)','(t2.loc)', '(t1.q_binary)','(t1.q_binary1)','(t1.q_binary2)','(t1.q_binary3)','(t1.q_binary4)','(t1.q_binary5)','(t1.q_binary6)','(t1.q_binary7)','(t1.q_binary8)','(t1.q_binary_null)', '(t2.q_binary)','(t2.q_binary1)','(t2.q_binary2)','(t2.q_binary3)','(t2.q_binary4)','(t2.q_binary5)','(t2.q_binary6)','(t2.q_binary7)','(t2.q_binary8)','(t2.q_binary_null)'] - - column_j_s1 = 
str(random.sample(fun_fix_column_s_j,i)).replace("[","").replace("]","").replace("'","") + + column_j_s1 = str(random.sample(fun_fix_column_s_j,i)).replace("[","").replace("]","").replace("'","") fun_column_join_s_1 = str(random.sample(str_functions,1))+'('+'\"'+separator+'\",'+column_j_s1+')' str_fun_join_s_1 = str(fun_column_join_s_1).replace("[","").replace("]","").replace("'","") - - column_j_s2 = str(random.sample(fun_fix_column_s_j,i)).replace("[","").replace("]","").replace("'","") + + column_j_s2 = str(random.sample(fun_fix_column_s_j,i)).replace("[","").replace("]","").replace("'","") fun_column_join_s_2 = str(random.sample(str_functions,1))+'('+'\"'+separator+'\",'+column_j_s2+')' str_fun_join_s_2 = str(fun_column_join_s_2).replace("[","").replace("]","").replace("'","") - - + + tdSql.query("select 1-1 as str_nest from stable_1 limit 1;") for i in range(self.fornum): if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']) : - sql = "select ts , LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select " - sql += "%s as asct1, " % str_fun_1 - sql += "%s as asct2, " % str_fun_2 + sql = "select ts , LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select " + sql += "%s as asct1, " % str_fun_1 + sql += "%s as asct2, " % str_fun_2 sql += "%s, " % random.choice(self.s_s_select) - sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) sql += "ts from regular_table_1 where " sql += "%s " % random.choice(self.q_where) sql += "%s " % random.choice(self.order_where) sql += ");" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) tdSql.checkRows(100) self.cur1.execute(sql) elif (strlist == ['LENGTH','CHAR_LENGTH']): - sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select " - sql += "%s as asct1, " % str_fun_1 - sql += "%s as asct2, " % str_fun_2 + sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select " + sql += "%s as asct1, " % str_fun_1 + sql += "%s as asct2, " % str_fun_2 sql += "%s, " % random.choice(self.s_s_select) - sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) sql += "ts from regular_table_1 where " sql += "%s " % random.choice(self.q_where) sql += "%s " % random.choice(self.order_where) sql += ");" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) self.cur1.execute(sql) - + tdSql.query("select 1-2 as str_nest from stable_1 limit 1;") for i in range(self.fornum): if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']) : sql = "select ts , LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select " - sql += "%s as asct1, " % str_fun_1 + sql += "%s as asct1, " % str_fun_1 sql += "%s, " % random.choice(self.s_s_select) - sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) sql += "ts ts from regular_table_1 where " sql += "%s " % random.choice(self.q_where) sql += "%s )" % random.choice(self.order_where) - sql += "%s " % random.choice(self.unionall_or_union) + sql += "%s " % random.choice(self.unionall_or_union) sql += "select ts , asct2 from ( select " - sql += "%s as asct2, " % str_fun_2 + sql += "%s as asct2, " % str_fun_2 sql += "%s, " % random.choice(self.s_s_select) - sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % 
random.choice(self.q_select) sql += "ts ts from regular_table_1 where " - sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.q_where) #sql += "%s " % random.choice(having_support) sql += "%s " % random.choice(self.order_where) sql += "%s " % random.choice(self.limit1_where) sql += ")" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #TD-15437 tdSql.query(sql) - #TD-15437 self.cur1.execute(sql) - elif (strlist == ['LENGTH','CHAR_LENGTH']): + #TD-15437 self.cur1.execute(sql) + elif (strlist == ['LENGTH','CHAR_LENGTH']): sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select " - sql += "%s as asct1, " % str_fun_1 + sql += "%s as asct1, " % str_fun_1 sql += "%s, " % random.choice(self.s_s_select) - sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) sql += "ts ts from regular_table_1 where " sql += "%s " % random.choice(self.q_where) sql += "%s )" % random.choice(self.order_where) - sql += "%s " % random.choice(self.unionall_or_union) + sql += "%s " % random.choice(self.unionall_or_union) sql += "select ts , asct2 from ( select " - sql += "%s as asct2, " % str_fun_2 + sql += "%s as asct2, " % str_fun_2 sql += "%s, " % random.choice(self.s_s_select) - sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) sql += "ts ts from regular_table_1 where " - sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.q_where) #sql += "%s " % random.choice(having_support) sql += "%s " % random.choice(self.order_where) sql += "%s " % random.choice(self.limit1_where) sql += ")" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #TD-15437 tdSql.query(sql) - #TD-15437 self.cur1.execute(sql) - + #TD-15437 self.cur1.execute(sql) + tdSql.query("select 1-3 as str_nest from stable_1 limit 1;") for i in range(self.fornum): if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']): @@ -1601,20 +1601,20 @@ class TDTestCase: sql += "%s as asct1, ts ," % str_fun_1 sql += "%s as asct2, " % str_fun_2 sql += "%s, " % random.choice(self.s_s_select) - sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) sql += "ts from regular_table_1 where " sql += "%s " % random.choice(self.q_where) - sql += "%s select " % random.choice(self.unionall_or_union) + sql += "%s select " % random.choice(self.unionall_or_union) sql += "%s as asct2, ts ," % str_fun_2 sql += "%s as asct1, " % str_fun_1 sql += "%s, " % random.choice(self.s_s_select) - sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) sql += "ts from regular_table_2 where " sql += "%s " % random.choice(self.q_where) sql += "%s " % random.choice(self.order_where) sql += ")" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #TD-15473 tdSql.query(sql) #self.cur1.execute(sql) elif (strlist == ['LENGTH','CHAR_LENGTH']): @@ -1622,88 +1622,88 @@ class TDTestCase: sql += "%s as asct1, ts ," % str_fun_1 sql += "%s as asct2, " % str_fun_2 sql += "%s, " % random.choice(self.s_s_select) - sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) sql += "ts from regular_table_1 where " sql += "%s " % random.choice(self.q_where) - sql += "%s select " % random.choice(self.unionall_or_union) + sql += "%s select " % random.choice(self.unionall_or_union) sql += "%s as asct2, 
ts ," % str_fun_2 sql += "%s as asct1, " % str_fun_1 sql += "%s, " % random.choice(self.s_s_select) - sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) sql += "ts from regular_table_2 where " sql += "%s " % random.choice(self.q_where) sql += "%s " % random.choice(self.order_where) sql += ")" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #TD-15473 tdSql.query(sql) #self.cur1.execute(sql) - + tdSql.query("select 1-4 as str_nest from stable_1 limit 1;") for i in range(self.fornum): if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']): sql = "select ts , LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select t1.ts as ts," - sql += "%s as asct2, " % str_fun_join_1 - sql += "%s as asct1, " % str_fun_join_2 - sql += "%s, " % str_fun_join_1 - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t2.%s, " % random.choice(self.q_select) + sql += "%s as asct2, " % str_fun_join_1 + sql += "%s as asct1, " % str_fun_join_2 + sql += "%s, " % str_fun_join_1 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.q_u_where) sql += "and %s " % random.choice(self.q_u_or_where) sql += "%s " % random.choice(self.order_u_where) sql += ");" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) tdSql.checkRows(100) self.cur1.execute(sql) elif (strlist == ['LENGTH','CHAR_LENGTH']): sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select t1.ts as ts," - sql += "%s as asct2, " % str_fun_join_1 - sql += "%s as asct1, " % str_fun_join_2 - sql += "%s, " % str_fun_join_1 - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t2.%s, " % random.choice(self.q_select) + sql += "%s as asct2, " % str_fun_join_1 + sql += "%s as asct1, " % str_fun_join_2 + sql += "%s, " % str_fun_join_1 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.q_u_where) sql += "and %s " % random.choice(self.q_u_or_where) sql += "%s " % random.choice(self.order_u_where) sql += ");" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) self.cur1.execute(sql) - + tdSql.query("select 1-5 as str_nest from stable_1 limit 1;") for i in range(self.fornum): if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']): sql = "select ts ," - sql += "%s, " % str_fun_1 + sql += "%s, " % str_fun_1 + sql += "%s, " % random.choice(self.q_select) sql += "%s, " % random.choice(self.q_select) - sql += "%s, " % random.choice(self.q_select) sql += "%s " % str_fun_2 sql += " from ( select * from regular_table_1 ) where " sql += "%s " % random.choice(self.q_where) sql += "%s " % random.choice(self.order_where) sql += " ;" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) tdSql.checkRows(100) self.cur1.execute(sql) elif (strlist == ['LENGTH','CHAR_LENGTH']): sql = "select ts ," - sql += "%s, " % str_fun_1 + sql += "%s, " % str_fun_1 + sql += "%s, " % random.choice(self.q_select) sql += "%s, " % random.choice(self.q_select) - sql += "%s, " % 
random.choice(self.q_select) sql += "%s " % str_fun_2 sql += " from ( select * from regular_table_1 ) where " sql += "%s " % random.choice(self.q_where) sql += "%s " % random.choice(self.order_where) sql += " ;" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) tdSql.checkRows(100) self.cur1.execute(sql) @@ -1712,34 +1712,34 @@ class TDTestCase: for i in range(self.fornum): if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']): sql = "select ts , LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select t1.ts as ts," - sql += "%s as asct2, " % str_fun_join_1 - sql += "%s as asct1, " % str_fun_join_2 - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t2.%s, " % random.choice(self.q_select) - sql += "%s, " % str_fun_join_1 + sql += "%s as asct2, " % str_fun_join_1 + sql += "%s as asct1, " % str_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "%s, " % str_fun_join_1 sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.q_u_where) sql += "and %s )" % random.choice(self.q_u_or_where) sql += "%s " % random.choice(self.order_where) sql += "%s " % random.choice(self.limit1_where) - tdLog.info(sql) - tdLog.info(len(sql)) - tdSql.query(sql) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) self.cur1.execute(sql) elif (strlist == ['LENGTH','CHAR_LENGTH']): sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select t1.ts as ts," - sql += "%s as asct2, " % str_fun_join_1 - sql += "%s as asct1, " % str_fun_join_2 - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t2.%s, " % random.choice(self.q_select) - sql += "%s, " % str_fun_join_1 + sql += "%s as asct2, " % str_fun_join_1 + sql += "%s as asct1, " % str_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "%s, " % str_fun_join_1 sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.q_u_where) sql += "and %s )" % random.choice(self.q_u_or_where) sql += "%s " % random.choice(self.limit1_where) - tdLog.info(sql) - tdLog.info(len(sql)) - tdSql.query(sql) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) self.cur1.execute(sql) tdSql.query("select 1-7 as str_nest from stable_1 limit 1;") @@ -1748,13 +1748,13 @@ class TDTestCase: sql = "select ts , LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select " sql += "%s as asct1, ts ," % str_fun_s_1 sql += "%s as asct2, " % str_fun_s_2 - sql += "%s, " % random.choice(self.q_select) - sql += "%s, " % random.choice(self.t_select) + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.t_select) sql += "ts from stable_1 where " sql += "%s " % random.choice(self.qt_where) sql += "%s " % random.choice(self.order_where) sql += ") ;" - tdLog.info(sql) + tdLog.info(sql) tdLog.info(len(sql)) # tdSql.query(sql) # tdSql.checkRows(300) @@ -1763,17 +1763,17 @@ class TDTestCase: sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select " sql += "%s as asct1, ts ," % str_fun_s_1 sql += "%s as asct2, " % str_fun_s_2 - sql += "%s, " % random.choice(self.q_select) - sql += "%s, " % random.choice(self.t_select) + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.t_select) sql += "ts from 
stable_1 where " sql += "%s " % random.choice(self.qt_where) sql += "%s " % random.choice(self.order_where) sql += ") ;" - tdLog.info(sql) + tdLog.info(sql) tdLog.info(len(sql)) # tdSql.query(sql) # self.cur1.execute(sql)# TD-16039 - + tdSql.query("select 1-8 as str_nest from stable_1 limit 1;") for i in range(self.fornum): if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']): @@ -1782,13 +1782,13 @@ class TDTestCase: sql += "%s, " % random.choice(self.s_s_select) sql += "%s as asct1, ts ," % str_fun_s_1 sql += "%s as asct2, " % str_fun_s_2 - sql += "%s, " % random.choice(self.q_select) - sql += "%s, " % random.choice(self.t_select) + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.t_select) sql += "ts from stable_1 where " sql += "%s " % random.choice(self.qt_where) sql += "%s " % random.choice(self.order_where) sql += ") ;" - tdLog.info(sql) + tdLog.info(sql) tdLog.info(len(sql)) # tdSql.query(sql) # tdSql.checkRows(300) @@ -1799,13 +1799,13 @@ class TDTestCase: sql += "%s, " % random.choice(self.s_s_select) sql += "%s as asct1, ts ," % str_fun_s_1 sql += "%s as asct2, " % str_fun_s_2 - sql += "%s, " % random.choice(self.q_select) - sql += "%s, " % random.choice(self.t_select) + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.t_select) sql += "ts from stable_1 where " sql += "%s " % random.choice(self.qt_where) sql += "%s " % random.choice(self.order_where) sql += ") ;" - tdLog.info(sql) + tdLog.info(sql) tdLog.info(len(sql)) # tdSql.query(sql) # self.cur1.execute(sql)# TD-16039 @@ -1814,12 +1814,12 @@ class TDTestCase: for i in range(self.fornum): if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']): sql = "select ts , LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select t1.ts as ts," - sql += "%s as asct2, " % str_fun_join_s_1 - sql += "%s as asct1, " % str_fun_join_s_2 - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t2.%s, " % random.choice(self.q_select) - sql += "t2.%s, " % random.choice(self.q_select) + sql += "%s as asct2, " % str_fun_join_s_1 + sql += "%s as asct1, " % str_fun_join_s_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.t_join_where) sql += "and %s " % random.choice(self.t_u_where) @@ -1827,18 +1827,18 @@ class TDTestCase: sql += "%s " % random.choice(self.order_u_where) sql += "%s " % random.choice(self.limit1_where) sql += ") ;" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) # tdSql.query(sql) # self.cur1.execute(sql) # TD-16039 elif (strlist == ['LENGTH','CHAR_LENGTH']): sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select t1.ts as ts," - sql += "%s as asct2, " % str_fun_join_s_1 - sql += "%s as asct1, " % str_fun_join_s_2 - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t2.%s, " % random.choice(self.q_select) - sql += "t2.%s, " % random.choice(self.q_select) + sql += "%s as asct2, " % str_fun_join_s_1 + sql += "%s as asct1, " % str_fun_join_s_2 + sql += "t1.%s, " % random.choice(self.q_select) 
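# Illustrative note, not part of the original test: this 1-9 block joins stable_1 and
# stable_2 on their timestamps, so the statement being assembled here ends up shaped
# roughly like
#   select ts , LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from (
#     select t1.ts as ts, CONCAT((t1.q_nchar),(t2.q_binary3)) as asct2, ... , t2.ts
#     from stable_1 t1 , stable_2 t2
#     where t1.ts = t2.ts and <t_join_where> and <t_u_where> <order/limit> ) ;
# it is then logged via tdLog.info(); the tdSql.query()/self.cur1.execute() calls stay
# commented out in this block pending TD-16039.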
+ sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.t_join_where) sql += "and %s " % random.choice(self.t_u_where) @@ -1846,11 +1846,11 @@ class TDTestCase: sql += "%s " % random.choice(self.order_u_where) sql += "%s " % random.choice(self.limit1_where) sql += ") ;" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) # tdSql.query(sql) # self.cur1.execute(sql)# TD-16039 - + self.restartDnodes() tdSql.query("select 1-10 as str_nest from stable_1 limit 1;") for i in range(self.fornum): @@ -1874,8 +1874,8 @@ class TDTestCase: sql += "%s " % random.choice(self.order_where) sql += "%s " % random.choice(self.limit1_where) sql += ")" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #TD-15437 tdSql.query(sql) #TD-15437 self.cur1.execute(sql) elif (strlist == ['LENGTH','CHAR_LENGTH']): @@ -1898,11 +1898,11 @@ class TDTestCase: sql += "%s " % random.choice(self.order_where) sql += "%s " % random.choice(self.limit1_where) sql += ")" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #TD-15437 tdSql.query(sql) #TD-15437 self.cur1.execute(sql) - + #3 inter union not support tdSql.query("select 1-11 as str_nest from stable_1 limit 1;") for i in range(self.fornum): @@ -1927,8 +1927,8 @@ class TDTestCase: sql += "%s " % random.choice(self.order_where) sql += "%s " % random.choice(self.limit1_where) sql += ")" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #TD-15837 tdSql.query(sql) # self.cur1.execute(sql) elif (strlist == ['LENGTH','CHAR_LENGTH']): @@ -1952,8 +1952,8 @@ class TDTestCase: sql += "%s " % random.choice(self.order_where) sql += "%s " % random.choice(self.limit1_where) sql += ")" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #TD-15837 tdSql.query(sql) # self.cur1.execute(sql) @@ -1961,81 +1961,81 @@ class TDTestCase: for i in range(self.fornum): if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']): sql = "select ts , LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select t1.ts as ts," - sql += "%s as asct2, " % str_fun_join_s_1 - sql += "%s as asct1, " % str_fun_join_s_2 - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t2.%s, " % random.choice(self.q_select) - sql += "t2.%s, " % random.choice(self.q_select) + sql += "%s as asct2, " % str_fun_join_s_1 + sql += "%s as asct1, " % str_fun_join_s_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.t_u_where) sql += "and %s " % random.choice(self.t_u_or_where) sql += "%s " % random.choice(self.order_u_where) sql += "%s " % random.choice(self.limit1_where) sql += ");" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) # tdSql.query(sql) # self.cur1.execute(sql)# TD-16039 elif (strlist == ['LENGTH','CHAR_LENGTH']): sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select t1.ts as ts," - sql += "%s as asct2, " % str_fun_join_s_1 - sql += 
"%s as asct1, " % str_fun_join_s_2 - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t2.%s, " % random.choice(self.q_select) - sql += "t2.%s, " % random.choice(self.q_select) + sql += "%s as asct2, " % str_fun_join_s_1 + sql += "%s as asct1, " % str_fun_join_s_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.t_u_where) sql += "and %s " % random.choice(self.t_u_or_where) sql += "%s " % random.choice(self.order_u_where) sql += "%s " % random.choice(self.limit1_where) sql += ");" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) # tdSql.query(sql) # self.cur1.execute(sql)# TD-16039 - + tdSql.query("select 1-13 as str_nest from stable_1 limit 1;") for i in range(self.fornum): if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']): sql = "select ts ," - sql += "%s, " % str_fun_1 + sql += "%s, " % str_fun_1 + sql += "%s, " % random.choice(self.q_select) sql += "%s, " % random.choice(self.q_select) - sql += "%s, " % random.choice(self.q_select) sql += "%s, " % str_fun_2 - sql += "%s " % random.choice(self.t_select) + sql += "%s " % random.choice(self.t_select) sql += " from ( select * from stable_1 where " sql += "%s " % random.choice(self.qt_where) sql += "%s " % random.choice(self.order_where) sql += ") ;" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) # tdSql.query(sql) # tdSql.checkRows(300) # self.cur1.execute(sql) # TD-16039 elif (strlist == ['LENGTH','CHAR_LENGTH']): sql = "select ts ," - sql += "%s, " % str_fun_1 + sql += "%s, " % str_fun_1 + sql += "%s, " % random.choice(self.q_select) sql += "%s, " % random.choice(self.q_select) - sql += "%s, " % random.choice(self.q_select) sql += "%s, " % str_fun_2 - sql += "%s " % random.choice(self.t_select) + sql += "%s " % random.choice(self.t_select) sql += " from ( select * from stable_1 where " sql += "%s " % random.choice(self.qt_where) sql += "%s " % random.choice(self.order_where) sql += ") ;" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) # tdSql.query(sql) # tdSql.checkRows(300) # self.cur1.execute(sql)# TD-16039 - + tdSql.query("select 1-14 as str_nest from stable_1 limit 1;") for i in range(self.fornum): if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']): sql = "select LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select " - sql += "%s as asct1, " % str_fun_s_1 + sql += "%s as asct1, " % str_fun_s_1 sql += "%s as asct2" % str_fun_s_2 sql += " from stable_1 where " sql += "%s " % random.choice(self.q_where) @@ -2043,13 +2043,13 @@ class TDTestCase: sql += "%s " % random.choice(self.order_desc_where) sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] ) sql += " ) ;" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) # tdSql.query(sql) # self.cur1.execute(sql) # TD-16039 elif (strlist == ['LENGTH','CHAR_LENGTH']): sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select " - sql += "%s as asct1, " % str_fun_s_1 + sql += "%s as asct1, " % str_fun_s_1 sql += "%s as asct2" % str_fun_s_2 
sql += " from stable_1 where " sql += "%s " % random.choice(self.q_where) @@ -2057,54 +2057,54 @@ class TDTestCase: sql += "%s " % random.choice(self.order_desc_where) sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] ) sql += " ) ;" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) # tdSql.query(sql) # self.cur1.execute(sql) # TD-16039 - + tdSql.query("select 1-15 as str_nest from stable_1 limit 1;") for i in range(self.fornum): if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']): sql = "select ts , LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select t1.ts as ts," - sql += "%s as asct2, " % str_fun_join_s_1 - sql += "%s as asct1, " % str_fun_join_s_2 - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t2.%s, " % random.choice(self.q_select) - sql += "t2.%s " % random.choice(self.q_select) + sql += "%s as asct2, " % str_fun_join_s_1 + sql += "%s as asct1, " % str_fun_join_s_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s " % random.choice(self.q_select) sql += "from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.t_join_where) sql += " and %s " % random.choice(self.qt_u_or_where) sql += "%s " % random.choice(self.partiton_where_j) sql += "%s " % random.choice(self.slimit1_where) - sql += ") " + sql += ") " sql += "%s " % random.choice(self.order_desc_where) sql += "%s ;" % random.choice(self.limit_u_where) - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #TD-15955 tdSql.query(sql) - #TD-15955 self.cur1.execute(sql) + #TD-15955 self.cur1.execute(sql) elif (strlist == ['LENGTH','CHAR_LENGTH']): sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select t1.ts as ts," - sql += "%s as asct2, " % str_fun_join_s_1 - sql += "%s as asct1, " % str_fun_join_s_2 - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t2.%s, " % random.choice(self.q_select) - sql += "t2.%s " % random.choice(self.q_select) + sql += "%s as asct2, " % str_fun_join_s_1 + sql += "%s as asct1, " % str_fun_join_s_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s " % random.choice(self.q_select) sql += "from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.t_join_where) sql += " and %s " % random.choice(self.qt_u_or_where) sql += "%s " % random.choice(self.partiton_where_j) sql += "%s " % random.choice(self.slimit1_where) - sql += ") " + sql += ") " sql += "%s " % random.choice(self.order_desc_where) sql += "%s ;" % random.choice(self.limit_u_where) - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #TD-15955 tdSql.query(sql) - #TD-15955 self.cur1.execute(sql) - + #TD-15955 self.cur1.execute(sql) + #taos -f sql startTime_taos_f = time.time() print("taos -f %s sql start!" %strlist) @@ -2112,169 +2112,169 @@ class TDTestCase: _ = subprocess.check_output(taos_cmd1, shell=True).decode("utf-8") print("taos -f %s sql over!" 
%strlist) endTime_taos_f = time.time() - print("taos_f total time %ds" % (endTime_taos_f - startTime_taos_f)) - - print("=========%s====over=============" %strlist) - + print("taos_f total time %ds" % (endTime_taos_f - startTime_taos_f)) + + print("=========%s====over=============" %strlist) + def time_nest(self,timelist): - - print("==========%s===start=============" %timelist) - os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename)) - + + print("==========%s===start=============" %timelist) + os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename)) + self.dropandcreateDB_random("%s" %self.db_nest, 1) - + if (timelist == ['NOW','TODAY']) or (timelist == ['TIMEZONE']): - time_functions = timelist - fun_fix_column = ['()'] + time_functions = timelist + fun_fix_column = ['()'] fun_column_1 = random.sample(time_functions,1)+random.sample(fun_fix_column,1) time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","") fun_column_2 = random.sample(time_functions,1)+random.sample(fun_fix_column,1) time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","") - - fun_fix_column_j = ['()'] + + fun_fix_column_j = ['()'] fun_column_join_1 = random.sample(time_functions,1)+random.sample(fun_fix_column_j,1) time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","") fun_column_join_2 = random.sample(time_functions,1)+random.sample(fun_fix_column_j,1) time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","") elif (timelist == ['TIMETRUNCATE']): - time_functions = timelist - - t = time.time() - t_to_s = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(t)) + time_functions = timelist + + t = time.time() + t_to_s = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(t)) fun_fix_column = ['q_ts','ts','_c0','_C0','_rowts','1600000000000','1600000000000000','1600000000000000000', - '%d' %t, '%d000' %t, '%d000000' %t,'t_to_s'] - - timeunits = ['1u' , '1a' ,'1s', '1m' ,'1h', '1d'] - timeunit = str(random.sample(timeunits,1)).replace("[","").replace("]","").replace("'","") - - column_1 = ['(%s,timeutil)'%(random.sample(fun_fix_column,1))] + '%d' %t, '%d000' %t, '%d000000' %t,'t_to_s'] + + timeunits = ['1u' , '1a' ,'1s', '1m' ,'1h', '1d'] + timeunit = str(random.sample(timeunits,1)).replace("[","").replace("]","").replace("'","") + + column_1 = ['(%s,timeutil)'%(random.sample(fun_fix_column,1))] fun_column_1 = random.sample(time_functions,1)+random.sample(column_1,1) time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("\"","").replace("t_to_s","'t_to_s'") time_fun_1 = str(time_fun_1).replace("timeutil","%s" %timeunit).replace("t_to_s","%s" %t_to_s) - - column_2 = ['(%s,timeutil)'%(random.sample(fun_fix_column,1))] + + column_2 = ['(%s,timeutil)'%(random.sample(fun_fix_column,1))] fun_column_2 = random.sample(time_functions,1)+random.sample(column_2,1) time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("\"","").replace("t_to_s","'t_to_s'") time_fun_2 = str(time_fun_2).replace("timeutil","%s" %timeunit).replace("t_to_s","%s" %t_to_s) - - + + fun_fix_column_j = ['(t1.q_ts)','(t1.ts)', '(t2.q_ts)','(t2.ts)','(1600000000000)','(1600000000000000)','(1600000000000000000)', - '(%d)' %t, '(%d000)' %t, '(%d000000)' %t,'t_to_s'] - - column_j1 = ['(%s,timeutil)'%(random.sample(fun_fix_column_j,1))] + '(%d)' %t, '(%d000)' %t, 
'(%d000000)' %t,'t_to_s'] + + column_j1 = ['(%s,timeutil)'%(random.sample(fun_fix_column_j,1))] fun_column_join_1 = random.sample(time_functions,1)+random.sample(column_j1,1) time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("\"","").replace("t_to_s","'t_to_s'") time_fun_join_1 = str(time_fun_join_1).replace("timeutil","%s" %timeunit).replace("t_to_s","%s" %t_to_s) - - column_j2 = ['(%s,timeutil)'%(random.sample(fun_fix_column_j,1))] + + column_j2 = ['(%s,timeutil)'%(random.sample(fun_fix_column_j,1))] fun_column_join_2 = random.sample(time_functions,1)+random.sample(column_j2,1) time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("\"","").replace("t_to_s","'t_to_s'") time_fun_join_2 = str(time_fun_join_2).replace("timeutil","%s" %timeunit).replace("t_to_s","%s" %t_to_s) elif (timelist == ['TO_ISO8601']): - time_functions = timelist - - t = time.time() + time_functions = timelist + + t = time.time() fun_fix_column = ['(now())','(ts)','(q_ts)','(_rowts)','(_c0)','(_C0)', '(1600000000000)','(1600000000000000)','(1600000000000000000)', - '(%d)' %t, '(%d000)' %t, '(%d000000)' %t] - + '(%d)' %t, '(%d000)' %t, '(%d000000)' %t] + fun_column_1 = random.sample(time_functions,1)+random.sample(fun_fix_column,1) time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","") - + fun_column_2 = random.sample(time_functions,1)+random.sample(fun_fix_column,1) time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","") - + fun_fix_column_j = ['(t1.q_ts)','(t1.ts)', '(t2.q_ts)','(t2.ts)','(1600000000000)','(1600000000000000)','(1600000000000000000)','(now())', - '(%d)' %t, '(%d000)' %t, '(%d000000)' %t] - + '(%d)' %t, '(%d000)' %t, '(%d000000)' %t] + fun_column_join_1 = random.sample(time_functions,1)+random.sample(fun_fix_column_j,1) time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","") - + fun_column_join_2 = random.sample(time_functions,1)+random.sample(fun_fix_column_j,1) time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","") elif (timelist == ['TO_UNIXTIMESTAMP']): - time_functions = timelist - - t = time.time() - t_to_s = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(t)) - fun_fix_column = ['(q_nchar)','(q_nchar1)','(q_nchar2)','(q_nchar3)','(q_nchar4)','(q_nchar_null)','(q_binary)','(q_binary5)','(q_binary6)','(q_binary7)','(q_binary8)','(q_binary_null)','(t_to_s)'] - + time_functions = timelist + + t = time.time() + t_to_s = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(t)) + fun_fix_column = ['(q_nchar)','(q_nchar1)','(q_nchar2)','(q_nchar3)','(q_nchar4)','(q_nchar_null)','(q_binary)','(q_binary5)','(q_binary6)','(q_binary7)','(q_binary8)','(q_binary_null)','(t_to_s)'] + fun_column_1 = random.sample(time_functions,1)+random.sample(fun_fix_column,1) time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("t_to_s","'t_to_s'") time_fun_1 = str(time_fun_1).replace("t_to_s","%s" %t_to_s) - + fun_column_2 = random.sample(time_functions,1)+random.sample(fun_fix_column,1) time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("t_to_s","'t_to_s'") time_fun_2 = str(time_fun_2).replace("t_to_s","%s" %t_to_s) - - fun_fix_column_j = ['(t1.q_nchar)','(t1.q_binary)', '(t2.q_nchar)','(t2.q_binary)','(t_to_s)'] - + + 
fun_fix_column_j = ['(t1.q_nchar)','(t1.q_binary)', '(t2.q_nchar)','(t2.q_binary)','(t_to_s)'] + fun_column_join_1 = random.sample(time_functions,1)+random.sample(fun_fix_column_j,1) time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("t_to_s","'t_to_s'") time_fun_join_1 = str(time_fun_join_1).replace("t_to_s","%s" %t_to_s) - + fun_column_join_2 = random.sample(time_functions,1)+random.sample(fun_fix_column_j,1) time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("t_to_s","'t_to_s'") time_fun_join_2 = str(time_fun_join_2).replace("t_to_s","%s" %t_to_s) elif (timelist == ['TIMEDIFF']): - time_functions = timelist - - t = time.time() - t_to_s = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(t)) - fun_fix_column = ['(q_nchar)','(q_nchar1)','(q_nchar2)','(q_nchar3)','(q_nchar4)','(q_nchar_null)','(q_binary)','(q_binary5)','(q_binary6)','(q_binary7)','(q_binary8)','(q_binary_null)','(t_to_s)'] - + time_functions = timelist + + t = time.time() + t_to_s = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(t)) + fun_fix_column = ['(q_nchar)','(q_nchar1)','(q_nchar2)','(q_nchar3)','(q_nchar4)','(q_nchar_null)','(q_binary)','(q_binary5)','(q_binary6)','(q_binary7)','(q_binary8)','(q_binary_null)','(t_to_s)'] + fun_column_1 = random.sample(time_functions,1)+random.sample(fun_fix_column,1) time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("t_to_s","'t_to_s'") time_fun_1 = str(time_fun_1).replace("t_to_s","%s" %t_to_s) - + fun_column_2 = random.sample(time_functions,1)+random.sample(fun_fix_column,1) time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("t_to_s","'t_to_s'") time_fun_2 = str(time_fun_2).replace("t_to_s","%s" %t_to_s) - - fun_fix_column_j = ['(t1.q_nchar)','(t1.q_binary)', '(t2.q_nchar)','(t2.q_binary)','(t_to_s)'] - + + fun_fix_column_j = ['(t1.q_nchar)','(t1.q_binary)', '(t2.q_nchar)','(t2.q_binary)','(t_to_s)'] + fun_column_join_1 = random.sample(time_functions,1)+random.sample(fun_fix_column_j,1) time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("t_to_s","'t_to_s'") time_fun_join_1 = str(time_fun_join_1).replace("t_to_s","%s" %t_to_s) - + fun_column_join_2 = random.sample(time_functions,1)+random.sample(fun_fix_column_j,1) time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("t_to_s","'t_to_s'") time_fun_join_2 = str(time_fun_join_2).replace("t_to_s","%s" %t_to_s) elif (timelist == ['ELAPSED']): - time_functions = timelist - - fun_fix_column = ['(ts)','(q_ts)','(_c0)','(_C0)','(_rowts)','(ts,time_unit)','(_c0,time_unit)','(_C0,time_unit)','(_rowts,time_unit)'] - - time_units = ['nums','numm','numh','numd','numa'] - time_unit = str(random.sample(time_units,1)).replace("[","").replace("]","").replace("'","") - time_num1 = random.randint(0, 1000) - time_unit1 = time_unit.replace("num","%d" %time_num1) - time_num2 = random.randint(0, 1000) - time_unit2 = time_unit.replace("num","%d" %time_num2) - + time_functions = timelist + + fun_fix_column = ['(ts)','(q_ts)','(_c0)','(_C0)','(_rowts)','(ts,time_unit)','(_c0,time_unit)','(_C0,time_unit)','(_rowts,time_unit)'] + + time_units = ['nums','numm','numh','numd','numa'] + time_unit = str(random.sample(time_units,1)).replace("[","").replace("]","").replace("'","") + time_num1 = random.randint(0, 
1000) + time_unit1 = time_unit.replace("num","%d" %time_num1) + time_num2 = random.randint(0, 1000) + time_unit2 = time_unit.replace("num","%d" %time_num2) + fun_column_1 = random.sample(time_functions,1)+random.sample(fun_fix_column,1) time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("time_unit","%s" %time_unit1) - + fun_column_2 = random.sample(time_functions,1)+random.sample(fun_fix_column,1) time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("time_unit","%s" %time_unit2) - - - fun_fix_column_j = ['(t1.ts)','(t1.q_ts)', '(t2.ts)','(t2.q_ts)','(t1.ts,time_unit)','(t1.q_ts,time_unit)','(t2.ts,time_unit)','(t2.q_ts,time_unit)'] - + + + fun_fix_column_j = ['(t1.ts)','(t1.q_ts)', '(t2.ts)','(t2.q_ts)','(t1.ts,time_unit)','(t1.q_ts,time_unit)','(t2.ts,time_unit)','(t2.q_ts,time_unit)'] + fun_column_join_1 = random.sample(time_functions,1)+random.sample(fun_fix_column_j,1) time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("time_unit","%s" %time_unit1) - + fun_column_join_2 = random.sample(time_functions,1)+random.sample(fun_fix_column_j,1) time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("time_unit","%s" %time_unit2) - - + + elif (timelist == ['CAST']) : - str_functions = timelist + str_functions = timelist #下面的4个是全的,这个只是1个 i = random.randint(1,4) if i ==1: @@ -2282,33 +2282,33 @@ class TDTestCase: fun_fix_column = ['q_bool','q_bool_null','q_bigint','q_bigint_null','q_smallint','q_smallint_null', 'q_tinyint','q_tinyint_null','q_int','q_int_null','q_float','q_float_null','q_double','q_double_null'] type_names = ['BIGINT','BINARY(100)','TIMESTAMP','NCHAR(100)','BIGINT UNSIGNED'] - - type_name1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + + type_name1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") fun_column_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name1+')' time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","") - - type_name2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + + type_name2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") fun_column_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name2+')' time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","") - + fun_fix_column_j = ['t1.q_bool','t1.q_bool_null','t1.q_bigint','t1.q_bigint_null','t1.q_smallint','t1.q_smallint_null', 't1.q_tinyint','t1.q_tinyint_null','t1.q_int','t1.q_int_null','t1.q_float','t1.q_float_null','t1.q_double','t1.q_double_null', 't2.q_bool','t2.q_bool_null','t2.q_bigint','t2.q_bigint_null','t2.q_smallint','t2.q_smallint_null', - 't2.q_tinyint','t2.q_tinyint_null','t2.q_int','t2.q_int_null','t2.q_float','t2.q_float_null','t2.q_double','t2.q_double_null'] - - type_name_j1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + 't2.q_tinyint','t2.q_tinyint_null','t2.q_int','t2.q_int_null','t2.q_float','t2.q_float_null','t2.q_double','t2.q_double_null'] + + type_name_j1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") fun_column_join_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j1+')' 
time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","") - - type_name_j2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + + type_name_j2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") fun_column_join_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j2+')' - time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","") - + time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","") + elif i==2: print('===========cast_2===========') fun_fix_column = ['q_binary','q_binary_null','q_binary1','q_binary2','q_binary3','q_binary4'] type_names = ['BIGINT','BINARY(100)','NCHAR(100)','BIGINT UNSIGNED'] - + type_name1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") fun_column_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name1+')' time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","") @@ -2316,279 +2316,279 @@ class TDTestCase: type_name2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") fun_column_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name2+')' time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","") - + fun_fix_column_j = ['t1.q_binary','t1.q_binary_null','t1.q_binary1','t1.q_binary2','t1.q_binary3','t1.q_smallint_null','t1.q_binary4', - 't2.q_binary','t2.q_binary_null','t2.q_bigint','t2.q_binary1','t2.q_binary2','t2.q_binary3','t2.q_binary4'] - - type_name_j1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + 't2.q_binary','t2.q_binary_null','t2.q_bigint','t2.q_binary1','t2.q_binary2','t2.q_binary3','t2.q_binary4'] + + type_name_j1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") fun_column_join_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j1+')' time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","") - - type_name_j2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + + type_name_j2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") fun_column_join_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j2+')' - time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","") - + time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","") + elif i==3: print('===========cast_3===========') fun_fix_column = ['q_nchar','q_nchar_null','q_nchar5','q_nchar6','q_nchar7','q_nchar8'] type_names = ['BIGINT','NCHAR(100)','BIGINT UNSIGNED'] - + type_name1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") fun_column_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name1+')' - time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","") + time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","") type_name2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") fun_column_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name2+')' - time_fun_2 = 
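A similar sample-and-strip idiom drives every CAST variant in this function; a minimal standalone sketch follows (the helper name and the argument lists are illustrative, not taken from the patch): one source column and one target type are picked at random and rendered as a CAST expression.

import random

def build_cast_expr(columns, type_names):
    # pick one source column and one target type, then render CAST(col AS TYPE)
    col = random.choice(columns)
    type_name = random.choice(type_names)
    return "CAST(" + col + " AS " + type_name + ")"

# e.g. build_cast_expr(['q_binary1', 't1.q_binary3'], ['BIGINT', 'NCHAR(100)'])
#      could yield "CAST(t1.q_binary3 AS NCHAR(100))"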
str(fun_column_2).replace("[","").replace("]","").replace("'","") - + time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","") + fun_fix_column_j = ['t1.q_nchar','t1.q_nchar_null','t1.q_nchar5','t1.q_nchar6','t1.q_nchar7','t1.q_nchar8', - 't2.q_nchar','t2.q_nchar_null','t2.q_nchar5','t2.q_nchar6','t2.q_nchar7','t2.q_nchar8'] - - type_name_j1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + 't2.q_nchar','t2.q_nchar_null','t2.q_nchar5','t2.q_nchar6','t2.q_nchar7','t2.q_nchar8'] + + type_name_j1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") fun_column_join_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j1+')' time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","") - - type_name_j2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + + type_name_j2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") fun_column_join_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j2+')' - time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","") - + time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","") + elif i==4: print('===========cast_4===========') fun_fix_column = ['q_ts','q_ts_null','_C0','_c0','ts','_rowts'] type_names = ['BIGINT','TIMESTAMP','BIGINT UNSIGNED'] - + type_name1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") fun_column_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name1+')' - time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","") - + time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","") + type_name2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") fun_column_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name2+')' - time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","") - - fun_fix_column_j = ['t1.q_ts','t1.q_ts_null','t1.ts','t2.q_ts','t2.q_ts_null','t2.ts'] - - type_name_j1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","") + + fun_fix_column_j = ['t1.q_ts','t1.q_ts_null','t1.ts','t2.q_ts','t2.q_ts_null','t2.ts'] + + type_name_j1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") fun_column_join_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j1+')' time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","") - - type_name_j2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + + type_name_j2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") fun_column_join_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j2+')' - time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","") - + time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","") + elif (timelist == ['CAST_1']) : - str_functions = timelist - + str_functions = timelist + print('===========cast_1===========') fun_fix_column = 
['q_bool','q_bool_null','q_bigint','q_bigint_null','q_smallint','q_smallint_null', 'q_tinyint','q_tinyint_null','q_int','q_int_null','q_float','q_float_null','q_double','q_double_null'] type_names = ['BIGINT','BINARY(100)','TIMESTAMP','NCHAR(100)','BIGINT UNSIGNED'] - - type_name1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + + type_name1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") fun_column_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name1+')' - time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace("_1","") - - type_name2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace("_1","") + + type_name2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") fun_column_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name2+')' - time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace("_1","") - + time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace("_1","") + fun_fix_column_j = ['t1.q_bool','t1.q_bool_null','t1.q_bigint','t1.q_bigint_null','t1.q_smallint','t1.q_smallint_null', 't1.q_tinyint','t1.q_tinyint_null','t1.q_int','t1.q_int_null','t1.q_float','t1.q_float_null','t1.q_double','t1.q_double_null', 't2.q_bool','t2.q_bool_null','t2.q_bigint','t2.q_bigint_null','t2.q_smallint','t2.q_smallint_null', - 't2.q_tinyint','t2.q_tinyint_null','t2.q_int','t2.q_int_null','t2.q_float','t2.q_float_null','t2.q_double','t2.q_double_null'] - - type_name_j1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + 't2.q_tinyint','t2.q_tinyint_null','t2.q_int','t2.q_int_null','t2.q_float','t2.q_float_null','t2.q_double','t2.q_double_null'] + + type_name_j1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") fun_column_join_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j1+')' - time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace("_1","") - - type_name_j2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace("_1","") + + type_name_j2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") fun_column_join_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j2+')' - time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace("_1","") - + time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace("_1","") + elif (timelist == ['CAST_2']) : - str_functions = timelist + str_functions = timelist print('===========cast_2===========') fun_fix_column = ['q_binary','q_binary_null','q_binary1','q_binary2','q_binary3','q_binary4'] type_names = ['BIGINT','BINARY(100)','NCHAR(100)','BIGINT UNSIGNED'] - + type_name1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") fun_column_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name1+')' - time_fun_1 = 
str(fun_column_1).replace("[","").replace("]","").replace("'","").replace("_2","") + time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace("_2","") type_name2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") fun_column_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name2+')' - time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace("_2","") - + time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace("_2","") + fun_fix_column_j = ['t1.q_binary','t1.q_binary_null','t1.q_binary1','t1.q_binary2','t1.q_binary3','t1.q_smallint_null','t1.q_binary4', - 't2.q_binary','t2.q_binary_null','t2.q_bigint','t2.q_binary1','t2.q_binary2','t2.q_binary3','t2.q_binary4'] - - type_name_j1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + 't2.q_binary','t2.q_binary_null','t2.q_bigint','t2.q_binary1','t2.q_binary2','t2.q_binary3','t2.q_binary4'] + + type_name_j1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") fun_column_join_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j1+')' - time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace("_2","") - - type_name_j2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace("_2","") + + type_name_j2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") fun_column_join_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j2+')' - time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace("_2","") - + time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace("_2","") + elif (timelist == ['CAST_3']) : - str_functions = timelist + str_functions = timelist print('===========cast_3===========') fun_fix_column = ['q_nchar','q_nchar_null','q_nchar5','q_nchar6','q_nchar7','q_nchar8'] type_names = ['BIGINT','NCHAR(100)','BIGINT UNSIGNED'] - + type_name1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") fun_column_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name1+')' - time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace("_3","") + time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace("_3","") type_name2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") fun_column_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name2+')' - time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace("_3","") - + time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace("_3","") + fun_fix_column_j = ['t1.q_nchar','t1.q_nchar_null','t1.q_nchar5','t1.q_nchar6','t1.q_nchar7','t1.q_nchar8', - 't2.q_nchar','t2.q_nchar_null','t2.q_nchar5','t2.q_nchar6','t2.q_nchar7','t2.q_nchar8'] - - type_name_j1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + 't2.q_nchar','t2.q_nchar_null','t2.q_nchar5','t2.q_nchar6','t2.q_nchar7','t2.q_nchar8'] + + type_name_j1 = 
str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") fun_column_join_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j1+')' - time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace("_3","") - - type_name_j2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace("_3","") + + type_name_j2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") fun_column_join_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j2+')' - time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace("_3","") - + time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace("_3","") + elif (timelist == ['CAST_4']) : - str_functions = timelist + str_functions = timelist print('===========cast_4===========') fun_fix_column = ['q_ts','q_ts_null','_C0','_c0','ts','_rowts'] type_names = ['BIGINT','TIMESTAMP','BIGINT UNSIGNED'] - + type_name1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") fun_column_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name1+')' - time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace("_4","") - + time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace("_4","") + type_name2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") fun_column_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name2+')' - time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace("_4","") - - fun_fix_column_j = ['t1.q_ts','t1.q_ts_null','t1.ts','t2.q_ts','t2.q_ts_null','t2.ts'] - - type_name_j1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace("_4","") + + fun_fix_column_j = ['t1.q_ts','t1.q_ts_null','t1.ts','t2.q_ts','t2.q_ts_null','t2.ts'] + + type_name_j1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") fun_column_join_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j1+')' - time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace("_4","") - - type_name_j2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace("_4","") + + type_name_j2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") fun_column_join_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j2+')' - time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace("_4","") - + time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace("_4","") + tdSql.query("select 1-1 as time_nest from stable_1 limit 1;") for i in range(self.fornum): if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\ or (timelist == ['TO_UNIXTIMESTAMP']): - sql = "select ts , 
timediff(asct1,now) from ( select " - sql += "%s as asct1, " % time_fun_1 - sql += "%s as asct2, " % time_fun_2 + sql = "select ts , timediff(asct1,now) from ( select " + sql += "%s as asct1, " % time_fun_1 + sql += "%s as asct2, " % time_fun_2 sql += "%s, " % random.choice(self.s_s_select) - sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) sql += "ts from regular_table_1 where " sql += "%s " % random.choice(self.q_where) sql += "%s " % random.choice(self.order_where) sql += ");" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) tdSql.checkRows(100) self.cur1.execute(sql) elif (timelist == ['TIMEZONE']) \ or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']): - sql = "select ts , asct1,now(),today(),timezone() from ( select " - sql += "%s as asct1, " % time_fun_1 - sql += "%s as asct2, " % time_fun_2 + sql = "select ts , asct1,now(),today(),timezone() from ( select " + sql += "%s as asct1, " % time_fun_1 + sql += "%s as asct2, " % time_fun_2 sql += "%s, " % random.choice(self.s_s_select) - sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) sql += "ts from regular_table_1 where " sql += "%s " % random.choice(self.q_where) sql += "%s " % random.choice(self.order_where) sql += ");" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) tdSql.checkRows(100) self.cur1.execute(sql) elif (timelist == ['ELAPSED']) : - sql = "select max(asct1),now(),today(),timezone() from ( select " - sql += "%s as asct1, " % time_fun_1 - sql += "%s as asct2 " % time_fun_2 + sql = "select max(asct1),now(),today(),timezone() from ( select " + sql += "%s as asct1, " % time_fun_1 + sql += "%s as asct2 " % time_fun_2 sql += "from regular_table_1 where " sql += "%s " % random.choice(self.q_where) sql += ");" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) self.cur1.execute(sql) - + tdSql.query("select 1-2 as time_nest from stable_1 limit 1;") for i in range(self.fornum): if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\ or (timelist == ['TO_UNIXTIMESTAMP']): sql = "select ts , timediff(asct1,now),now(),today(),timezone() from ( select " - sql += "%s as asct1, " % time_fun_1 + sql += "%s as asct1, " % time_fun_1 sql += "%s, " % random.choice(self.s_s_select) - sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) sql += "ts ts from regular_table_1 where " sql += "%s " % random.choice(self.q_where) sql += "%s )" % random.choice(self.order_where) - sql += "%s " % random.choice(self.unionall_or_union) + sql += "%s " % random.choice(self.unionall_or_union) sql += "select ts , timediff(asct2,now),now(),today(),timezone() from ( select " - sql += "%s as asct2, " % time_fun_2 + sql += "%s as asct2, " % time_fun_2 sql += "%s, " % random.choice(self.s_s_select) - sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) sql += "ts ts from regular_table_1 where " - sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.q_where) #sql += "%s " % random.choice(having_support) sql += "%s " % random.choice(self.order_where) sql += "%s " % random.choice(self.limit1_where) sql += ")" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #TD-15437 
tdSql.query(sql) - #TD-15437 self.cur1.execute(sql) + #TD-15437 self.cur1.execute(sql) elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']): sql = "select ts , (asct1),now(),today(),timezone() from ( select " - sql += "%s as asct1, " % time_fun_1 + sql += "%s as asct1, " % time_fun_1 sql += "%s, " % random.choice(self.s_s_select) - sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) sql += "ts ts from regular_table_1 where " sql += "%s " % random.choice(self.q_where) sql += "%s )" % random.choice(self.order_where) - sql += "%s " % random.choice(self.unionall_or_union) + sql += "%s " % random.choice(self.unionall_or_union) sql += "select ts , asct2,now(),today(),timezone() from ( select " - sql += "%s as asct2, " % time_fun_2 + sql += "%s as asct2, " % time_fun_2 sql += "%s, " % random.choice(self.s_s_select) - sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) sql += "ts ts from regular_table_1 where " - sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.q_where) sql += "%s " % random.choice(self.order_where) sql += "%s " % random.choice(self.limit1_where) sql += ")" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #TD-15437 tdSql.query(sql) - #TD-15437 self.cur1.execute(sql) - elif (timelist == ['ELAPSED']) : + #TD-15437 self.cur1.execute(sql) + elif (timelist == ['ELAPSED']) : sql = "select min(asct1),now(),today(),timezone() from ( select " - sql += "%s as asct1 " % time_fun_1 + sql += "%s as asct1 " % time_fun_1 sql += " from regular_table_1 where " sql += "%s )" % random.choice(self.q_where) - sql += "%s " % random.choice(self.unionall_or_union) + sql += "%s " % random.choice(self.unionall_or_union) sql += "select avg(asct2),now(),today(),timezone() from ( select " - sql += "%s as asct2 " % time_fun_2 + sql += "%s as asct2 " % time_fun_2 sql += " from regular_table_1 where " - sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.q_where) sql += "%s " % random.choice(self.limit1_where) sql += ")" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) - self.cur1.execute(sql) - + self.cur1.execute(sql) + tdSql.query("select 1-3 as time_nest from stable_1 limit 1;") for i in range(self.fornum): if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\ @@ -2597,20 +2597,20 @@ class TDTestCase: sql += "%s as asct1, ts ," % time_fun_1 sql += "%s as asct2, " % time_fun_2 sql += "%s, " % random.choice(self.s_s_select) - sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) sql += "ts from regular_table_1 where " sql += "%s " % random.choice(self.q_where) - sql += "%s select " % random.choice(self.unionall_or_union) + sql += "%s select " % random.choice(self.unionall_or_union) sql += "%s as asct2, ts ," % time_fun_2 sql += "%s as asct1, " % time_fun_1 sql += "%s, " % random.choice(self.s_s_select) - sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) sql += "ts from regular_table_2 where " sql += "%s " % random.choice(self.q_where) sql += "%s " % random.choice(self.order_where) sql += ")" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #TD-15473 tdSql.query(sql) #self.cur1.execute(sql) elif (timelist == ['TIMEZONE']) or 
(timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']): @@ -2618,167 +2618,167 @@ class TDTestCase: sql += "%s as asct1, ts ," % time_fun_1 sql += "%s as asct2, " % time_fun_2 sql += "%s, " % random.choice(self.s_s_select) - sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) sql += "ts from regular_table_1 where " sql += "%s " % random.choice(self.q_where) - sql += "%s select " % random.choice(self.unionall_or_union) + sql += "%s select " % random.choice(self.unionall_or_union) sql += "%s as asct2, ts ," % time_fun_2 sql += "%s as asct1, " % time_fun_1 sql += "%s, " % random.choice(self.s_s_select) - sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) sql += "ts from regular_table_2 where " sql += "%s " % random.choice(self.q_where) sql += "%s " % random.choice(self.order_where) sql += ")" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #TD-15473 tdSql.query(sql) #self.cur1.execute(sql) - elif (timelist == ['ELAPSED']) : + elif (timelist == ['ELAPSED']) : sql = "select abs(asct1),now(),today(),timezone() from ( select " sql += "%s as asct1," % time_fun_1 sql += "%s as asct2 " % time_fun_2 sql += "from regular_table_1 where " sql += "%s " % random.choice(self.q_where) - sql += "%s select " % random.choice(self.unionall_or_union) + sql += "%s select " % random.choice(self.unionall_or_union) sql += "%s as asct2," % time_fun_2 sql += "%s as asct1 " % time_fun_1 sql += "from regular_table_2 where " sql += "%s " % random.choice(self.q_where) sql += "%s " % random.choice(self.order_where) sql += ")" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) self.cur1.execute(sql) - + tdSql.query("select 1-4 as time_nest from stable_1 limit 1;") for i in range(self.fornum): if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\ or (timelist == ['TO_UNIXTIMESTAMP']): sql = "select ts , timediff(asct1,now) from ( select t1.ts as ts," - sql += "%s, " % time_fun_join_1 - sql += "%s as asct1, " % time_fun_join_2 - sql += "%s, " % time_fun_join_1 - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t2.%s, " % random.choice(self.q_select) + sql += "%s, " % time_fun_join_1 + sql += "%s as asct1, " % time_fun_join_2 + sql += "%s, " % time_fun_join_1 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.q_u_where) sql += "and %s " % random.choice(self.q_u_or_where) sql += "%s " % random.choice(self.order_u_where) sql += ");" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) tdSql.checkRows(100) self.cur1.execute(sql) elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']): sql = "select ts , (asct1) from ( select t1.ts as ts," - sql += "%s, " % time_fun_join_1 - sql += "%s as asct1, " % time_fun_join_2 - sql += "%s, " % time_fun_join_1 - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t2.%s, " % random.choice(self.q_select) + sql += "%s, " % time_fun_join_1 + sql += "%s as asct1, " % time_fun_join_2 + sql += "%s, " % time_fun_join_1 + sql += "t1.%s, " % random.choice(self.q_select) + sql += 
"t2.%s, " % random.choice(self.q_select) sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.q_u_where) sql += "and %s " % random.choice(self.q_u_or_where) sql += "%s " % random.choice(self.order_u_where) sql += ");" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) tdSql.checkRows(100) self.cur1.execute(sql) - elif (timelist == ['ELAPSED']) : + elif (timelist == ['ELAPSED']) : sql = "select floor(asct1) from ( select " - sql += "%s, " % time_fun_join_1 - sql += "%s as asct1, " % time_fun_join_2 - sql += "%s " % time_fun_join_1 + sql += "%s, " % time_fun_join_1 + sql += "%s as asct1, " % time_fun_join_2 + sql += "%s " % time_fun_join_1 sql += " from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.q_u_where) sql += "and %s " % random.choice(self.q_u_or_where) sql += ");" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) self.cur1.execute(sql) - + tdSql.query("select 1-5 as time_nest from stable_1 limit 1;") for i in range(self.fornum): - if (timelist == ['ELAPSED']) : + if (timelist == ['ELAPSED']) : sql = "select now(),today(),timezone(), " - sql += "%s, " % time_fun_1 + sql += "%s, " % time_fun_1 sql += "%s " % time_fun_2 sql += " from ( select * from regular_table_1 ) where " sql += "%s " % random.choice(self.q_where) sql += " ;" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) self.cur1.execute(sql) else: sql = "select ts ,now(),today(),timezone(), " - sql += "%s, " % time_fun_1 + sql += "%s, " % time_fun_1 + sql += "%s, " % random.choice(self.q_select) sql += "%s, " % random.choice(self.q_select) - sql += "%s, " % random.choice(self.q_select) sql += "%s " % time_fun_2 sql += " from ( select * from regular_table_1 ) where " sql += "%s " % random.choice(self.q_where) sql += "%s " % random.choice(self.order_where) sql += " ;" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) tdSql.checkRows(100) self.cur1.execute(sql) tdSql.query("select 1-6 as time_nest from stable_1 limit 1;") - for i in range(self.fornum): + for i in range(self.fornum): if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\ or (timelist == ['TO_UNIXTIMESTAMP']): sql = "select ts , timediff(asct1,now) from ( select t1.ts as ts," - sql += "%s, " % time_fun_join_1 - sql += "%s as asct1, " % time_fun_join_2 - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t2.%s, " % random.choice(self.q_select) - sql += "%s, " % time_fun_join_1 + sql += "%s, " % time_fun_join_1 + sql += "%s as asct1, " % time_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "%s, " % time_fun_join_1 sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.q_u_where) sql += "and %s )" % random.choice(self.q_u_or_where) sql += "%s " % random.choice(self.order_where) sql += "%s " % random.choice(self.limit1_where) - tdLog.info(sql) - tdLog.info(len(sql)) - tdSql.query(sql) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) self.cur1.execute(sql) elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']): sql = "select ts , (asct1) from ( select 
t1.ts as ts," - sql += "%s, " % time_fun_join_1 - sql += "%s as asct1, " % time_fun_join_2 - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t2.%s, " % random.choice(self.q_select) - sql += "%s, " % time_fun_join_1 + sql += "%s, " % time_fun_join_1 + sql += "%s as asct1, " % time_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "%s, " % time_fun_join_1 sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.q_u_where) sql += "and %s )" % random.choice(self.q_u_or_where) sql += "%s " % random.choice(self.order_where) sql += "%s " % random.choice(self.limit1_where) - tdLog.info(sql) - tdLog.info(len(sql)) - tdSql.query(sql) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) self.cur1.execute(sql) elif (timelist == ['ELAPSED']) : sql = "select (asct1)*111 from ( select " - sql += "%s, " % time_fun_join_1 - sql += "%s as asct1, " % time_fun_join_2 - sql += "%s " % time_fun_join_1 + sql += "%s, " % time_fun_join_1 + sql += "%s as asct1, " % time_fun_join_2 + sql += "%s " % time_fun_join_1 sql += " from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.q_u_where) sql += "and %s )" % random.choice(self.q_u_or_where) sql += "%s " % random.choice(self.limit1_where) - tdLog.info(sql) - tdLog.info(len(sql)) - tdSql.query(sql) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) self.cur1.execute(sql) tdSql.query("select 1-7 as time_nest from stable_1 limit 1;") @@ -2788,13 +2788,13 @@ class TDTestCase: sql = "select ts , timediff(asct1,now) from ( select " sql += "%s as asct1, ts ," % time_fun_1 sql += "%s as asct2, " % time_fun_2 - sql += "%s, " % random.choice(self.q_select) - sql += "%s, " % random.choice(self.t_select) + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.t_select) sql += "ts from stable_1 where " sql += "%s " % random.choice(self.qt_where) sql += "%s " % random.choice(self.order_where) sql += ") ;" - tdLog.info(sql) + tdLog.info(sql) tdLog.info(len(sql)) # tdSql.query(sql) # TD-16039 # tdSql.checkRows(300) @@ -2803,13 +2803,13 @@ class TDTestCase: sql = "select ts , (asct1),now(),today(),timezone() from ( select " sql += "%s as asct1, ts ," % time_fun_1 sql += "%s as asct2, " % time_fun_2 - sql += "%s, " % random.choice(self.q_select) - sql += "%s, " % random.choice(self.t_select) + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.t_select) sql += "ts from stable_1 where " sql += "%s " % random.choice(self.qt_where) sql += "%s " % random.choice(self.order_where) sql += ") ;" - tdLog.info(sql) + tdLog.info(sql) tdLog.info(len(sql)) # tdSql.query(sql) # TD-16039 # tdSql.checkRows(300) @@ -2821,11 +2821,11 @@ class TDTestCase: sql += "from stable_1 where " sql += "%s " % random.choice(self.qt_where) sql += ") ;" - tdLog.info(sql) + tdLog.info(sql) tdLog.info(len(sql)) # tdSql.query(sql) #同时出现core:TD-16095和TD-16042 # self.cur1.execute(sql) - + tdSql.query("select 1-8 as time_nest from stable_1 limit 1;") for i in range(self.fornum): if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\ @@ -2835,13 +2835,13 @@ class TDTestCase: sql += "%s, " % random.choice(self.s_s_select) sql += "%s as asct1, ts ," % time_fun_1 sql += "%s as asct2, " % time_fun_2 - sql += "%s, " % random.choice(self.q_select) - sql += "%s, " % random.choice(self.t_select) + sql += "%s, " % 
random.choice(self.q_select) + sql += "%s, " % random.choice(self.t_select) sql += "ts from stable_1 where " sql += "%s " % random.choice(self.qt_where) sql += "%s " % random.choice(self.order_where) sql += ") ;" - tdLog.info(sql) + tdLog.info(sql) tdLog.info(len(sql)) # tdSql.query(sql) # TD-16039 # tdSql.checkRows(300) @@ -2852,17 +2852,17 @@ class TDTestCase: sql += "%s, " % random.choice(self.s_s_select) sql += "%s as asct1, ts ," % time_fun_1 sql += "%s as asct2, " % time_fun_2 - sql += "%s, " % random.choice(self.q_select) - sql += "%s, " % random.choice(self.t_select) + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.t_select) sql += "ts from stable_1 where " sql += "%s " % random.choice(self.qt_where) sql += "%s " % random.choice(self.order_where) sql += ") ;" - tdLog.info(sql) + tdLog.info(sql) tdLog.info(len(sql)) # tdSql.query(sql) # TD-16039 # tdSql.checkRows(300) - # self.cur1.execute(sql) + # self.cur1.execute(sql) elif (timelist == ['ELAPSED']) : sql = "select floor(abs(asct1)),now(),today(),timezone() " sql += "from ( select " @@ -2873,22 +2873,22 @@ class TDTestCase: sql += "%s " % random.choice(self.qt_where) sql += "%s " % random.choice(self.order_where) sql += ") ;" - tdLog.info(sql) + tdLog.info(sql) tdLog.info(len(sql)) # tdSql.query(sql) # TD-16039 - # self.cur1.execute(sql) + # self.cur1.execute(sql) tdSql.query("select 1-9 as time_nest from stable_1 limit 1;") for i in range(self.fornum): if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\ or (timelist == ['TO_UNIXTIMESTAMP']): sql = "select ts , timediff(asct1,now) from ( select t1.ts as ts," - sql += "%s, " % time_fun_join_1 - sql += "%s as asct1, " % time_fun_join_2 - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t2.%s, " % random.choice(self.q_select) - sql += "t2.%s, " % random.choice(self.q_select) + sql += "%s, " % time_fun_join_1 + sql += "%s as asct1, " % time_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.t_join_where) sql += "and %s " % random.choice(self.t_u_where) @@ -2896,18 +2896,18 @@ class TDTestCase: sql += "%s " % random.choice(self.order_u_where) sql += "%s " % random.choice(self.limit1_where) sql += ") ;" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) # tdSql.query(sql) TD-16039 # self.cur1.execute(sql) TD-16039 elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']): sql = "select ts , asct1 from ( select t1.ts as ts," - sql += "%s, " % time_fun_join_1 - sql += "%s as asct1, " % time_fun_join_2 - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t2.%s, " % random.choice(self.q_select) - sql += "t2.%s, " % random.choice(self.q_select) + sql += "%s, " % time_fun_join_1 + sql += "%s as asct1, " % time_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " sql += "%s 
" % random.choice(self.t_join_where) sql += "and %s " % random.choice(self.t_u_where) @@ -2915,25 +2915,25 @@ class TDTestCase: sql += "%s " % random.choice(self.order_u_where) sql += "%s " % random.choice(self.limit1_where) sql += ") ;" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) # tdSql.query(sql) - # self.cur1.execute(sql) # TD-16039 + # self.cur1.execute(sql) # TD-16039 elif (timelist == ['ELAPSED']) : sql = "select min(asct1*110) from ( select " - sql += "%s, " % time_fun_join_1 - sql += "%s as asct1 " % time_fun_join_2 + sql += "%s, " % time_fun_join_1 + sql += "%s as asct1 " % time_fun_join_2 sql += "from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.t_join_where) sql += "and %s " % random.choice(self.t_u_where) sql += "and %s " % random.choice(self.t_u_or_where) sql += "%s " % random.choice(self.limit1_where) sql += ") ;" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) # tdSql.query(sql) - # self.cur1.execute(sql) # TD-16039 - + # self.cur1.execute(sql) # TD-16039 + self.restartDnodes() tdSql.query("select 1-10 as time_nest from stable_1 limit 1;") for i in range(self.fornum): @@ -2958,8 +2958,8 @@ class TDTestCase: sql += "%s " % random.choice(self.order_where) sql += "%s " % random.choice(self.limit1_where) sql += ")" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #TD-15437 tdSql.query(sql) #TD-15437 self.cur1.execute(sql) elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']): @@ -2982,10 +2982,10 @@ class TDTestCase: sql += "%s " % random.choice(self.order_where) sql += "%s " % random.choice(self.limit1_where) sql += ")" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #TD-15437 tdSql.query(sql) - #TD-15437 self.cur1.execute(sql) + #TD-15437 self.cur1.execute(sql) elif (timelist == ['ELAPSED']) : sql = "select abs(asct1),now(),today(),timezone() from ( select " sql += "%s as asct1 ," % time_fun_1 @@ -3000,11 +3000,11 @@ class TDTestCase: sql += "%s " % random.choice(self.q_where) sql += "%s " % random.choice(self.limit1_where) sql += ")" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) - #TD-15437 self.cur1.execute(sql) - + #TD-15437 self.cur1.execute(sql) + #3 inter union not support tdSql.query("select 1-11 as time_nest from stable_1 limit 1;") for i in range(self.fornum): @@ -3028,10 +3028,10 @@ class TDTestCase: sql += "%s " % random.choice(self.order_where) sql += "%s " % random.choice(self.limit1_where) sql += ")" - tdLog.info(sql) - tdLog.info(len(sql)) - # tdSql.query(sql)#TD-15473 - # self.cur1.execute(sql)#TD-15473 + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql)#TD-15473 + # self.cur1.execute(sql)#TD-15473 elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']): sql = "select ts , (asct1,now()),(now(),asct2) from ( select " sql += "%s as asct1, ts ," % time_fun_1 @@ -3051,10 +3051,10 @@ class TDTestCase: sql += "%s " % random.choice(self.order_where) sql += "%s " % random.choice(self.limit1_where) sql += ")" - tdLog.info(sql) - tdLog.info(len(sql)) - # tdSql.query(sql)#TD-15473 - # self.cur1.execute(sql)#TD-15473 + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql)#TD-15473 + # 
self.cur1.execute(sql)#TD-15473 elif (timelist == ['ELAPSED']) : sql = "select asct1+asct2,now(),today(),timezone() from ( select " sql += "%s as asct1, " % time_fun_1 @@ -3070,118 +3070,118 @@ class TDTestCase: sql += "%s " % random.choice(self.order_where) sql += "%s " % random.choice(self.limit1_where) sql += ")" - tdLog.info(sql) - tdLog.info(len(sql)) - tdSql.query(sql)#TD-15473 - self.cur1.execute(sql)#TD-15473 + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql)#TD-15473 + self.cur1.execute(sql)#TD-15473 tdSql.query("select 1-12 as time_nest from stable_1 limit 1;") for i in range(self.fornum): if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\ or (timelist == ['TO_UNIXTIMESTAMP']): sql = "select ts , timediff(asct1,now) from ( select t1.ts as ts," - sql += "%s, " % time_fun_join_1 - sql += "%s as asct1, " % time_fun_join_2 - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t2.%s, " % random.choice(self.q_select) - sql += "t2.%s, " % random.choice(self.q_select) + sql += "%s, " % time_fun_join_1 + sql += "%s as asct1, " % time_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.t_u_where) sql += "and %s " % random.choice(self.t_u_or_where) sql += "%s " % random.choice(self.order_u_where) sql += "%s " % random.choice(self.limit1_where) sql += ");" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) # tdSql.query(sql) # self.cur1.execute(sql)# TD-16039 elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']): sql = "select ts , asct1,now() from ( select t1.ts as ts," - sql += "%s, " % time_fun_join_1 - sql += "%s as asct1, " % time_fun_join_2 - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t2.%s, " % random.choice(self.q_select) - sql += "t2.%s, " % random.choice(self.q_select) + sql += "%s, " % time_fun_join_1 + sql += "%s as asct1, " % time_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.t_u_where) sql += "and %s " % random.choice(self.t_u_or_where) sql += "%s " % random.choice(self.order_u_where) sql += "%s " % random.choice(self.limit1_where) sql += ");" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) # tdSql.query(sql) # self.cur1.execute(sql)# TD-16039 elif (timelist == ['ELAPSED']) : sql = "select min(floor(asct1)),now() from ( select " - sql += "%s, " % time_fun_join_1 - sql += "%s as asct1 " % time_fun_join_2 + sql += "%s, " % time_fun_join_1 + sql += "%s as asct1 " % time_fun_join_2 sql += " from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.t_u_where) sql += "and %s " % random.choice(self.t_u_or_where) sql += "%s " % random.choice(self.limit1_where) sql += ");" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) # tdSql.query(sql) # 
self.cur1.execute(sql)# TD-16039 - + tdSql.query("select 1-13 as time_nest from stable_1 limit 1;") for i in range(self.fornum): if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\ or (timelist == ['TO_UNIXTIMESTAMP']): sql = "select ts , timediff(%s,now)," % time_fun_2 - sql += "%s as asct1, " % time_fun_1 + sql += "%s as asct1, " % time_fun_1 + sql += "%s, " % random.choice(self.q_select) sql += "%s, " % random.choice(self.q_select) - sql += "%s, " % random.choice(self.q_select) sql += "%s " % time_fun_2 - sql += "%s " % random.choice(self.t_select) + sql += "%s " % random.choice(self.t_select) sql += " from ( select * from stable_1 where " sql += "%s " % random.choice(self.qt_where) sql += "%s " % random.choice(self.order_where) sql += ") ;" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) # tdSql.query(sql) # tdSql.checkRows(300) # self.cur1.execute(sql) # TD-16039 elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']): sql = "select ts ,now(),today(),timezone(), " - sql += "%s as asct1, " % time_fun_1 + sql += "%s as asct1, " % time_fun_1 + sql += "%s, " % random.choice(self.q_select) sql += "%s, " % random.choice(self.q_select) - sql += "%s, " % random.choice(self.q_select) sql += "%s " % time_fun_2 - sql += "%s " % random.choice(self.t_select) + sql += "%s " % random.choice(self.t_select) sql += " from ( select * from stable_1 where " sql += "%s " % random.choice(self.qt_where) sql += "%s " % random.choice(self.order_where) sql += ") ;" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) # tdSql.query(sql) # tdSql.checkRows(300) # self.cur1.execute(sql) # TD-16039 elif (timelist == ['ELAPSED']) : sql = "select now(),today(),timezone(), " - sql += "%s as asct1, " % time_fun_1 + sql += "%s as asct1, " % time_fun_1 sql += "%s " % time_fun_2 sql += " from ( select * from stable_1 where " sql += "%s " % random.choice(self.qt_where) sql += "%s " % random.choice(self.order_where) sql += ") ;" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) # tdSql.query(sql) # self.cur1.execute(sql) # TD-16039 - + tdSql.query("select 1-14 as time_nest from stable_1 limit 1;") for i in range(self.fornum): if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\ or (timelist == ['TO_UNIXTIMESTAMP']): sql = "select ts , timediff(asct1,now),timediff(now,asct2) from ( select ts ts ," - sql += "%s as asct1, " % time_fun_1 + sql += "%s as asct1, " % time_fun_1 sql += "%s as asct2" % time_fun_2 sql += " from stable_1 where " sql += "%s " % random.choice(self.q_where) @@ -3189,13 +3189,13 @@ class TDTestCase: sql += "%s " % random.choice(self.order_desc_where) sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] ) sql += " ) ;" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) # tdSql.query(sql) # self.cur1.execute(sql) # TD-16039 elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']): sql = "select ts , (asct1),now(),(now()),asct2 from ( select ts ts ," - sql += "%s as asct1, " % time_fun_1 + sql += "%s as asct1, " % time_fun_1 sql += "%s as asct2" % time_fun_2 sql += " from stable_1 where " sql += "%s " % random.choice(self.q_where) @@ -3203,83 
+3203,83 @@ class TDTestCase: sql += "%s " % random.choice(self.order_desc_where) sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] ) sql += " ) ;" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) # tdSql.query(sql) # self.cur1.execute(sql) # TD-16039 elif (timelist == ['ELAPSED']) : sql = "select ts , (asct1)*asct2,now(),(now()) from ( select " - sql += "%s as asct1, " % time_fun_1 + sql += "%s as asct1, " % time_fun_1 sql += "%s as asct2" % time_fun_2 sql += " from stable_1 where " sql += "%s " % random.choice(self.q_where) sql += "%s " % random.choice(self.partiton_where) sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] ) sql += " ) ;" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) # tdSql.query(sql) # self.cur1.execute(sql) # TD-16039 - + tdSql.query("select 1-15 as time_nest from stable_1 limit 1;") for i in range(self.fornum): if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\ or (timelist == ['TO_UNIXTIMESTAMP']): sql = "select ts , timediff(asct1,now),timediff(now,asct2) from ( select t1.ts as ts," - sql += "%s as asct2, " % time_fun_join_1 - sql += "%s as asct1, " % time_fun_join_2 - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t2.%s, " % random.choice(self.q_select) - sql += "t2.%s " % random.choice(self.q_select) + sql += "%s as asct2, " % time_fun_join_1 + sql += "%s as asct1, " % time_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s " % random.choice(self.q_select) sql += "from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.t_join_where) sql += " and %s " % random.choice(self.qt_u_or_where) sql += "%s " % random.choice(self.partiton_where_j) sql += "%s " % random.choice(self.slimit1_where) - sql += ") " + sql += ") " sql += "%s " % random.choice(self.order_desc_where) sql += "%s ;" % random.choice(self.limit_u_where) - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) # tdSql.query(sql) - # self.cur1.execute(sql) # TD-16039 + # self.cur1.execute(sql) # TD-16039 elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']): sql = "select ts , asct1,(now()),(now()),asct2 ,now(),today(),timezone() from ( select t1.ts as ts," - sql += "%s as asct2, " % time_fun_join_1 - sql += "%s as asct1, " % time_fun_join_2 - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t2.%s, " % random.choice(self.q_select) - sql += "t2.%s " % random.choice(self.q_select) + sql += "%s as asct2, " % time_fun_join_1 + sql += "%s as asct1, " % time_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s " % random.choice(self.q_select) sql += "from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.t_join_where) sql += " and %s " % random.choice(self.qt_u_or_where) sql += "%s " % random.choice(self.partiton_where_j) sql += "%s " % random.choice(self.slimit1_where) - sql += ") " + sql += ") " sql += "%s " % random.choice(self.order_desc_where) sql += "%s ;" % 
random.choice(self.limit_u_where) - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) # tdSql.query(sql) # self.cur1.execute(sql) # TD-16039 elif (timelist == ['ELAPSED']) : sql = "select asct1,(now()),(now()),asct2 ,now(),today(),timezone() from ( select " - sql += "%s as asct2, " % time_fun_join_1 - sql += "%s as asct1 " % time_fun_join_2 + sql += "%s as asct2, " % time_fun_join_1 + sql += "%s as asct1 " % time_fun_join_2 sql += "from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.t_join_where) sql += " and %s " % random.choice(self.qt_u_or_where) sql += "%s " % random.choice(self.partiton_where_j) sql += "%s " % random.choice(self.slimit1_where) - sql += ") " + sql += ") " sql += "%s ;" % random.choice(self.limit_u_where) - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) self.cur1.execute(sql) # TD-16039 - + #taos -f sql startTime_taos_f = time.time() print("taos -f %s sql start!" %timelist) @@ -3287,149 +3287,149 @@ class TDTestCase: _ = subprocess.check_output(taos_cmd1, shell=True).decode("utf-8") print("taos -f %s sql over!" %timelist) endTime_taos_f = time.time() - print("taos_f total time %ds" % (endTime_taos_f - startTime_taos_f)) - - print("=========%s====over=============" %timelist) + print("taos_f total time %ds" % (endTime_taos_f - startTime_taos_f)) + + print("=========%s====over=============" %timelist) def base_nest(self,baselist): - - print("==========%s===start=============" %baselist) - os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename)) - + + print("==========%s===start=============" %baselist) + os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename)) + self.dropandcreateDB_random("%s" %self.db_nest, 1) - + if (baselist == ['A']) or (baselist == ['S']) or (baselist == ['F']) \ or (baselist == ['C']): - base_functions = baselist - fun_fix_column = ['(q_bigint)','(q_smallint)','(q_tinyint)','(q_int)','(q_float)','(q_double)','(q_bigint_null)','(q_smallint_null)','(q_tinyint_null)','(q_int_null)','(q_float_null)','(q_double_null)'] + base_functions = baselist + fun_fix_column = ['(q_bigint)','(q_smallint)','(q_tinyint)','(q_int)','(q_float)','(q_double)','(q_bigint_null)','(q_smallint_null)','(q_tinyint_null)','(q_int_null)','(q_float_null)','(q_double_null)'] fun_column_1 = random.sample(base_functions,1)+random.sample(fun_fix_column,1) base_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","") fun_column_2 = random.sample(base_functions,1)+random.sample(fun_fix_column,1) base_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","") - + fun_fix_column_j = ['(t1.q_bigint)','(t1.q_smallint)','(t1.q_tinyint)','(t1.q_int)','(t1.q_float)','(t1.q_double)','(t1.q_bigint_null)','(t1.q_smallint_null)','(t1.q_tinyint_null)','(t1.q_int_null)','(t1.q_float_null)','(t1.q_double_null)', - '(t2.q_bigint)','(t2.q_smallint)','(t2.q_tinyint)','(t2.q_int)','(t2.q_float)','(t2.q_double)','(t2.q_bigint_null)','(t2.q_smallint_null)','(t2.q_tinyint_null)','(t2.q_int_null)','(t2.q_float_null)','(t2.q_double_null)'] + '(t2.q_bigint)','(t2.q_smallint)','(t2.q_tinyint)','(t2.q_int)','(t2.q_float)','(t2.q_double)','(t2.q_bigint_null)','(t2.q_smallint_null)','(t2.q_tinyint_null)','(t2.q_int_null)','(t2.q_float_null)','(t2.q_double_null)'] fun_column_join_1 = random.sample(base_functions,1)+random.sample(fun_fix_column_j,1) base_fun_join_1 = 
str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","") fun_column_join_2 = random.sample(base_functions,1)+random.sample(fun_fix_column_j,1) base_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","") elif (baselist == ['P']) or (baselist == ['M']) or (baselist == ['S'])or (baselist == ['T']): - base_functions = baselist - num = random.randint(0, 1000) + base_functions = baselist + num = random.randint(0, 1000) fun_fix_column = ['(q_bigint,num)','(q_smallint,num)','(q_tinyint,num)','(q_int,num)','(q_float,num)','(q_double,num)', - '(q_bigint_null,num)','(q_smallint_null,num)','(q_tinyint_null,num)','(q_int_null,num)','(q_float_null,num)','(q_double_null,num)'] + '(q_bigint_null,num)','(q_smallint_null,num)','(q_tinyint_null,num)','(q_int_null,num)','(q_float_null,num)','(q_double_null,num)'] fun_column_1 = random.sample(base_functions,1)+random.sample(fun_fix_column,1) base_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("num",base(num)) fun_column_2 = random.sample(base_functions,1)+random.sample(fun_fix_column,1) base_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("num",base(num)) - + fun_fix_column_j = ['(t1.q_bigint,num)','(t1.q_smallint,num)','(t1.q_tinyint,num)','(t1.q_int,num)','(t1.q_float,num)','(t1.q_double,num)', '(t1.q_bigint_null,num)','(t1.q_smallint_null,num)','(t1.q_tinyint_null,num)','(t1.q_int_null,num)','(t1.q_float_null,num)','(t1.q_double_null,num)', '(t2.q_bigint,num)','(t2.q_smallint,num)','(t2.q_tinyint,num)','(t2.q_int,num)','(t2.q_float,num)','(t2.q_double,num)', - '(t2.q_bigint_null,num)','(t2.q_smallint_null,num)','(t2.q_tinyint_null,num)','(t2.q_int_null,num)','(t2.q_float_null,num)','(t2.q_double_null,num)'] + '(t2.q_bigint_null,num)','(t2.q_smallint_null,num)','(t2.q_tinyint_null,num)','(t2.q_int_null,num)','(t2.q_float_null,num)','(t2.q_double_null,num)'] fun_column_join_1 = random.sample(base_functions,1)+random.sample(fun_fix_column_j,1) base_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("num",base(num)) fun_column_join_2 = random.sample(base_functions,1)+random.sample(fun_fix_column_j,1) base_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("num",base(num)) - + tdSql.query("select 1-1 as base_nest from stable_1 limit 1;") for i in range(self.fornum): - sql = "select ts , floor(asct1) from ( select " - sql += "%s as asct1, " % base_fun_1 - sql += "%s as asct2, " % base_fun_2 + sql = "select ts , floor(asct1) from ( select " + sql += "%s as asct1, " % base_fun_1 + sql += "%s as asct2, " % base_fun_2 sql += "%s, " % random.choice(self.s_s_select) - sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) sql += "ts from regular_table_1 where " sql += "%s " % random.choice(self.q_where) sql += "%s " % random.choice(self.order_where) sql += ");" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) tdSql.checkRows(100) self.cur1.execute(sql) - + tdSql.query("select 1-2 as base_nest from stable_1 limit 1;") for i in range(self.fornum): sql = "select ts , abs(asct1) from ( select " - sql += "%s as asct1, " % base_fun_1 + sql += "%s as asct1, " % base_fun_1 sql += "%s, " % random.choice(self.s_s_select) - sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % 
random.choice(self.q_select) sql += "ts ts from regular_table_1 where " sql += "%s " % random.choice(self.q_where) sql += "%s )" % random.choice(self.order_where) - sql += "%s " % random.choice(self.unionall_or_union) + sql += "%s " % random.choice(self.unionall_or_union) sql += "select ts , asct2 from ( select " - sql += "%s as asct2, " % base_fun_2 + sql += "%s as asct2, " % base_fun_2 sql += "%s, " % random.choice(self.s_s_select) - sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) sql += "ts ts from regular_table_1 where " - sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.q_where) #sql += "%s " % random.choice(having_support) sql += "%s " % random.choice(self.order_where) sql += "%s " % random.choice(self.limit1_where) sql += ")" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #TD-15437 tdSql.query(sql) - #TD-15437 self.cur1.execute(sql) - + #TD-15437 self.cur1.execute(sql) + tdSql.query("select 1-3 as base_nest from stable_1 limit 1;") for i in range(self.fornum): sql = "select ts , min(asct1) from ( select " sql += "%s as asct1, ts ," % base_fun_1 sql += "%s as asct2, " % base_fun_2 sql += "%s, " % random.choice(self.s_s_select) - sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) sql += "ts from regular_table_1 where " sql += "%s " % random.choice(self.q_where) - sql += "%s select " % random.choice(self.unionall_or_union) + sql += "%s select " % random.choice(self.unionall_or_union) sql += "%s as asct2, ts ," % base_fun_2 sql += "%s as asct1, " % base_fun_1 sql += "%s, " % random.choice(self.s_s_select) - sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) sql += "ts from regular_table_2 where " sql += "%s " % random.choice(self.q_where) sql += "%s " % random.choice(self.order_where) sql += ")" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #TD-15473 tdSql.query(sql) #self.cur1.execute(sql) - + tdSql.query("select 1-4 as base_nest from stable_1 limit 1;") for i in range(self.fornum): sql = "select ts , asct1 from ( select t1.ts as ts," - sql += "%s, " % base_fun_join_1 - sql += "%s as asct1, " % base_fun_join_2 - sql += "%s, " % base_fun_join_1 - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t2.%s, " % random.choice(self.q_select) + sql += "%s, " % base_fun_join_1 + sql += "%s as asct1, " % base_fun_join_2 + sql += "%s, " % base_fun_join_1 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.q_u_where) sql += "and %s " % random.choice(self.q_u_or_where) sql += "%s " % random.choice(self.order_u_where) sql += ");" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) tdSql.checkRows(100) self.cur1.execute(sql) - + tdSql.query("select 1-5 as base_nest from stable_1 limit 1;") for i in range(self.fornum): sql = "select ts ," - sql += "%s, " % base_fun_1 + sql += "%s, " % base_fun_1 + sql += "%s, " % random.choice(self.q_select) sql += "%s, " % random.choice(self.q_select) - sql += "%s, " % random.choice(self.q_select) sql += "%s " % base_fun_2 sql += " from ( select * from regular_table_1 ) where " sql += "%s " % random.choice(self.q_where) sql += "%s " % random.choice(self.order_where) sql += " ;" - tdLog.info(sql) - tdLog.info(len(sql)) + 
tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) tdSql.checkRows(100) self.cur1.execute(sql) @@ -3437,19 +3437,19 @@ class TDTestCase: tdSql.query("select 1-6 as base_nest from stable_1 limit 1;") for i in range(self.fornum): sql = "select ts , max(asct1) from ( select t1.ts as ts," - sql += "%s, " % base_fun_join_1 - sql += "%s as asct1, " % base_fun_join_2 - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t2.%s, " % random.choice(self.q_select) - sql += "%s, " % base_fun_join_1 + sql += "%s, " % base_fun_join_1 + sql += "%s as asct1, " % base_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "%s, " % base_fun_join_1 sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.q_u_where) sql += "and %s )" % random.choice(self.q_u_or_where) sql += "%s " % random.choice(self.order_where) sql += "%s " % random.choice(self.limit1_where) - tdLog.info(sql) - tdLog.info(len(sql)) - tdSql.query(sql) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) self.cur1.execute(sql) tdSql.query("select 1-7 as base_nest from stable_1 limit 1;") @@ -3457,18 +3457,18 @@ class TDTestCase: sql = "select ts , abs(asct1) from ( select " sql += "%s as asct1, ts ," % base_fun_1 sql += "%s as asct2, " % base_fun_2 - sql += "%s, " % random.choice(self.q_select) - sql += "%s, " % random.choice(self.t_select) + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.t_select) sql += "ts from stable_1 where " sql += "%s " % random.choice(self.qt_where) sql += "%s " % random.choice(self.order_where) sql += ") ;" - tdLog.info(sql) + tdLog.info(sql) tdLog.info(len(sql)) tdSql.query(sql) tdSql.checkRows(300) self.cur1.execute(sql) - + tdSql.query("select 1-8 as base_nest from stable_1 limit 1;") for i in range(self.fornum): sql = "select ts,floor(asct1) " @@ -3476,13 +3476,13 @@ class TDTestCase: sql += "%s, " % random.choice(self.s_s_select) sql += "%s as asct1, ts ," % base_fun_1 sql += "%s as asct2, " % base_fun_2 - sql += "%s, " % random.choice(self.q_select) - sql += "%s, " % random.choice(self.t_select) + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.t_select) sql += "ts from stable_1 where " sql += "%s " % random.choice(self.qt_where) sql += "%s " % random.choice(self.order_where) sql += ") ;" - tdLog.info(sql) + tdLog.info(sql) tdLog.info(len(sql)) tdSql.query(sql) tdSql.checkRows(300) @@ -3491,12 +3491,12 @@ class TDTestCase: tdSql.query("select 1-9 as base_nest from stable_1 limit 1;") for i in range(self.fornum): sql = "select ts , max(asct1) from ( select t1.ts as ts," - sql += "%s, " % base_fun_join_1 - sql += "%s as asct1, " % base_fun_join_2 - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t2.%s, " % random.choice(self.q_select) - sql += "t2.%s, " % random.choice(self.q_select) + sql += "%s, " % base_fun_join_1 + sql += "%s as asct1, " % base_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.t_join_where) sql += "and %s " % random.choice(self.t_u_where) @@ -3504,11 +3504,11 @@ class TDTestCase: sql += "%s " % random.choice(self.order_u_where) sql += "%s " % 
random.choice(self.limit1_where) sql += ") ;" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) self.cur1.execute(sql) - + self.restartDnodes() tdSql.query("select 1-10 as base_nest from stable_1 limit 1;") for i in range(self.fornum): @@ -3531,11 +3531,11 @@ class TDTestCase: sql += "%s " % random.choice(self.order_where) sql += "%s " % random.choice(self.limit1_where) sql += ")" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #TD-15437 tdSql.query(sql) #TD-15437 self.cur1.execute(sql) - + #3 inter union not support tdSql.query("select 1-11 as base_nest from stable_1 limit 1;") for i in range(self.fornum): @@ -3559,53 +3559,53 @@ class TDTestCase: sql += "%s " % random.choice(self.order_where) sql += "%s " % random.choice(self.limit1_where) sql += ")" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #TD-15837 tdSql.query(sql) # self.cur1.execute(sql) tdSql.query("select 1-12 as base_nest from stable_1 limit 1;") for i in range(self.fornum): sql = "select ts , max(asct1) from ( select t1.ts as ts," - sql += "%s, " % base_fun_join_1 - sql += "%s as asct1, " % base_fun_join_2 - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t2.%s, " % random.choice(self.q_select) - sql += "t2.%s, " % random.choice(self.q_select) + sql += "%s, " % base_fun_join_1 + sql += "%s as asct1, " % base_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.t_u_where) sql += "and %s " % random.choice(self.t_u_or_where) sql += "%s " % random.choice(self.order_u_where) sql += "%s " % random.choice(self.limit1_where) sql += ");" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) self.cur1.execute(sql) - + tdSql.query("select 1-13 as base_nest from stable_1 limit 1;") for i in range(self.fornum): sql = "select ts ," - sql += "%s, " % base_fun_1 + sql += "%s, " % base_fun_1 + sql += "%s, " % random.choice(self.q_select) sql += "%s, " % random.choice(self.q_select) - sql += "%s, " % random.choice(self.q_select) sql += "%s " % base_fun_2 - sql += "%s " % random.choice(self.t_select) + sql += "%s " % random.choice(self.t_select) sql += " from ( select * from stable_1 where " sql += "%s " % random.choice(self.qt_where) sql += "%s " % random.choice(self.order_where) sql += ") ;" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) tdSql.checkRows(300) self.cur1.execute(sql) - + tdSql.query("select 1-14 as base_nest from stable_1 limit 1;") for i in range(self.fornum): sql = "select avg(asct1),count(asct2) from ( select " - sql += "%s as asct1, " % base_fun_1 + sql += "%s as asct1, " % base_fun_1 sql += "%s as asct2" % base_fun_2 sql += " from stable_1 where " sql += "%s " % random.choice(self.q_where) @@ -3613,33 +3613,33 @@ class TDTestCase: sql += "%s " % random.choice(self.order_desc_where) sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] ) sql += " ) ;" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) - self.cur1.execute(sql) - + self.cur1.execute(sql) + tdSql.query("select 1-15 as base_nest from stable_1 limit 1;") for i in 
range(self.fornum): sql = "select ts , max(asct1) from ( select t1.ts as ts," - sql += "%s, " % base_fun_join_1 - sql += "%s as asct1, " % base_fun_join_2 - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t2.%s, " % random.choice(self.q_select) - sql += "t2.%s " % random.choice(self.q_select) + sql += "%s, " % base_fun_join_1 + sql += "%s as asct1, " % base_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s " % random.choice(self.q_select) sql += "from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.t_join_where) sql += " and %s " % random.choice(self.qt_u_or_where) sql += "%s " % random.choice(self.partiton_where_j) sql += "%s " % random.choice(self.slimit1_where) - sql += ") " + sql += ") " sql += "%s " % random.choice(self.order_desc_where) sql += "%s ;" % random.choice(self.limit_u_where) - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) - self.cur1.execute(sql) - + self.cur1.execute(sql) + #taos -f sql startTime_taos_f = time.time() print("taos -f %s sql start!" %baselist) @@ -3647,35 +3647,35 @@ class TDTestCase: _ = subprocess.check_output(taos_cmd1, shell=True).decode("utf-8") print("taos -f %s sql over!" %baselist) endTime_taos_f = time.time() - print("taos_f total time %ds" % (endTime_taos_f - startTime_taos_f)) - - print("=========%s====over=============" %baselist) - + print("taos_f total time %ds" % (endTime_taos_f - startTime_taos_f)) + + print("=========%s====over=============" %baselist) + def function_before_26(self): - + print('=====================2.6 old function start ===========') - os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename)) - + os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename)) + self.dropandcreateDB_random("%s" %self.db_nest, 1) - + #1 select * from (select column form regular_table where <\>\in\and\or order by) tdSql.query("select 1-1 from stable_1;") for i in range(self.fornum): #sql = "select ts , * from ( select " ===暂时不支持select * ,用下面这一行 sql = "select ts from ( select " sql += "%s, " % random.choice(self.s_s_select) - sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) sql += "ts from regular_table_1 where " sql += "%s " % random.choice(self.q_where) sql += "%s " % random.choice(self.order_where) sql += ");" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) tdSql.checkRows(100) self.cur1.execute(sql) - - #1 outer union not support + + #1 outer union not support #self.dropandcreateDB_random("%s" %db, 1) tdSql.query("select 1-2 from stable_1;") for i in range(self.fornum): @@ -3695,12 +3695,12 @@ class TDTestCase: sql += "%s " % random.choice(self.q_where) sql += "%s " % random.choice(self.order_where) sql += ")" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) tdSql.checkRows(100) self.cur1.execute(sql) - + #self.dropandcreateDB_random("%s" %db, 1) tdSql.query("select 1-2 from stable_1;") for i in range(self.fornum): @@ -3720,12 +3720,12 @@ class TDTestCase: sql += "%s " % random.choice(self.q_where) sql += "%s " % random.choice(self.order_where) sql += ")" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) tdSql.checkRows(200) self.cur1.execute(sql) - + #1 
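# ---------------------------------------------------------------------------
# Editor's note (illustrative sketch, not part of the patch): the hunks above
# reflow a randomized nested-query generator. The pattern they exercise is:
# sample a column and a target type, wrap them in CAST(col AS type), splice
# the expression into a nested sub-select (optionally glued to a second
# variant with UNION ALL), and run the result many times. The standalone
# sketch below reproduces that shape with plain strings; the table and column
# names come from the test schema, but the function names and the run()
# driver are hypothetical and simplify the sample-then-strip-brackets string
# handling of the original — it is not the real tdSql/tdLog harness.
# ---------------------------------------------------------------------------
import random

CAST_COLUMNS = ['q_bool', 'q_bigint', 'q_smallint', 'q_tinyint', 'q_int',
                'q_float', 'q_double', 'q_bigint_null', 'q_int_null']
CAST_TYPES = ['BIGINT', 'BINARY(100)', 'TIMESTAMP', 'NCHAR(100)',
              'BIGINT UNSIGNED']
WHERE_CLAUSES = ['ts < now + 1h', 'q_int != 100', 'q_bigint >= -100']
ORDER_CLAUSES = ['order by ts', 'order by ts desc']


def random_cast_expr() -> str:
    """Build one CAST(<column> AS <type>) expression from the sampled pools."""
    column = random.choice(CAST_COLUMNS)
    target = random.choice(CAST_TYPES)
    return f"CAST({column} AS {target})"


def nested_cast_query(table: str = 'regular_table_1') -> str:
    """Wrap two random CAST expressions in the nested sub-select shape used above."""
    asct1, asct2 = random_cast_expr(), random_cast_expr()
    inner = (f"select {asct1} as asct1, {asct2} as asct2, ts "
             f"from {table} where {random.choice(WHERE_CLAUSES)} "
             f"{random.choice(ORDER_CLAUSES)}")
    return f"select ts, asct1, now(), today(), timezone() from ( {inner} );"


def nested_union_query(table: str = 'regular_table_1') -> str:
    """Two nested sub-selects combined with UNION ALL, as in the 1-2/1-3 blocks."""
    left = nested_cast_query(table).rstrip(';')
    right = nested_cast_query(table).rstrip(';')
    return f"{left} union all {right};"


def run(rounds: int = 5) -> None:
    """Print a few generated statements; the real test feeds them to tdSql.query()
    and self.cur1.execute() inside a for-loop over self.fornum iterations."""
    for _ in range(rounds):
        print(nested_cast_query())
        print(nested_union_query())


if __name__ == '__main__':
    random.seed(0)  # deterministic output, for illustration only
    run()
# ---------------------------------------------------------------------------
# End of editor's note; the patch continues below.
# ---------------------------------------------------------------------------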
inter union not support tdSql.query("select 1-3 from stable_1;") for i in range(self.fornum): @@ -3735,7 +3735,7 @@ class TDTestCase: sql += "%s, " % random.choice(self.q_select) sql += "ts from regular_table_1 where " sql += "%s " % random.choice(self.q_where) - sql += "" + sql += "" sql += " union select " sql += "%s, " % random.choice(self.s_r_select) sql += "%s, " % random.choice(self.q_select) @@ -3743,12 +3743,12 @@ class TDTestCase: sql += "%s " % random.choice(self.q_where) sql += "%s " % random.choice(self.order_where) sql += ")" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #TD-15607 tdSql.query(sql) #tdSql.checkRows(200) #self.cur1.execute(sql) - + tdSql.query("select 1-3 from stable_1;") for i in range(self.fornum): #sql = "select ts , * from ( select " @@ -3764,47 +3764,47 @@ class TDTestCase: sql += "%s " % random.choice(self.q_where) sql += "%s " % random.choice(self.order_where) sql += ")" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #TD-15607 tdSql.query(sql) # tdSql.checkRows(300) #self.cur1.execute(sql) - - #join:select * from (select column form regular_table1,regular_table2 where t1.ts=t2.ts and <\>\in\and\or order by) + + #join:select * from (select column form regular_table1,regular_table2 where t1.ts=t2.ts and <\>\in\and\or order by) #self.dropandcreateDB_random("%s" %db, 1) tdSql.query("select 1-4 from stable_1;") for i in range(self.fornum): #sql = "select ts , * from ( select t1.ts ," sql = "select * from ( select t1.ts ," - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t2.%s, " % random.choice(self.q_select) - sql += "t2.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.q_u_where) - sql += "and %s " % random.choice(self.q_u_or_where) + sql += "and %s " % random.choice(self.q_u_or_where) sql += "%s " % random.choice(self.order_u_where) sql += ");" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) tdSql.checkRows(100) self.cur1.execute(sql) - #2 select column from (select * form regular_table ) where <\>\in\and\or order by + #2 select column from (select * form regular_table ) where <\>\in\and\or order by #self.dropandcreateDB_random("%s" %db, 1) tdSql.query("select 2-1 from stable_1;") for i in range(self.fornum): sql = "select ts ," sql += "%s, " % random.choice(self.s_r_select) - sql += "%s " % random.choice(self.q_select) + sql += "%s " % random.choice(self.q_select) sql += " from ( select * from regular_table_1 ) where " sql += "%s " % random.choice(self.q_where) sql += "%s " % random.choice(self.order_where) sql += " ;" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) tdSql.checkRows(100) self.cur1.execute(sql) @@ -3814,31 +3814,31 @@ class TDTestCase: tdSql.query("select 2-2 from stable_1;") for i in range(self.fornum): sql = "select ts , * from ( select t1.ts ," - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t2.%s, " % random.choice(self.q_select) - sql += "t2.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " 
% random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 ) where t1.ts = t2.ts and " sql += "%s " % random.choice(self.q_u_where) sql += "%s " % random.choice(self.order_u_where) #sql += ");" - tdLog.info(sql) - tdLog.info(len(sql)) - tdSql.error(sql) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.error(sql) - #3 select * from (select column\tag form stable where <\>\in\and\or order by ) + #3 select * from (select column\tag form stable where <\>\in\and\or order by ) #self.dropandcreateDB_random("%s" %db, 1) tdSql.query("select 3-1 from stable_1;") for i in range(self.fornum): sql = "select * from ( select " sql += "%s, " % random.choice(self.s_s_select) - sql += "%s, " % random.choice(self.q_select) - sql += "%s, " % random.choice(self.t_select) + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.t_select) sql += "ts from stable_1 where " sql += "%s " % random.choice(self.qt_where) sql += "%s " % random.choice(self.order_where) sql += ") ;" - tdLog.info(sql) + tdLog.info(sql) tdLog.info(len(sql)) tdSql.query(sql) tdSql.checkRows(300) @@ -3849,37 +3849,37 @@ class TDTestCase: sql += "%s " % random.choice(self.s_r_select) sql += "from ( select " sql += "%s, " % random.choice(self.s_s_select) - sql += "%s, " % random.choice(self.q_select) - sql += "%s, " % random.choice(self.t_select) + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.t_select) sql += "ts from stable_1 where " sql += "%s " % random.choice(self.qt_where) sql += "%s " % random.choice(self.order_where) sql += ") ;" - tdLog.info(sql) + tdLog.info(sql) tdLog.info(len(sql)) tdSql.query(sql) tdSql.checkRows(300) self.cur1.execute(sql) - # select ts,* from (select column\tag form stable1,stable2 where t1.ts = t2.ts and <\>\in\and\or order by ) + # select ts,* from (select column\tag form stable1,stable2 where t1.ts = t2.ts and <\>\in\and\or order by ) #self.dropandcreateDB_random("%s" %db, 1) tdSql.query("select 3-2 from stable_1;") for i in range(self.fornum): sql = "select ts , * from ( select t1.ts , " - sql += "t1.%s, " % random.choice(self.s_s_select) - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t2.%s, " % random.choice(self.s_s_select) - sql += "t2.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.s_s_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.s_s_select) + sql += "t2.%s, " % random.choice(self.q_select) sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.t_join_where) sql += "%s " % random.choice(self.order_u_where) sql += ") ;" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) # TD-15609 tdSql.query(sql) # tdSql.checkRows(100) #self.cur1.execute(sql) - + #3 outer union not support self.restartDnodes() tdSql.query("select 3-3 from stable_1;") @@ -3899,8 +3899,8 @@ class TDTestCase: sql += "%s " % random.choice(self.q_where) sql += "%s " % random.choice(self.order_where) sql += ")" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) tdSql.checkRows(300) self.cur1.execute(sql) @@ -3920,12 +3920,12 @@ class TDTestCase: sql += "%s " % random.choice(self.q_where) sql += "%s " % random.choice(self.order_where) sql += ")" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) 
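                # Hedged sketch of the nested "union all" shape the randomized builder above
                # assembles, written out with fixed choices so the expected query form is easier
                # to read than the %-concatenation. The column/tag names (q_int, loc) are
                # placeholders rather than the actual entries of self.q_select / self.t_select
                # defined earlier in this file, and the 600-row check just below reflects the
                # data sizes created in the setup (presumably 300 rows per super table), not
                # anything guaranteed by this sketch.
                def build_union_all_nested(col="q_int", tag="loc"):
                    inner_a = "select %s, %s, ts from stable_1 where %s > 0 order by ts" % (col, tag, col)
                    inner_b = "select %s, %s, ts from stable_2 where %s > 0 order by ts" % (col, tag, col)
                    return "select ts from ( %s union all %s )" % (inner_a, inner_b)
                # e.g. build_union_all_nested() yields one fixed instance of the query family
                # exercised by this loop.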
tdSql.query(sql) tdSql.checkRows(600) self.cur1.execute(sql) - + #3 inter union not support tdSql.query("select 3-4 from stable_1;") for i in range(self.fornum): @@ -3943,8 +3943,8 @@ class TDTestCase: sql += "%s " % random.choice(self.q_where) sql += "%s " % random.choice(self.order_where) sql += ")" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #TD-15837 tdSql.query(sql) # self.cur1.execute(sql) @@ -3952,16 +3952,16 @@ class TDTestCase: tdSql.query("select 3-5 from stable_1;") for i in range(self.fornum): sql = "select * from ( select t1.ts ," - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t2.%s, " % random.choice(self.q_select) - sql += "t2.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.t_u_where) sql += "%s " % random.choice(self.order_u_where) sql += ");" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) # TD-15609 tdSql.query(sql) # tdSql.checkRows(100) #self.cur1.execute(sql) @@ -3969,34 +3969,34 @@ class TDTestCase: tdSql.query("select 3-6 from stable_1;") for i in range(self.fornum): sql = "select * from ( select t1.ts ," - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t1.%s, " % random.choice(self.q_select) - sql += "t2.%s, " % random.choice(self.q_select) - sql += "t2.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.t_u_or_where) sql += "%s " % random.choice(self.order_u_where) sql += ");" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) # TD-15609 同上 tdSql.query(sql) # tdSql.checkRows(100) #self.cur1.execute(sql) - #4 select column from (select * form stable where <\>\in\and\or order by ) + #4 select column from (select * form stable where <\>\in\and\or order by ) #self.dropandcreateDB_random("%s" %db, 1) tdSql.query("select 4-1 from stable_1;") for i in range(self.fornum): sql = "select ts , " sql += "%s, " % random.choice(self.q_select) - sql += "%s, " % random.choice(self.q_select) - sql += "%s " % random.choice(self.t_select) + sql += "%s, " % random.choice(self.q_select) + sql += "%s " % random.choice(self.t_select) sql += " from ( select * from stable_1 where " sql += "%s " % random.choice(self.qt_where) sql += "%s " % random.choice(self.order_where) sql += ") ;" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) tdSql.checkRows(300) self.cur1.execute(sql) @@ -4010,8 +4010,8 @@ class TDTestCase: sql += "%s " % random.choice(self.qt_where) sql += "%s " % random.choice(self.order_where) sql += ") ;" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #TD-15500 tdSql.query(sql) #self.cur1.execute(sql) @@ -4020,13 +4020,13 @@ class TDTestCase: for i in range(self.fornum): sql = "select distinct c5_1 " sql += " from ( select " - sql += "%s " % random.choice(self.calc_select_in_ts) + sql += "%s " % random.choice(self.calc_select_in_ts) sql += " as c5_1 
from stable_1 where " sql += "%s " % random.choice(self.qt_where) #sql += "%s " % random.choice(order_where) sql += ") ;" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) #tdSql.checkRows(1)有的函数还没有提交,会不返回结果,先忽略 self.cur1.execute(sql) @@ -4040,8 +4040,8 @@ class TDTestCase: sql += "%s " % random.choice(self.qt_where) sql += "%s " % random.choice(self.order_desc_where) sql += ") ;" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.error(sql) tdSql.query("select 6-1 from stable_1;") for i in range(self.fornum): @@ -4049,8 +4049,8 @@ class TDTestCase: sql += "%s " % random.choice(self.dt_select) sql += " from stable_1 where " sql += "%s ) ;" % random.choice(self.qt_where) - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) self.cur1.execute(sql) @@ -4064,8 +4064,8 @@ class TDTestCase: sql += "%s " % random.choice(self.order_desc_where) sql += "%s " % random.choice([self.limit_where[0] , self.limit_where[1]] ) sql += ") ;" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.error(sql) #distinct 和 order by 不能混合使用 tdSql.query("select 7-1 from stable_1;") for i in range(self.fornum): @@ -4076,177 +4076,177 @@ class TDTestCase: #sql += "%s " % random.choice(order_desc_where) sql += "%s " % random.choice([self.limit_where[0] , self.limit_where[1]] ) sql += ") ;" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) tdSql.checkRows(1) self.cur1.execute(sql) #calc_select,TWA/Diff/Derivative/Irate are not allowed to apply to super table directly #8 select * from (select ts,calc form ragular_table where <\>\in\and\or order by ) - + # dcDB = self.dropandcreateDB_random("%s" %db, 1) tdSql.query("select 8-1 from stable_1;") for i in range(self.fornum): sql = "select * from ( select ts ," - sql += "%s " % random.choice(self.calc_select_support_ts) + sql += "%s " % random.choice(self.calc_select_support_ts) sql += "from regular_table_1 where " sql += "%s " % random.choice(self.q_where) sql += "%s " % random.choice(self.order_where) sql += "%s " % random.choice(self.limit1_where) sql += ") ;" - tdLog.info(sql) - tdLog.info(len(sql)) - tdSql.query(sql) # 聚合函数不在可以和ts一起使用了 DB error: Not a single-group group function + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) # 聚合函数不在可以和ts一起使用了 DB error: Not a single-group group function self.cur1.execute(sql) tdSql.query("select 8-1 from stable_1;") for i in range(self.fornum): sql = "select * from ( select " - sql += "%s " % random.choice(self.calc_select_not_support_ts) + sql += "%s " % random.choice(self.calc_select_not_support_ts) sql += "from regular_table_1 where " sql += "%s " % random.choice(self.q_where) sql += "%s " % random.choice(self.limit1_where) sql += ") ;" - tdLog.info(sql) - tdLog.info(len(sql)) - #TD-15651 tdSql.query(sql) # 聚合函数不在可以和ts一起使用了 DB error: Not a single-group group function + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15651 tdSql.query(sql) # 聚合函数不在可以和ts一起使用了 DB error: Not a single-group group function #self.cur1.execute(sql) - + for i in range(self.fornum): sql = "select * from ( select " - sql += "%s " % random.choice(self.calc_select_in_ts) + sql += "%s " % random.choice(self.calc_select_in_ts) sql += "from regular_table_1 where " sql += "%s " % random.choice(self.q_where) #sql += "%s " % random.choice(order_where) sql += "%s " % random.choice(self.limit1_where) sql += ") ;" - tdLog.info(sql) - tdLog.info(len(sql)) 
+ tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) self.cur1.execute(sql) tdSql.query("select 8-2 from stable_1;") for i in range(self.fornum): sql = "select * from ( select t1.ts, " - sql += "%s " % random.choice(self.calc_select_in_support_ts_j) + sql += "%s " % random.choice(self.calc_select_in_support_ts_j) sql += "from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.q_u_where) sql += "%s " % random.choice(self.order_u_where) sql += "%s " % random.choice(self.limit1_where) sql += ") ;" - tdLog.info(sql) - tdLog.info(len(sql)) - tdSql.query(sql)# 聚合函数不在可以和ts一起使用了 DB error: Not a single-group group function + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql)# 聚合函数不在可以和ts一起使用了 DB error: Not a single-group group function self.cur1.execute(sql) for i in range(self.fornum): sql = "select * from ( select " - sql += "%s " % random.choice(self.calc_select_in_not_support_ts_j) + sql += "%s " % random.choice(self.calc_select_in_not_support_ts_j) sql += "from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.q_u_where) sql += "%s " % random.choice(self.limit1_where) sql += ") ;" - tdLog.info(sql) - tdLog.info(len(sql)) - #TD-15651 tdSql.query(sql) - ##top返回结果有问题 tdSql.checkRows(1) - #self.cur1.execute(sql) + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15651 tdSql.query(sql) + ##top返回结果有问题 tdSql.checkRows(1) + #self.cur1.execute(sql) - #9 select * from (select ts,calc form stable where <\>\in\and\or order by ) + #9 select * from (select ts,calc form stable where <\>\in\and\or order by ) # self.dropandcreateDB_random("%s" %db, 1) tdSql.query("select 9-1 from stable_1;") for i in range(self.fornum): sql = "select * from ( select " - sql += "%s " % random.choice(self.calc_select_not_support_ts) + sql += "%s " % random.choice(self.calc_select_not_support_ts) sql += "from stable_1 where " sql += "%s " % random.choice(self.qt_where) sql += "%s " % random.choice(self.limit1_where) sql += ") ;" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #TD-15651 tdSql.query(sql) # self.cur1.execute(sql) tdSql.query("select 9-2 from stable_1;") for i in range(self.fornum): sql = "select * from ( select ts ," - sql += "%s " % random.choice(self.calc_select_support_ts) + sql += "%s " % random.choice(self.calc_select_support_ts) sql += "from stable_1 where " sql += "%s " % random.choice(self.qt_where) sql += "%s " % random.choice(self.order_where) sql += "%s " % random.choice(self.limit1_where) sql += ") ;" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) self.cur1.execute(sql) - + tdSql.query("select 9-3 from stable_1;") for i in range(self.fornum): sql = "select * from ( select " - sql += "%s " % random.choice(self.calc_select_in_not_support_ts_j) + sql += "%s " % random.choice(self.calc_select_in_not_support_ts_j) sql += "from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.t_join_where) sql += " and %s " % random.choice(self.qt_u_or_where) sql += "%s " % random.choice(self.limit1_where) sql += ") ;" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #TD-15651 tdSql.query(sql) #self.cur1.execute(sql) tdSql.query("select 9-4 from stable_1;") for i in range(self.fornum): sql = "select * from ( select t1.ts," - sql += "%s " % random.choice(self.calc_select_in_support_ts_j) + sql += "%s " % random.choice(self.calc_select_in_support_ts_j) sql += "from stable_1 
t1, stable_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.t_join_where) sql += " and %s " % random.choice(self.qt_u_or_where) sql += "%s " % random.choice(self.order_u_where) sql += "%s " % random.choice(self.limit1_where) sql += ") ;" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) self.cur1.execute(sql) - - #10 select calc from (select * form regualr_table where <\>\in\and\or order by ) + + #10 select calc from (select * form regualr_table where <\>\in\and\or order by ) tdSql.query("select 10-1 from stable_1;") for i in range(self.fornum): - sql = "select " - sql += "%s " % random.choice(self.calc_select_in_ts) + sql = "select " + sql += "%s " % random.choice(self.calc_select_in_ts) sql += "as calc10_1 from ( select * from regular_table_1 where " sql += "%s " % random.choice(self.q_where) sql += "%s " % random.choice(self.order_desc_where) sql += "%s " % random.choice(self.limit1_where) sql += ") ;" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) tdSql.checkRows(1) self.cur1.execute(sql) - + #10-1 select calc from (select * form regualr_table where <\>\in\and\or order by ) - # rsDn = self.restartDnodes() + # rsDn = self.restartDnodes() # self.dropandcreateDB_random("%s" %db, 1) # rsDn = self.restartDnodes() tdSql.query("select 10-2 from stable_1;") for i in range(self.fornum): - sql = "select " - sql += "%s " % random.choice(self.calc_select_all) + sql = "select " + sql += "%s " % random.choice(self.calc_select_all) sql += "as calc10_2 from ( select * from regular_table_1 where " sql += "%s " % random.choice(self.q_where) sql += "%s " % random.choice(self.order_desc_where) sql += "%s " % random.choice(self.limit1_where) sql += ") ;" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #TD-15651 tdSql.query(sql) # tdSql.checkRows(1) #self.cur1.execute(sql) - #10-2 select calc from (select * form regualr_tables where <\>\in\and\or order by ) + #10-2 select calc from (select * form regualr_tables where <\>\in\and\or order by ) tdSql.query("select 10-3 from stable_1;") for i in range(self.fornum): - sql = "select " - sql += "%s as calc10_3 " % random.choice(self.calc_select_all) + sql = "select " + sql += "%s as calc10_3 " % random.choice(self.calc_select_all) sql += " from ( select * from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.q_u_where) sql += " and %s " % random.choice(self.q_u_or_where) @@ -4254,15 +4254,15 @@ class TDTestCase: sql += "%s " % random.choice(self.limit_u_where) sql += ") " sql += "%s ;" % random.choice(self.limit1_where) - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #TD-15651 tdSql.query(sql) #self.cur1.execute(sql) tdSql.query("select 10-4 from stable_1;") for i in range(self.fornum): - sql = "select " - sql += "%s as calc10_4 " % random.choice(self.calc_select_all) + sql = "select " + sql += "%s as calc10_4 " % random.choice(self.calc_select_all) sql += " from ( select * from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.q_u_or_where) sql += " and %s " % random.choice(self.q_u_or_where) @@ -4270,89 +4270,89 @@ class TDTestCase: sql += "%s " % random.choice(self.limit_u_where) sql += ") " sql += "%s ;" % random.choice(self.limit_u_where) - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #TD-15651 tdSql.query(sql) - # tdSql.checkRows(1) + # 
tdSql.checkRows(1) #self.cur1.execute(sql) - #11 select calc from (select * form stable where <\>\in\and\or order by limit ) + #11 select calc from (select * form stable where <\>\in\and\or order by limit ) tdSql.query("select 11-1 from stable_1;") for i in range(self.fornum): sql = "select " - sql += "%s " % random.choice(self.calc_select_in_ts) + sql += "%s " % random.choice(self.calc_select_in_ts) sql += "as calc11_1 from ( select * from stable_1 where " sql += "%s " % random.choice(self.qt_where) sql += "%s " % random.choice(self.order_desc_where) sql += "%s " % random.choice(self.limit1_where) sql += ") ;" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) tdSql.checkRows(1) self.cur1.execute(sql) - #11-1 select calc from (select * form stable where <\>\in\and\or order by limit ) + #11-1 select calc from (select * form stable where <\>\in\and\or order by limit ) tdSql.query("select 11-2 from stable_1;") for i in range(self.fornum): sql = "select " - sql += "%s " % random.choice(self.calc_select_all) + sql += "%s " % random.choice(self.calc_select_all) sql += "as calc11_1 from ( select * from stable_1 where " sql += "%s " % random.choice(self.qt_where) sql += "%s " % random.choice(self.order_desc_where) sql += "%s " % random.choice(self.limit1_where) sql += ") ;" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #TD-15651 tdSql.query(sql) #self.cur1.execute(sql) #不好计算结果 tdSql.checkRows(1) - + #11-2 select calc from (select * form stables where <\>\in\and\or order by limit ) tdSql.query("select 11-3 from stable_1;") for i in range(self.fornum): sql = "select " - sql += "%s " % random.choice(self.calc_select_all) + sql += "%s " % random.choice(self.calc_select_all) sql += "as calc11_1 from ( select * from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.t_join_where) sql += "%s " % random.choice(self.order_u_where) sql += "%s " % random.choice(self.limit_u_where) sql += ") " sql += "%s ;" % random.choice(self.limit_u_where) - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #TD-15651 tdSql.query(sql) #self.cur1.execute(sql) tdSql.query("select 11-4 from stable_1;") for i in range(self.fornum): sql = "select " - sql += "%s " % random.choice(self.calc_select_all) + sql += "%s " % random.choice(self.calc_select_all) sql += "as calc11_1 from ( select * from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.qt_u_or_where) sql += "%s " % random.choice(self.order_u_where) sql += "%s " % random.choice(self.limit_u_where) sql += ") " sql += "%s ;" % random.choice(self.limit_u_where) - tdLog.info(sql) - tdLog.info(len(sql)) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) + tdLog.info(len(sql)) #TD-15651 tdSql.query(sql) #self.cur1.execute(sql) - #12 select calc-diff from (select * form regualr_table where <\>\in\and\or order by limit ) + #12 select calc-diff from (select * form regualr_table where <\>\in\and\or order by limit ) ##self.dropandcreateDB_random("%s" %db, 1) tdSql.query("select 12-1 from stable_1;") for i in range(self.fornum): sql = "select " - sql += "%s " % random.choice(self.calc_calculate_regular) + sql += "%s " % random.choice(self.calc_calculate_regular) sql += " from ( select * from regular_table_1 where " sql += "%s " % random.choice(self.q_where) sql += "%s " % random.choice(self.order_desc_where) sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] ) sql += ") 
;" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) ##目前derivative不支持 tdSql.query(sql) # tdSql.checkRows(1) #self.cur1.execute(sql) @@ -4360,14 +4360,14 @@ class TDTestCase: tdSql.query("select 12-2 from stable_1;") for i in range(self.fornum): sql = "select " - sql += "%s " % random.choice(self.calc_calculate_regular) + sql += "%s " % random.choice(self.calc_calculate_regular) sql += " from ( select * from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.q_u_where) sql += "%s " % random.choice(self.order_u_where) sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] ) sql += ") ;" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #目前derivative不支持 tdSql.query(sql) # tdSql.checkRows(1) #self.cur1.execute(sql) @@ -4375,14 +4375,14 @@ class TDTestCase: tdSql.query("select 12-2.2 from stable_1;") for i in range(self.fornum): sql = "select " - sql += "%s " % random.choice(self.calc_calculate_regular) + sql += "%s " % random.choice(self.calc_calculate_regular) sql += " from ( select * from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.q_u_or_where) sql += "%s " % random.choice(self.order_u_where) sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] ) sql += ") ;" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #目前derivative不支持 tdSql.query(sql) #self.cur1.execute(sql) @@ -4391,7 +4391,7 @@ class TDTestCase: self.restartDnodes() for i in range(self.fornum): sql = "select * from ( select " - sql += "%s " % random.choice(self.calc_calculate_regular) + sql += "%s " % random.choice(self.calc_calculate_regular) sql += " from stable_1 where " sql += "%s " % random.choice(self.q_where) sql += "%s " % random.choice(self.group_where) @@ -4399,8 +4399,8 @@ class TDTestCase: sql += "%s " % random.choice(self.order_desc_where) sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] ) sql += " ;" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #目前derivative不支持 tdSql.query(sql) #self.cur1.execute(sql) @@ -4408,7 +4408,7 @@ class TDTestCase: #join query does not support group by for i in range(self.fornum): sql = "select * from ( select " - sql += "%s " % random.choice(self.calc_calculate_regular_j) + sql += "%s " % random.choice(self.calc_calculate_regular_j) sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.t_join_where) sql += "%s " % random.choice(self.group_where_j) @@ -4416,8 +4416,8 @@ class TDTestCase: #sql += "%s " % random.choice(self.order_desc_where) sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] ) sql += " ;" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #tdSql.query(sql) 目前de函数不支持,另外看看需要不需要将group by和pari by分开 #self.cur1.execute(sql) @@ -4425,7 +4425,7 @@ class TDTestCase: #join query does not support group by for i in range(self.fornum): sql = "select * from ( select " - sql += "%s " % random.choice(self.calc_calculate_regular_j) + sql += "%s " % random.choice(self.calc_calculate_regular_j) sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.qt_u_or_where) sql += "%s " % random.choice(self.group_where_j) @@ -4433,43 +4433,43 @@ class TDTestCase: sql += "%s " % random.choice(self.order_desc_where) sql += "%s " % random.choice([self.limit_where[2] , 
self.limit_where[3]] ) sql += " ;" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #derivative not support tdSql.query(sql) #self.cur1.execute(sql) - + #13 select calc-diff as diffns from (select * form stable where <\>\in\and\or order by limit ) tdSql.query("select 13-1 from stable_1;") for i in range(self.fornum): sql = "select " - sql += "%s " % random.choice(self.calc_calculate_regular) + sql += "%s " % random.choice(self.calc_calculate_regular) sql += " as calc13_1 from ( select * from stable_1 where " sql += "%s " % random.choice(self.qt_where) sql += "%s " % random.choice(self.orders_desc_where) sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] ) sql += ") ;" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #derivative not support tdSql.query(sql) #self.cur1.execute(sql) #14 select * from (select calc_aggregate_alls as agg from stable where <\>\in\and\or group by order by slimit soffset ) - # TD-5955 select * from ( select count (q_double) from stable_1 where t_bool = true or t_bool = false group by loc order by ts asc slimit 1 ) ; + # TD-5955 select * from ( select count (q_double) from stable_1 where t_bool = true or t_bool = false group by loc order by ts asc slimit 1 ) ; tdSql.query("select 14-1 from stable_1;") for i in range(self.fornum): sql = "select * from ( select " - sql += "%s as calc14_1, " % random.choice(self.calc_aggregate_all) - sql += "%s as calc14_2, " % random.choice(self.calc_aggregate_all) - sql += "%s " % random.choice(self.calc_aggregate_all) + sql += "%s as calc14_1, " % random.choice(self.calc_aggregate_all) + sql += "%s as calc14_2, " % random.choice(self.calc_aggregate_all) + sql += "%s " % random.choice(self.calc_aggregate_all) sql += " as calc14_3 from stable_1 where " sql += "%s " % random.choice(self.qt_where) sql += "%s " % random.choice(self.group_where) sql += "%s " % random.choice(self.order_desc_where) sql += "%s " % random.choice(self.slimit1_where) sql += ") ;" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #TD-15678 tdSql.query(sql) # tdSql.checkRows(1) #self.cur1.execute(sql) @@ -4478,9 +4478,9 @@ class TDTestCase: tdSql.query("select 14-2 from stable_1;") for i in range(self.fornum): sql = "select * from ( select " - sql += "%s as calc14_1, " % random.choice(self.calc_aggregate_all) - sql += "%s as calc14_2, " % random.choice(self.calc_aggregate_all) - sql += "%s " % random.choice(self.calc_aggregate_all) + sql += "%s as calc14_1, " % random.choice(self.calc_aggregate_all) + sql += "%s as calc14_2, " % random.choice(self.calc_aggregate_all) + sql += "%s " % random.choice(self.calc_aggregate_all) sql += " as calc14_3 from stable_1 where " sql += "%s " % random.choice(self.qt_where) sql += "%s " % random.choice(self.group_where) @@ -4489,44 +4489,44 @@ class TDTestCase: sql += "%s " % random.choice(self.slimit1_where) sql += ") " sql += "%s " % random.choice(self.group_where) - tdLog.info(sql) + tdLog.info(sql) tdLog.info(len(sql)) #TD-15678 tdSql.query(sql) - # tdSql.checkRows(1) + # tdSql.checkRows(1) #self.cur1.execute(sql) #14-2 select * from (select calc_aggregate_all_js as agg from stables where <\>\in\and\or group by order by slimit soffset ) tdSql.query("select 14-3 from stable_1;") for i in range(self.fornum): sql = "select * from ( select " - sql += "%s as calc14_1, " % random.choice(self.calc_aggregate_all_j) - sql += "%s as calc14_2, " % random.choice(self.calc_aggregate_all_j) - sql += "%s " % 
random.choice(self.calc_aggregate_all_j) + sql += "%s as calc14_1, " % random.choice(self.calc_aggregate_all_j) + sql += "%s as calc14_2, " % random.choice(self.calc_aggregate_all_j) + sql += "%s " % random.choice(self.calc_aggregate_all_j) sql += " as calc14_3 from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.t_join_where) sql += "%s " % random.choice(self.partiton_where_j) sql += "%s " % random.choice(self.slimit1_where) sql += ") " sql += "%s ;" % random.choice(self.limit_u_where) - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) self.cur1.execute(sql) tdSql.query("select 14-4 from stable_1;") for i in range(self.fornum): sql = "select * from ( select " - sql += "%s as calc14_1, " % random.choice(self.calc_aggregate_all_j) - sql += "%s as calc14_2, " % random.choice(self.calc_aggregate_all_j) - sql += "%s " % random.choice(self.calc_aggregate_all_j) + sql += "%s as calc14_1, " % random.choice(self.calc_aggregate_all_j) + sql += "%s as calc14_2, " % random.choice(self.calc_aggregate_all_j) + sql += "%s " % random.choice(self.calc_aggregate_all_j) sql += " as calc14_3 from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.qt_u_or_where) sql += "%s " % random.choice(self.partiton_where_j) sql += "%s " % random.choice(self.slimit1_where) sql += ") " sql += "%s ;" % random.choice(self.limit_u_where) - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) self.cur1.execute(sql) @@ -4534,50 +4534,50 @@ class TDTestCase: tdSql.query("select 15-1 from stable_1;") for i in range(self.fornum): sql = "select * from ( select " - sql += "%s as calc15_1, " % random.choice(self.calc_aggregate_regular) - sql += "%s as calc15_2, " % random.choice(self.calc_aggregate_regular) - sql += "%s " % random.choice(self.calc_aggregate_regular) + sql += "%s as calc15_1, " % random.choice(self.calc_aggregate_regular) + sql += "%s as calc15_2, " % random.choice(self.calc_aggregate_regular) + sql += "%s " % random.choice(self.calc_aggregate_regular) sql += " as calc15_3 from regular_table_1 where " sql += "%s " % random.choice(self.q_where) - sql += "%s " % random.choice(self.group_where_regular) + sql += "%s " % random.choice(self.group_where_regular) sql += ") ;" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #tdSql.query(sql) #Invalid function name: twa' # tdSql.checkRows(1) #self.cur1.execute(sql) - + tdSql.query("select 15-2 from stable_1;") for i in range(self.fornum): sql = "select * from ( select " - sql += "%s as calc15_1, " % random.choice(self.calc_aggregate_regular_j) - sql += "%s as calc15_2, " % random.choice(self.calc_aggregate_regular_j) - sql += "%s " % random.choice(self.calc_aggregate_regular_j) + sql += "%s as calc15_1, " % random.choice(self.calc_aggregate_regular_j) + sql += "%s as calc15_2, " % random.choice(self.calc_aggregate_regular_j) + sql += "%s " % random.choice(self.calc_aggregate_regular_j) sql += " as calc15_3 from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.q_u_where) - sql += "%s " % random.choice(self.group_where_regular_j) + sql += "%s " % random.choice(self.group_where_regular_j) sql += "%s " % random.choice(self.limit_u_where) sql += ") " sql += "%s ;" % random.choice(self.limit_u_where) - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #tdSql.query(sql) #Invalid function name: twa' 
#self.cur1.execute(sql) tdSql.query("select 15-2.2 from stable_1;") for i in range(self.fornum): sql = "select * from ( select " - sql += "%s as calc15_1, " % random.choice(self.calc_aggregate_regular_j) - sql += "%s as calc15_2, " % random.choice(self.calc_aggregate_regular_j) - sql += "%s " % random.choice(self.calc_aggregate_regular_j) + sql += "%s as calc15_1, " % random.choice(self.calc_aggregate_regular_j) + sql += "%s as calc15_2, " % random.choice(self.calc_aggregate_regular_j) + sql += "%s " % random.choice(self.calc_aggregate_regular_j) sql += " as calc15_3 from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.q_u_or_where) - sql += "%s " % random.choice(self.group_where_regular_j) + sql += "%s " % random.choice(self.group_where_regular_j) sql += "%s " % random.choice(self.limit_u_where) sql += ") " sql += "%s ;" % random.choice(self.limit_u_where) - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #tdSql.query(sql) #Invalid function name: twa' #self.cur1.execute(sql) @@ -4585,222 +4585,222 @@ class TDTestCase: tdSql.query("select 15-3 from stable_1;") for i in range(self.fornum): sql = "select * from ( select " - sql += "%s as calc15_1, " % random.choice(self.calc_aggregate_groupbytbname) - sql += "%s as calc15_2, " % random.choice(self.calc_aggregate_groupbytbname) - sql += "%s " % random.choice(self.calc_aggregate_groupbytbname) + sql += "%s as calc15_1, " % random.choice(self.calc_aggregate_groupbytbname) + sql += "%s as calc15_2, " % random.choice(self.calc_aggregate_groupbytbname) + sql += "%s " % random.choice(self.calc_aggregate_groupbytbname) sql += " as calc15_3 from stable_1 where " sql += "%s " % random.choice(self.q_where) sql += "%s " % random.choice(self.group_where) sql += "%s " % random.choice(self.having_support) sql += "%s " % random.choice(self.order_desc_where) sql += ") " - sql += "order by calc15_1 " + sql += "order by calc15_1 " sql += "%s " % random.choice(self.limit_where) - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #tdSql.query(sql) #Invalid function name: twa',可能还的去掉order by #self.cur1.execute(sql) tdSql.query("select 15-4 from stable_1;") for i in range(self.fornum): sql = "select * from ( select " - sql += "%s as calc15_1, " % random.choice(self.calc_aggregate_groupbytbname_j) - sql += "%s as calc15_2, " % random.choice(self.calc_aggregate_groupbytbname_j) - sql += "%s " % random.choice(self.calc_aggregate_groupbytbname_j) + sql += "%s as calc15_1, " % random.choice(self.calc_aggregate_groupbytbname_j) + sql += "%s as calc15_2, " % random.choice(self.calc_aggregate_groupbytbname_j) + sql += "%s " % random.choice(self.calc_aggregate_groupbytbname_j) sql += " as calc15_3 from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.t_join_where) sql += "%s " % random.choice(self.group_where_j) sql += "%s " % random.choice(self.having_support_j) #sql += "%s " % random.choice(orders_desc_where) sql += ") " - sql += "order by calc15_1 " + sql += "order by calc15_1 " sql += "%s " % random.choice(self.limit_u_where) - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #tdSql.query(sql) #'Invalid function name: irate' #self.cur1.execute(sql) tdSql.query("select 15-4.2 from stable_1;") for i in range(self.fornum): sql = "select * from ( select " - sql += "%s as calc15_1, " % random.choice(self.calc_aggregate_groupbytbname_j) - sql += "%s as calc15_2, " % 
random.choice(self.calc_aggregate_groupbytbname_j) - sql += "%s " % random.choice(self.calc_aggregate_groupbytbname_j) + sql += "%s as calc15_1, " % random.choice(self.calc_aggregate_groupbytbname_j) + sql += "%s as calc15_2, " % random.choice(self.calc_aggregate_groupbytbname_j) + sql += "%s " % random.choice(self.calc_aggregate_groupbytbname_j) sql += " as calc15_3 from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.qt_u_or_where) sql += "%s " % random.choice(self.group_where_j) sql += "%s " % random.choice(self.having_support_j) sql += "%s " % random.choice(self.orders_desc_where) sql += ") " - sql += "order by calc15_1 " + sql += "order by calc15_1 " sql += "%s " % random.choice(self.limit_u_where) - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #TD-15678 #tdSql.query(sql) #self.cur1.execute(sql) tdSql.query("select 15-5 from stable_1;") for i in range(self.fornum): sql = "select * from ( select " - sql += "%s as calc15_1, " % random.choice(self.calc_aggregate_groupbytbname) - sql += "%s as calc15_2, " % random.choice(self.calc_aggregate_groupbytbname) - sql += "%s " % random.choice(self.calc_aggregate_groupbytbname) + sql += "%s as calc15_1, " % random.choice(self.calc_aggregate_groupbytbname) + sql += "%s as calc15_2, " % random.choice(self.calc_aggregate_groupbytbname) + sql += "%s " % random.choice(self.calc_aggregate_groupbytbname) sql += " as calc15_3 from stable_1 where " sql += "%s " % random.choice(self.q_where) sql += "%s " % random.choice(self.group_where) sql += ") " - sql += "order by calc15_1 " + sql += "order by calc15_1 " sql += "%s " % random.choice(self.limit_where) - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #tdSql.query(sql) #'Invalid function name: irate' #self.cur1.execute(sql) - #16 select * from (select calc_aggregate_regulars as agg from regular_table where <\>\in\and\or order by limit offset ) + #16 select * from (select calc_aggregate_regulars as agg from regular_table where <\>\in\and\or order by limit offset ) #self.dropandcreateDB_random("%s" %db, 1) tdSql.query("select 16-1 from stable_1;") for i in range(self.fornum): - sql = "select * from ( select " + sql = "select * from ( select " sql += "%s as calc16_0 , " % random.choice(self.calc_calculate_all) - sql += "%s as calc16_1 , " % random.choice(self.calc_aggregate_all) - sql += "%s as calc16_2 " % random.choice(self.calc_select_in) + sql += "%s as calc16_1 , " % random.choice(self.calc_aggregate_all) + sql += "%s as calc16_2 " % random.choice(self.calc_select_in) sql += " from stable_1 where " sql += "%s " % random.choice(self.q_where) sql += "%s " % random.choice(self.group_where) #sql += "%s " % random.choice(having_support)having和 partition不能混合使用 sql += ") " - sql += "order by calc16_0 " + sql += "order by calc16_0 " sql += "%s " % random.choice(self.limit1_where) - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #TD-15651 tdSql.query(sql) #self.cur1.execute(sql) tdSql.query("select 16-2 from stable_1;") for i in range(self.fornum): - sql = "select * from ( select " + sql = "select * from ( select " sql += "%s as calc16_0 " % random.choice(self.calc_calculate_all_j) - sql += ", %s as calc16_1 " % random.choice(self.calc_aggregate_all_j) - #sql += ", %s as calc16_2 " % random.choice(self.calc_select_in_j) + sql += ", %s as calc16_1 " % random.choice(self.calc_aggregate_all_j) + #sql += ", %s as calc16_2 " % random.choice(self.calc_select_in_j) sql += " from 
stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.t_join_where) sql += ") " - sql += "order by calc16_0 " + sql += "order by calc16_0 " sql += "%s " % random.choice(self.limit1_where) - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) self.cur1.execute(sql) tdSql.query("select 16-2.2 from stable_1;") for i in range(self.fornum): - sql = "select * from ( select " + sql = "select * from ( select " sql += "%s as calc16_0 " % random.choice(self.calc_calculate_all_j) - sql += ", %s as calc16_1 " % random.choice(self.calc_aggregate_all_j) + sql += ", %s as calc16_1 " % random.choice(self.calc_aggregate_all_j) sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.qt_u_or_where) sql += ") " - sql += "order by calc16_0 " + sql += "order by calc16_0 " sql += "%s " % random.choice(self.limit1_where) - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) self.cur1.execute(sql) - + tdSql.query("select 16-3 from stable_1;") for i in range(self.fornum): - sql = "select * from ( select " - sql += "%s as calc16_1 " % random.choice(self.calc_calculate_regular) + sql = "select * from ( select " + sql += "%s as calc16_1 " % random.choice(self.calc_calculate_regular) sql += " from regular_table_1 where " sql += "%s " % random.choice(self.q_where) sql += "limit 2 ) " sql += "%s " % random.choice(self.limit1_where) - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #tdSql.query(sql)#Invalid function name: derivative' #self.cur1.execute(sql) tdSql.query("select 16-4 from stable_1;") for i in range(self.fornum): - sql = "select * from ( select " - sql += "%s as calc16_1 " % random.choice(self.calc_calculate_regular_j) + sql = "select * from ( select " + sql += "%s as calc16_1 " % random.choice(self.calc_calculate_regular_j) sql += " from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.q_u_where) sql += "limit 2 ) " sql += "%s " % random.choice(self.limit1_where) - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #tdSql.query(sql)#Invalid function name: derivative' #self.cur1.execute(sql) tdSql.query("select 16-4.2 from stable_1;") for i in range(self.fornum): - sql = "select * from ( select " - sql += "%s as calc16_1 " % random.choice(self.calc_calculate_regular_j) + sql = "select * from ( select " + sql += "%s as calc16_1 " % random.choice(self.calc_calculate_regular_j) sql += " from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.q_u_or_where) sql += "limit 2 ) " sql += "%s " % random.choice(self.limit1_where) - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #tdSql.query(sql)#Invalid function name: derivative' #self.cur1.execute(sql) - + tdSql.query("select 16-5 from stable_1;") for i in range(self.fornum): - sql = "select * from ( select " + sql = "select * from ( select " sql += "%s as calc16_1 , " % random.choice(self.calc_calculate_all) - sql += "%s as calc16_1 , " % random.choice(self.calc_calculate_regular) - sql += "%s as calc16_2 " % random.choice(self.calc_select_all) + sql += "%s as calc16_1 , " % random.choice(self.calc_calculate_regular) + sql += "%s as calc16_2 " % random.choice(self.calc_select_all) sql += " from stable_1 where " sql += "%s " % random.choice(self.q_where) sql += "%s " % random.choice(self.group_where) #sql += "%s " % 
random.choice(having_support) sql += ") " - sql += "order by calc16_1 " + sql += "order by calc16_1 " sql += "%s " % random.choice(self.limit1_where) - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) # tdSql.query(sql) #self.cur1.execute(sql) - + tdSql.query("select 16-6 from stable_1;") for i in range(self.fornum): - sql = "select * from ( select " - sql += "%s as calc16_1 " % random.choice(self.calc_calculate_groupbytbname) + sql = "select * from ( select " + sql += "%s as calc16_1 " % random.choice(self.calc_calculate_groupbytbname) sql += " from stable_1 where " sql += "%s " % random.choice(self.q_where) sql += "%s " % random.choice(self.group_where) sql += "limit 2 ) " sql += "%s " % random.choice(self.limit1_where) - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #Invalid function name: derivative' tdSql.query(sql) #self.cur1.execute(sql) tdSql.query("select 16-7 from stable_1;") for i in range(self.fornum): - sql = "select * from ( select " - sql += "%s as calc16_1 " % random.choice(self.calc_calculate_groupbytbname_j) + sql = "select * from ( select " + sql += "%s as calc16_1 " % random.choice(self.calc_calculate_groupbytbname_j) sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.t_join_where) sql += "limit 2 ) " sql += "%s " % random.choice(self.limit1_where) - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #Invalid function name: derivative' tdSql.query(sql) #self.cur1.execute(sql) tdSql.query("select 16-8 from stable_1;") for i in range(self.fornum): - sql = "select * from ( select " - sql += "%s as calc16_1 " % random.choice(self.calc_calculate_groupbytbname_j) + sql = "select * from ( select " + sql += "%s as calc16_1 " % random.choice(self.calc_calculate_groupbytbname_j) sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.qt_u_or_where) sql += "limit 2 ) " sql += "%s " % random.choice(self.limit1_where) - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #Invalid function name: derivative' tdSql.query(sql) #self.cur1.execute(sql) @@ -4808,11 +4808,11 @@ class TDTestCase: #self.dropandcreateDB_random("%s" %db, 1) tdSql.query("select 17-1 from stable_1;") for i in range(self.fornum): - #this is having_support , but tag-select cannot mix with last_row,other select can + #this is having_support , but tag-select cannot mix with last_row,other select can sql = "select apercentile(cal17_0, %d)/10 ,apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) sql += "%s as cal17_0 , " % random.choice(self.calc_calculate_all) - sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all) - sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all) + sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all) + sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all) sql += " from stable_1 where " sql += "%s " % random.choice(self.qt_where) sql += "%s " % random.choice(self.partiton_where) @@ -4822,36 +4822,36 @@ class TDTestCase: sql += "%s " % random.choice(self.limit1_where) sql += ") " #sql += "%s " % random.choice(interval_sliding) - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) self.cur1.execute(sql) - + tdSql.query("select 17-2 from stable_1;") for i in 
range(self.fornum): - #this is having_support , but tag-select cannot mix with last_row,other select can + #this is having_support , but tag-select cannot mix with last_row,other select can sql = "select apercentile(cal17_0, %d)/10 ,apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) sql += "%s as cal17_0 , " % random.choice(self.calc_calculate_all_j) - sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all_j) - sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all_j) sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.t_join_where) sql += "%s " % random.choice(self.interval_sliding) sql += "%s " % random.choice(self.limit_u_where) sql += ") " #sql += "%s " % random.choice(interval_sliding) - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) self.cur1.execute(sql) tdSql.query("select 17-2.2 from stable_1;") for i in range(self.fornum): - #this is having_support , but tag-select cannot mix with last_row,other select can + #this is having_support , but tag-select cannot mix with last_row,other select can sql = "select apercentile(cal17_0, %d)/10 ,apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) sql += "%s as cal17_0 , " % random.choice(self.calc_calculate_all_j) - sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all_j) - sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all_j) sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.qt_u_or_where) sql += "%s " % random.choice(self.interval_sliding) @@ -4859,8 +4859,8 @@ class TDTestCase: sql += "%s " % random.choice(self.limit_u_where) sql += ") " #sql += "%s " % random.choice(self.interval_sliding) - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) self.cur1.execute(sql) @@ -4869,8 +4869,8 @@ class TDTestCase: for i in range(self.fornum): #this is having_tagnot_support , because tag-select cannot mix with last_row... sql = "select apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) - sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all) - sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all) + sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all) + sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all) sql += " from stable_1 where " sql += "%s " % random.choice(self.q_where) sql += "%s " % random.choice(self.partiton_where) @@ -4880,8 +4880,8 @@ class TDTestCase: sql += "%s " % random.choice(self.limit1_where) sql += ") " #sql += "%s " % random.choice(self.interval_sliding) - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) self.cur1.execute(sql) @@ -4889,8 +4889,8 @@ class TDTestCase: for i in range(self.fornum): #this is having_tagnot_support , because tag-select cannot mix with last_row... 
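            # Hedged sketch of the 17-x pattern: apercentile() in the outer query is applied to
            # columns aliased inside the windowed sub-select. The percentile values, q_int, the
            # aggregate functions and the interval(1m) sliding(30s) literal are placeholders,
            # not the randomized self.calc_aggregate_all / self.interval_sliding choices used here.
            import random
            def build_apercentile_wrapper():
                p1 = random.randint(0, 100)
                p2 = random.randint(0, 100)
                inner = ("select avg(q_int) as cal17_1, max(q_int) as cal17_2 from stable_1 "
                         "where q_int > 0 interval(1m) sliding(30s) limit 10")
                return ("select apercentile(cal17_1, %d)/1000, apercentile(cal17_2, %d)*10 "
                        "from ( %s );" % (p1, p2, inner))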
sql = "select apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) - sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all_j) - sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all_j) sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.t_join_where) sql += "%s " % random.choice(self.interval_sliding) @@ -4898,8 +4898,8 @@ class TDTestCase: sql += "%s " % random.choice(self.limit_u_where) sql += ") " #sql += "%s " % random.choice(interval_sliding) - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) self.cur1.execute(sql) @@ -4907,8 +4907,8 @@ class TDTestCase: for i in range(self.fornum): #this is having_tagnot_support , because tag-select cannot mix with last_row... sql = "select apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) - sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all_j) - sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all_j) sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.qt_u_or_where) sql += "%s " % random.choice(self.interval_sliding) @@ -4916,17 +4916,17 @@ class TDTestCase: sql += "%s " % random.choice(self.limit_u_where) sql += ") " #sql += "%s " % random.choice(self.interval_sliding) - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) self.cur1.execute(sql) tdSql.query("select 17-5 from stable_1;") for i in range(self.fornum): - #having_not_support + #having_not_support sql = "select apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) - sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all) - sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all) + sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all) + sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all) sql += " from stable_1 where " sql += "%s " % random.choice(self.qt_where) sql += "%s " % random.choice(self.partiton_where) @@ -4936,16 +4936,16 @@ class TDTestCase: sql += "%s " % random.choice(self.limit1_where) sql += ") " #sql += "%s " % random.choice(self.interval_sliding) - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) self.cur1.execute(sql) - + tdSql.query("select 17-6 from stable_1;") for i in range(self.fornum): sql = "select apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) - sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all) - sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all) + sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all) + sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all) sql += " from stable_1 where " sql += "%s " % random.choice(self.q_where) sql += "%s " % random.choice(self.interval_sliding) @@ -4953,16 +4953,16 @@ class 
TDTestCase: sql += "%s " % random.choice(self.limit1_where) sql += ") " #sql += "%s " % random.choice(interval_sliding) - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) self.cur1.execute(sql) tdSql.query("select 17-7 from stable_1;") for i in range(self.fornum): sql = "select apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) - sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all_j) - sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all_j) sql += " from stable_1_1 t1, stable_1_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.q_u_where) sql += "%s " % random.choice(self.interval_sliding) @@ -4970,16 +4970,16 @@ class TDTestCase: sql += "%s " % random.choice(self.limit1_where) sql += ") " #sql += "%s " % random.choice(interval_sliding) - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) self.cur1.execute(sql) tdSql.query("select 17-7.2 from stable_1;") for i in range(self.fornum): sql = "select apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) - sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all_j) - sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all_j) sql += " from stable_1_1 t1, stable_1_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.q_u_or_where) sql += "%s " % random.choice(self.interval_sliding) @@ -4987,8 +4987,8 @@ class TDTestCase: sql += "%s " % random.choice(self.limit1_where) sql += ") " #sql += "%s " % random.choice(interval_sliding) - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) self.cur1.execute(sql) @@ -4996,8 +4996,8 @@ class TDTestCase: tdSql.query("select 17-8 from stable_1;") for i in range(self.fornum): sql = "select apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) - sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all) - sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all) + sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all) + sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all) sql += " from regular_table_1 where " sql += "%s " % random.choice(self.q_where) sql += "%s " % random.choice(self.interval_sliding) @@ -5005,16 +5005,16 @@ class TDTestCase: sql += "%s " % random.choice(self.limit1_where) sql += ") " #sql += "%s " % random.choice(interval_sliding) - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) self.cur1.execute(sql) - + tdSql.query("select 17-9 from stable_1;") for i in range(self.fornum): sql = "select apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) - sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all_j) - sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal17_1 ," % 
random.choice(self.calc_aggregate_all_j) + sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all_j) sql += " from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.q_u_where) sql += "%s " % random.choice(self.interval_sliding) @@ -5022,16 +5022,16 @@ class TDTestCase: sql += "%s " % random.choice(self.limit_u_where) sql += ") " #sql += "%s " % random.choice(interval_sliding) - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) self.cur1.execute(sql) tdSql.query("select 17-10 from stable_1;") for i in range(self.fornum): sql = "select apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) - sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all_j) - sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all_j) sql += " from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.q_u_or_where) sql += "%s " % random.choice(self.interval_sliding) @@ -5039,8 +5039,8 @@ class TDTestCase: sql += "%s " % random.choice(self.limit_u_where) sql += ") " #sql += "%s " % random.choice(interval_sliding) - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) self.cur1.execute(sql) @@ -5048,8 +5048,8 @@ class TDTestCase: tdSql.query("select 18-1 from stable_1;") for i in range(self.fornum): sql = "select apercentile(cal18_1, %d)/1000 ,apercentile(cal18_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) - sql += "%s as cal18_1 ," % random.choice(self.calc_aggregate_all) - sql += "%s as cal18_2 " % random.choice(self.calc_aggregate_all) + sql += "%s as cal18_1 ," % random.choice(self.calc_aggregate_all) + sql += "%s as cal18_2 " % random.choice(self.calc_aggregate_all) sql += " from regular_table_1 where " sql += "%s " % random.choice(self.q_where) sql += "%s " % random.choice(self.session_where) @@ -5058,16 +5058,16 @@ class TDTestCase: sql += "%s " % random.choice(self.limit1_where) sql += ") " #sql += "%s " % random.choice(interval_sliding) - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) self.cur1.execute(sql) - + tdSql.query("select 18-2 from stable_1;") for i in range(self.fornum): sql = "select apercentile(cal18_1, %d)/1000 ,apercentile(cal18_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) - sql += "%s as cal18_1 ," % random.choice(self.calc_aggregate_all_j) - sql += "%s as cal18_2 " % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal18_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal18_2 " % random.choice(self.calc_aggregate_all_j) sql += " from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.q_u_where) sql += "%s " % random.choice(self.session_u_where) @@ -5075,16 +5075,16 @@ class TDTestCase: sql += "%s " % random.choice(self.limit_u_where) sql += ") " #sql += "%s " % random.choice(interval_sliding) - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) self.cur1.execute(sql) tdSql.query("select 18-2.2 from stable_1;") for i in range(self.fornum): sql = "select 
apercentile(cal18_1, %d)/1000 ,apercentile(cal18_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) - sql += "%s as cal18_1 ," % random.choice(self.calc_aggregate_all_j) - sql += "%s as cal18_2 " % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal18_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal18_2 " % random.choice(self.calc_aggregate_all_j) sql += " from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.q_u_or_where) sql += "%s " % random.choice(self.session_u_where) @@ -5092,8 +5092,8 @@ class TDTestCase: sql += "%s " % random.choice(self.limit_u_where) sql += ") " #sql += "%s " % random.choice(self.interval_sliding) - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) self.cur1.execute(sql) @@ -5101,8 +5101,8 @@ class TDTestCase: tdSql.query("select 18-3 from stable_1;") for i in range(self.fornum): sql = "select apercentile(cal18_1, %d)/1000 ,apercentile(cal18_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) - sql += "%s as cal18_1 ," % random.choice(self.calc_aggregate_all) - sql += "%s as cal18_2 " % random.choice(self.calc_aggregate_all) + sql += "%s as cal18_1 ," % random.choice(self.calc_aggregate_all) + sql += "%s as cal18_2 " % random.choice(self.calc_aggregate_all) sql += " from stable_1_1 where " sql += "%s " % random.choice(self.q_where) sql += "%s " % random.choice(self.session_where) @@ -5111,16 +5111,16 @@ class TDTestCase: sql += "%s " % random.choice(self.limit1_where) sql += ") " #sql += "%s " % random.choice(self.interval_sliding) - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) self.cur1.execute(sql) tdSql.query("select 18-4 from stable_1;") for i in range(self.fornum): sql = "select apercentile(cal18_1, %d)/1000 ,apercentile(cal18_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) - sql += "%s as cal18_1 ," % random.choice(self.calc_aggregate_all_j) - sql += "%s as cal18_2 " % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal18_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal18_2 " % random.choice(self.calc_aggregate_all_j) sql += " from stable_1_1 t1, regular_table_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.q_u_where) sql += "%s " % random.choice(self.session_u_where) @@ -5128,16 +5128,16 @@ class TDTestCase: sql += "%s " % random.choice(self.limit_u_where) sql += ") " #sql += "%s " % random.choice(self.interval_sliding) - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) self.cur1.execute(sql) tdSql.query("select 18-4.2 from stable_1;") for i in range(self.fornum): sql = "select apercentile(cal18_1, %d)/1000 ,apercentile(cal18_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) - sql += "%s as cal18_1 ," % random.choice(self.calc_aggregate_all_j) - sql += "%s as cal18_2 " % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal18_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal18_2 " % random.choice(self.calc_aggregate_all_j) sql += " from stable_1_1 t1, regular_table_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.q_u_or_where) sql += "%s " % random.choice(self.session_u_where) @@ -5145,16 +5145,16 @@ class TDTestCase: sql += "%s " % 
random.choice(self.limit_u_where) sql += ") " #sql += "%s " % random.choice(self.interval_sliding) - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) self.cur1.execute(sql) tdSql.query("select 18-5 from stable_1;") for i in range(self.fornum): sql = "select apercentile(cal18_1, %d)/1000 ,apercentile(cal18_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) - sql += "%s as cal18_1 ," % random.choice(self.calc_aggregate_all) - sql += "%s as cal18_2 " % random.choice(self.calc_aggregate_all) + sql += "%s as cal18_1 ," % random.choice(self.calc_aggregate_all) + sql += "%s as cal18_2 " % random.choice(self.calc_aggregate_all) sql += " from stable_1 where " sql += "%s " % random.choice(self.q_where) sql += "%s " % random.choice(self.session_where) @@ -5163,16 +5163,16 @@ class TDTestCase: sql += "%s " % random.choice(self.limit1_where) sql += ") " #sql += "%s " % random.choice(interval_sliding) - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) self.cur1.execute(sql) tdSql.query("select 18-6 from stable_1;") for i in range(self.fornum): sql = "select apercentile(cal18_1, %d)/1000 ,apercentile(cal18_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) - sql += "%s as cal18_1 ," % random.choice(self.calc_aggregate_all_j) - sql += "%s as cal18_2 " % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal18_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal18_2 " % random.choice(self.calc_aggregate_all_j) sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.t_join_where) sql += "%s " % random.choice(self.session_u_where) @@ -5180,16 +5180,16 @@ class TDTestCase: sql += "%s " % random.choice(self.limit_u_where) sql += ") " #sql += "%s " % random.choice(self.interval_sliding) - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) self.cur1.execute(sql) - + tdSql.query("select 18-7 from stable_1;") for i in range(self.fornum): sql = "select apercentile(cal18_1, %d)/1000 ,apercentile(cal18_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) - sql += "%s as cal18_1 ," % random.choice(self.calc_aggregate_all_j) - sql += "%s as cal18_2 " % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal18_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal18_2 " % random.choice(self.calc_aggregate_all_j) sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " sql += "%s " % random.choice(self.qt_u_or_where) sql += "%s " % random.choice(self.session_u_where) @@ -5197,454 +5197,454 @@ class TDTestCase: sql += "%s " % random.choice(self.limit_u_where) sql += ") " #sql += "%s " % random.choice(self.interval_sliding) - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) self.cur1.execute(sql) - #19 select apercentile from (select calc_aggregate_alls form regualr_table or stable where <\>\in\and\or session order by limit )interval_sliding + #19 select apercentile from (select calc_aggregate_alls form regualr_table or stable where <\>\in\and\or session order by limit )interval_sliding #self.dropandcreateDB_random("%s" %db, 1) tdSql.query("select 19-1 from stable_1;") for i in range(self.fornum): sql = "select apercentile(cal19_1, %d)/1000 ,apercentile(cal19_2, %d)*10+%d from ( select " 
%(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) - sql += "%s as cal19_1 ," % random.choice(self.calc_aggregate_all) - sql += "%s as cal19_2 " % random.choice(self.calc_aggregate_all) + sql += "%s as cal19_1 ," % random.choice(self.calc_aggregate_all) + sql += "%s as cal19_2 " % random.choice(self.calc_aggregate_all) sql += " from regular_table_1 where " - sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.q_where) sql += "%s " % random.choice(self.state_window) #sql += "%s " % random.choice(self.order_where) sql += "%s " % random.choice(self.limit1_where) sql += ") " - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) self.cur1.execute(sql) tdSql.query("select 19-2 from stable_1;") for i in range(self.fornum): sql = "select apercentile(cal19_1, %d)/1000 ,apercentile(cal19_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) - sql += "%s as cal19_1 ," % random.choice(self.calc_aggregate_all_j) - sql += "%s as cal19_2 " % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal19_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal19_2 " % random.choice(self.calc_aggregate_all_j) sql += " from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and " - sql += "%s " % random.choice(self.q_u_where) + sql += "%s " % random.choice(self.q_u_where) sql += "%s " % random.choice(self.state_u_window) #sql += "%s " % random.choice(self.order_u_where) sql += "%s " % random.choice(self.limit_u_where) sql += ") " - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) self.cur1.execute(sql) tdSql.query("select 19-2.2 from stable_1;") for i in range(self.fornum): sql = "select apercentile(cal19_1, %d)/1000 ,apercentile(cal19_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) - sql += "%s as cal19_1 ," % random.choice(self.calc_aggregate_all_j) - sql += "%s as cal19_2 " % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal19_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal19_2 " % random.choice(self.calc_aggregate_all_j) sql += " from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and " - sql += "%s " % random.choice(self.q_u_or_where) + sql += "%s " % random.choice(self.q_u_or_where) sql += "%s " % random.choice(self.state_u_window) #sql += "%s " % random.choice(self.order_u_where) sql += "%s " % random.choice(self.limit_u_where) sql += ") " - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) self.cur1.execute(sql) - + tdSql.query("select 19-3 from stable_1;") for i in range(self.fornum): sql = "select apercentile(cal19_1, %d)/1000 ,apercentile(cal19_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) - sql += "%s as cal19_1 ," % random.choice(self.calc_aggregate_all) - sql += "%s as cal19_2 " % random.choice(self.calc_aggregate_all) + sql += "%s as cal19_1 ," % random.choice(self.calc_aggregate_all) + sql += "%s as cal19_2 " % random.choice(self.calc_aggregate_all) sql += " from stable_1_1 where " - sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.q_where) sql += "%s " % random.choice(self.state_window) #sql += "%s " % random.choice(self.order_where) sql += "%s " % random.choice(self.limit1_where) sql += ") " - tdLog.info(sql) - tdLog.info(len(sql)) + 
tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) self.cur1.execute(sql) tdSql.query("select 19-4 from stable_1;") for i in range(self.fornum): sql = "select apercentile(cal19_1, %d)/1000 ,apercentile(cal19_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) - sql += "%s as cal19_1 ," % random.choice(self.calc_aggregate_all_j) - sql += "%s as cal19_2 " % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal19_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal19_2 " % random.choice(self.calc_aggregate_all_j) sql += " from stable_1_1 t1, stable_1_2 t2 where t1.ts = t2.ts and " - sql += "%s " % random.choice(self.q_u_where) + sql += "%s " % random.choice(self.q_u_where) #sql += "%s " % random.choice(self.state_window) #sql += "%s " % random.choice(self.order_u_where) sql += "%s " % random.choice(self.limit_u_where) sql += ") " - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) self.cur1.execute(sql) tdSql.query("select 19-4.2 from stable_1;") for i in range(self.fornum): sql = "select apercentile(cal19_1, %d)/1000 ,apercentile(cal19_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) - sql += "%s as cal19_1 ," % random.choice(self.calc_aggregate_all_j) - sql += "%s as cal19_2 " % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal19_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal19_2 " % random.choice(self.calc_aggregate_all_j) sql += " from stable_1_1 t1, stable_1_2 t2 where t1.ts = t2.ts and " - sql += "%s " % random.choice(self.q_u_or_where) + sql += "%s " % random.choice(self.q_u_or_where) #sql += "%s " % random.choice(self.order_u_where) sql += "%s " % random.choice(self.limit_u_where) sql += ") " - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) self.cur1.execute(sql) tdSql.query("select 19-5 from stable_1;") for i in range(self.fornum): sql = "select apercentile(cal19_1, %d)/1000 ,apercentile(cal19_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) - sql += "%s as cal19_1 ," % random.choice(self.calc_aggregate_all) - sql += "%s as cal19_2 " % random.choice(self.calc_aggregate_all) + sql += "%s as cal19_1 ," % random.choice(self.calc_aggregate_all) + sql += "%s as cal19_2 " % random.choice(self.calc_aggregate_all) sql += " from stable_1 where " - sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.q_where) sql += "%s " % random.choice(self.state_window) sql += "%s " % random.choice(self.order_where) sql += "%s " % random.choice(self.limit1_where) sql += ") " sql += "%s " % random.choice(self.interval_sliding) - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.error(sql) #'STATE_WINDOW not support for super table query' - + tdSql.query("select 19-6 from stable_1;") for i in range(self.fornum): sql = "select apercentile(cal19_1, %d)/1000 ,apercentile(cal19_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) - sql += "%s as cal19_1 ," % random.choice(self.calc_aggregate_all_j) - sql += "%s as cal19_2 " % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal19_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal19_2 " % random.choice(self.calc_aggregate_all_j) sql += " from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " - sql += "%s " 
% random.choice(self.q_u_where) + sql += "%s " % random.choice(self.q_u_where) #sql += "%s " % random.choice(self.state_window) #sql += "%s " % random.choice(self.order_u_where) sql += "%s " % random.choice(self.limit_u_where) sql += ") " #sql += "%s " % random.choice(self.interval_sliding) - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) self.cur1.execute(sql) tdSql.query("select 19-7 from stable_1;") for i in range(self.fornum): sql = "select apercentile(cal19_1, %d)/1000 ,apercentile(cal19_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) - sql += "%s as cal19_1 ," % random.choice(self.calc_aggregate_all_j) - sql += "%s as cal19_2 " % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal19_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal19_2 " % random.choice(self.calc_aggregate_all_j) sql += " from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " - sql += "%s " % random.choice(self.qt_u_or_where) + sql += "%s " % random.choice(self.qt_u_or_where) #sql += "%s " % random.choice(self.order_u_where) sql += "%s " % random.choice(self.limit_u_where) sql += ") " #sql += "%s " % random.choice(self.interval_sliding) - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) self.cur1.execute(sql) - #20 select * from (select calc_select_fills form regualr_table or stable where <\>\in\and\or fill_where group by order by limit offset ) + #20 select * from (select calc_select_fills form regualr_table or stable where <\>\in\and\or fill_where group by order by limit offset ) #self.dropandcreateDB_random("%s" %db, 1) tdSql.query("select 20-1 from stable_1;") for i in range(self.fornum): - sql = "select * from ( select " + sql = "select * from ( select " sql += "%s , " % random.choice(self.calc_select_fill) - sql += "%s ," % random.choice(self.calc_select_fill) - sql += "%s " % random.choice(self.calc_select_fill) + sql += "%s ," % random.choice(self.calc_select_fill) + sql += "%s " % random.choice(self.calc_select_fill) sql += " from stable_1 where " - sql += "%s " % random.choice(self.interp_where) + sql += "%s " % random.choice(self.interp_where) sql += "%s " % random.choice(self.fill_where) sql += "%s " % random.choice(self.group_where) sql += "%s " % random.choice(self.order_where) sql += "%s " % random.choice(self.limit_where) sql += ") " - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #interp不支持 tdSql.query(sql) #self.cur1.execute(sql) rsDn = self.restartDnodes() tdSql.query("select 20-2 from stable_1;") for i in range(self.fornum): - sql = "select * from ( select " + sql = "select * from ( select " sql += "%s , " % random.choice(self.calc_select_fill_j) - sql += "%s ," % random.choice(self.calc_select_fill_j) - sql += "%s " % random.choice(self.calc_select_fill_j) + sql += "%s ," % random.choice(self.calc_select_fill_j) + sql += "%s " % random.choice(self.calc_select_fill_j) sql += " from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " sql += "%s and " % random.choice(self.t_join_where) - sql += "%s " % random.choice(self.interp_where_j) + sql += "%s " % random.choice(self.interp_where_j) sql += "%s " % random.choice(self.order_u_where) sql += "%s " % random.choice(self.limit_u_where) sql += ") " - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #interp不支持 tdSql.query(sql) #self.cur1.execute(sql) tdSql.query("select 20-2.2 from stable_1;") for i in 
range(self.fornum): - sql = "select * from ( select " + sql = "select * from ( select " sql += "%s , " % random.choice(self.calc_select_fill_j) - sql += "%s ," % random.choice(self.calc_select_fill_j) - sql += "%s " % random.choice(self.calc_select_fill_j) + sql += "%s ," % random.choice(self.calc_select_fill_j) + sql += "%s " % random.choice(self.calc_select_fill_j) sql += " from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " sql += "%s and " % random.choice(self.qt_u_or_where) - sql += "%s " % random.choice(self.interp_where_j) + sql += "%s " % random.choice(self.interp_where_j) sql += "%s " % random.choice(self.order_u_where) sql += "%s " % random.choice(self.limit_u_where) sql += ") " - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #interp不支持 tdSql.query(sql) #self.cur1.execute(sql) tdSql.query("select 20-3 from stable_1;") for i in range(self.fornum): - sql = "select * from ( select " + sql = "select * from ( select " sql += "%s , " % random.choice(self.calc_select_fill) - sql += "%s ," % random.choice(self.calc_select_fill) - sql += "%s " % random.choice(self.calc_select_fill) + sql += "%s ," % random.choice(self.calc_select_fill) + sql += "%s " % random.choice(self.calc_select_fill) sql += " from stable_1 where " - sql += "%s " % self.interp_where[2] + sql += "%s " % self.interp_where[2] sql += "%s " % random.choice(self.fill_where) sql += "%s " % random.choice(self.order_where) sql += "%s " % random.choice(self.limit_where) sql += ") " - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #interp不支持 tdSql.query(sql) #self.cur1.execute(sql) - + tdSql.query("select 20-4 from stable_1;") for i in range(self.fornum): - sql = "select * from ( select " + sql = "select * from ( select " sql += "%s , " % random.choice(self.calc_select_fill_j) - sql += "%s ," % random.choice(self.calc_select_fill_j) - sql += "%s " % random.choice(self.calc_select_fill_j) - sql += " from stable_1 t1, table_1 t2 where t1.ts = t2.ts and " + sql += "%s ," % random.choice(self.calc_select_fill_j) + sql += "%s " % random.choice(self.calc_select_fill_j) + sql += " from stable_1 t1, table_1 t2 where t1.ts = t2.ts and " #sql += "%s and " % random.choice(self.t_join_where) sql += "%s " % self.interp_where_j[random.randint(0,5)] sql += "%s " % random.choice(self.order_u_where) sql += "%s " % random.choice(self.limit_u_where) sql += ") " - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) #interp不支持 tdSql.query(sql) #self.cur1.execute(sql) tdSql.query("select 20-4.2 from stable_1;") for i in range(self.fornum): - sql = "select * from ( select " + sql = "select * from ( select " sql += "%s , " % random.choice(self.calc_select_fill_j) - sql += "%s ," % random.choice(self.calc_select_fill_j) - sql += "%s " % random.choice(self.calc_select_fill_j) - sql += " from stable_1 t1, stable_1_1 t2 where t1.ts = t2.ts and " + sql += "%s ," % random.choice(self.calc_select_fill_j) + sql += "%s " % random.choice(self.calc_select_fill_j) + sql += " from stable_1 t1, stable_1_1 t2 where t1.ts = t2.ts and " sql += "%s and " % random.choice(self.qt_u_or_where) sql += "%s " % self.interp_where_j[random.randint(0,5)] sql += "%s " % random.choice(self.fill_where) sql += "%s " % random.choice(self.order_u_where) sql += "%s " % random.choice(self.limit_u_where) sql += ") " - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) ##interp不支持 tdSql.error(sql) #self.cur1.execute(sql) - + tdSql.query("select 20-5 from 
stable_1;") for i in range(self.fornum): - sql = "select * from ( select " + sql = "select * from ( select " sql += "%s , " % random.choice(self.calc_select_fill) - sql += "%s ," % random.choice(self.calc_select_fill) - sql += "%s " % random.choice(self.calc_select_fill) + sql += "%s ," % random.choice(self.calc_select_fill) + sql += "%s " % random.choice(self.calc_select_fill) sql += " from regular_table_1 where " - sql += "%s " % self.interp_where[1] + sql += "%s " % self.interp_where[1] sql += "%s " % random.choice(self.fill_where) sql += "%s " % random.choice(self.order_where) sql += "%s " % random.choice(self.limit_where) sql += ") " - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) ##interp不支持 tdSql.query(sql) #self.cur1.execute(sql) tdSql.query("select 20-6 from stable_1;") for i in range(self.fornum): - sql = "select * from ( select " + sql = "select * from ( select " sql += "%s , " % random.choice(self.calc_select_fill_j) - sql += "%s ," % random.choice(self.calc_select_fill_j) - sql += "%s " % random.choice(self.calc_select_fill_j) + sql += "%s ," % random.choice(self.calc_select_fill_j) + sql += "%s " % random.choice(self.calc_select_fill_j) sql += " from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and " - #sql += "%s " % random.choice(self.interp_where_j) - sql += "%s " % self.interp_where_j[random.randint(0,5)] + #sql += "%s " % random.choice(self.interp_where_j) + sql += "%s " % self.interp_where_j[random.randint(0,5)] sql += "%s " % random.choice(self.order_u_where) sql += "%s " % random.choice(self.limit_u_where) sql += ") " - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) ##interp不支持 tdSql.query(sql) #self.cur1.execute(sql) #1 select * from (select * from (select * form regular_table where <\>\in\and\or order by limit )) tdSql.query("select 1-1 from stable_1;") - for i in range(self.fornum): + for i in range(self.fornum): # sql_start = "select * from ( " # sql_end = ")" for_num = random.randint(1, 15); - sql = "select * from (" * for_num + sql = "select * from (" * for_num sql += "select * from ( select * from ( select " - sql += "%s, " % random.choice(self.s_r_select) - sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) sql += "ts from regular_table_1 where " sql += "%s " % random.choice(self.q_where) sql += ")) " - sql += ")" * for_num - tdLog.info(sql) - tdLog.info(len(sql)) - tdSql.query(sql) + sql += ")" * for_num + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) self.cur1.execute(sql) - + sql2 = "select * from ( select * from ( select " - sql2 += "%s, " % random.choice(self.s_r_select) - sql2 += "%s, " % random.choice(self.q_select) + sql2 += "%s, " % random.choice(self.s_r_select) + sql2 += "%s, " % random.choice(self.q_select) sql2 += "ts from regular_table_1 where " sql2 += "%s " % random.choice(self.q_where) - sql2 += ")) " - tdLog.info(sql2) - tdLog.info(len(sql2)) - tdSql.query(sql2) + sql2 += ")) " + tdLog.info(sql2) + tdLog.info(len(sql2)) + tdSql.query(sql2) self.cur1.execute(sql2) - + self.data_matrix_equal('%s' %sql ,1,10,1,1,'%s' %sql2 ,1,10,1,1) self.data_matrix_equal('%s' %sql ,1,10,1,1,'%s' %sql ,1,10,3,3) self.data_matrix_equal('%s' %sql ,1,10,3,3,'%s' %sql2 ,1,10,3,3) - + for i in range(self.fornum): for_num = random.randint(1, 15); - sql = "select ts from (" * for_num + sql = "select ts from (" * for_num sql += "select * from ( select * from ( select " - sql += "%s, " % 
random.choice(self.s_r_select) - sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) sql += "ts from regular_table_1 where " sql += "%s " % random.choice(self.q_where) sql += ")) " - sql += ")" * for_num - tdLog.info(sql) - tdLog.info(len(sql)) - tdSql.query(sql) + sql += ")" * for_num + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) self.cur1.execute(sql) - + sql2 = "select * from ( select * from ( select " - sql2 += "%s, " % random.choice(self.s_r_select) - sql2 += "%s, " % random.choice(self.q_select) + sql2 += "%s, " % random.choice(self.s_r_select) + sql2 += "%s, " % random.choice(self.q_select) sql2 += "ts from regular_table_1 where " sql2 += "%s " % random.choice(self.q_where) - sql2 += ")) " - tdLog.info(sql2) - tdLog.info(len(sql2)) + sql2 += ")) " + tdLog.info(sql2) + tdLog.info(len(sql2)) tdSql.query(sql2) - self.cur1.execute(sql2) - + self.cur1.execute(sql2) + self.data_matrix_equal('%s' %sql ,1,10,1,1,'%s' %sql2 ,1,10,1,1) - + #2 select * from (select * from (select * form stable where <\>\in\and\or order by limit )) tdSql.query("select 2-1 from stable_1;") for i in range(self.fornum): for_num = random.randint(1, 15); - sql = "select * from (" * for_num + sql = "select * from (" * for_num sql += "select * from ( select * from ( select " - sql += "%s, " % random.choice(self.s_s_select) - sql += "%s, " % random.choice(self.qt_select) + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.qt_select) sql += "ts from stable_1 where " sql += "%s " % random.choice(self.q_where) sql += ")) " - sql += ")" * for_num - tdLog.info(sql) - tdLog.info(len(sql)) - tdSql.query(sql) + sql += ")" * for_num + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) self.cur1.execute(sql) - + sql2 = "select * from ( select * from ( select " - sql2 += "%s, " % random.choice(self.s_s_select) - sql2 += "%s, " % random.choice(self.qt_select) + sql2 += "%s, " % random.choice(self.s_s_select) + sql2 += "%s, " % random.choice(self.qt_select) sql2 += "ts from stable_1 where " sql2 += "%s " % random.choice(self.q_where) - sql2 += ")) " - tdLog.info(sql2) - tdLog.info(len(sql2)) - tdSql.query(sql2) + sql2 += ")) " + tdLog.info(sql2) + tdLog.info(len(sql2)) + tdSql.query(sql2) self.cur1.execute(sql2) - + self.data_matrix_equal('%s' %sql ,1,10,3,3,'%s' %sql2 ,1,10,3,3) - + for i in range(self.fornum): for_num = random.randint(1, 15); - sql = "select ts from (" * for_num + sql = "select ts from (" * for_num sql += "select * from ( select * from ( select " - sql += "%s, " % random.choice(self.s_s_select) - sql += "%s, " % random.choice(self.qt_select) + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.qt_select) sql += "ts from stable_1 where " sql += "%s " % random.choice(self.q_where) sql += ")) " - sql += ")" * for_num - tdLog.info(sql) - tdLog.info(len(sql)) - tdSql.query(sql) + sql += ")" * for_num + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) self.cur1.execute(sql) - + sql2 = "select ts from ( select * from ( select " - sql2 += "%s, " % random.choice(self.s_s_select) - sql2 += "%s, " % random.choice(self.qt_select) + sql2 += "%s, " % random.choice(self.s_s_select) + sql2 += "%s, " % random.choice(self.qt_select) sql2 += "ts from stable_1 where " sql2 += "%s " % random.choice(self.q_where) - sql2 += ")) " - tdLog.info(sql2) - tdLog.info(len(sql2)) - tdSql.query(sql2) + sql2 += ")) " + tdLog.info(sql2) + tdLog.info(len(sql2)) + 
tdSql.query(sql2) self.cur1.execute(sql2) - + self.data_matrix_equal('%s' %sql ,1,10,1,1,'%s' %sql2 ,1,10,1,1) - - #3 select ts ,calc from (select * form stable where <\>\in\and\or order by limit ) + + #3 select ts ,calc from (select * form stable where <\>\in\and\or order by limit ) #self.dropandcreateDB_random("%s" %db, 1) tdSql.query("select 3-1 from stable_1;") for i in range(self.fornum): sql = "select " - sql += "%s " % random.choice(self.calc_calculate_regular) + sql += "%s " % random.choice(self.calc_calculate_regular) sql += " from ( select * from stable_1 where " sql += "%s " % random.choice(self.qt_where) sql += "%s " % random.choice(self.orders_desc_where) sql += "%s " % random.choice(self.limit_where) sql += ") ;" - tdLog.info(sql) - tdLog.info(len(sql)) - #'Invalid function name: derivative' tdSql.query(sql) - #self.cur1.execute(sql) + tdLog.info(sql) + tdLog.info(len(sql)) + #'Invalid function name: derivative' tdSql.query(sql) + #self.cur1.execute(sql) #4 select * from (select calc form stable where <\>\in\and\or order by limit ) tdSql.query("select 4-1 from stable_1;") for i in range(self.fornum): sql = "select * from ( select " - sql += "%s " % random.choice(self.calc_select_in_ts) + sql += "%s " % random.choice(self.calc_select_in_ts) sql += "from stable_1 where " sql += "%s " % random.choice(self.qt_where) #sql += "%s " % random.choice(self.order_desc_where) sql += "%s " % random.choice(self.limit_where) sql += ") ;" - tdLog.info(sql) - tdLog.info(len(sql)) + tdLog.info(sql) + tdLog.info(len(sql)) tdSql.query(sql) self.cur1.execute(sql) - + #5 select ts ,tbname from (select * form stable where <\>\in\and\or order by limit ) tdSql.query("select 5-1 from stable_1;") for i in range(self.fornum): sql = "select ts , tbname , " - sql += "%s ," % random.choice(self.calc_calculate_regular) + sql += "%s ," % random.choice(self.calc_calculate_regular) sql += "%s ," % random.choice(self.dqt_select) sql += "%s " % random.choice(self.qt_select) sql += " from ( select * from stable_1 where " @@ -5652,9 +5652,9 @@ class TDTestCase: sql += "%s " % random.choice(self.orders_desc_where) sql += "%s " % random.choice(self.limit_where) sql += ") ;" - tdLog.info(sql) - tdLog.info(len(sql)) - tdSql.error(sql) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.error(sql) #special sql tdSql.query("select 6-1 from stable_1;") @@ -5678,7 +5678,7 @@ class TDTestCase: tdSql.error(sql) sql = "select * from (select server_status() as status);" tdSql.error(sql) - + #taos -f sql startTime_taos_f = time.time() print("taos -f sql start!") @@ -5690,36 +5690,36 @@ class TDTestCase: print('=====================2.6 old function end ===========') - - + + def run(self): tdSql.prepare() - - startTime = time.time() - - # - - + + startTime = time.time() + + # + + #self.math_nest(['TAIL']) #TD-16009 # self.math_nest(['HYPERLOGLOG']) #TD-16038 # self.math_nest(['UNIQUE']) - + # # #self.function_before_26() #TD-16031 - - # self.math_nest(['ABS','SQRT']) #TD-16042 - # self.math_nest(['SIN','COS','TAN','ASIN','ACOS','ATAN']) + + # self.math_nest(['ABS','SQRT']) #TD-16042 + # self.math_nest(['SIN','COS','TAN','ASIN','ACOS','ATAN']) # self.math_nest(['POW','LOG']) #TD-16039 - # self.math_nest(['FLOOR','CEIL','ROUND']) - # #self.math_nest(['SAMPLE']) #TD-16017 + # self.math_nest(['FLOOR','CEIL','ROUND']) + # #self.math_nest(['SAMPLE']) #TD-16017 # #self.math_nest(['CSUM']) #TD-15936 crash - # self.math_nest(['MAVG']) - - self.str_nest(['LTRIM','RTRIM','LOWER','UPPER']) - self.str_nest(['LENGTH','CHAR_LENGTH']) - 
self.str_nest(['SUBSTR']) #TD-16042 + # self.math_nest(['MAVG']) + + self.str_nest(['LTRIM','RTRIM','LOWER','UPPER']) + self.str_nest(['LENGTH','CHAR_LENGTH']) + self.str_nest(['SUBSTR']) #TD-16042 self.str_nest(['CONCAT']) #TD-16002 偶尔 self.str_nest(['CONCAT_WS']) #TD-16002 偶尔 # self.time_nest(['CAST']) #TD-16017偶尔,放到time里起来弄 @@ -5727,21 +5727,21 @@ class TDTestCase: self.time_nest(['CAST_2']) self.time_nest(['CAST_3']) self.time_nest(['CAST_4']) - - - + + + # self.time_nest(['NOW','TODAY']) # # self.time_nest(['TIMEZONE']) # # self.time_nest(['TIMETRUNCATE']) #TD-16039 # self.time_nest(['TO_ISO8601']) # self.time_nest(['TO_UNIXTIMESTAMP'])#core多 # self.time_nest(['ELAPSED']) - + endTime = time.time() print("total time %ds" % (endTime - startTime)) - + def stop(self): From 51399762c29659a533bb529a6c473f5571e51a77 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Wed, 13 Jul 2022 14:41:50 +0800 Subject: [PATCH 24/56] fix test cases --- tests/system-test/2-query/To_unixtimestamp.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/tests/system-test/2-query/To_unixtimestamp.py b/tests/system-test/2-query/To_unixtimestamp.py index 8dc995b178..60d5cc7b72 100644 --- a/tests/system-test/2-query/To_unixtimestamp.py +++ b/tests/system-test/2-query/To_unixtimestamp.py @@ -13,11 +13,11 @@ class TDTestCase: tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor()) # name of normal table - self.ntbname = 'ntb' + self.ntbname = 'ntb' # name of stable - self.stbname = 'stb' + self.stbname = 'stb' # structure of column - self.column_dict = { + self.column_dict = { 'ts':'timestamp', 'c1':'int', 'c2':'float', @@ -25,13 +25,13 @@ class TDTestCase: 'c4':'nchar(20)' } # structure of tag - self.tag_dict = { + self.tag_dict = { 't0':'int' } # number of child tables - self.tbnum = 2 + self.tbnum = 2 # values of tag,the number of values should equal to tbnum - self.tag_values = [ + self.tag_values = [ f'10', f'100' ] @@ -42,7 +42,7 @@ class TDTestCase: ] self.error_param = [1,'now()'] - + def run(self): # sourcery skip: extract-duplicate-method tdSql.prepare() tdLog.printNoPrefix("==========step1:create tables==========") @@ -93,11 +93,11 @@ class TDTestCase: tdSql.query("select ts from ntb where to_unixtimestamp('1970-01-01T08:00:00+08:00')=0") tdSql.checkRows(3) - + def stop(self): tdSql.close() tdLog.success(f"{__file__} successfully executed") tdCases.addLinux(__file__, TDTestCase()) -tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file +tdCases.addWindows(__file__, TDTestCase()) From 7fbef9cbbe17c920fecc02117fb5578733a09b6c Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Wed, 13 Jul 2022 14:41:50 +0800 Subject: [PATCH 25/56] fix test cases --- tests/system-test/2-query/unique.py | 92 ++++++++++++++--------------- 1 file changed, 46 insertions(+), 46 deletions(-) diff --git a/tests/system-test/2-query/unique.py b/tests/system-test/2-query/unique.py index 4467dcb471..fce0d21f4d 100644 --- a/tests/system-test/2-query/unique.py +++ b/tests/system-test/2-query/unique.py @@ -11,14 +11,14 @@ from util.sql import * from util.cases import * class TDTestCase: - updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , + updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143, 
"wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143} def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor()) - + def prepare_datas(self): tdSql.execute( '''create table stb1 @@ -26,7 +26,7 @@ class TDTestCase: tags (t1 int) ''' ) - + tdSql.execute( ''' create table t1 @@ -68,7 +68,7 @@ class TDTestCase: ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ''' ) - + def test_errors(self): error_sql_lists = [ "select unique from t1", @@ -119,40 +119,40 @@ class TDTestCase: "select unique(c1) , diff(c1) from stb1 partition by tbname", #"select unique(c1) , abs(c1) from stb1 partition by tbname", # support #"select unique(c1) , c1 from stb1 partition by tbname" # support - + ] for error_sql in error_sql_lists: tdSql.error(error_sql) pass - + def support_types(self): other_no_value_types = [ - "select unique(ts) from t1" , + "select unique(ts) from t1" , "select unique(c7) from t1", "select unique(c8) from t1", "select unique(c9) from t1", - "select unique(ts) from ct1" , + "select unique(ts) from ct1" , "select unique(c7) from ct1", "select unique(c8) from ct1", "select unique(c9) from ct1", - "select unique(ts) from ct3" , + "select unique(ts) from ct3" , "select unique(c7) from ct3", "select unique(c8) from ct3", "select unique(c9) from ct3", - "select unique(ts) from ct4" , + "select unique(ts) from ct4" , "select unique(c7) from ct4", "select unique(c8) from ct4", "select unique(c9) from ct4", - "select unique(ts) from stb1 partition by tbname" , + "select unique(ts) from stb1 partition by tbname" , "select unique(c7) from stb1 partition by tbname", "select unique(c8) from stb1 partition by tbname", - "select unique(c9) from stb1 partition by tbname" + "select unique(c9) from stb1 partition by tbname" ] - + for type_sql in other_no_value_types: tdSql.query(type_sql) tdLog.info("support type ok , sql is : %s"%type_sql) - + type_sql_lists = [ "select unique(c1) from t1", "select unique(c2) from t1", @@ -182,8 +182,8 @@ class TDTestCase: "select unique(c5) from stb1 partition by tbname", "select unique(c6) from stb1 partition by tbname", - "select unique(c6) as alisb from stb1 partition by tbname", - "select unique(c6) alisb from stb1 partition by tbname", + "select unique(c6) as alisb from stb1 partition by tbname", + "select unique(c6) alisb from stb1 partition by tbname", ] for type_sql in type_sql_lists: @@ -194,18 +194,18 @@ class TDTestCase: origin_sql = unique_sql.replace("unique(","").replace(")","") tdSql.query(unique_sql) unique_result = tdSql.queryResult - + unique_datas = [] for elem in unique_result: unique_datas.append(elem[0]) unique_datas.sort(key=lambda x: (x is None, x)) - + tdSql.query(origin_sql) origin_result = tdSql.queryResult origin_datas = [] for elem in origin_result: origin_datas.append(elem[0]) - + pre_unique = [] for elem in origin_datas: if elem in pre_unique: @@ -221,7 +221,7 @@ class TDTestCase: def basic_unique_function(self): - # basic query + # basic query tdSql.query("select c1 from ct3") tdSql.checkRows(0) tdSql.query("select c1 from t1") @@ -242,19 +242,19 @@ class TDTestCase: tdSql.checkRows(0) tdSql.query("select unique(c6) from ct3") - # will support _rowts mix with + # will support _rowts mix with # tdSql.query("select unique(c6),_rowts from ct3") - + # auto check for t1 table # used for regular table tdSql.query("select unique(c1) from t1") - + tdSql.query("desc t1") col_lists_rows = tdSql.queryResult col_lists = 
[] for col_name in col_lists_rows: col_lists.append(col_name[0]) - + for col in col_lists: self.check_unique_table(f"select unique({col}) from t1") @@ -269,17 +269,17 @@ class TDTestCase: #tdSql.error("select unique(c1),tbname from ct1") #support #tdSql.error("select unique(c1),t1 from ct1") #support - # unique with common col + # unique with common col #tdSql.error("select unique(c1) ,ts from ct1") #tdSql.error("select unique(c1) ,c1 from ct1") - # unique with scalar function + # unique with scalar function #tdSql.error("select unique(c1) ,abs(c1) from ct1") tdSql.error("select unique(c1) , unique(c2) from ct1") #tdSql.error("select unique(c1) , abs(c2)+2 from ct1") - - # unique with aggregate function + + # unique with aggregate function tdSql.error("select unique(c1) ,sum(c1) from ct1") tdSql.error("select unique(c1) ,max(c1) from ct1") tdSql.error("select unique(c1) ,csum(c1) from ct1") @@ -306,7 +306,7 @@ class TDTestCase: tdSql.checkData(7, 0, 1) tdSql.checkData(8, 0, 0) - # unique with union all + # unique with union all tdSql.query("select unique(c1) from ct4 union all select c1 from ct1") tdSql.checkRows(23) tdSql.query("select unique(c1) from ct4 union all select distinct(c1) from ct4") @@ -314,8 +314,8 @@ class TDTestCase: tdSql.query("select unique(c2) from ct4 union all select abs(c2)/2 from ct4") tdSql.checkRows(22) - # unique with join - # prepare join datas with same ts + # unique with join + # prepare join datas with same ts tdSql.execute(" use db ") tdSql.execute(" create stable st1 (ts timestamp , num int) tags(ind int)") @@ -371,7 +371,7 @@ class TDTestCase: tdSql.checkRows(10) tdSql.checkData(0, 0, None) tdSql.checkData(1, 0, -7.000000000) - + # bug for stable #partition by tbname @@ -380,8 +380,8 @@ class TDTestCase: # tdSql.query(" select unique(c1) from stb1 partition by tbname ") # tdSql.checkRows(21) - - # group by + + # group by tdSql.error("select unique(c1) from ct1 group by c1") tdSql.error("select unique(c1) from ct1 group by tbname") @@ -393,7 +393,7 @@ class TDTestCase: tdSql.checkRows(4) - # bug need fix + # bug need fix # tdSql.query("select tbname , tail(c1,2) from stb1 partition by tbname") # tdSql.checkRows(4) @@ -411,7 +411,7 @@ class TDTestCase: tdSql.checkRows(4) - # # bug need fix + # # bug need fix # tdSql.query(" select tbname , unique(c1) from stb1 where t1 = 0 partition by tbname ") # tdSql.checkRows(2) # tdSql.query(" select tbname , unique(c1) from stb1 where t1 = 0 partition by tbname order by tbname ") @@ -430,7 +430,7 @@ class TDTestCase: tdSql.query(" select unique(t1) from stb1 partition by tbname ") tdSql.checkRows(2) - # nest query + # nest query tdSql.query(" select unique(c1) from (select _rowts , t1 ,c1 , tbname from stb1 ) ") tdSql.checkRows(11) tdSql.checkData(0,0,6) @@ -439,7 +439,7 @@ class TDTestCase: tdSql.checkRows(2) tdSql.checkData(0,0,4) tdSql.checkData(1,0,1) - + def check_boundary_values(self): tdSql.execute("drop database if exists bound_test") @@ -467,11 +467,11 @@ class TDTestCase: tdSql.execute( f"insert into sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) - + tdSql.error( f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) - + tdSql.query("select unique(c2) from sub1_bound order by 1 desc") tdSql.checkRows(5) tdSql.checkData(0,0,9223372036854775807) @@ -480,22 +480,22 @@ class TDTestCase: tdSql.prepare() 
tdLog.printNoPrefix("==========step1:create table ==============") - + self.prepare_datas() - tdLog.printNoPrefix("==========step2:test errors ==============") + tdLog.printNoPrefix("==========step2:test errors ==============") self.test_errors() - - tdLog.printNoPrefix("==========step3:support types ============") + + tdLog.printNoPrefix("==========step3:support types ============") self.support_types() - tdLog.printNoPrefix("==========step4: floor basic query ============") + tdLog.printNoPrefix("==========step4: floor basic query ============") self.basic_unique_function() - tdLog.printNoPrefix("==========step5: floor boundary query ============") + tdLog.printNoPrefix("==========step5: floor boundary query ============") self.check_boundary_values() From 79bbda564ef6e6a26a35457c75fde5c80aa54627 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Wed, 13 Jul 2022 14:55:02 +0800 Subject: [PATCH 26/56] test: comment out unstable case --- tests/system-test/fulltest.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh index 269f83a139..ca4850e433 100755 --- a/tests/system-test/fulltest.sh +++ b/tests/system-test/fulltest.sh @@ -148,7 +148,7 @@ python3 ./test.py -f 7-tmq/subscribeDb2.py python3 ./test.py -f 7-tmq/subscribeDb3.py #python3 ./test.py -f 7-tmq/subscribeDb4.py python3 ./test.py -f 7-tmq/subscribeStb.py -python3 ./test.py -f 7-tmq/subscribeStb0.py +#python3 ./test.py -f 7-tmq/subscribeStb0.py python3 ./test.py -f 7-tmq/subscribeStb1.py python3 ./test.py -f 7-tmq/subscribeStb2.py python3 ./test.py -f 7-tmq/subscribeStb3.py From 85d2e6f1be2ee64c48c21078dad0df9bf9f2326b Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Wed, 13 Jul 2022 15:02:44 +0800 Subject: [PATCH 27/56] test: restore some 2.0 case --- tests/script/general/cache/testSuite.sim | 3 --- tests/script/jenkins/basic.txt | 5 +++++ .../{general => tsim}/cache/new_metrics.sim | 16 ---------------- .../{general => tsim}/cache/restart_metrics.sim | 9 +-------- .../{general => tsim}/cache/restart_table.sim | 9 +-------- 5 files changed, 7 insertions(+), 35 deletions(-) delete mode 100644 tests/script/general/cache/testSuite.sim rename tests/script/{general => tsim}/cache/new_metrics.sim (94%) rename tests/script/{general => tsim}/cache/restart_metrics.sim (93%) rename tests/script/{general => tsim}/cache/restart_table.sim (90%) diff --git a/tests/script/general/cache/testSuite.sim b/tests/script/general/cache/testSuite.sim deleted file mode 100644 index f09ece89b6..0000000000 --- a/tests/script/general/cache/testSuite.sim +++ /dev/null @@ -1,3 +0,0 @@ -run general/cache/new_metrics.sim -run general/cache/restart_table.sim -run general/cache/restart_metrics.sim diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt index 13a28aa527..34e1ef3bdc 100644 --- a/tests/script/jenkins/basic.txt +++ b/tests/script/jenkins/basic.txt @@ -12,6 +12,11 @@ ./test.sh -f tsim/user/privilege_db.sim ./test.sh -f tsim/user/privilege_sysinfo.sim +# ---- cache +./test.sh -f tsim/cache/new_metrics.sim +./test.sh -f tsim/cache/restart_table.sim +./test.sh -f tsim/cache/restart_metrics.sim + ## ---- db ./test.sh -f tsim/db/alter_option.sim # ./test.sh -f tsim/db/alter_replica_13.sim diff --git a/tests/script/general/cache/new_metrics.sim b/tests/script/tsim/cache/new_metrics.sim similarity index 94% rename from tests/script/general/cache/new_metrics.sim rename to tests/script/tsim/cache/new_metrics.sim index eb9b042483..af7db90070 100644 --- 
a/tests/script/general/cache/new_metrics.sim +++ b/tests/script/tsim/cache/new_metrics.sim @@ -1,11 +1,6 @@ system sh/stop_dnodes.sh - - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect $i = 0 @@ -38,7 +33,6 @@ while $i < 5 endw print =============== step2 - sql select * from $tb order by ts desc print ===>rows $rows, data $data01 if $rows != 20 then @@ -67,7 +61,6 @@ if $data00 != 100 then endi print =============== step3 - sql show stables if $rows != 1 then return -1 @@ -75,12 +68,8 @@ endi if $data00 != $mt then return -1 endi -if $data04 != 5 then - return -1 -endi print =============== step4 - while $i < 10 $tb = $tbPrefix . $i sql create table $tb using $mt tags( 1 ) @@ -99,7 +88,6 @@ sql reset query cache sleep 1000 print =============== step5 - sql select * from $tb order by ts desc print ===>rows $rows, data $data01 if $rows != 20 then @@ -128,7 +116,6 @@ if $data00 != 200 then endi print =============== step6 - sql show stables if $rows != 1 then return -1 @@ -136,8 +123,5 @@ endi if $data00 != $mt then return -1 endi -if $data04 != 10 then - return -1 -endi system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/general/cache/restart_metrics.sim b/tests/script/tsim/cache/restart_metrics.sim similarity index 93% rename from tests/script/general/cache/restart_metrics.sim rename to tests/script/tsim/cache/restart_metrics.sim index a1b2365b2a..e144a49bf7 100644 --- a/tests/script/general/cache/restart_metrics.sim +++ b/tests/script/tsim/cache/restart_metrics.sim @@ -1,14 +1,9 @@ system sh/stop_dnodes.sh - - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect -print ======================== dnode1 start +print ======================== dnode1 start $i = 0 $dbPrefix = ca_rm_db $tbPrefix = ca_rm_tb @@ -49,9 +44,7 @@ endi print =============== step2 system sh/exec.sh -n dnode1 -s stop -sleep 3000 system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/exec.sh -n dnode1 -s start print =============== step3 diff --git a/tests/script/general/cache/restart_table.sim b/tests/script/tsim/cache/restart_table.sim similarity index 90% rename from tests/script/general/cache/restart_table.sim rename to tests/script/tsim/cache/restart_table.sim index 1f7d982a28..b450f6c654 100644 --- a/tests/script/general/cache/restart_table.sim +++ b/tests/script/tsim/cache/restart_table.sim @@ -1,14 +1,9 @@ system sh/stop_dnodes.sh - - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect -print ======================== dnode1 start +print ======================== dnode1 start $i = 0 $dbPrefix = ca_rt_db $tbPrefix = ca_rt_tb @@ -33,9 +28,7 @@ endi print =============== step2 system sh/exec.sh -n dnode1 -s stop -sleep 3000 system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/exec.sh -n dnode1 -s start print =============== step3 From 56b1d11beb1f36188681af3c2ea92b07388102fa Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 13 Jul 2022 15:06:27 +0800 Subject: [PATCH 28/56] fix(query): prepare the output buffer before assign daata. 
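
The gist of this change, as far as the diff below shows, is that setBlockIntoRes must actually grow the output block's buffers before row data is written into it, rather than only stamping info.capacity. A minimal sketch of the pattern, illustrative only and using the names visible in the diff (not the full function body):

    /* Sketch of the intended order of operations in setBlockIntoRes(). */
    static int32_t setBlockIntoResSketch(SStreamScanInfo* pInfo, const SSDataBlock* pBlock) {
      /* Ensure the destination block can hold pBlock->info.rows rows first;
       * previously only info.capacity was assigned, so subsequent column
       * writes could run past the allocated buffers. */
      blockDataEnsureCapacity(pInfo->pRes, pBlock->info.rows);

      pInfo->pRes->info.rows = pBlock->info.rows;
      pInfo->pRes->info.uid  = pBlock->info.uid;
      pInfo->pRes->info.type = STREAM_NORMAL;

      /* ... column data and group id are copied here as in the real code ... */
      return 0; /* TSDB_CODE_SUCCESS in the real implementation */
    }
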
--- source/libs/executor/src/executorimpl.c | 2 +- source/libs/executor/src/scanoperator.c | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index 6f4a6b805d..760d7e55c8 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -4334,6 +4334,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo pTaskInfo->code = code; return NULL; } + code = extractTableSchemaInfo(pHandle, pTableScanNode->scan.uid, pTaskInfo); if (code) { pTaskInfo->code = terrno; @@ -4349,7 +4350,6 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo } else if (QUERY_NODE_PHYSICAL_PLAN_EXCHANGE == type) { return createExchangeOperatorInfo(pHandle->pMsgCb->clientRpc, (SExchangePhysiNode*)pPhyNode, pTaskInfo); - } else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN == type) { STableScanPhysiNode* pTableScanNode = (STableScanPhysiNode*)pPhyNode; STimeWindowAggSupp twSup = { diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 66703502eb..c7112ab8a6 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -1153,10 +1153,11 @@ static int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock SOperatorInfo* pOperator = pInfo->pStreamScanOp; SExecTaskInfo* pTaskInfo = pInfo->pStreamScanOp->pTaskInfo; + blockDataEnsureCapacity(pInfo->pRes, pBlock->info.rows); + pInfo->pRes->info.rows = pBlock->info.rows; pInfo->pRes->info.uid = pBlock->info.uid; pInfo->pRes->info.type = STREAM_NORMAL; - pInfo->pRes->info.capacity = pBlock->info.rows; uint64_t* groupIdPre = taosHashGet(pOperator->pTaskInfo->tableqinfoList.map, &pBlock->info.uid, sizeof(int64_t)); if (groupIdPre) { @@ -2415,6 +2416,7 @@ int32_t createScanTableListInfo(STableScanPhysiNode* pTableScanNode, SReadHandle qDebug("no table qualified for query, TID:0x%" PRIx64 ", QID:0x%" PRIx64, taskId, queryId); return TSDB_CODE_SUCCESS; } + pTableListInfo->needSortTableByGroupId = pTableScanNode->groupSort; code = generateGroupIdMap(pTableListInfo, pHandle, pTableScanNode->pGroupTags); if (code != TSDB_CODE_SUCCESS) { From 161880224847ba0c71fc043243a0680e1419e833 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Wed, 13 Jul 2022 15:07:26 +0800 Subject: [PATCH 29/56] test: restore some 2.0 case --- tests/script/general/column/testSuite.sim | 3 --- tests/script/jenkins/basic.txt | 7 ++++++- tests/script/{general => tsim}/column/commit.sim | 10 ---------- tests/script/{general => tsim}/column/metrics.sim | 10 ---------- tests/script/{general => tsim}/column/table.sim | 9 --------- 5 files changed, 6 insertions(+), 33 deletions(-) delete mode 100644 tests/script/general/column/testSuite.sim rename tests/script/{general => tsim}/column/commit.sim (99%) rename tests/script/{general => tsim}/column/metrics.sim (99%) rename tests/script/{general => tsim}/column/table.sim (99%) diff --git a/tests/script/general/column/testSuite.sim b/tests/script/general/column/testSuite.sim deleted file mode 100644 index f60d197e95..0000000000 --- a/tests/script/general/column/testSuite.sim +++ /dev/null @@ -1,3 +0,0 @@ -run general/column/commit.sim -run general/column/metrics.sim -run general/column/table.sim diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt index 34e1ef3bdc..afe5527380 100644 --- a/tests/script/jenkins/basic.txt +++ b/tests/script/jenkins/basic.txt @@ 
-17,7 +17,12 @@ ./test.sh -f tsim/cache/restart_table.sim ./test.sh -f tsim/cache/restart_metrics.sim -## ---- db +# ---- column +./test.sh -f tsim/column/commit.sim +./test.sh -f tsim/column/metrics.sim +./test.sh -f tsim/column/table.sim + +# ---- db ./test.sh -f tsim/db/alter_option.sim # ./test.sh -f tsim/db/alter_replica_13.sim # ./test.sh -f tsim/db/alter_replica_31.sim diff --git a/tests/script/general/column/commit.sim b/tests/script/tsim/column/commit.sim similarity index 99% rename from tests/script/general/column/commit.sim rename to tests/script/tsim/column/commit.sim index 008bec3bf9..43aebb4902 100644 --- a/tests/script/general/column/commit.sim +++ b/tests/script/tsim/column/commit.sim @@ -1,11 +1,6 @@ system sh/stop_dnodes.sh - - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect print =============== step1 @@ -25,7 +20,6 @@ if $rows != 1 then endi print =============== step2 - sql insert into d3.t1 values (now -300d,0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ); sql insert into d3.t1 values (now-200d,1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ); sql insert into d3.t1 values (now-150d,2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 
2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 ); @@ -38,7 +32,6 @@ sql insert into d3.t1 values (now,8 , 8 , 8 , 8 , 8 , 8 , 8 , 8 , 8 , 8 , 8 , 8 sql insert into d3.t1 values (now+1d,9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 ); print =============== step3 - sql select * from d3.mt if $rows != 10 then return -1 @@ -89,12 +82,9 @@ endi print =============== step4 system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 3000 system sh/exec.sh -n dnode1 -s start -sleep 3000 print =============== step5 - sql select * from d3.mt if $rows != 10 then return -1 diff --git a/tests/script/general/column/metrics.sim b/tests/script/tsim/column/metrics.sim similarity index 99% rename from tests/script/general/column/metrics.sim rename to tests/script/tsim/column/metrics.sim index 580e2320cd..a492f5a2f9 100644 --- a/tests/script/general/column/metrics.sim +++ b/tests/script/tsim/column/metrics.sim @@ -1,11 +1,6 @@ system sh/stop_dnodes.sh - - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect print =============== step1 @@ -26,7 +21,6 @@ if $rows != 1 then endi print =============== step2 - sql insert into d2.t1 values (now,0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 
, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ) sql insert into d2.t1 values (now+1m,1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ) sql insert into d2.t1 values (now+2m,2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 ) @@ -58,7 +52,6 @@ sql insert into d2.t2 values (now+8m,8 , 8 , 8 , 8 , 8 , 8 , 8 , 8 , 8 , 8 , 8 , sql insert into d2.t2 values (now+9m,9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 ) print =============== step3 - sql select * from d2.mt if $rows != 20 then return -1 @@ -157,12 +150,9 @@ endi print =============== step4 system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 3000 system sh/exec.sh -n dnode1 -s start -sleep 
3000 print =============== step5 - sql select * from d2.mt if $rows != 20 then return -1 diff --git a/tests/script/general/column/table.sim b/tests/script/tsim/column/table.sim similarity index 99% rename from tests/script/general/column/table.sim rename to tests/script/tsim/column/table.sim index 46d5de1e82..07948ebce3 100644 --- a/tests/script/general/column/table.sim +++ b/tests/script/tsim/column/table.sim @@ -1,11 +1,6 @@ system sh/stop_dnodes.sh - - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect print =============== step1 @@ -19,7 +14,6 @@ if $rows != 1 then endi print =============== step2 - sql insert into d1.t1 values (now,0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ) sql insert into d1.t1 values (now+1m,1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ) sql insert into d1.t1 values (now+2m,2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 
2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 ) @@ -36,7 +30,6 @@ sql insert into d1.t1 values (now+8m,8 , 8 , 8 , 8 , 8 , 8 , 8 , 8 , 8 , 8 , 8 , sql insert into d1.t1 values (now+9m,9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 ) print ======= step3 - sql select * from d1.t1 print select * from d1.t1 => rows $rows if $rows != 10 then @@ -129,9 +122,7 @@ endi print =============== step4 system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 3000 system sh/exec.sh -n dnode1 -s start -sleep 3000 print ============== step5 From 0030888d1d9764e4224e0e4274bf0ab340d4e578 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Wed, 13 Jul 2022 15:13:48 +0800 Subject: [PATCH 30/56] test: restore some 2.0 case --- tests/script/general/compress/testSuite.sim | 4 ---- tests/script/jenkins/basic.txt | 6 ++++++ tests/script/{general => tsim}/compress/commitlog.sim | 10 +--------- tests/script/{general => tsim}/compress/compress.sim | 9 --------- tests/script/{general => tsim}/compress/compress2.sim | 8 -------- tests/script/{general => tsim}/compress/uncompress.sim | 10 +--------- 6 files changed, 8 insertions(+), 39 deletions(-) delete mode 100644 tests/script/general/compress/testSuite.sim rename tests/script/{general => tsim}/compress/commitlog.sim (95%) rename tests/script/{general => tsim}/compress/compress.sim (94%) rename tests/script/{general => tsim}/compress/compress2.sim (94%) rename tests/script/{general => tsim}/compress/uncompress.sim (94%) diff --git a/tests/script/general/compress/testSuite.sim b/tests/script/general/compress/testSuite.sim deleted file mode 100644 index 3573985c8a..0000000000 --- a/tests/script/general/compress/testSuite.sim +++ /dev/null @@ -1,4 +0,0 @@ -run general/compress/commitlog.sim -run general/compress/compress2.sim -run general/compress/compress.sim -run general/compress/uncompress.sim diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt index afe5527380..b24ca5f85e 100644 --- a/tests/script/jenkins/basic.txt +++ b/tests/script/jenkins/basic.txt @@ -22,6 +22,12 @@ ./test.sh -f tsim/column/metrics.sim ./test.sh -f tsim/column/table.sim +# ---- compress +./test.sh -f tsim/compress/commitlog.sim +./test.sh -f tsim/compress/compress2.sim +./test.sh -f tsim/compress/compress.sim +./test.sh -f tsim/compress/uncompress.sim + # ---- db ./test.sh -f tsim/db/alter_option.sim # ./test.sh -f 
tsim/db/alter_replica_13.sim diff --git a/tests/script/general/compress/commitlog.sim b/tests/script/tsim/compress/commitlog.sim similarity index 95% rename from tests/script/general/compress/commitlog.sim rename to tests/script/tsim/compress/commitlog.sim index e8eab6ed0c..d90780bd6c 100644 --- a/tests/script/general/compress/commitlog.sim +++ b/tests/script/tsim/compress/commitlog.sim @@ -1,14 +1,9 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode1 -c comp -v 1 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect -print ============================ dnode1 start +print ============================ dnode1 start $i = 0 $dbPrefix = db $tbPrefix = tb @@ -18,7 +13,6 @@ $tb = $tbPrefix . $i $N = 2000 print =============== step1 - sql create database $db sql use $db sql create table $tb (ts timestamp, b bool, t tinyint, s smallint, i int, big bigint, str binary(256)) @@ -87,9 +81,7 @@ endi print =============== step4 system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 3000 system sh/exec.sh -n dnode1 -s start -sleep 3000 print =============== step5 diff --git a/tests/script/general/compress/compress.sim b/tests/script/tsim/compress/compress.sim similarity index 94% rename from tests/script/general/compress/compress.sim rename to tests/script/tsim/compress/compress.sim index cecfe61229..766f97450c 100644 --- a/tests/script/general/compress/compress.sim +++ b/tests/script/tsim/compress/compress.sim @@ -1,15 +1,9 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode1 -c comp -v 1 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect print ============================ dnode1 start - $i = 0 $dbPrefix = db $tbPrefix = tb @@ -19,7 +13,6 @@ $tb = $tbPrefix . 
$i $N = 2000 print =============== step1 - sql create database $db sql use $db sql create table $tb (ts timestamp, b bool, t tinyint, s smallint, i int, big bigint, str binary(256)) @@ -82,9 +75,7 @@ endi print =============== step4 system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 3000 system sh/exec.sh -n dnode1 -s start -sleep 3000 print =============== step5 diff --git a/tests/script/general/compress/compress2.sim b/tests/script/tsim/compress/compress2.sim similarity index 94% rename from tests/script/general/compress/compress2.sim rename to tests/script/tsim/compress/compress2.sim index 1e6868eaa6..87e50cce5b 100644 --- a/tests/script/general/compress/compress2.sim +++ b/tests/script/tsim/compress/compress2.sim @@ -1,15 +1,9 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode1 -c comp -v 2 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect print ============================ dnode1 start - $i = 0 $dbPrefix = db $tbPrefix = tb @@ -82,9 +76,7 @@ endi print =============== step4 system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 3000 system sh/exec.sh -n dnode1 -s start -sleep 3000 print =============== step5 diff --git a/tests/script/general/compress/uncompress.sim b/tests/script/tsim/compress/uncompress.sim similarity index 94% rename from tests/script/general/compress/uncompress.sim rename to tests/script/tsim/compress/uncompress.sim index 49945dcb79..ccd5db4b0c 100644 --- a/tests/script/general/compress/uncompress.sim +++ b/tests/script/tsim/compress/uncompress.sim @@ -1,13 +1,9 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode1 -c comp -v 1 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect -print ============================ dnode1 start +print ============================ dnode1 start $i = 0 $dbPrefix = cp_cp_db $tbPrefix = cp_cp_tb @@ -17,7 +13,6 @@ $tb = $tbPrefix . $i $N = 2000 print =============== step1 - sql create database $db sql use $db @@ -81,12 +76,9 @@ endi print =============== step4 system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 3000 system sh/exec.sh -n dnode1 -s start -sleep 2000 print =============== step5 - $i = 0 $db = $dbPrefix . $i $tb = $tbPrefix . 
$i From 0cd7d1fb13c48130f246767d25fc545be2a7e346 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 13 Jul 2022 15:28:21 +0800 Subject: [PATCH 31/56] fix: fix last row reader close memory issues --- source/dnode/vnode/src/tsdb/tsdbCacheRead.c | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbCacheRead.c b/source/dnode/vnode/src/tsdb/tsdbCacheRead.c index 5c09c7663f..127e9d2b02 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCacheRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbCacheRead.c @@ -22,11 +22,11 @@ typedef struct SLastrowReader { SVnode* pVnode; STSchema* pSchema; uint64_t uid; - char** transferBuf; // todo remove it soon - int32_t numOfCols; - int32_t type; - int32_t tableIndex; // currently returned result tables - SArray* pTableList; // table id list + char** transferBuf; // todo remove it soon + int32_t numOfCols; + int32_t type; + int32_t tableIndex; // currently returned result tables + SArray* pTableList; // table id list } SLastrowReader; static void saveOneRow(STSRow* pRow, SSDataBlock* pBlock, SLastrowReader* pReader, const int32_t* slotIds) { @@ -94,12 +94,15 @@ int32_t tsdbLastRowReaderOpen(void* pVnode, int32_t type, SArray* pTableIdList, int32_t tsdbLastrowReaderClose(void* pReader) { SLastrowReader* p = pReader; - for (int32_t i = 0; i < p->pSchema->numOfCols; ++i) { - taosMemoryFreeClear(p->transferBuf[i]); + if (p->pSchema != NULL) { + for (int32_t i = 0; i < p->pSchema->numOfCols; ++i) { + taosMemoryFreeClear(p->transferBuf[i]); + } + + taosMemoryFree(p->transferBuf); + taosMemoryFree(p->pSchema); } - taosMemoryFree(p->pSchema); - taosMemoryFree(p->transferBuf); taosMemoryFree(pReader); return TSDB_CODE_SUCCESS; } From 511c39a0cfa3c8bdca7581b981b19ba1ef4b0fb9 Mon Sep 17 00:00:00 2001 From: Minghao Li Date: Wed, 13 Jul 2022 15:55:38 +0800 Subject: [PATCH 32/56] refactor(sync): add skiplist entry cache --- source/libs/sync/inc/syncRaftEntry.h | 54 +++- source/libs/sync/src/syncRaftEntry.c | 274 ++++++++++++++++-- source/libs/sync/src/syncRespMgr.c | 2 +- source/libs/sync/test/CMakeLists.txt | 14 + source/libs/sync/test/syncEntryCacheTest.cpp | 233 +++------------- source/libs/sync/test/syncHashCacheTest.cpp | 277 +++++++++++++++++++ 6 files changed, 633 insertions(+), 221 deletions(-) create mode 100644 source/libs/sync/test/syncHashCacheTest.cpp diff --git a/source/libs/sync/inc/syncRaftEntry.h b/source/libs/sync/inc/syncRaftEntry.h index 82d5c0a6ea..fdfabf12a3 100644 --- a/source/libs/sync/inc/syncRaftEntry.h +++ b/source/libs/sync/inc/syncRaftEntry.h @@ -26,6 +26,7 @@ extern "C" { #include "syncInt.h" #include "syncMessage.h" #include "taosdef.h" +#include "tskiplist.h" typedef struct SSyncRaftEntry { uint32_t bytes; @@ -58,29 +59,52 @@ void syncEntryLog(const SSyncRaftEntry* pObj); void syncEntryLog2(char* s, const SSyncRaftEntry* pObj); //----------------------------------- -typedef struct SRaftEntryCache { +typedef struct SRaftEntryHashCache { SHashObj* pEntryHash; int32_t maxCount; int32_t currentCount; TdThreadMutex mutex; SSyncNode* pSyncNode; +} SRaftEntryHashCache; + +SRaftEntryHashCache* raftCacheCreate(SSyncNode* pSyncNode, int32_t maxCount); +void raftCacheDestroy(SRaftEntryHashCache* pCache); +int32_t raftCachePutEntry(struct SRaftEntryHashCache* pCache, SSyncRaftEntry* pEntry); +int32_t raftCacheGetEntry(struct SRaftEntryHashCache* pCache, SyncIndex index, SSyncRaftEntry** ppEntry); +int32_t raftCacheGetEntryP(struct SRaftEntryHashCache* pCache, SyncIndex index, SSyncRaftEntry** 
ppEntry); +int32_t raftCacheDelEntry(struct SRaftEntryHashCache* pCache, SyncIndex index); +int32_t raftCacheGetAndDel(struct SRaftEntryHashCache* pCache, SyncIndex index, SSyncRaftEntry** ppEntry); +int32_t raftCacheClear(struct SRaftEntryHashCache* pCache); + +cJSON* raftCache2Json(SRaftEntryHashCache* pObj); +char* raftCache2Str(SRaftEntryHashCache* pObj); +void raftCachePrint(SRaftEntryHashCache* pObj); +void raftCachePrint2(char* s, SRaftEntryHashCache* pObj); +void raftCacheLog(SRaftEntryHashCache* pObj); +void raftCacheLog2(char* s, SRaftEntryHashCache* pObj); + +//----------------------------------- +typedef struct SRaftEntryCache { + SSkipList* pSkipList; + int32_t maxCount; + int32_t currentCount; + TdThreadMutex mutex; + SSyncNode* pSyncNode; } SRaftEntryCache; -SRaftEntryCache* raftCacheCreate(SSyncNode* pSyncNode, int32_t maxCount); -void raftCacheDestroy(SRaftEntryCache* pCache); -int32_t raftCachePutEntry(struct SRaftEntryCache* pCache, SSyncRaftEntry* pEntry); -int32_t raftCacheGetEntry(struct SRaftEntryCache* pCache, SyncIndex index, SSyncRaftEntry** ppEntry); -int32_t raftCacheGetEntryP(struct SRaftEntryCache* pCache, SyncIndex index, SSyncRaftEntry** ppEntry); -int32_t raftCacheDelEntry(struct SRaftEntryCache* pCache, SyncIndex index); -int32_t raftCacheGetAndDel(struct SRaftEntryCache* pCache, SyncIndex index, SSyncRaftEntry** ppEntry); -int32_t raftCacheClear(struct SRaftEntryCache* pCache); +SRaftEntryCache* raftEntryCacheCreate(SSyncNode* pSyncNode, int32_t maxCount); +void raftEntryCacheDestroy(SRaftEntryCache* pCache); +int32_t raftEntryCachePutEntry(struct SRaftEntryCache* pCache, SSyncRaftEntry* pEntry); +int32_t raftEntryCacheGetEntry(struct SRaftEntryCache* pCache, SyncIndex index, SSyncRaftEntry** ppEntry); +int32_t raftEntryCacheGetEntryP(struct SRaftEntryCache* pCache, SyncIndex index, SSyncRaftEntry** ppEntry); +int32_t raftEntryCacheClear(struct SRaftEntryCache* pCache, int32_t count); -cJSON* raftCache2Json(SRaftEntryCache* pObj); -char* raftCache2Str(SRaftEntryCache* pObj); -void raftCachePrint(SRaftEntryCache* pObj); -void raftCachePrint2(char* s, SRaftEntryCache* pObj); -void raftCacheLog(SRaftEntryCache* pObj); -void raftCacheLog2(char* s, SRaftEntryCache* pObj); +cJSON* raftEntryCache2Json(SRaftEntryCache* pObj); +char* raftEntryCache2Str(SRaftEntryCache* pObj); +void raftEntryCachePrint(SRaftEntryCache* pObj); +void raftEntryCachePrint2(char* s, SRaftEntryCache* pObj); +void raftEntryCacheLog(SRaftEntryCache* pObj); +void raftEntryCacheLog2(char* s, SRaftEntryCache* pObj); #ifdef __cplusplus } diff --git a/source/libs/sync/src/syncRaftEntry.c b/source/libs/sync/src/syncRaftEntry.c index 465584a40f..89e67fab28 100644 --- a/source/libs/sync/src/syncRaftEntry.c +++ b/source/libs/sync/src/syncRaftEntry.c @@ -198,8 +198,8 @@ void syncEntryLog2(char* s, const SSyncRaftEntry* pObj) { } //----------------------------------- -SRaftEntryCache* raftCacheCreate(SSyncNode* pSyncNode, int32_t maxCount) { - SRaftEntryCache* pCache = taosMemoryMalloc(sizeof(SRaftEntryCache)); +SRaftEntryHashCache* raftCacheCreate(SSyncNode* pSyncNode, int32_t maxCount) { + SRaftEntryHashCache* pCache = taosMemoryMalloc(sizeof(SRaftEntryHashCache)); if (pCache == NULL) { sError("vgId:%d raft cache create error", pSyncNode->vgId); return NULL; @@ -220,7 +220,7 @@ SRaftEntryCache* raftCacheCreate(SSyncNode* pSyncNode, int32_t maxCount) { return pCache; } -void raftCacheDestroy(SRaftEntryCache* pCache) { +void raftCacheDestroy(SRaftEntryHashCache* pCache) { if (pCache != NULL) { 
taosThreadMutexLock(&(pCache->mutex)); taosHashCleanup(pCache->pEntryHash); @@ -233,7 +233,7 @@ void raftCacheDestroy(SRaftEntryCache* pCache) { // success, return 1 // max count, return 0 // error, return -1 -int32_t raftCachePutEntry(struct SRaftEntryCache* pCache, SSyncRaftEntry* pEntry) { +int32_t raftCachePutEntry(struct SRaftEntryHashCache* pCache, SSyncRaftEntry* pEntry) { taosThreadMutexLock(&(pCache->mutex)); if (pCache->currentCount >= pCache->maxCount) { @@ -259,7 +259,7 @@ int32_t raftCachePutEntry(struct SRaftEntryCache* pCache, SSyncRaftEntry* pEntry // success, return 0 // error, return -1 // not exist, return -1, terrno = TSDB_CODE_WAL_LOG_NOT_EXIST -int32_t raftCacheGetEntry(struct SRaftEntryCache* pCache, SyncIndex index, SSyncRaftEntry** ppEntry) { +int32_t raftCacheGetEntry(struct SRaftEntryHashCache* pCache, SyncIndex index, SSyncRaftEntry** ppEntry) { if (ppEntry == NULL) { return -1; } @@ -292,7 +292,7 @@ int32_t raftCacheGetEntry(struct SRaftEntryCache* pCache, SyncIndex index, SSync // success, return 0 // error, return -1 // not exist, return -1, terrno = TSDB_CODE_WAL_LOG_NOT_EXIST -int32_t raftCacheGetEntryP(struct SRaftEntryCache* pCache, SyncIndex index, SSyncRaftEntry** ppEntry) { +int32_t raftCacheGetEntryP(struct SRaftEntryHashCache* pCache, SyncIndex index, SSyncRaftEntry** ppEntry) { if (ppEntry == NULL) { return -1; } @@ -321,7 +321,7 @@ int32_t raftCacheGetEntryP(struct SRaftEntryCache* pCache, SyncIndex index, SSyn return -1; } -int32_t raftCacheDelEntry(struct SRaftEntryCache* pCache, SyncIndex index) { +int32_t raftCacheDelEntry(struct SRaftEntryHashCache* pCache, SyncIndex index) { taosThreadMutexLock(&(pCache->mutex)); taosHashRemove(pCache->pEntryHash, &index, sizeof(index)); --(pCache->currentCount); @@ -329,7 +329,7 @@ int32_t raftCacheDelEntry(struct SRaftEntryCache* pCache, SyncIndex index) { return 0; } -int32_t raftCacheGetAndDel(struct SRaftEntryCache* pCache, SyncIndex index, SSyncRaftEntry** ppEntry) { +int32_t raftCacheGetAndDel(struct SRaftEntryHashCache* pCache, SyncIndex index, SSyncRaftEntry** ppEntry) { if (ppEntry == NULL) { return -1; } @@ -362,7 +362,7 @@ int32_t raftCacheGetAndDel(struct SRaftEntryCache* pCache, SyncIndex index, SSyn return -1; } -int32_t raftCacheClear(struct SRaftEntryCache* pCache) { +int32_t raftCacheClear(struct SRaftEntryHashCache* pCache) { taosThreadMutexLock(&(pCache->mutex)); taosHashClear(pCache->pEntryHash); pCache->currentCount = 0; @@ -371,7 +371,7 @@ int32_t raftCacheClear(struct SRaftEntryCache* pCache) { } //----------------------------------- -cJSON* raftCache2Json(SRaftEntryCache* pCache) { +cJSON* raftCache2Json(SRaftEntryHashCache* pCache) { char u64buf[128] = {0}; cJSON* pRoot = cJSON_CreateObject(); @@ -402,41 +402,283 @@ cJSON* raftCache2Json(SRaftEntryCache* pCache) { } cJSON* pJson = cJSON_CreateObject(); - cJSON_AddItemToObject(pJson, "SRaftEntryCache", pRoot); + cJSON_AddItemToObject(pJson, "SRaftEntryHashCache", pRoot); return pJson; } -char* raftCache2Str(SRaftEntryCache* pCache) { +char* raftCache2Str(SRaftEntryHashCache* pCache) { cJSON* pJson = raftCache2Json(pCache); char* serialized = cJSON_Print(pJson); cJSON_Delete(pJson); return serialized; } -void raftCachePrint(SRaftEntryCache* pCache) { +void raftCachePrint(SRaftEntryHashCache* pCache) { char* serialized = raftCache2Str(pCache); printf("raftCachePrint | len:%" PRIu64 " | %s \n", strlen(serialized), serialized); fflush(NULL); taosMemoryFree(serialized); } -void raftCachePrint2(char* s, SRaftEntryCache* pCache) { +void 
raftCachePrint2(char* s, SRaftEntryHashCache* pCache) { char* serialized = raftCache2Str(pCache); printf("raftCachePrint2 | len:%" PRIu64 " | %s | %s \n", strlen(serialized), s, serialized); fflush(NULL); taosMemoryFree(serialized); } -void raftCacheLog(SRaftEntryCache* pCache) { +void raftCacheLog(SRaftEntryHashCache* pCache) { char* serialized = raftCache2Str(pCache); sTrace("raftCacheLog | len:%" PRIu64 " | %s", strlen(serialized), serialized); taosMemoryFree(serialized); } -void raftCacheLog2(char* s, SRaftEntryCache* pCache) { +void raftCacheLog2(char* s, SRaftEntryHashCache* pCache) { if (gRaftDetailLog) { char* serialized = raftCache2Str(pCache); sTraceLong("raftCacheLog2 | len:%" PRIu64 " | %s | %s", strlen(serialized), s, serialized); taosMemoryFree(serialized); } +} + +//----------------------------------- +static char* keyFn(const void* pData) { + SSyncRaftEntry* pEntry = (SSyncRaftEntry*)pData; + return (char*)(&(pEntry->index)); +} + +static int cmpFn(const void* p1, const void* p2) { return memcmp(p1, p2, sizeof(SyncIndex)); } + +SRaftEntryCache* raftEntryCacheCreate(SSyncNode* pSyncNode, int32_t maxCount) { + SRaftEntryCache* pCache = taosMemoryMalloc(sizeof(SRaftEntryCache)); + if (pCache == NULL) { + sError("vgId:%d raft cache create error", pSyncNode->vgId); + return NULL; + } + + pCache->pSkipList = + tSkipListCreate(MAX_SKIP_LIST_LEVEL, TSDB_DATA_TYPE_BINARY, sizeof(SyncIndex), cmpFn, SL_ALLOW_DUP_KEY, keyFn); + if (pCache->pSkipList == NULL) { + sError("vgId:%d raft cache create hash error", pSyncNode->vgId); + return NULL; + } + + taosThreadMutexInit(&(pCache->mutex), NULL); + pCache->maxCount = maxCount; + pCache->currentCount = 0; + pCache->pSyncNode = pSyncNode; + + return pCache; +} + +void raftEntryCacheDestroy(SRaftEntryCache* pCache) { + if (pCache != NULL) { + taosThreadMutexLock(&(pCache->mutex)); + tSkipListDestroy(pCache->pSkipList); + taosThreadMutexUnlock(&(pCache->mutex)); + taosThreadMutexDestroy(&(pCache->mutex)); + taosMemoryFree(pCache); + } +} + +// success, return 1 +// max count, return 0 +// error, return -1 +int32_t raftEntryCachePutEntry(struct SRaftEntryCache* pCache, SSyncRaftEntry* pEntry) { + taosThreadMutexLock(&(pCache->mutex)); + + if (pCache->currentCount >= pCache->maxCount) { + taosThreadMutexUnlock(&(pCache->mutex)); + return 0; + } + + SSkipListNode* pSkipListNode = tSkipListPut(pCache->pSkipList, pEntry); + ASSERT(pSkipListNode != NULL); + ++(pCache->currentCount); + + do { + char eventLog[128]; + snprintf(eventLog, sizeof(eventLog), "raft cache add, type:%s,%d, type2:%s,%d, index:%" PRId64 ", bytes:%d", + TMSG_INFO(pEntry->msgType), pEntry->msgType, TMSG_INFO(pEntry->originalRpcType), pEntry->originalRpcType, + pEntry->index, pEntry->bytes); + syncNodeEventLog(pCache->pSyncNode, eventLog); + } while (0); + + taosThreadMutexUnlock(&(pCache->mutex)); + return 1; +} + +// find one, return 1 +// not found, return 0 +// error, return -1 +int32_t raftEntryCacheGetEntry(struct SRaftEntryCache* pCache, SyncIndex index, SSyncRaftEntry** ppEntry) { + ASSERT(ppEntry != NULL); + SSyncRaftEntry* pEntry = NULL; + int32_t code = raftEntryCacheGetEntryP(pCache, index, &pEntry); + if (code == 1) { + *ppEntry = taosMemoryMalloc(pEntry->bytes); + memcpy(*ppEntry, pEntry, pEntry->bytes); + } else { + *ppEntry = NULL; + } + return code; +} + +// find one, return 1 +// not found, return 0 +// error, return -1 +int32_t raftEntryCacheGetEntryP(struct SRaftEntryCache* pCache, SyncIndex index, SSyncRaftEntry** ppEntry) { + 
taosThreadMutexLock(&(pCache->mutex)); + + SyncIndex index2 = index; + int32_t code = 0; + + SArray* entryPArray = tSkipListGet(pCache->pSkipList, (char*)(&index2)); + int32_t arraySize = taosArrayGetSize(entryPArray); + if (arraySize == 1) { + SSkipListNode** ppNode = (SSkipListNode**)taosArrayGet(entryPArray, 0); + ASSERT(*ppNode != NULL); + *ppEntry = (SSyncRaftEntry*)SL_GET_NODE_DATA(*ppNode); + code = 1; + + } else if (arraySize == 0) { + code = 0; + + } else { + ASSERT(0); + + code = -1; + } + taosArrayDestroy(entryPArray); + + taosThreadMutexUnlock(&(pCache->mutex)); + return code; +} + +// count = -1, clear all +// count >= 0, clear count +// return -1, error +// return delete count +int32_t raftEntryCacheClear(struct SRaftEntryCache* pCache, int32_t count) { + taosThreadMutexLock(&(pCache->mutex)); + int32_t returnCnt = 0; + + if (count == -1) { + // clear all + SSkipListIterator* pIter = tSkipListCreateIter(pCache->pSkipList); + while (tSkipListIterNext(pIter)) { + SSkipListNode* pNode = tSkipListIterGet(pIter); + ASSERT(pNode != NULL); + SSyncRaftEntry* pEntry = (SSyncRaftEntry*)SL_GET_NODE_DATA(pNode); + syncEntryDestory(pEntry); + ++returnCnt; + } + tSkipListDestroyIter(pIter); + + tSkipListDestroy(pCache->pSkipList); + pCache->pSkipList = + tSkipListCreate(MAX_SKIP_LIST_LEVEL, TSDB_DATA_TYPE_BINARY, sizeof(SyncIndex), cmpFn, SL_ALLOW_DUP_KEY, keyFn); + ASSERT(pCache->pSkipList != NULL); + + } else { + // clear count + int i = 0; + SSkipListIterator* pIter = tSkipListCreateIter(pCache->pSkipList); + SArray* delNodeArray = taosArrayInit(0, sizeof(SSkipListNode*)); + + // free entry + while (tSkipListIterNext(pIter)) { + SSkipListNode* pNode = tSkipListIterGet(pIter); + ASSERT(pNode != NULL); + if (i++ >= count) { + break; + } + + // sDebug("push pNode:%p", pNode); + taosArrayPush(delNodeArray, &pNode); + ++returnCnt; + SSyncRaftEntry* pEntry = (SSyncRaftEntry*)SL_GET_NODE_DATA(pNode); + syncEntryDestory(pEntry); + } + tSkipListDestroyIter(pIter); + + // delete skiplist node + int32_t arraySize = taosArrayGetSize(delNodeArray); + for (int32_t i = 0; i < arraySize; ++i) { + SSkipListNode** ppNode = taosArrayGet(delNodeArray, i); + // sDebug("get pNode:%p", *ppNode); + tSkipListRemoveNode(pCache->pSkipList, *ppNode); + } + taosArrayDestroy(delNodeArray); + } + + pCache->currentCount -= returnCnt; + taosThreadMutexUnlock(&(pCache->mutex)); + return returnCnt; +} + +cJSON* raftEntryCache2Json(SRaftEntryCache* pCache) { + char u64buf[128] = {0}; + cJSON* pRoot = cJSON_CreateObject(); + + if (pCache != NULL) { + taosThreadMutexLock(&(pCache->mutex)); + + snprintf(u64buf, sizeof(u64buf), "%p", pCache->pSyncNode); + cJSON_AddStringToObject(pRoot, "pSyncNode", u64buf); + cJSON_AddNumberToObject(pRoot, "currentCount", pCache->currentCount); + cJSON_AddNumberToObject(pRoot, "maxCount", pCache->maxCount); + cJSON* pEntries = cJSON_CreateArray(); + cJSON_AddItemToObject(pRoot, "entries", pEntries); + + SSkipListIterator* pIter = tSkipListCreateIter(pCache->pSkipList); + while (tSkipListIterNext(pIter)) { + SSkipListNode* pNode = tSkipListIterGet(pIter); + ASSERT(pNode != NULL); + SSyncRaftEntry* pEntry = (SSyncRaftEntry*)SL_GET_NODE_DATA(pNode); + cJSON_AddItemToArray(pEntries, syncEntry2Json(pEntry)); + } + tSkipListDestroyIter(pIter); + + taosThreadMutexUnlock(&(pCache->mutex)); + } + + cJSON* pJson = cJSON_CreateObject(); + cJSON_AddItemToObject(pJson, "SRaftEntryCache", pRoot); + return pJson; +} + +char* raftEntryCache2Str(SRaftEntryCache* pObj) { + cJSON* pJson = 
raftEntryCache2Json(pObj); + char* serialized = cJSON_Print(pJson); + cJSON_Delete(pJson); + return serialized; +} + +void raftEntryCachePrint(SRaftEntryCache* pObj) { + char* serialized = raftEntryCache2Str(pObj); + printf("raftEntryCachePrint | len:%" PRIu64 " | %s \n", strlen(serialized), serialized); + fflush(NULL); + taosMemoryFree(serialized); +} + +void raftEntryCachePrint2(char* s, SRaftEntryCache* pObj) { + char* serialized = raftEntryCache2Str(pObj); + printf("raftEntryCachePrint2 | len:%" PRIu64 " | %s | %s \n", strlen(serialized), s, serialized); + fflush(NULL); + taosMemoryFree(serialized); +} + +void raftEntryCacheLog(SRaftEntryCache* pObj) { + char* serialized = raftEntryCache2Str(pObj); + sTrace("raftEntryCacheLog | len:%" PRIu64 " | %s", strlen(serialized), serialized); + taosMemoryFree(serialized); +} + +void raftEntryCacheLog2(char* s, SRaftEntryCache* pObj) { + if (gRaftDetailLog) { + char* serialized = raftEntryCache2Str(pObj); + sTraceLong("raftEntryCacheLog2 | len:%" PRIu64 " | %s | %s", strlen(serialized), s, serialized); + taosMemoryFree(serialized); + } } \ No newline at end of file diff --git a/source/libs/sync/src/syncRespMgr.c b/source/libs/sync/src/syncRespMgr.c index 8e477a5159..97e5816038 100644 --- a/source/libs/sync/src/syncRespMgr.c +++ b/source/libs/sync/src/syncRespMgr.c @@ -127,7 +127,7 @@ void syncRespCleanByTTL(SSyncRespMgr *pObj, int64_t ttl) { while (pStub) { size_t len; - void *key = taosHashGetKey(pStub, &len); + void * key = taosHashGetKey(pStub, &len); uint64_t *pSeqNum = (uint64_t *)key; int64_t nowMS = taosGetTimestampMs(); diff --git a/source/libs/sync/test/CMakeLists.txt b/source/libs/sync/test/CMakeLists.txt index e1f3a2b2fc..e787080795 100644 --- a/source/libs/sync/test/CMakeLists.txt +++ b/source/libs/sync/test/CMakeLists.txt @@ -18,6 +18,7 @@ add_executable(syncIndexMgrTest "") add_executable(syncLogStoreTest "") add_executable(syncEntryTest "") add_executable(syncEntryCacheTest "") +add_executable(syncHashCacheTest "") add_executable(syncRequestVoteTest "") add_executable(syncRequestVoteReplyTest "") add_executable(syncAppendEntriesTest "") @@ -137,6 +138,10 @@ target_sources(syncEntryCacheTest PRIVATE "syncEntryCacheTest.cpp" ) +target_sources(syncHashCacheTest + PRIVATE + "syncHashCacheTest.cpp" +) target_sources(syncRequestVoteTest PRIVATE "syncRequestVoteTest.cpp" @@ -387,6 +392,11 @@ target_include_directories(syncEntryCacheTest "${TD_SOURCE_DIR}/include/libs/sync" "${CMAKE_CURRENT_SOURCE_DIR}/../inc" ) +target_include_directories(syncHashCacheTest + PUBLIC + "${TD_SOURCE_DIR}/include/libs/sync" + "${CMAKE_CURRENT_SOURCE_DIR}/../inc" +) target_include_directories(syncRequestVoteTest PUBLIC "${TD_SOURCE_DIR}/include/libs/sync" @@ -654,6 +664,10 @@ target_link_libraries(syncEntryCacheTest sync gtest_main ) +target_link_libraries(syncHashCacheTest + sync + gtest_main +) target_link_libraries(syncRequestVoteTest sync gtest_main diff --git a/source/libs/sync/test/syncEntryCacheTest.cpp b/source/libs/sync/test/syncEntryCacheTest.cpp index 6250181b25..66b93563e7 100644 --- a/source/libs/sync/test/syncEntryCacheTest.cpp +++ b/source/libs/sync/test/syncEntryCacheTest.cpp @@ -43,222 +43,82 @@ SRaftEntryCache* createCache(int maxCount) { SSyncNode* pSyncNode = createFakeNode(); ASSERT(pSyncNode != NULL); - SRaftEntryCache* pCache = raftCacheCreate(pSyncNode, maxCount); + SRaftEntryCache* pCache = raftEntryCacheCreate(pSyncNode, maxCount); ASSERT(pCache != NULL); return pCache; } void test1() { - int32_t code = 0; + int32_t code = 0; 
SRaftEntryCache* pCache = createCache(5); - for (int i = 0; i < 5; ++i) { + for (int i = 0; i < 10; ++i) { SSyncRaftEntry* pEntry = createEntry(i); - code = raftCachePutEntry(pCache, pEntry); - ASSERT(code == 1); - syncEntryDestory(pEntry); + code = raftEntryCachePutEntry(pCache, pEntry); + sTrace("put entry code:%d, pEntry:%p", code, pEntry); } - raftCacheLog2((char*)"==test1 write 5 entries==", pCache); + raftEntryCacheLog2((char*)"==test1 write 5 entries==", pCache); - SyncIndex index; - index = 1; - code = raftCacheDelEntry(pCache, index); - ASSERT(code == 0); - index = 3; - code = raftCacheDelEntry(pCache, index); - ASSERT(code == 0); - raftCacheLog2((char*)"==test1 delete 1,3==", pCache); + raftEntryCacheClear(pCache, 3); + raftEntryCacheLog2((char*)"==test1 evict 3 entries==", pCache); - code = raftCacheClear(pCache); - ASSERT(code == 0); - raftCacheLog2((char*)"==clear all==", pCache); + raftEntryCacheClear(pCache, -1); + raftEntryCacheLog2((char*)"==test1 evict -1(all) entries==", pCache); } void test2() { - int32_t code = 0; + int32_t code = 0; SRaftEntryCache* pCache = createCache(5); - for (int i = 0; i < 5; ++i) { + for (int i = 0; i < 10; ++i) { SSyncRaftEntry* pEntry = createEntry(i); - code = raftCachePutEntry(pCache, pEntry); - ASSERT(code == 1); - syncEntryDestory(pEntry); + code = raftEntryCachePutEntry(pCache, pEntry); + sTrace("put entry code:%d, pEntry:%p", code, pEntry); } - raftCacheLog2((char*)"==test2 write 5 entries==", pCache); + raftEntryCacheLog2((char*)"==test1 write 5 entries==", pCache); - SyncIndex index; - index = 1; - SSyncRaftEntry* pEntry; - code = raftCacheGetEntry(pCache, index, &pEntry); - ASSERT(code == 0); - syncEntryDestory(pEntry); - syncEntryLog2((char*)"==test2 get entry 1==", pEntry); + SyncIndex index = 2; + SSyncRaftEntry* pEntry = NULL; - index = 2; - code = raftCacheGetEntryP(pCache, index, &pEntry); - ASSERT(code == 0); + code = raftEntryCacheGetEntryP(pCache, index, &pEntry); + ASSERT(code == 1 && index == pEntry->index); + sTrace("get entry:%p for %ld", pEntry, index); syncEntryLog2((char*)"==test2 get entry pointer 2==", pEntry); + code = raftEntryCacheGetEntry(pCache, index, &pEntry); + ASSERT(code == 1 && index == pEntry->index); + sTrace("get entry:%p for %ld", pEntry, index); + syncEntryLog2((char*)"==test2 get entry 2==", pEntry); + syncEntryDestory(pEntry); + // not found index = 8; - code = raftCacheGetEntry(pCache, index, &pEntry); - ASSERT(code == -1 && terrno == TSDB_CODE_WAL_LOG_NOT_EXIST); + code = raftEntryCacheGetEntry(pCache, index, &pEntry); + ASSERT(code == 0); + sTrace("get entry:%p for %ld", pEntry, index); sTrace("==test2 get entry 8 not found=="); // not found index = 9; - code = raftCacheGetEntryP(pCache, index, &pEntry); - ASSERT(code == -1 && terrno == TSDB_CODE_WAL_LOG_NOT_EXIST); - sTrace("==test2 get entry pointer 9 not found=="); + code = raftEntryCacheGetEntry(pCache, index, &pEntry); + ASSERT(code == 0); + sTrace("get entry:%p for %ld", pEntry, index); + sTrace("==test2 get entry 9 not found=="); } void test3() { - int32_t code = 0; - SRaftEntryCache* pCache = createCache(5); - for (int i = 0; i < 5; ++i) { + int32_t code = 0; + SRaftEntryCache* pCache = createCache(20); + for (int i = 0; i <= 4; ++i) { SSyncRaftEntry* pEntry = createEntry(i); - code = raftCachePutEntry(pCache, pEntry); - ASSERT(code == 1); - syncEntryDestory(pEntry); + code = raftEntryCachePutEntry(pCache, pEntry); + sTrace("put entry code:%d, pEntry:%p", code, pEntry); } - for (int i = 6; i < 10; ++i) { - SSyncRaftEntry* pEntry = 
createEntry(i); - code = raftCachePutEntry(pCache, pEntry); - ASSERT(code == 0); - syncEntryDestory(pEntry); - } - raftCacheLog2((char*)"==test3 write 10 entries, max count is 5==", pCache); -} - -void test4() { - int32_t code = 0; - SRaftEntryCache* pCache = createCache(5); - for (int i = 0; i < 5; ++i) { - SSyncRaftEntry* pEntry = createEntry(i); - code = raftCachePutEntry(pCache, pEntry); - ASSERT(code == 1); - syncEntryDestory(pEntry); - } - raftCacheLog2((char*)"==test4 write 5 entries==", pCache); - - SyncIndex index; - index = 3; - SSyncRaftEntry* pEntry; - code = raftCacheGetAndDel(pCache, index, &pEntry); - ASSERT(code == 0); - syncEntryLog2((char*)"==test4 get-and-del entry 3==", pEntry); - raftCacheLog2((char*)"==test4 after get-and-del entry 3==", pCache); -} - -static char* keyFn(const void* pData) { - SSyncRaftEntry* pEntry = (SSyncRaftEntry*)pData; - return (char*)(&(pEntry->index)); -} - -static int cmpFn(const void* p1, const void* p2) { return memcmp(p1, p2, sizeof(SyncIndex)); } - -void printSkipList(SSkipList* pSkipList) { - ASSERT(pSkipList != NULL); - - SSkipListIterator* pIter = tSkipListCreateIter(pSkipList); - while (tSkipListIterNext(pIter)) { - SSkipListNode* pNode = tSkipListIterGet(pIter); - ASSERT(pNode != NULL); - SSyncRaftEntry* pEntry = (SSyncRaftEntry*)SL_GET_NODE_DATA(pNode); - syncEntryPrint2((char*)"", pEntry); - } -} - -void delSkipListFirst(SSkipList* pSkipList, int n) { - ASSERT(pSkipList != NULL); - - sTrace("delete first %d -------------", n); - SSkipListIterator* pIter = tSkipListCreateIter(pSkipList); - for (int i = 0; i < n; ++i) { - tSkipListIterNext(pIter); - SSkipListNode* pNode = tSkipListIterGet(pIter); - tSkipListRemoveNode(pSkipList, pNode); - } -} - - -SSyncRaftEntry* getLogEntry2(SSkipList* pSkipList, SyncIndex index) { - SyncIndex index2 = index; - SSyncRaftEntry *pEntry = NULL; - int arraySize = 0; - - SArray* entryPArray = tSkipListGet(pSkipList, (char*)(&index2)); - arraySize = taosArrayGetSize(entryPArray); - if (arraySize > 0) { - SSkipListNode** ppNode = (SSkipListNode**)taosArrayGet(entryPArray, 0); - ASSERT(*ppNode != NULL); - pEntry = (SSyncRaftEntry*)SL_GET_NODE_DATA(*ppNode); - } - taosArrayDestroy(entryPArray); - - sTrace("get index2: %ld, arraySize:%d -------------", index, arraySize); - syncEntryLog2((char*)"getLogEntry2", pEntry); - return pEntry; -} - - -SSyncRaftEntry* getLogEntry(SSkipList* pSkipList, SyncIndex index) { - sTrace("get index: %ld -------------", index); - SyncIndex index2 = index; - SSyncRaftEntry *pEntry = NULL; - SSkipListIterator* pIter = tSkipListCreateIterFromVal(pSkipList, (const char *)&index2, TSDB_DATA_TYPE_BINARY, TSDB_ORDER_ASC); - if (tSkipListIterNext(pIter)) { - SSkipListNode* pNode = tSkipListIterGet(pIter); - ASSERT(pNode != NULL); - pEntry = (SSyncRaftEntry*)SL_GET_NODE_DATA(pNode); - } - - syncEntryLog2((char*)"getLogEntry", pEntry); - return pEntry; -} - -void test5() { - SSkipList* pSkipList = - tSkipListCreate(MAX_SKIP_LIST_LEVEL, TSDB_DATA_TYPE_BINARY, sizeof(SyncIndex), cmpFn, SL_ALLOW_DUP_KEY, keyFn); - ASSERT(pSkipList != NULL); - - sTrace("insert 9 - 5"); for (int i = 9; i >= 5; --i) { SSyncRaftEntry* pEntry = createEntry(i); - SSkipListNode* pSkipListNode = tSkipListPut(pSkipList, pEntry); + code = raftEntryCachePutEntry(pCache, pEntry); + sTrace("put entry code:%d, pEntry:%p", code, pEntry); } - - sTrace("insert 0 - 4"); - for (int i = 0; i <= 4; ++i) { - SSyncRaftEntry* pEntry = createEntry(i); - SSkipListNode* pSkipListNode = tSkipListPut(pSkipList, pEntry); - } - - 
sTrace("insert 7 7 7 7 7"); - for (int i = 0; i <= 4; ++i) { - SSyncRaftEntry* pEntry = createEntry(7); - SSkipListNode* pSkipListNode = tSkipListPut(pSkipList, pEntry); - } - - sTrace("print: -------------"); - printSkipList(pSkipList); - - delSkipListFirst(pSkipList, 3); - - sTrace("print: -------------"); - printSkipList(pSkipList); - - getLogEntry(pSkipList, 2); - getLogEntry(pSkipList, 5); - getLogEntry(pSkipList, 7); - getLogEntry(pSkipList, 7); - - getLogEntry2(pSkipList, 2); - getLogEntry2(pSkipList, 5); - getLogEntry2(pSkipList, 7); - getLogEntry2(pSkipList, 7); - - - tSkipListDestroy(pSkipList); + raftEntryCacheLog2((char*)"==test3 write 10 entries==", pCache); } int main(int argc, char** argv) { @@ -266,14 +126,9 @@ int main(int argc, char** argv) { tsAsyncLog = 0; sDebugFlag = DEBUG_TRACE + DEBUG_SCREEN + DEBUG_FILE + DEBUG_DEBUG; - /* - test1(); - test2(); - test3(); - test4(); - */ - - test5(); + test1(); + test2(); + test3(); return 0; } diff --git a/source/libs/sync/test/syncHashCacheTest.cpp b/source/libs/sync/test/syncHashCacheTest.cpp new file mode 100644 index 0000000000..f155bd834f --- /dev/null +++ b/source/libs/sync/test/syncHashCacheTest.cpp @@ -0,0 +1,277 @@ +#include +#include "syncEnv.h" +#include "syncIO.h" +#include "syncInt.h" +#include "syncRaftLog.h" +#include "syncRaftStore.h" +#include "syncUtil.h" +#include "tskiplist.h" + +void logTest() { + sTrace("--- sync log test: trace"); + sDebug("--- sync log test: debug"); + sInfo("--- sync log test: info"); + sWarn("--- sync log test: warn"); + sError("--- sync log test: error"); + sFatal("--- sync log test: fatal"); +} + +SSyncRaftEntry* createEntry(int i) { + int32_t dataLen = 20; + SSyncRaftEntry* pEntry = syncEntryBuild(dataLen); + assert(pEntry != NULL); + pEntry->msgType = 88; + pEntry->originalRpcType = 99; + pEntry->seqNum = 3; + pEntry->isWeak = true; + pEntry->term = 100 + i; + pEntry->index = i; + snprintf(pEntry->data, dataLen, "value%d", i); + + return pEntry; +} + +SSyncNode* createFakeNode() { + SSyncNode* pSyncNode = (SSyncNode*)taosMemoryMalloc(sizeof(SSyncNode)); + ASSERT(pSyncNode != NULL); + memset(pSyncNode, 0, sizeof(SSyncNode)); + + return pSyncNode; +} + +SRaftEntryHashCache* createCache(int maxCount) { + SSyncNode* pSyncNode = createFakeNode(); + ASSERT(pSyncNode != NULL); + + SRaftEntryHashCache* pCache = raftCacheCreate(pSyncNode, maxCount); + ASSERT(pCache != NULL); + + return pCache; +} + +void test1() { + int32_t code = 0; + SRaftEntryHashCache* pCache = createCache(5); + for (int i = 0; i < 5; ++i) { + SSyncRaftEntry* pEntry = createEntry(i); + code = raftCachePutEntry(pCache, pEntry); + ASSERT(code == 1); + syncEntryDestory(pEntry); + } + raftCacheLog2((char*)"==test1 write 5 entries==", pCache); + + SyncIndex index; + index = 1; + code = raftCacheDelEntry(pCache, index); + ASSERT(code == 0); + index = 3; + code = raftCacheDelEntry(pCache, index); + ASSERT(code == 0); + raftCacheLog2((char*)"==test1 delete 1,3==", pCache); + + code = raftCacheClear(pCache); + ASSERT(code == 0); + raftCacheLog2((char*)"==clear all==", pCache); +} + +void test2() { + int32_t code = 0; + SRaftEntryHashCache* pCache = createCache(5); + for (int i = 0; i < 5; ++i) { + SSyncRaftEntry* pEntry = createEntry(i); + code = raftCachePutEntry(pCache, pEntry); + ASSERT(code == 1); + syncEntryDestory(pEntry); + } + raftCacheLog2((char*)"==test2 write 5 entries==", pCache); + + SyncIndex index; + index = 1; + SSyncRaftEntry* pEntry; + code = raftCacheGetEntry(pCache, index, &pEntry); + ASSERT(code == 0); + 
syncEntryDestory(pEntry); + syncEntryLog2((char*)"==test2 get entry 1==", pEntry); + + index = 2; + code = raftCacheGetEntryP(pCache, index, &pEntry); + ASSERT(code == 0); + syncEntryLog2((char*)"==test2 get entry pointer 2==", pEntry); + + // not found + index = 8; + code = raftCacheGetEntry(pCache, index, &pEntry); + ASSERT(code == -1 && terrno == TSDB_CODE_WAL_LOG_NOT_EXIST); + sTrace("==test2 get entry 8 not found=="); + + // not found + index = 9; + code = raftCacheGetEntryP(pCache, index, &pEntry); + ASSERT(code == -1 && terrno == TSDB_CODE_WAL_LOG_NOT_EXIST); + sTrace("==test2 get entry pointer 9 not found=="); +} + +void test3() { + int32_t code = 0; + SRaftEntryHashCache* pCache = createCache(5); + for (int i = 0; i < 5; ++i) { + SSyncRaftEntry* pEntry = createEntry(i); + code = raftCachePutEntry(pCache, pEntry); + ASSERT(code == 1); + syncEntryDestory(pEntry); + } + for (int i = 6; i < 10; ++i) { + SSyncRaftEntry* pEntry = createEntry(i); + code = raftCachePutEntry(pCache, pEntry); + ASSERT(code == 0); + syncEntryDestory(pEntry); + } + raftCacheLog2((char*)"==test3 write 10 entries, max count is 5==", pCache); +} + +void test4() { + int32_t code = 0; + SRaftEntryHashCache* pCache = createCache(5); + for (int i = 0; i < 5; ++i) { + SSyncRaftEntry* pEntry = createEntry(i); + code = raftCachePutEntry(pCache, pEntry); + ASSERT(code == 1); + syncEntryDestory(pEntry); + } + raftCacheLog2((char*)"==test4 write 5 entries==", pCache); + + SyncIndex index; + index = 3; + SSyncRaftEntry* pEntry; + code = raftCacheGetAndDel(pCache, index, &pEntry); + ASSERT(code == 0); + syncEntryLog2((char*)"==test4 get-and-del entry 3==", pEntry); + raftCacheLog2((char*)"==test4 after get-and-del entry 3==", pCache); +} + +static char* keyFn(const void* pData) { + SSyncRaftEntry* pEntry = (SSyncRaftEntry*)pData; + return (char*)(&(pEntry->index)); +} + +static int cmpFn(const void* p1, const void* p2) { return memcmp(p1, p2, sizeof(SyncIndex)); } + +void printSkipList(SSkipList* pSkipList) { + ASSERT(pSkipList != NULL); + + SSkipListIterator* pIter = tSkipListCreateIter(pSkipList); + while (tSkipListIterNext(pIter)) { + SSkipListNode* pNode = tSkipListIterGet(pIter); + ASSERT(pNode != NULL); + SSyncRaftEntry* pEntry = (SSyncRaftEntry*)SL_GET_NODE_DATA(pNode); + syncEntryPrint2((char*)"", pEntry); + } +} + +void delSkipListFirst(SSkipList* pSkipList, int n) { + ASSERT(pSkipList != NULL); + + sTrace("delete first %d -------------", n); + SSkipListIterator* pIter = tSkipListCreateIter(pSkipList); + for (int i = 0; i < n; ++i) { + tSkipListIterNext(pIter); + SSkipListNode* pNode = tSkipListIterGet(pIter); + tSkipListRemoveNode(pSkipList, pNode); + } +} + +SSyncRaftEntry* getLogEntry2(SSkipList* pSkipList, SyncIndex index) { + SyncIndex index2 = index; + SSyncRaftEntry* pEntry = NULL; + int arraySize = 0; + + SArray* entryPArray = tSkipListGet(pSkipList, (char*)(&index2)); + arraySize = taosArrayGetSize(entryPArray); + if (arraySize > 0) { + SSkipListNode** ppNode = (SSkipListNode**)taosArrayGet(entryPArray, 0); + ASSERT(*ppNode != NULL); + pEntry = (SSyncRaftEntry*)SL_GET_NODE_DATA(*ppNode); + } + taosArrayDestroy(entryPArray); + + sTrace("get index2: %ld, arraySize:%d -------------", index, arraySize); + syncEntryLog2((char*)"getLogEntry2", pEntry); + return pEntry; +} + +SSyncRaftEntry* getLogEntry(SSkipList* pSkipList, SyncIndex index) { + sTrace("get index: %ld -------------", index); + SyncIndex index2 = index; + SSyncRaftEntry* pEntry = NULL; + SSkipListIterator* pIter = + 
tSkipListCreateIterFromVal(pSkipList, (const char*)&index2, TSDB_DATA_TYPE_BINARY, TSDB_ORDER_ASC); + if (tSkipListIterNext(pIter)) { + SSkipListNode* pNode = tSkipListIterGet(pIter); + ASSERT(pNode != NULL); + pEntry = (SSyncRaftEntry*)SL_GET_NODE_DATA(pNode); + } + + syncEntryLog2((char*)"getLogEntry", pEntry); + return pEntry; +} + +void test5() { + SSkipList* pSkipList = + tSkipListCreate(MAX_SKIP_LIST_LEVEL, TSDB_DATA_TYPE_BINARY, sizeof(SyncIndex), cmpFn, SL_ALLOW_DUP_KEY, keyFn); + ASSERT(pSkipList != NULL); + + sTrace("insert 9 - 5"); + for (int i = 9; i >= 5; --i) { + SSyncRaftEntry* pEntry = createEntry(i); + SSkipListNode* pSkipListNode = tSkipListPut(pSkipList, pEntry); + } + + sTrace("insert 0 - 4"); + for (int i = 0; i <= 4; ++i) { + SSyncRaftEntry* pEntry = createEntry(i); + SSkipListNode* pSkipListNode = tSkipListPut(pSkipList, pEntry); + } + + sTrace("insert 7 7 7 7 7"); + for (int i = 0; i <= 4; ++i) { + SSyncRaftEntry* pEntry = createEntry(7); + SSkipListNode* pSkipListNode = tSkipListPut(pSkipList, pEntry); + } + + sTrace("print: -------------"); + printSkipList(pSkipList); + + delSkipListFirst(pSkipList, 3); + + sTrace("print: -------------"); + printSkipList(pSkipList); + + getLogEntry(pSkipList, 2); + getLogEntry(pSkipList, 5); + getLogEntry(pSkipList, 7); + getLogEntry(pSkipList, 7); + + getLogEntry2(pSkipList, 2); + getLogEntry2(pSkipList, 5); + getLogEntry2(pSkipList, 7); + getLogEntry2(pSkipList, 7); + + tSkipListDestroy(pSkipList); +} + +int main(int argc, char** argv) { + gRaftDetailLog = true; + tsAsyncLog = 0; + sDebugFlag = DEBUG_TRACE + DEBUG_SCREEN + DEBUG_FILE + DEBUG_DEBUG; + + /* + test1(); + test2(); + test3(); + test4(); + */ + + test5(); + + return 0; +} From c2aebfa3224e9fd50ed03d65924ecdd6cf4aee43 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Wed, 13 Jul 2022 16:26:22 +0800 Subject: [PATCH 33/56] enh: add stream queue to vnode --- include/common/tglobal.h | 1 + include/common/tmsgcb.h | 1 + source/common/src/tglobal.c | 6 ++++ source/dnode/mgmt/mgmt_vnode/inc/vmInt.h | 3 ++ source/dnode/mgmt/mgmt_vnode/src/vmInt.c | 1 + source/dnode/mgmt/mgmt_vnode/src/vmWorker.c | 39 ++++++++++++++++++++- source/libs/stream/src/stream.c | 2 +- 7 files changed, 51 insertions(+), 2 deletions(-) diff --git a/include/common/tglobal.h b/include/common/tglobal.h index 944eaa28bc..96014b1234 100644 --- a/include/common/tglobal.h +++ b/include/common/tglobal.h @@ -64,6 +64,7 @@ extern int32_t tsNumOfMnodeQueryThreads; extern int32_t tsNumOfMnodeFetchThreads; extern int32_t tsNumOfMnodeReadThreads; extern int32_t tsNumOfVnodeQueryThreads; +extern int32_t tsNumOfVnodeStreamThreads; extern int32_t tsNumOfVnodeFetchThreads; extern int32_t tsNumOfVnodeWriteThreads; extern int32_t tsNumOfVnodeSyncThreads; diff --git a/include/common/tmsgcb.h b/include/common/tmsgcb.h index b56f755266..c13c50e161 100644 --- a/include/common/tmsgcb.h +++ b/include/common/tmsgcb.h @@ -34,6 +34,7 @@ typedef enum { WRITE_QUEUE, APPLY_QUEUE, SYNC_QUEUE, + STREAM_QUEUE, QUEUE_MAX, } EQueueType; diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 24bf4e5c2d..2d3f35ea39 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -55,6 +55,7 @@ int32_t tsNumOfMnodeQueryThreads = 2; int32_t tsNumOfMnodeFetchThreads = 1; int32_t tsNumOfMnodeReadThreads = 1; int32_t tsNumOfVnodeQueryThreads = 2; +int32_t tsNumOfVnodeStreamThreads = 2; int32_t tsNumOfVnodeFetchThreads = 4; int32_t tsNumOfVnodeWriteThreads = 2; int32_t tsNumOfVnodeSyncThreads = 2; @@ -412,6 
+413,10 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { tsNumOfVnodeQueryThreads = TMAX(tsNumOfVnodeQueryThreads, 2); if (cfgAddInt32(pCfg, "numOfVnodeQueryThreads", tsNumOfVnodeQueryThreads, 1, 1024, 0) != 0) return -1; + tsNumOfVnodeStreamThreads = tsNumOfCores / 4; + tsNumOfVnodeStreamThreads = TMAX(tsNumOfVnodeStreamThreads, 4); + if (cfgAddInt32(pCfg, "numOfVnodeStreamThreads", tsNumOfVnodeStreamThreads, 1, 1024, 0) != 0) return -1; + tsNumOfVnodeFetchThreads = tsNumOfCores / 4; tsNumOfVnodeFetchThreads = TMAX(tsNumOfVnodeFetchThreads, 4); if (cfgAddInt32(pCfg, "numOfVnodeFetchThreads", tsNumOfVnodeFetchThreads, 1, 1024, 0) != 0) return -1; @@ -587,6 +592,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { tsNumOfMnodeQueryThreads = cfgGetItem(pCfg, "numOfMnodeQueryThreads")->i32; tsNumOfMnodeReadThreads = cfgGetItem(pCfg, "numOfMnodeReadThreads")->i32; tsNumOfVnodeQueryThreads = cfgGetItem(pCfg, "numOfVnodeQueryThreads")->i32; + tsNumOfVnodeStreamThreads = cfgGetItem(pCfg, "numOfVnodeStreamThreads")->i32; tsNumOfVnodeFetchThreads = cfgGetItem(pCfg, "numOfVnodeFetchThreads")->i32; tsNumOfVnodeWriteThreads = cfgGetItem(pCfg, "numOfVnodeWriteThreads")->i32; tsNumOfVnodeSyncThreads = cfgGetItem(pCfg, "numOfVnodeSyncThreads")->i32; diff --git a/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h b/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h index 6fc0ab4e5d..ebbb9fa5d4 100644 --- a/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h +++ b/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h @@ -31,6 +31,7 @@ typedef struct SVnodeMgmt { const char *path; const char *name; SQWorkerPool queryPool; + SQWorkerPool streamPool; SWWorkerPool fetchPool; SWWorkerPool syncPool; SWWorkerPool writePool; @@ -61,6 +62,7 @@ typedef struct { STaosQueue *pSyncQ; STaosQueue *pApplyQ; STaosQueue *pQueryQ; + STaosQueue *pStreamQ; STaosQueue *pFetchQ; } SVnodeObj; @@ -105,6 +107,7 @@ int32_t vmPutMsgToWriteQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg); int32_t vmPutMsgToSyncQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg); int32_t vmPutMsgToQueryQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg); int32_t vmPutMsgToFetchQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg); +int32_t vmPutMsgToStreamQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg); int32_t vmPutMsgToMergeQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg); int32_t vmPutMsgToMgmtQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg); int32_t vmPutMsgToMonitorQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg); diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c index 36f6fab699..1f981cc9e0 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c @@ -92,6 +92,7 @@ void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) { while (!taosQueueEmpty(pVnode->pApplyQ)) taosMsleep(10); while (!taosQueueEmpty(pVnode->pQueryQ)) taosMsleep(10); while (!taosQueueEmpty(pVnode->pFetchQ)) taosMsleep(10); + while (!taosQueueEmpty(pVnode->pStreamQ)) taosMsleep(10); dTrace("vgId:%d, vnode queue is empty", pVnode->vgId); vmFreeQueue(pMgmt, pVnode); diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c index 93f93b1ab7..4a60dbfe0f 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c @@ -81,6 +81,23 @@ static void vmProcessQueryQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) { taosFreeQitem(pMsg); } +static void vmProcessStreamQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) { + SVnodeObj *pVnode = pInfo->ahandle; + const STraceId *trace = &pMsg->info.traceId; + + dGTrace("vgId:%d, msg:%p get from vnode-stream 
queue", pVnode->vgId, pMsg); + int32_t code = vnodeProcessFetchMsg(pVnode->pImpl, pMsg, pInfo); + if (code != 0) { + if (terrno != 0) code = terrno; + dGError("vgId:%d, msg:%p failed to stream since %s", pVnode->vgId, pMsg, terrstr()); + vmSendRsp(pMsg, code); + } + + dGTrace("vgId:%d, msg:%p is freed, code:0x%x", pVnode->vgId, pMsg, code); + rpcFreeCont(pMsg->pCont); + taosFreeQitem(pMsg); +} + static void vmProcessFetchQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) { SVnodeObj *pVnode = pInfo->ahandle; SRpcMsg *pMsg = NULL; @@ -140,6 +157,10 @@ static int32_t vmPutMsgToQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg, EQueueType qtyp dGTrace("vgId:%d, msg:%p put into vnode-query queue", pVnode->vgId, pMsg); taosWriteQitem(pVnode->pQueryQ, pMsg); break; + case STREAM_QUEUE: + dGTrace("vgId:%d, msg:%p put into vnode-stream queue", pVnode->vgId, pMsg); + taosWriteQitem(pVnode->pStreamQ, pMsg); + break; case FETCH_QUEUE: dGTrace("vgId:%d, msg:%p put into vnode-fetch queue", pVnode->vgId, pMsg); taosWriteQitem(pVnode->pFetchQ, pMsg); @@ -174,6 +195,8 @@ int32_t vmPutMsgToQueryQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { return vmPutMsg int32_t vmPutMsgToFetchQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { return vmPutMsgToQueue(pMgmt, pMsg, FETCH_QUEUE); } +int32_t vmPutMsgToStreamQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { return vmPutMsgToQueue(pMgmt, pMsg, STREAM_QUEUE); } + int32_t vmPutMsgToMgmtQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { const STraceId *trace = &pMsg->info.traceId; dGTrace("msg:%p, put into vnode-mgmt queue", pMsg); @@ -234,6 +257,9 @@ int32_t vmGetQueueSize(SVnodeMgmt *pMgmt, int32_t vgId, EQueueType qtype) { case FETCH_QUEUE: size = taosQueueItemSize(pVnode->pFetchQ); break; + case STREAM_QUEUE: + size = taosQueueItemSize(pVnode->pStreamQ); + break; default: break; } @@ -247,10 +273,11 @@ int32_t vmAllocQueue(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) { pVnode->pSyncQ = tWWorkerAllocQueue(&pMgmt->syncPool, pVnode, (FItems)vmProcessSyncQueue); pVnode->pApplyQ = tWWorkerAllocQueue(&pMgmt->applyPool, pVnode->pImpl, (FItems)vnodeApplyWriteMsg); pVnode->pQueryQ = tQWorkerAllocQueue(&pMgmt->queryPool, pVnode, (FItem)vmProcessQueryQueue); + pVnode->pStreamQ = tQWorkerAllocQueue(&pMgmt->streamPool, pVnode, (FItem)vmProcessStreamQueue); pVnode->pFetchQ = tWWorkerAllocQueue(&pMgmt->fetchPool, pVnode, (FItems)vmProcessFetchQueue); if (pVnode->pWriteQ == NULL || pVnode->pSyncQ == NULL || pVnode->pApplyQ == NULL || pVnode->pQueryQ == NULL || - pVnode->pFetchQ == NULL) { + pVnode->pStreamQ == NULL || pVnode->pFetchQ == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; return -1; } @@ -259,6 +286,7 @@ int32_t vmAllocQueue(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) { dDebug("vgId:%d, sync-queue:%p is alloced", pVnode->vgId, pVnode->pSyncQ); dDebug("vgId:%d, apply-queue:%p is alloced", pVnode->vgId, pVnode->pApplyQ); dDebug("vgId:%d, query-queue:%p is alloced", pVnode->vgId, pVnode->pQueryQ); + dDebug("vgId:%d, stream-queue:%p is alloced", pVnode->vgId, pVnode->pStreamQ); dDebug("vgId:%d, fetch-queue:%p is alloced", pVnode->vgId, pVnode->pFetchQ); return 0; } @@ -268,11 +296,13 @@ void vmFreeQueue(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) { tWWorkerFreeQueue(&pMgmt->applyPool, pVnode->pApplyQ); tWWorkerFreeQueue(&pMgmt->syncPool, pVnode->pSyncQ); tQWorkerFreeQueue(&pMgmt->queryPool, pVnode->pQueryQ); + tQWorkerFreeQueue(&pMgmt->streamPool, pVnode->pStreamQ); tWWorkerFreeQueue(&pMgmt->fetchPool, pVnode->pFetchQ); pVnode->pWriteQ = NULL; pVnode->pSyncQ = NULL; pVnode->pApplyQ = NULL; pVnode->pQueryQ = NULL; + 
pVnode->pStreamQ = NULL; pVnode->pFetchQ = NULL; dDebug("vgId:%d, queue is freed", pVnode->vgId); } @@ -284,6 +314,12 @@ int32_t vmStartWorker(SVnodeMgmt *pMgmt) { pQPool->max = tsNumOfVnodeQueryThreads; if (tQWorkerInit(pQPool) != 0) return -1; + SQWorkerPool *pStreamPool = &pMgmt->streamPool; + pStreamPool->name = "vnode-stream"; + pStreamPool->min = tsNumOfVnodeStreamThreads; + pStreamPool->max = tsNumOfVnodeStreamThreads; + if (tQWorkerInit(pStreamPool) != 0) return -1; + SWWorkerPool *pFPool = &pMgmt->fetchPool; pFPool->name = "vnode-fetch"; pFPool->max = tsNumOfVnodeFetchThreads; @@ -333,6 +369,7 @@ void vmStopWorker(SVnodeMgmt *pMgmt) { tWWorkerCleanup(&pMgmt->applyPool); tWWorkerCleanup(&pMgmt->syncPool); tQWorkerCleanup(&pMgmt->queryPool); + tQWorkerCleanup(&pMgmt->streamPool); tWWorkerCleanup(&pMgmt->fetchPool); dDebug("vnode workers are closed"); } diff --git a/source/libs/stream/src/stream.c b/source/libs/stream/src/stream.c index becfac0cac..566d9209a8 100644 --- a/source/libs/stream/src/stream.c +++ b/source/libs/stream/src/stream.c @@ -97,7 +97,7 @@ int32_t streamLaunchByWrite(SStreamTask* pTask, int32_t vgId) { .pCont = pRunReq, .contLen = sizeof(SStreamTaskRunReq), }; - tmsgPutToQueue(pTask->pMsgCb, FETCH_QUEUE, &msg); + tmsgPutToQueue(pTask->pMsgCb, STREAM_QUEUE, &msg); } return 0; } From ff20bae06bd3e26c7707e3219c20316742515c67 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Wed, 13 Jul 2022 16:36:03 +0800 Subject: [PATCH 34/56] test: restore some 2.0 case --- tests/script/general/import/testSuite.sim | 4 --- tests/script/jenkins/basic.txt | 6 ++++ .../script/{general => tsim}/import/basic.sim | 26 ---------------- .../{general => tsim}/import/commit.sim | 28 ----------------- .../script/{general => tsim}/import/large.sim | 26 ---------------- .../{general => tsim}/import/replica1.sim | 31 ------------------- 6 files changed, 6 insertions(+), 115 deletions(-) delete mode 100644 tests/script/general/import/testSuite.sim rename tests/script/{general => tsim}/import/basic.sim (75%) rename tests/script/{general => tsim}/import/commit.sim (66%) rename tests/script/{general => tsim}/import/large.sim (63%) rename tests/script/{general => tsim}/import/replica1.sim (84%) diff --git a/tests/script/general/import/testSuite.sim b/tests/script/general/import/testSuite.sim deleted file mode 100644 index 9157410ea5..0000000000 --- a/tests/script/general/import/testSuite.sim +++ /dev/null @@ -1,4 +0,0 @@ -run general/import/basic.sim -run general/import/commit.sim -run general/import/large.sim -run general/import/replica1.sim diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt index b24ca5f85e..32279b3a4a 100644 --- a/tests/script/jenkins/basic.txt +++ b/tests/script/jenkins/basic.txt @@ -66,6 +66,12 @@ # ./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v3.sim # ./test.sh -f tsim/dnode/vnode_clean.sim +# ---- import +./test.sh -f tsim/import/basic.sim +./test.sh -f tsim/import/commit.sim +./test.sh -f tsim/import/large.sim +./test.sh -f tsim/import/replica1.sim + # ---- insert ./test.sh -f tsim/insert/basic0.sim ./test.sh -f tsim/insert/basic1.sim diff --git a/tests/script/general/import/basic.sim b/tests/script/tsim/import/basic.sim similarity index 75% rename from tests/script/general/import/basic.sim rename to tests/script/tsim/import/basic.sim index f72d132ca3..cb1b525120 100644 --- a/tests/script/general/import/basic.sim +++ b/tests/script/tsim/import/basic.sim @@ -1,32 +1,6 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 -system 
sh/deploy.sh -n dnode2 -i 2 -system sh/deploy.sh -n dnode3 -i 3 -system sh/deploy.sh -n dnode4 -i 4 - -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 1 -system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1 -system sh/cfg.sh -n dnode4 -c numOfMnodes -v 1 - -system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 10 -system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 10 -system sh/cfg.sh -n dnode3 -c mnodeEqualVnodeNum -v 10 -system sh/cfg.sh -n dnode4 -c mnodeEqualVnodeNum -v 10 - -system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 2000 -system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 2000 -system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 2000 -system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 2000 - -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode2 -c walLevel -v 1 -system sh/cfg.sh -n dnode3 -c walLevel -v 1 -system sh/cfg.sh -n dnode4 -c walLevel -v 1 - -print ========= start dnode1 system sh/exec.sh -n dnode1 -s start -sleep 2000 sql connect sql create database ibadb diff --git a/tests/script/general/import/commit.sim b/tests/script/tsim/import/commit.sim similarity index 66% rename from tests/script/general/import/commit.sim rename to tests/script/tsim/import/commit.sim index aefc724fdb..e7f0440f5e 100644 --- a/tests/script/general/import/commit.sim +++ b/tests/script/tsim/import/commit.sim @@ -1,32 +1,6 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/deploy.sh -n dnode3 -i 3 -system sh/deploy.sh -n dnode4 -i 4 - -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 1 -system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1 -system sh/cfg.sh -n dnode4 -c numOfMnodes -v 1 - -system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 10 -system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 10 -system sh/cfg.sh -n dnode3 -c mnodeEqualVnodeNum -v 10 -system sh/cfg.sh -n dnode4 -c mnodeEqualVnodeNum -v 10 - -system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 2000 -system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 2000 -system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 2000 -system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 2000 - -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode2 -c walLevel -v 1 -system sh/cfg.sh -n dnode3 -c walLevel -v 1 -system sh/cfg.sh -n dnode4 -c walLevel -v 1 - -print ========= start dnode1 system sh/exec.sh -n dnode1 -s start -sleep 2000 sql connect print ========= step1 @@ -72,9 +46,7 @@ endi print ========= step3 system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 3000 system sh/exec.sh -n dnode1 -s start -sleep 3000 print ========= step4 sql select * from ic2db.tb; diff --git a/tests/script/general/import/large.sim b/tests/script/tsim/import/large.sim similarity index 63% rename from tests/script/general/import/large.sim rename to tests/script/tsim/import/large.sim index 23fbcc75ea..f694502e2a 100644 --- a/tests/script/general/import/large.sim +++ b/tests/script/tsim/import/large.sim @@ -1,32 +1,6 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/deploy.sh -n dnode3 -i 3 -system sh/deploy.sh -n dnode4 -i 4 - -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 1 -system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1 -system sh/cfg.sh -n dnode4 -c numOfMnodes -v 1 - -system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 10 -system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 10 -system sh/cfg.sh -n 
dnode3 -c mnodeEqualVnodeNum -v 10 -system sh/cfg.sh -n dnode4 -c mnodeEqualVnodeNum -v 10 - -system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 2000 -system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 2000 -system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 2000 -system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 2000 - -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode2 -c walLevel -v 1 -system sh/cfg.sh -n dnode3 -c walLevel -v 1 -system sh/cfg.sh -n dnode4 -c walLevel -v 1 - -print ========= start dnode1 system sh/exec.sh -n dnode1 -s start -sleep 2000 sql connect sql create database db diff --git a/tests/script/general/import/replica1.sim b/tests/script/tsim/import/replica1.sim similarity index 84% rename from tests/script/general/import/replica1.sim rename to tests/script/tsim/import/replica1.sim index 1e8eabb798..3a4b4a2876 100644 --- a/tests/script/general/import/replica1.sim +++ b/tests/script/tsim/import/replica1.sim @@ -1,33 +1,6 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/deploy.sh -n dnode3 -i 3 -system sh/deploy.sh -n dnode4 -i 4 - -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 1 -system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1 -system sh/cfg.sh -n dnode4 -c numOfMnodes -v 1 - -system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 10 -system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 10 -system sh/cfg.sh -n dnode3 -c mnodeEqualVnodeNum -v 10 -system sh/cfg.sh -n dnode4 -c mnodeEqualVnodeNum -v 10 - -system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 2000 -system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 2000 -system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 2000 -system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 2000 - -system sh/cfg.sh -n dnode1 -c walLevel -v 2 -system sh/cfg.sh -n dnode2 -c walLevel -v 2 -system sh/cfg.sh -n dnode3 -c walLevel -v 2 -system sh/cfg.sh -n dnode4 -c walLevel -v 2 - -print ========= start dnode1 system sh/exec.sh -n dnode1 -s start -sleep 2000 sql connect sql create database ir1db duration 7 @@ -93,9 +66,7 @@ endi print ================== dnode restart system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 3000 system sh/exec.sh -n dnode1 -s start -sleep 3000 sql use ir1db sql select * from tb; @@ -162,9 +133,7 @@ endi print ================= step10 system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 3000 system sh/exec.sh -n dnode1 -s start -sleep 3000 sql use ir1db sql select * from tb; From 8a6e4b877a7e453d90e6ea86cb5d359f99300b3f Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Wed, 13 Jul 2022 16:37:33 +0800 Subject: [PATCH 35/56] refactor(stream): simple batch --- include/libs/executor/executor.h | 5 +- include/libs/stream/tstream.h | 6 ++- source/libs/executor/CMakeLists.txt | 2 +- source/libs/executor/inc/executorimpl.h | 3 ++ source/libs/executor/src/executor.c | 2 - source/libs/executor/src/executorMain.c | 14 ++++++ source/libs/executor/src/scanoperator.c | 23 --------- source/libs/executor/src/timewindowoperator.c | 4 +- source/libs/qworker/CMakeLists.txt | 2 +- source/libs/stream/inc/streamInc.h | 3 ++ source/libs/stream/src/streamData.c | 26 ++++++++++ source/libs/stream/src/streamDispatch.c | 5 +- source/libs/stream/src/streamExec.c | 47 +++++++++++++------ 13 files changed, 94 insertions(+), 48 deletions(-) diff --git a/include/libs/executor/executor.h b/include/libs/executor/executor.h index 630e983f81..dd64c5bf71 100644 --- a/include/libs/executor/executor.h +++ 
b/include/libs/executor/executor.h @@ -186,9 +186,12 @@ int32_t qStreamExtractOffset(qTaskInfo_t tinfo, STqOffsetVal* pOffset); void* qStreamExtractMetaMsg(qTaskInfo_t tinfo); -void* qExtractReaderFromStreamScanner(void* scanner); +void* qExtractReaderFromStreamScanner(void* scanner); + int32_t qExtractStreamScanner(qTaskInfo_t tinfo, void** scanner); +int32_t qStreamInput(qTaskInfo_t tinfo, void* pItem); + #ifdef __cplusplus } #endif diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index ac9784b91b..cae14a6d59 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -13,6 +13,7 @@ * along with this program. If not, see . */ +#include "executor.h" #include "os.h" #include "query.h" #include "tdatablock.h" @@ -120,7 +121,6 @@ static FORCE_INLINE void* streamQueueCurItem(SStreamQueue* queue) { return queue static FORCE_INLINE void* streamQueueNextItem(SStreamQueue* queue) { int8_t dequeueFlag = atomic_exchange_8(&queue->status, STREAM_QUEUE__PROCESSING); if (dequeueFlag == STREAM_QUEUE__FAILED) { - ASSERT(0); ASSERT(queue->qItem != NULL); return streamQueueCurItem(queue); } else { @@ -309,12 +309,16 @@ static FORCE_INLINE int32_t streamTaskInput(SStreamTask* pTask, SStreamQueueItem } qInfo("task %d %p submit enqueue %p %p %p", pTask->taskId, pTask, pItem, pSubmitClone, pSubmitClone->data); taosWriteQitem(pTask->inputQueue->queue, pSubmitClone); + // qStreamInput(pTask->exec.executor, pSubmitClone); } else if (pItem->type == STREAM_INPUT__DATA_BLOCK || pItem->type == STREAM_INPUT__DATA_RETRIEVE) { taosWriteQitem(pTask->inputQueue->queue, pItem); + // qStreamInput(pTask->exec.executor, pItem); } else if (pItem->type == STREAM_INPUT__CHECKPOINT) { taosWriteQitem(pTask->inputQueue->queue, pItem); + // qStreamInput(pTask->exec.executor, pItem); } else if (pItem->type == STREAM_INPUT__TRIGGER) { taosWriteQitem(pTask->inputQueue->queue, pItem); + // qStreamInput(pTask->exec.executor, pItem); } if (pItem->type != STREAM_INPUT__TRIGGER && pItem->type != STREAM_INPUT__CHECKPOINT && pTask->triggerParam != 0) { diff --git a/source/libs/executor/CMakeLists.txt b/source/libs/executor/CMakeLists.txt index ed15aeb038..89d08b3078 100644 --- a/source/libs/executor/CMakeLists.txt +++ b/source/libs/executor/CMakeLists.txt @@ -8,7 +8,7 @@ add_library(executor STATIC ${EXECUTOR_SRC}) # ) target_link_libraries(executor - PRIVATE os util common function parser planner qcom vnode scalar nodes index + PRIVATE os util common function parser planner qcom vnode scalar nodes index stream ) target_include_directories( diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h index aab2f51421..045b4379ca 100644 --- a/source/libs/executor/inc/executorimpl.h +++ b/source/libs/executor/inc/executorimpl.h @@ -39,6 +39,7 @@ extern "C" { #include "tmsg.h" #include "tpagedbuf.h" #include "tstreamUpdate.h" +#include "tstream.h" #include "vnode.h" #include "executorInt.h" @@ -139,12 +140,14 @@ typedef struct STaskIdInfo { } STaskIdInfo; typedef struct { + //TODO remove prepareStatus STqOffsetVal prepareStatus; // for tmq STqOffsetVal lastStatus; // for tmq void* metaBlk; // for tmq fetching meta SSDataBlock* pullOverBlk; // for streaming SWalFilterCond cond; int64_t lastScanUid; + SStreamQueue* inputQueue; } SStreamTaskInfo; typedef struct SExecTaskInfo { diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c index 6e4f02527f..f2008ed97a 100644 --- a/source/libs/executor/src/executor.c +++ 
b/source/libs/executor/src/executor.c @@ -60,8 +60,6 @@ static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t nu taosArrayAddAll(p->pDataBlock, pDataBlock->pDataBlock); taosArrayPush(pInfo->pBlockLists, &p); } - /*} else if (type == STREAM_INPUT__TABLE_SCAN) {*/ - /*ASSERT(pInfo->blockType == STREAM_INPUT__TABLE_SCAN);*/ } else { ASSERT(0); } diff --git a/source/libs/executor/src/executorMain.c b/source/libs/executor/src/executorMain.c index 5d9fea523c..6381d20255 100644 --- a/source/libs/executor/src/executorMain.c +++ b/source/libs/executor/src/executorMain.c @@ -44,6 +44,13 @@ int32_t qCreateExecTask(SReadHandle* readHandle, int32_t vgId, uint64_t taskId, goto _error; } + if (model == OPTR_EXEC_MODEL_STREAM) { + (*pTask)->streamInfo.inputQueue = streamQueueOpen(); + if ((*pTask)->streamInfo.inputQueue == NULL) { + goto _error; + } + } + SDataSinkMgtCfg cfg = {.maxDataBlockNum = 1000, .maxDataBlockNumPerQuery = 100}; code = dsDataSinkMgtInit(&cfg); if (code != TSDB_CODE_SUCCESS) { @@ -252,6 +259,13 @@ int32_t qExtractStreamScanner(qTaskInfo_t tinfo, void** scanner) { } } +int32_t qStreamInput(qTaskInfo_t tinfo, void* pItem) { + SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo; + ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_STREAM); + taosWriteQitem(pTaskInfo->streamInfo.inputQueue->queue, pItem); + return 0; +} + void* qExtractReaderFromStreamScanner(void* scanner) { SStreamScanInfo* pInfo = scanner; return (void*)pInfo->tqReader; diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 64c740decf..b14648991e 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -1202,15 +1202,6 @@ static int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock taosArrayDestroy(pBlock->pDataBlock); ASSERT(pInfo->pRes->pDataBlock != NULL); -#if 0 - if (pInfo->pRes->pDataBlock == NULL) { - // TODO add log - updateInfoDestoryColseWinSBF(pInfo->pUpdateInfo); - pOperator->status = OP_EXEC_DONE; - pTaskInfo->code = terrno; - return -1; - } -#endif // currently only the tbname pseudo column if (pInfo->numOfPseudoExpr > 0) { @@ -1231,11 +1222,6 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) { SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; SStreamScanInfo* pInfo = pOperator->info; - /*pTaskInfo->code = pOperator->fpSet._openFn(pOperator);*/ - /*if (pTaskInfo->code != TSDB_CODE_SUCCESS || pOperator->status == OP_EXEC_DONE) {*/ - /*return NULL;*/ - /*}*/ - qDebug("stream scan called"); if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__LOG) { while (1) { @@ -1425,15 +1411,6 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) { qDebug("scan rows: %d", pBlockInfo->rows); return (pBlockInfo->rows == 0) ? NULL : pInfo->pRes; -#if 0 - } else if (pInfo->blockType == STREAM_INPUT__TABLE_SCAN) { - ASSERT(0); - // check reader last status - // if not match, reset status - SSDataBlock* pResult = doTableScan(pInfo->pTableScanOp); - return pResult && pResult->info.rows > 0 ? 
pResult : NULL; -#endif - } else { ASSERT(0); return NULL; diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 773484a9b3..da78480d3b 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -653,11 +653,11 @@ static void doInterpUnclosedTimeWindow(SOperatorInfo* pOperatorInfo, int32_t num void printDataBlock(SSDataBlock* pBlock, const char* flag) { if (pBlock == NULL) { - qDebug("======printDataBlock Block is Null"); + qInfo("======printDataBlock Block is Null"); return; } char* pBuf = NULL; - qDebug("%s", dumpBlockData(pBlock, flag, &pBuf)); + qInfo("%s", dumpBlockData(pBlock, flag, &pBuf)); taosMemoryFree(pBuf); } diff --git a/source/libs/qworker/CMakeLists.txt b/source/libs/qworker/CMakeLists.txt index 306753808b..92ccde3163 100644 --- a/source/libs/qworker/CMakeLists.txt +++ b/source/libs/qworker/CMakeLists.txt @@ -13,4 +13,4 @@ target_link_libraries(qworker if(${BUILD_TEST}) ADD_SUBDIRECTORY(test) -endif(${BUILD_TEST}) \ No newline at end of file +endif(${BUILD_TEST}) diff --git a/source/libs/stream/inc/streamInc.h b/source/libs/stream/inc/streamInc.h index 1629c863d5..f9f4e62774 100644 --- a/source/libs/stream/inc/streamInc.h +++ b/source/libs/stream/inc/streamInc.h @@ -42,6 +42,9 @@ int32_t streamBroadcastToChildren(SStreamTask* pTask, const SSDataBlock* pBlock) int32_t tEncodeStreamRetrieveReq(SEncoder* pEncoder, const SStreamRetrieveReq* pReq); +int32_t streamAppendQueueItem(SStreamQueueItem* dst, SStreamQueueItem* elem); +void streamFreeQitem(SStreamQueueItem* data); + #ifdef __cplusplus } #endif diff --git a/source/libs/stream/src/streamData.c b/source/libs/stream/src/streamData.c index 529615d4fd..2b3307b7f5 100644 --- a/source/libs/stream/src/streamData.c +++ b/source/libs/stream/src/streamData.c @@ -97,3 +97,29 @@ void streamDataSubmitRefDec(SStreamDataSubmit* pDataSubmit) { taosMemoryFree(pDataSubmit->dataRef); } } + +int32_t streamAppendQueueItem(SStreamQueueItem* dst, SStreamQueueItem* elem) { + ASSERT(elem); + if (dst->type == elem->type && dst->type == STREAM_INPUT__DATA_BLOCK) { + SStreamDataBlock* pBlock = (SStreamDataBlock*)dst; + SStreamDataBlock* pBlockSrc = (SStreamDataBlock*)elem; + taosArrayAddAll(pBlock->blocks, pBlockSrc->blocks); + return 0; + } else { + return -1; + } +} + +void streamFreeQitem(SStreamQueueItem* data) { + int8_t type = data->type; + if (type == STREAM_INPUT__TRIGGER) { + blockDataDestroy(((SStreamTrigger*)data)->pBlock); + taosFreeQitem(data); + } else if (type == STREAM_INPUT__DATA_BLOCK || type == STREAM_INPUT__DATA_RETRIEVE) { + taosArrayDestroyEx(((SStreamDataBlock*)data)->blocks, (FDelete)tDeleteSSDataBlock); + taosFreeQitem(data); + } else if (type == STREAM_INPUT__DATA_SUBMIT) { + streamDataSubmitRefDec((SStreamDataSubmit*)data); + taosFreeQitem(data); + } +} diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index 98b0874b00..05efce8bc2 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -251,8 +251,8 @@ int32_t streamBuildDispatchMsg(SStreamTask* pTask, const SStreamDataBlock* data, ASSERT(vgId > 0 || vgId == SNODE_HANDLE); req.taskId = downstreamTaskId; - qInfo("dispatch from task %d (child id %d) to down stream task %d in vnode %d", pTask->taskId, pTask->selfChildId, - downstreamTaskId, vgId); + qDebug("dispatch from task %d (child id %d) to down stream task %d in vnode %d", pTask->taskId, pTask->selfChildId, + 
downstreamTaskId, vgId); // serialize int32_t tlen; @@ -298,6 +298,7 @@ int32_t streamDispatch(SStreamTask* pTask, SMsgCb* pMsgCb) { SStreamDataBlock* pBlock = streamQueueNextItem(pTask->outputQueue); if (pBlock == NULL) { + qDebug("stream stop dispatching since no output: task %d", pTask->taskId); atomic_store_8(&pTask->outputStatus, TASK_OUTPUT_STATUS__NORMAL); return 0; } diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index 36885e73c0..9644d9eac6 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -75,10 +75,35 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, void* data, SArray* pRes) static SArray* streamExecForQall(SStreamTask* pTask, SArray* pRes) { while (1) { - void* data = streamQueueNextItem(pTask->inputQueue); + int32_t cnt = 0; + void* data = NULL; + while (1) { + SStreamQueueItem* qItem = streamQueueNextItem(pTask->inputQueue); + if (qItem == NULL) { + qInfo("stream exec over, queue empty"); + break; + } + if (data == NULL) { + data = qItem; + streamQueueProcessSuccess(pTask->inputQueue); + continue; + } else { + if (streamAppendQueueItem(data, qItem) < 0) { + streamQueueProcessFail(pTask->inputQueue); + break; + } else { + cnt++; + streamQueueProcessSuccess(pTask->inputQueue); + taosArrayDestroy(((SStreamDataBlock*)qItem)->blocks); + taosFreeQitem(qItem); + } + } + } if (data == NULL) break; + qInfo("stream task %d exec begin, batch msg: %d", pTask->taskId, cnt); streamTaskExecImpl(pTask, data, pRes); + qInfo("stream task %d exec end", pTask->taskId); if (pTask->taskStatus == TASK_STATUS__DROPPING) { taosArrayDestroyEx(pRes, (FDelete)tDeleteSSDataBlock); @@ -95,27 +120,16 @@ static SArray* streamExecForQall(SStreamTask* pTask, SArray* pRes) { qRes->type = STREAM_INPUT__DATA_BLOCK; qRes->blocks = pRes; if (streamTaskOutput(pTask, qRes) < 0) { - streamQueueProcessFail(pTask->inputQueue); + /*streamQueueProcessFail(pTask->inputQueue);*/ taosArrayDestroyEx(pRes, (FDelete)tDeleteSSDataBlock); taosFreeQitem(qRes); return NULL; } - streamQueueProcessSuccess(pTask->inputQueue); + /*streamQueueProcessSuccess(pTask->inputQueue);*/ pRes = taosArrayInit(0, sizeof(SSDataBlock)); } - int8_t type = ((SStreamQueueItem*)data)->type; - if (type == STREAM_INPUT__TRIGGER) { - blockDataDestroy(((SStreamTrigger*)data)->pBlock); - taosFreeQitem(data); - } else if (type == STREAM_INPUT__DATA_BLOCK || type == STREAM_INPUT__DATA_RETRIEVE) { - taosArrayDestroyEx(((SStreamDataBlock*)data)->blocks, (FDelete)tDeleteSSDataBlock); - taosFreeQitem(data); - } else if (type == STREAM_INPUT__DATA_SUBMIT) { - ASSERT(pTask->isDataScan); - streamDataSubmitRefDec((SStreamDataSubmit*)data); - taosFreeQitem(data); - } + streamFreeQitem(data); } return pRes; } @@ -129,6 +143,7 @@ int32_t streamExec(SStreamTask* pTask, SMsgCb* pMsgCb) { atomic_val_compare_exchange_8(&pTask->execStatus, TASK_EXEC_STATUS__IDLE, TASK_EXEC_STATUS__EXECUTING); if (execStatus == TASK_EXEC_STATUS__IDLE) { // first run + qDebug("stream exec, enter exec status"); pRes = streamExecForQall(pTask, pRes); if (pRes == NULL) goto FAIL; @@ -136,11 +151,13 @@ int32_t streamExec(SStreamTask* pTask, SMsgCb* pMsgCb) { atomic_store_8(&pTask->execStatus, TASK_EXEC_STATUS__CLOSING); // second run, make sure inputQ and qall are cleared + qDebug("stream exec, enter closing status"); pRes = streamExecForQall(pTask, pRes); if (pRes == NULL) goto FAIL; taosArrayDestroyEx(pRes, (FDelete)tDeleteSSDataBlock); atomic_store_8(&pTask->execStatus, TASK_EXEC_STATUS__IDLE); + 
qDebug("stream exec, return result"); return 0; } else if (execStatus == TASK_EXEC_STATUS__CLOSING) { continue; From 8c83f07fe436369deb193347b6ea5a83407f641f Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Wed, 13 Jul 2022 16:43:50 +0800 Subject: [PATCH 36/56] fix: fix natural timestamp issue --- include/common/ttime.h | 2 + include/libs/scalar/scalar.h | 1 + source/common/src/ttime.c | 26 ++++ source/libs/parser/src/parCalcConst.c | 11 +- source/libs/parser/src/parTranslater.c | 3 +- source/libs/scalar/inc/sclInt.h | 1 + source/libs/scalar/src/scalar.c | 161 +++++++++++++------------ source/libs/scalar/src/sclvector.c | 41 ++++--- tests/script/jenkins/basic.txt | 1 + tests/script/tsim/scalar/scalar.sim | 67 ++++++++++ 10 files changed, 219 insertions(+), 95 deletions(-) create mode 100644 tests/script/tsim/scalar/scalar.sim diff --git a/include/common/ttime.h b/include/common/ttime.h index cd704bb1f7..de55b016cd 100644 --- a/include/common/ttime.h +++ b/include/common/ttime.h @@ -72,6 +72,8 @@ static FORCE_INLINE int64_t taosGetTimestampToday(int32_t precision) { } int64_t taosTimeAdd(int64_t t, int64_t duration, char unit, int32_t precision); +int64_t taosTimeSub(int64_t t, int64_t duration, char unit, int32_t precision); + int64_t taosTimeTruncate(int64_t t, const SInterval* pInterval, int32_t precision); int32_t taosTimeCountInterval(int64_t skey, int64_t ekey, int64_t interval, char unit, int32_t precision); diff --git a/include/libs/scalar/scalar.h b/include/libs/scalar/scalar.h index c81c474366..517c5ff0e6 100644 --- a/include/libs/scalar/scalar.h +++ b/include/libs/scalar/scalar.h @@ -32,6 +32,7 @@ pNode will be freed in API; *pRes need to freed in caller */ int32_t scalarCalculateConstants(SNode *pNode, SNode **pRes); +int32_t scalarCalculateConstantsFromDual(SNode *pNode, SNode **pRes); /* pDst need to freed in caller diff --git a/source/common/src/ttime.c b/source/common/src/ttime.c index befb0abac8..d728bbe49e 100644 --- a/source/common/src/ttime.c +++ b/source/common/src/ttime.c @@ -710,6 +710,32 @@ int64_t taosTimeAdd(int64_t t, int64_t duration, char unit, int32_t precision) { return (int64_t)(taosMktime(&tm) * TSDB_TICK_PER_SECOND(precision)); } +int64_t taosTimeSub(int64_t t, int64_t duration, char unit, int32_t precision) { + if (duration == 0) { + return t; + } + + if (unit != 'n' && unit != 'y') { + return t - duration; + } + + // The following code handles the y/n time duration + int64_t numOfMonth = duration; + if (unit == 'y') { + numOfMonth *= 12; + } + + struct tm tm; + time_t tt = (time_t)(t / TSDB_TICK_PER_SECOND(precision)); + taosLocalTime(&tt, &tm); + int32_t mon = tm.tm_year * 12 + tm.tm_mon - (int32_t)numOfMonth; + tm.tm_year = mon / 12; + tm.tm_mon = mon % 12; + + return (int64_t)(taosMktime(&tm) * TSDB_TICK_PER_SECOND(precision)); +} + + int32_t taosTimeCountInterval(int64_t skey, int64_t ekey, int64_t interval, char unit, int32_t precision) { if (ekey < skey) { int64_t tmp = ekey; diff --git a/source/libs/parser/src/parCalcConst.c b/source/libs/parser/src/parCalcConst.c index 4dff42592a..cd10cd50f6 100644 --- a/source/libs/parser/src/parCalcConst.c +++ b/source/libs/parser/src/parCalcConst.c @@ -166,7 +166,7 @@ static int32_t calcConstStmtCondition(SCalcConstContext* pCxt, SNode** pCond, bo return code; } -static int32_t calcConstProject(SNode* pProject, SNode** pNew) { +static int32_t calcConstProject(SNode* pProject, bool dual, SNode** pNew) { SArray* pAssociation = NULL; if (NULL != ((SExprNode*)pProject)->pAssociation) { pAssociation = 
taosArrayDup(((SExprNode*)pProject)->pAssociation); @@ -177,7 +177,12 @@ static int32_t calcConstProject(SNode* pProject, SNode** pNew) { char aliasName[TSDB_COL_NAME_LEN] = {0}; strcpy(aliasName, ((SExprNode*)pProject)->aliasName); - int32_t code = scalarCalculateConstants(pProject, pNew); + int32_t code = TSDB_CODE_SUCCESS; + if (dual) { + code = scalarCalculateConstantsFromDual(pProject, pNew); + } else { + code = scalarCalculateConstants(pProject, pNew); + } if (TSDB_CODE_SUCCESS == code && QUERY_NODE_VALUE == nodeType(*pNew) && NULL != pAssociation) { strcpy(((SExprNode*)*pNew)->aliasName, aliasName); int32_t size = taosArrayGetSize(pAssociation); @@ -223,7 +228,7 @@ static int32_t calcConstProjections(SCalcConstContext* pCxt, SSelectStmt* pSelec continue; } SNode* pNew = NULL; - int32_t code = calcConstProject(pProj, &pNew); + int32_t code = calcConstProject(pProj, (NULL == pSelect->pFromTable), &pNew); if (TSDB_CODE_SUCCESS == code) { REPLACE_NODE(pNew); } else { diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index da393bb883..f90c7ee23c 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -916,8 +916,6 @@ static EDealRes translateValueImpl(STranslateContext* pCxt, SValueNode* pVal, SD } if (TSDB_DATA_TYPE_NULL == pVal->node.resType.type) { - // TODO - // pVal->node.resType = targetDt; pVal->translate = true; pVal->isNull = true; return DEAL_RES_CONTINUE; @@ -932,6 +930,7 @@ static EDealRes translateValueImpl(STranslateContext* pCxt, SValueNode* pVal, SD res = translateNormalValue(pCxt, pVal, targetDt, strict); } pVal->node.resType = targetDt; + pVal->node.resType.scale = pVal->unit; pVal->translate = true; return res; } diff --git a/source/libs/scalar/inc/sclInt.h b/source/libs/scalar/inc/sclInt.h index 4422da1b81..d423b92da7 100644 --- a/source/libs/scalar/inc/sclInt.h +++ b/source/libs/scalar/inc/sclInt.h @@ -30,6 +30,7 @@ typedef struct SOperatorValueType { typedef struct SScalarCtx { int32_t code; + bool dual; SArray *pBlockList; /* element is SSDataBlock* */ SHashObj *pRes; /* element is SScalarParam */ void *param; // additional parameter (meta actually) for acquire value such as tbname/tags values diff --git a/source/libs/scalar/src/scalar.c b/source/libs/scalar/src/scalar.c index e610fcb62e..a2e9f7a1cd 100644 --- a/source/libs/scalar/src/scalar.c +++ b/source/libs/scalar/src/scalar.c @@ -700,7 +700,7 @@ EDealRes sclRewriteNonConstOperator(SNode** pNode, SScalarCtx *ctx) { EDealRes sclRewriteFunction(SNode** pNode, SScalarCtx *ctx) { SFunctionNode *node = (SFunctionNode *)*pNode; SNode* tnode = NULL; - if (!fmIsScalarFunc(node->funcId)) { + if ((!fmIsScalarFunc(node->funcId)) && (!ctx->dual)) { return DEAL_RES_CONTINUE; } @@ -1010,13 +1010,14 @@ int32_t sclExtendResRows(SScalarParam *pDst, SScalarParam *pSrc, SArray *pBlockL return TSDB_CODE_SUCCESS; } -int32_t scalarCalculateConstants(SNode *pNode, SNode **pRes) { +int32_t sclCalcConstants(SNode *pNode, bool dual, SNode **pRes) { if (NULL == pNode) { SCL_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); } int32_t code = 0; SScalarCtx ctx = {0}; + ctx.dual = dual; ctx.pRes = taosHashInit(SCL_DEFAULT_OP_NUM, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); if (NULL == ctx.pRes) { sclError("taosHashInit failed, num:%d", SCL_DEFAULT_OP_NUM); @@ -1028,10 +1029,88 @@ int32_t scalarCalculateConstants(SNode *pNode, SNode **pRes) { *pRes = pNode; _return: + sclFreeRes(ctx.pRes); return code; } +static int32_t 
sclGetMinusOperatorResType(SOperatorNode* pOp) { + if (!IS_MATHABLE_TYPE(((SExprNode*)(pOp->pLeft))->resType.type)) { + return TSDB_CODE_TSC_INVALID_OPERATION; + } + pOp->node.resType.type = TSDB_DATA_TYPE_DOUBLE; + pOp->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes; + return TSDB_CODE_SUCCESS; +} + +static int32_t sclGetMathOperatorResType(SOperatorNode* pOp) { + SDataType ldt = ((SExprNode*)(pOp->pLeft))->resType; + SDataType rdt = ((SExprNode*)(pOp->pRight))->resType; + if ((TSDB_DATA_TYPE_TIMESTAMP == ldt.type && TSDB_DATA_TYPE_TIMESTAMP == rdt.type) || + (TSDB_DATA_TYPE_TIMESTAMP == ldt.type && (IS_VAR_DATA_TYPE(rdt.type) || IS_FLOAT_TYPE(rdt.type))) || + (TSDB_DATA_TYPE_TIMESTAMP == rdt.type && (IS_VAR_DATA_TYPE(ldt.type) || IS_FLOAT_TYPE(ldt.type)))) { + return TSDB_CODE_TSC_INVALID_OPERATION; + } + + if ((TSDB_DATA_TYPE_TIMESTAMP == ldt.type && IS_INTEGER_TYPE(rdt.type)) || + (TSDB_DATA_TYPE_TIMESTAMP == rdt.type && IS_INTEGER_TYPE(ldt.type)) || + (TSDB_DATA_TYPE_TIMESTAMP == ldt.type && TSDB_DATA_TYPE_BOOL == rdt.type) || + (TSDB_DATA_TYPE_TIMESTAMP == rdt.type && TSDB_DATA_TYPE_BOOL == ldt.type)) { + pOp->node.resType.type = TSDB_DATA_TYPE_TIMESTAMP; + pOp->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_TIMESTAMP].bytes; + } else { + pOp->node.resType.type = TSDB_DATA_TYPE_DOUBLE; + pOp->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes; + } + return TSDB_CODE_SUCCESS; +} + +static int32_t sclGetCompOperatorResType(SOperatorNode* pOp) { + SDataType ldt = ((SExprNode*)(pOp->pLeft))->resType; + if (OP_TYPE_IN == pOp->opType || OP_TYPE_NOT_IN == pOp->opType) { + ((SExprNode*)(pOp->pRight))->resType = ldt; + } else if (nodesIsRegularOp(pOp)) { + SDataType rdt = ((SExprNode*)(pOp->pRight))->resType; + if (!IS_VAR_DATA_TYPE(ldt.type) || QUERY_NODE_VALUE != nodeType(pOp->pRight) || + (!IS_STR_DATA_TYPE(rdt.type) && (rdt.type != TSDB_DATA_TYPE_NULL))) { + return TSDB_CODE_TSC_INVALID_OPERATION; + } + } + pOp->node.resType.type = TSDB_DATA_TYPE_BOOL; + pOp->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_BOOL].bytes; + return TSDB_CODE_SUCCESS; +} + +static int32_t sclGetJsonOperatorResType(SOperatorNode* pOp) { + SDataType ldt = ((SExprNode*)(pOp->pLeft))->resType; + SDataType rdt = ((SExprNode*)(pOp->pRight))->resType; + if (TSDB_DATA_TYPE_JSON != ldt.type || !IS_STR_DATA_TYPE(rdt.type)) { + return TSDB_CODE_TSC_INVALID_OPERATION; + } + if (pOp->opType == OP_TYPE_JSON_GET_VALUE) { + pOp->node.resType.type = TSDB_DATA_TYPE_JSON; + } else if (pOp->opType == OP_TYPE_JSON_CONTAINS) { + pOp->node.resType.type = TSDB_DATA_TYPE_BOOL; + } + pOp->node.resType.bytes = tDataTypes[pOp->node.resType.type].bytes; + return TSDB_CODE_SUCCESS; +} + +static int32_t sclGetBitwiseOperatorResType(SOperatorNode* pOp) { + pOp->node.resType.type = TSDB_DATA_TYPE_BIGINT; + pOp->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes; + return TSDB_CODE_SUCCESS; +} + + +int32_t scalarCalculateConstants(SNode *pNode, SNode **pRes) { + return sclCalcConstants(pNode, false, pRes); +} + +int32_t scalarCalculateConstantsFromDual(SNode *pNode, SNode **pRes) { + return sclCalcConstants(pNode, true, pRes); +} + int32_t scalarCalculate(SNode *pNode, SArray *pBlockList, SScalarParam *pDst) { if (NULL == pNode || NULL == pBlockList) { SCL_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); @@ -1075,74 +1154,6 @@ _return: return code; } -static int32_t getMinusOperatorResultType(SOperatorNode* pOp) { - if (!IS_MATHABLE_TYPE(((SExprNode*)(pOp->pLeft))->resType.type)) { - return 
TSDB_CODE_TSC_INVALID_OPERATION; - } - pOp->node.resType.type = TSDB_DATA_TYPE_DOUBLE; - pOp->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes; - return TSDB_CODE_SUCCESS; -} - -static int32_t getArithmeticOperatorResultType(SOperatorNode* pOp) { - SDataType ldt = ((SExprNode*)(pOp->pLeft))->resType; - SDataType rdt = ((SExprNode*)(pOp->pRight))->resType; - if ((TSDB_DATA_TYPE_TIMESTAMP == ldt.type && TSDB_DATA_TYPE_TIMESTAMP == rdt.type) || - (TSDB_DATA_TYPE_TIMESTAMP == ldt.type && (IS_VAR_DATA_TYPE(rdt.type) || IS_FLOAT_TYPE(rdt.type))) || - (TSDB_DATA_TYPE_TIMESTAMP == rdt.type && (IS_VAR_DATA_TYPE(ldt.type) || IS_FLOAT_TYPE(ldt.type)))) { - return TSDB_CODE_TSC_INVALID_OPERATION; - } - - if ((TSDB_DATA_TYPE_TIMESTAMP == ldt.type && IS_INTEGER_TYPE(rdt.type)) || - (TSDB_DATA_TYPE_TIMESTAMP == rdt.type && IS_INTEGER_TYPE(ldt.type)) || - (TSDB_DATA_TYPE_TIMESTAMP == ldt.type && TSDB_DATA_TYPE_BOOL == rdt.type) || - (TSDB_DATA_TYPE_TIMESTAMP == rdt.type && TSDB_DATA_TYPE_BOOL == ldt.type)) { - pOp->node.resType.type = TSDB_DATA_TYPE_TIMESTAMP; - pOp->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_TIMESTAMP].bytes; - } else { - pOp->node.resType.type = TSDB_DATA_TYPE_DOUBLE; - pOp->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes; - } - return TSDB_CODE_SUCCESS; -} - -static int32_t getComparisonOperatorResultType(SOperatorNode* pOp) { - SDataType ldt = ((SExprNode*)(pOp->pLeft))->resType; - if (OP_TYPE_IN == pOp->opType || OP_TYPE_NOT_IN == pOp->opType) { - ((SExprNode*)(pOp->pRight))->resType = ldt; - } else if (nodesIsRegularOp(pOp)) { - SDataType rdt = ((SExprNode*)(pOp->pRight))->resType; - if (!IS_VAR_DATA_TYPE(ldt.type) || QUERY_NODE_VALUE != nodeType(pOp->pRight) || - (!IS_STR_DATA_TYPE(rdt.type) && (rdt.type != TSDB_DATA_TYPE_NULL))) { - return TSDB_CODE_TSC_INVALID_OPERATION; - } - } - pOp->node.resType.type = TSDB_DATA_TYPE_BOOL; - pOp->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_BOOL].bytes; - return TSDB_CODE_SUCCESS; -} - -static int32_t getJsonOperatorResultType(SOperatorNode* pOp) { - SDataType ldt = ((SExprNode*)(pOp->pLeft))->resType; - SDataType rdt = ((SExprNode*)(pOp->pRight))->resType; - if (TSDB_DATA_TYPE_JSON != ldt.type || !IS_STR_DATA_TYPE(rdt.type)) { - return TSDB_CODE_TSC_INVALID_OPERATION; - } - if (pOp->opType == OP_TYPE_JSON_GET_VALUE) { - pOp->node.resType.type = TSDB_DATA_TYPE_JSON; - } else if (pOp->opType == OP_TYPE_JSON_CONTAINS) { - pOp->node.resType.type = TSDB_DATA_TYPE_BOOL; - } - pOp->node.resType.bytes = tDataTypes[pOp->node.resType.type].bytes; - return TSDB_CODE_SUCCESS; -} - -static int32_t getBitwiseOperatorResultType(SOperatorNode* pOp) { - pOp->node.resType.type = TSDB_DATA_TYPE_BIGINT; - pOp->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes; - return TSDB_CODE_SUCCESS; -} - int32_t scalarGetOperatorResultType(SOperatorNode* pOp) { if (TSDB_DATA_TYPE_BLOB == ((SExprNode*)(pOp->pLeft))->resType.type || (NULL != pOp->pRight && TSDB_DATA_TYPE_BLOB == ((SExprNode*)(pOp->pRight))->resType.type)) { @@ -1155,15 +1166,15 @@ int32_t scalarGetOperatorResultType(SOperatorNode* pOp) { case OP_TYPE_MULTI: case OP_TYPE_DIV: case OP_TYPE_REM: - return getArithmeticOperatorResultType(pOp); + return sclGetMathOperatorResType(pOp); case OP_TYPE_MINUS: - return getMinusOperatorResultType(pOp); + return sclGetMinusOperatorResType(pOp); case OP_TYPE_ASSIGN: pOp->node.resType = ((SExprNode*)(pOp->pLeft))->resType; break; case OP_TYPE_BIT_AND: case OP_TYPE_BIT_OR: - return getBitwiseOperatorResultType(pOp); + return 
sclGetBitwiseOperatorResType(pOp); case OP_TYPE_GREATER_THAN: case OP_TYPE_GREATER_EQUAL: case OP_TYPE_LOWER_THAN: @@ -1184,10 +1195,10 @@ int32_t scalarGetOperatorResultType(SOperatorNode* pOp) { case OP_TYPE_NMATCH: case OP_TYPE_IN: case OP_TYPE_NOT_IN: - return getComparisonOperatorResultType(pOp); + return sclGetCompOperatorResType(pOp); case OP_TYPE_JSON_GET_VALUE: case OP_TYPE_JSON_CONTAINS: - return getJsonOperatorResultType(pOp); + return sclGetJsonOperatorResType(pOp); default: break; } diff --git a/source/libs/scalar/src/sclvector.c b/source/libs/scalar/src/sclvector.c index bf457d07eb..39b2f04f3e 100644 --- a/source/libs/scalar/src/sclvector.c +++ b/source/libs/scalar/src/sclvector.c @@ -1055,7 +1055,7 @@ static void vectorMathAddHelper(SColumnInfoData* pLeftCol, SColumnInfoData* pRig } } -static void vectorMathBigintAddHelper(SColumnInfoData* pLeftCol, SColumnInfoData* pRightCol, SColumnInfoData* pOutputCol, int32_t numOfRows, int32_t step, int32_t i) { +static void vectorMathTsAddHelper(SColumnInfoData* pLeftCol, SColumnInfoData* pRightCol, SColumnInfoData* pOutputCol, int32_t numOfRows, int32_t step, int32_t i) { _getBigintValue_fn_t getVectorBigintValueFnLeft = getVectorBigintValueFn(pLeftCol->info.type); _getBigintValue_fn_t getVectorBigintValueFnRight = getVectorBigintValueFn(pRightCol->info.type); @@ -1069,7 +1069,8 @@ static void vectorMathBigintAddHelper(SColumnInfoData* pLeftCol, SColumnInfoData colDataAppendNULL(pOutputCol, i); continue; // TODO set null or ignore } - *output = getVectorBigintValueFnLeft(pLeftCol->pData, i) + getVectorBigintValueFnRight(pRightCol->pData, 0); + *output = taosTimeAdd(getVectorBigintValueFnLeft(pLeftCol->pData, i), getVectorBigintValueFnRight(pRightCol->pData, 0), + pRightCol->info.scale, pRightCol->info.precision); } } } @@ -1116,7 +1117,17 @@ void vectorMathAdd(SScalarParam* pLeft, SScalarParam* pRight, SScalarParam *pOut _getBigintValue_fn_t getVectorBigintValueFnLeft = getVectorBigintValueFn(pLeftCol->info.type); _getBigintValue_fn_t getVectorBigintValueFnRight = getVectorBigintValueFn(pRightCol->info.type); - if (pLeft->numOfRows == pRight->numOfRows) { + if (pLeft->numOfRows == 1 && pRight->numOfRows == 1) { + if (GET_PARAM_TYPE(pLeft) == TSDB_DATA_TYPE_TIMESTAMP) { + vectorMathTsAddHelper(pLeftCol, pRightCol, pOutputCol, pRight->numOfRows, step, i); + } else { + vectorMathTsAddHelper(pRightCol, pLeftCol, pOutputCol, pRight->numOfRows, step, i); + } + } else if (pLeft->numOfRows == 1) { + vectorMathTsAddHelper(pRightCol, pLeftCol, pOutputCol, pRight->numOfRows, step, i); + } else if (pRight->numOfRows == 1) { + vectorMathTsAddHelper(pLeftCol, pRightCol, pOutputCol, pLeft->numOfRows, step, i); + } else if (pLeft->numOfRows == pRight->numOfRows) { for (; i < pRight->numOfRows && i >= 0; i += step, output += 1) { if (IS_NULL) { colDataAppendNULL(pOutputCol, i); @@ -1124,11 +1135,7 @@ void vectorMathAdd(SScalarParam* pLeft, SScalarParam* pRight, SScalarParam *pOut } *output = getVectorBigintValueFnLeft(pLeftCol->pData, i) + getVectorBigintValueFnRight(pRightCol->pData, i); } - } else if (pLeft->numOfRows == 1) { - vectorMathBigintAddHelper(pRightCol, pLeftCol, pOutputCol, pRight->numOfRows, step, i); - } else if (pRight->numOfRows == 1) { - vectorMathBigintAddHelper(pLeftCol, pRightCol, pOutputCol, pLeft->numOfRows, step, i); - } + } } else { double *output = (double *)pOutputCol->pData; _getDoubleValue_fn_t getVectorDoubleValueFnLeft = getVectorDoubleValueFn(pLeftCol->info.type); @@ -1174,7 +1181,7 @@ static void 
vectorMathSubHelper(SColumnInfoData* pLeftCol, SColumnInfoData* pRig } } -static void vectorMathBigintSubHelper(SColumnInfoData* pLeftCol, SColumnInfoData* pRightCol, SColumnInfoData* pOutputCol, int32_t numOfRows, int32_t step, int32_t factor, int32_t i) { +static void vectorMathTsSubHelper(SColumnInfoData* pLeftCol, SColumnInfoData* pRightCol, SColumnInfoData* pOutputCol, int32_t numOfRows, int32_t step, int32_t factor, int32_t i) { _getBigintValue_fn_t getVectorBigintValueFnLeft = getVectorBigintValueFn(pLeftCol->info.type); _getBigintValue_fn_t getVectorBigintValueFnRight = getVectorBigintValueFn(pRightCol->info.type); @@ -1188,7 +1195,9 @@ static void vectorMathBigintSubHelper(SColumnInfoData* pLeftCol, SColumnInfoData colDataAppendNULL(pOutputCol, i); continue; // TODO set null or ignore } - *output = (getVectorBigintValueFnLeft(pLeftCol->pData, i) - getVectorBigintValueFnRight(pRightCol->pData, 0)) * factor; + *output = taosTimeSub(getVectorBigintValueFnLeft(pLeftCol->pData, i), getVectorBigintValueFnRight(pRightCol->pData, 0), + pRightCol->info.scale, pRightCol->info.precision); + } } } @@ -1211,7 +1220,13 @@ void vectorMathSub(SScalarParam* pLeft, SScalarParam* pRight, SScalarParam *pOut _getBigintValue_fn_t getVectorBigintValueFnLeft = getVectorBigintValueFn(pLeftCol->info.type); _getBigintValue_fn_t getVectorBigintValueFnRight = getVectorBigintValueFn(pRightCol->info.type); - if (pLeft->numOfRows == pRight->numOfRows) { + if (pLeft->numOfRows == 1 && pRight->numOfRows == 1) { + vectorMathTsSubHelper(pLeftCol, pRightCol, pOutputCol, pLeft->numOfRows, step, 1, i); + } else if (pLeft->numOfRows == 1) { + vectorMathTsSubHelper(pRightCol, pLeftCol, pOutputCol, pRight->numOfRows, step, -1, i); + } else if (pRight->numOfRows == 1) { + vectorMathTsSubHelper(pLeftCol, pRightCol, pOutputCol, pLeft->numOfRows, step, 1, i); + } else if (pLeft->numOfRows == pRight->numOfRows) { for (; i < pRight->numOfRows && i >= 0; i += step, output += 1) { if (IS_NULL) { colDataAppendNULL(pOutputCol, i); @@ -1219,10 +1234,6 @@ void vectorMathSub(SScalarParam* pLeft, SScalarParam* pRight, SScalarParam *pOut } *output = getVectorBigintValueFnLeft(pLeftCol->pData, i) - getVectorBigintValueFnRight(pRightCol->pData, i); } - } else if (pLeft->numOfRows == 1) { - vectorMathBigintSubHelper(pRightCol, pLeftCol, pOutputCol, pRight->numOfRows, step, -1, i); - } else if (pRight->numOfRows == 1) { - vectorMathBigintSubHelper(pLeftCol, pRightCol, pOutputCol, pLeft->numOfRows, step, 1, i); } } else { double *output = (double *)pOutputCol->pData; diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt index 50d4c04a93..b4f9df8e5f 100644 --- a/tests/script/jenkins/basic.txt +++ b/tests/script/jenkins/basic.txt @@ -197,5 +197,6 @@ # --- scalar ./test.sh -f tsim/scalar/in.sim +./test.sh -f tsim/scalar/scalar.sim #======================b1-end=============== diff --git a/tests/script/tsim/scalar/scalar.sim b/tests/script/tsim/scalar/scalar.sim new file mode 100644 index 0000000000..32224e33ba --- /dev/null +++ b/tests/script/tsim/scalar/scalar.sim @@ -0,0 +1,67 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sql connect + +print ======== step1 +sql drop database if exists db1; +sql create database db1 vgroups 3; +sql use db1; +sql create stable st1 (fts timestamp, fbool bool, ftiny tinyint, fsmall smallint, fint int, fbig bigint, futiny tinyint unsigned, fusmall smallint unsigned, fuint int unsigned, fubig bigint unsigned, ffloat float, fdouble 
double, fbin binary(10), fnchar nchar(10)) tags(tts timestamp, tbool bool, ttiny tinyint, tsmall smallint, tint int, tbig bigint, tutiny tinyint unsigned, tusmall smallint unsigned, tuint int unsigned, tubig bigint unsigned, tfloat float, tdouble double, tbin binary(10), tnchar nchar(10)); +sql create table tb1 using st1 tags('2022-07-10 16:31:00', true, 1, 1, 1, 1, 1, 1, 1, 1, 1.0, 1.0, 'a', 'a'); +sql create table tb2 using st1 tags('2022-07-10 16:32:00', false, 2, 2, 2, 2, 2, 2, 2, 2, 2.0, 2.0, 'b', 'b'); +sql create table tb3 using st1 tags('2022-07-10 16:33:00', true, 3, 3, 3, 3, 3, 3, 3, 3, 3.0, 3.0, 'c', 'c'); + +sql insert into tb1 values ('2022-07-10 16:31:01', false, 1, 1, 1, 1, 1, 1, 1, 1, 1.0, 1.0, 'a', 'a'); +sql insert into tb1 values ('2022-07-10 16:31:02', true, 2, 2, 2, 2, 2, 2, 2, 2, 2.0, 2.0, 'b', 'b'); +sql insert into tb1 values ('2022-07-10 16:31:03', false, 3, 3, 3, 3, 3, 3, 3, 3, 3.0, 3.0, 'c', 'c'); +sql insert into tb1 values ('2022-07-10 16:31:04', true, 4, 4, 4, 4, 4, 4, 4, 4, 4.0, 4.0, 'd', 'd'); +sql insert into tb1 values ('2022-07-10 16:31:05', false, 5, 5, 5, 5, 5, 5, 5, 5, 5.0, 5.0, 'e', 'e'); + +sql insert into tb2 values ('2022-07-10 16:32:01', false, 1, 1, 1, 1, 1, 1, 1, 1, 1.0, 1.0, 'a', 'a'); +sql insert into tb2 values ('2022-07-10 16:32:02', true, 2, 2, 2, 2, 2, 2, 2, 2, 2.0, 2.0, 'b', 'b'); +sql insert into tb2 values ('2022-07-10 16:32:03', false, 3, 3, 3, 3, 3, 3, 3, 3, 3.0, 3.0, 'c', 'c'); +sql insert into tb2 values ('2022-07-10 16:32:04', true, 4, 4, 4, 4, 4, 4, 4, 4, 4.0, 4.0, 'd', 'd'); +sql insert into tb2 values ('2022-07-10 16:32:05', false, 5, 5, 5, 5, 5, 5, 5, 5, 5.0, 5.0, 'e', 'e'); + +sql insert into tb3 values ('2022-07-10 16:33:01', false, 1, 1, 1, 1, 1, 1, 1, 1, 1.0, 1.0, 'a', 'a'); +sql insert into tb3 values ('2022-07-10 16:33:02', true, 2, 2, 2, 2, 2, 2, 2, 2, 2.0, 2.0, 'b', 'b'); +sql insert into tb3 values ('2022-07-10 16:33:03', false, 3, 3, 3, 3, 3, 3, 3, 3, 3.0, 3.0, 'c', 'c'); +sql insert into tb3 values ('2022-07-10 16:33:04', true, 4, 4, 4, 4, 4, 4, 4, 4, 4.0, 4.0, 'd', 'd'); +sql insert into tb3 values ('2022-07-10 16:33:05', false, 5, 5, 5, 5, 5, 5, 5, 5, 5.0, 5.0, 'e', 'e'); + +sql select 1+1n; +if $rows != 1 then + return -1 +endi +if $data00 != 2.000000000 then + return -1 +endi + + +sql select cast(1 as timestamp)+1n; +if $rows != 1 then + return -1 +endi +if $data00 != @70-02-01 08:00:00.000@ then + return -1 +endi + +sql select 1-1n; +if $rows != 1 then + return -1 +endi + +sql select cast(1 as timestamp)-1y; +if $rows != 1 then + return -1 +endi +if $data00 != @69-01-01 08:00:00.000@ then + return -1 +endi + +sql select 1n-now(); +sql select 1n+now(); +#sql select avg(4n); + +system sh/exec.sh -n dnode1 -s stop -x SIGINT From c2e2e73cc59a56fd1521879d45c21fbadb8e70da Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Wed, 13 Jul 2022 17:03:26 +0800 Subject: [PATCH 37/56] fix: get mnode epset from sync module --- source/dnode/mnode/impl/src/mndMnode.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/source/dnode/mnode/impl/src/mndMnode.c b/source/dnode/mnode/impl/src/mndMnode.c index c03951b1d8..d7eaa72202 100644 --- a/source/dnode/mnode/impl/src/mndMnode.c +++ b/source/dnode/mnode/impl/src/mndMnode.c @@ -218,6 +218,7 @@ bool mndIsMnode(SMnode *pMnode, int32_t dnodeId) { } void mndGetMnodeEpSet(SMnode *pMnode, SEpSet *pEpSet) { +#if 0 SSdb *pSdb = pMnode->pSdb; int32_t totalMnodes = sdbGetSize(pSdb, SDB_MNODE); void *pIter = NULL; @@ -237,6 +238,9 @@ void mndGetMnodeEpSet(SMnode *pMnode, SEpSet *pEpSet) { 
addEpIntoEpSet(pEpSet, pObj->pDnode->fqdn, pObj->pDnode->port); sdbRelease(pSdb, pObj); } +#else + syncGetRetryEpSet(pMnode->syncMgmt.sync, pEpSet); +#endif } static int32_t mndSetCreateMnodeRedoLogs(SMnode *pMnode, STrans *pTrans, SMnodeObj *pObj) { From 32cb6755a2768d27740fa9c964e2678a36dfb06b Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao@163.com> Date: Wed, 13 Jul 2022 17:08:27 +0800 Subject: [PATCH 38/56] feat(stream): optimize close window --- source/libs/executor/src/timewindowoperator.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 943763dadb..2d64e6bd1c 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -1338,9 +1338,9 @@ static int32_t closeIntervalWindow(SHashObj* pHashMap, STimeWindowAggSupp* pSup, uint64_t groupId = *(uint64_t*)key; ASSERT(keyLen == GET_RES_WINDOW_KEY_LEN(sizeof(TSKEY))); TSKEY ts = *(int64_t*)((char*)key + sizeof(uint64_t)); - SResultRowInfo dumyInfo; - dumyInfo.cur.pageId = -1; - STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, ts, pInterval, TSDB_ORDER_ASC); + STimeWindow win; + win.skey = ts; + win.ekey = taosTimeAdd(win.skey, pInterval->interval, pInterval->intervalUnit, pInterval->precision) - 1; SWinRes winRe = { .ts = win.skey, .groupId = groupId, From ca754adaa6ee13c2553a696574f2dd1b686931a5 Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Wed, 13 Jul 2022 17:14:04 +0800 Subject: [PATCH 39/56] fix: add agg processing --- include/util/taoserror.h | 3 ++- source/libs/command/src/command.c | 10 +++++++--- source/libs/scalar/src/scalar.c | 2 +- source/util/src/terror.c | 1 + 4 files changed, 11 insertions(+), 5 deletions(-) diff --git a/include/util/taoserror.h b/include/util/taoserror.h index ce434612c3..c057d48875 100644 --- a/include/util/taoserror.h +++ b/include/util/taoserror.h @@ -584,7 +584,8 @@ int32_t* taosGetErrno(); #define TSDB_CODE_PAR_INVALID_INTERP_CLAUSE TAOS_DEF_ERROR_CODE(0, 0x265D) #define TSDB_CODE_PAR_NO_VALID_FUNC_IN_WIN TAOS_DEF_ERROR_CODE(0, 0x265E) #define TSDB_CODE_PAR_ONLY_SUPPORT_SINGLE_TABLE TAOS_DEF_ERROR_CODE(0, 0x265F) -#define TSDB_CODE_PAR_INVALID_SMA_INDEX TAOS_DEF_ERROR_CODE(0, 0x265C) +#define TSDB_CODE_PAR_INVALID_SMA_INDEX TAOS_DEF_ERROR_CODE(0, 0x2660) +#define TSDB_CODE_PAR_INVALID_SELECTED_EXPR TAOS_DEF_ERROR_CODE(0, 0x2661) //planner #define TSDB_CODE_PLAN_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x2700) diff --git a/source/libs/command/src/command.c b/source/libs/command/src/command.c index a2816209a9..034778e5bf 100644 --- a/source/libs/command/src/command.c +++ b/source/libs/command/src/command.c @@ -570,10 +570,14 @@ int32_t buildSelectResultDataBlock(SNodeList* pProjects, SSDataBlock* pBlock) { int32_t index = 0; SNode* pProj = NULL; FOREACH(pProj, pProjects) { - if (((SValueNode*)pProj)->isNull) { - colDataAppend(taosArrayGet(pBlock->pDataBlock, index++), 0, NULL, true); + if (QUERY_NODE_VALUE != nodeType(pProj)) { + return TSDB_CODE_PAR_INVALID_SELECTED_EXPR; } else { - colDataAppend(taosArrayGet(pBlock->pDataBlock, index++), 0, nodesGetValueFromNode((SValueNode*)pProj), false); + if (((SValueNode*)pProj)->isNull) { + colDataAppend(taosArrayGet(pBlock->pDataBlock, index++), 0, NULL, true); + } else { + colDataAppend(taosArrayGet(pBlock->pDataBlock, index++), 0, nodesGetValueFromNode((SValueNode*)pProj), false); + } } } diff --git a/source/libs/scalar/src/scalar.c b/source/libs/scalar/src/scalar.c index 
a2e9f7a1cd..bdfc411fa6 100644 --- a/source/libs/scalar/src/scalar.c +++ b/source/libs/scalar/src/scalar.c @@ -700,7 +700,7 @@ EDealRes sclRewriteNonConstOperator(SNode** pNode, SScalarCtx *ctx) { EDealRes sclRewriteFunction(SNode** pNode, SScalarCtx *ctx) { SFunctionNode *node = (SFunctionNode *)*pNode; SNode* tnode = NULL; - if ((!fmIsScalarFunc(node->funcId)) && (!ctx->dual)) { + if (!fmIsScalarFunc(node->funcId)) { return DEAL_RES_CONTINUE; } diff --git a/source/util/src/terror.c b/source/util/src/terror.c index ef6697b3b5..2364c53a9a 100644 --- a/source/util/src/terror.c +++ b/source/util/src/terror.c @@ -561,6 +561,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_COL_JSON, "Only tag can be jso TAOS_DEFINE_ERROR(TSDB_CODE_PAR_VALUE_TOO_LONG, "Value too long for column/tag") TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_DELETE_WHERE, "The DELETE statement must have a definite time window range") TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_REDISTRIBUTE_VG, "The REDISTRIBUTE VGROUP statement only support 1 to 3 dnodes") +TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_SELECTED_EXPR, "Invalid SELECTed expression") //planner TAOS_DEFINE_ERROR(TSDB_CODE_PLAN_INTERNAL_ERROR, "Planner internal error") From 2ea11ba9c1773936c396e0a1943e3ed0321b128b Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Wed, 13 Jul 2022 16:54:49 +0800 Subject: [PATCH 40/56] test: restore some 2.0 case --- tests/script/general/field/testSuite.sim | 14 ----- tests/script/jenkins/basic.txt | 61 ++++++++++++------- tests/script/{general => tsim}/field/2.sim | 12 ++-- tests/script/{general => tsim}/field/3.sim | 18 +++--- tests/script/{general => tsim}/field/4.sim | 22 +++---- tests/script/{general => tsim}/field/5.sim | 26 ++++---- tests/script/{general => tsim}/field/6.sim | 30 +++++---- .../script/{general => tsim}/field/bigint.sim | 9 +-- .../script/{general => tsim}/field/binary.sim | 6 +- tests/script/{general => tsim}/field/bool.sim | 9 +-- .../script/{general => tsim}/field/double.sim | 9 +-- .../script/{general => tsim}/field/float.sim | 10 ++- tests/script/{general => tsim}/field/int.sim | 10 ++- .../script/{general => tsim}/field/single.sim | 6 +- .../{general => tsim}/field/smallint.sim | 10 ++- .../{general => tsim}/field/tinyint.sim | 9 +-- .../field/unsigined_bigint.sim | 13 ++-- 17 files changed, 120 insertions(+), 154 deletions(-) delete mode 100644 tests/script/general/field/testSuite.sim rename tests/script/{general => tsim}/field/2.sim (96%) rename tests/script/{general => tsim}/field/3.sim (97%) rename tests/script/{general => tsim}/field/4.sim (97%) rename tests/script/{general => tsim}/field/5.sim (97%) rename tests/script/{general => tsim}/field/6.sim (97%) rename tests/script/{general => tsim}/field/bigint.sim (94%) rename tests/script/{general => tsim}/field/binary.sim (96%) rename tests/script/{general => tsim}/field/bool.sim (94%) rename tests/script/{general => tsim}/field/double.sim (94%) rename tests/script/{general => tsim}/field/float.sim (94%) rename tests/script/{general => tsim}/field/int.sim (94%) rename tests/script/{general => tsim}/field/single.sim (98%) rename tests/script/{general => tsim}/field/smallint.sim (94%) rename tests/script/{general => tsim}/field/tinyint.sim (94%) rename tests/script/{general => tsim}/field/unsigined_bigint.sim (94%) diff --git a/tests/script/general/field/testSuite.sim b/tests/script/general/field/testSuite.sim deleted file mode 100644 index d12f0ebbd4..0000000000 --- a/tests/script/general/field/testSuite.sim +++ /dev/null @@ -1,14 +0,0 @@ -# run general/field/single.sim 
-run general/field/bool.sim -run general/field/smallint.sim -run general/field/tinyint.sim -run general/field/int.sim -run general/field/bigint.sim -run general/field/float.sim -run general/field/double.sim -# run general/field/binary.sim -# run general/field/2.sim -# run general/field/3.sim -# run general/field/4.sim -# run general/field/5.sim -# run general/field/6.sim diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt index 32279b3a4a..0c3126bac7 100644 --- a/tests/script/jenkins/basic.txt +++ b/tests/script/jenkins/basic.txt @@ -1,33 +1,12 @@ #======================b1-start=============== -# ---- alter -./test.sh -f tsim/alter/cached_schema_after_alter.sim -./test.sh -f tsim/alter/dnode.sim -#./test.sh -f tsim/alter/table.sim - # ---- user ./test.sh -f tsim/user/basic.sim ./test.sh -f tsim/user/password.sim ./test.sh -f tsim/user/privilege_db.sim ./test.sh -f tsim/user/privilege_sysinfo.sim -# ---- cache -./test.sh -f tsim/cache/new_metrics.sim -./test.sh -f tsim/cache/restart_table.sim -./test.sh -f tsim/cache/restart_metrics.sim - -# ---- column -./test.sh -f tsim/column/commit.sim -./test.sh -f tsim/column/metrics.sim -./test.sh -f tsim/column/table.sim - -# ---- compress -./test.sh -f tsim/compress/commitlog.sim -./test.sh -f tsim/compress/compress2.sim -./test.sh -f tsim/compress/compress.sim -./test.sh -f tsim/compress/uncompress.sim - # ---- db ./test.sh -f tsim/db/alter_option.sim # ./test.sh -f tsim/db/alter_replica_13.sim @@ -108,7 +87,7 @@ ./test.sh -f tsim/mnode/basic1.sim ./test.sh -f tsim/mnode/basic2.sim ./test.sh -f tsim/mnode/basic3.sim -#./test.sh -f tsim/mnode/basic4.sim +./test.sh -f tsim/mnode/basic4.sim ./test.sh -f tsim/mnode/basic5.sim # ---- show @@ -225,4 +204,42 @@ # --- scalar ./test.sh -f tsim/scalar/in.sim +# ---- alter +./test.sh -f tsim/alter/cached_schema_after_alter.sim +./test.sh -f tsim/alter/dnode.sim +#./test.sh -f tsim/alter/table.sim + +# ---- cache +./test.sh -f tsim/cache/new_metrics.sim +./test.sh -f tsim/cache/restart_table.sim +./test.sh -f tsim/cache/restart_metrics.sim + +# ---- column +./test.sh -f tsim/column/commit.sim +./test.sh -f tsim/column/metrics.sim +./test.sh -f tsim/column/table.sim + +# ---- compress +./test.sh -f tsim/compress/commitlog.sim +./test.sh -f tsim/compress/compress2.sim +./test.sh -f tsim/compress/compress.sim +./test.sh -f tsim/compress/uncompress.sim + +# ---- field +./test.sh -f tsim/field/2.sim +./test.sh -f tsim/field/3.sim +./test.sh -f tsim/field/4.sim +./test.sh -f tsim/field/5.sim +./test.sh -f tsim/field/6.sim +./test.sh -f tsim/field/binary.sim +./test.sh -f tsim/field/bigint.sim +./test.sh -f tsim/field/bool.sim +./test.sh -f tsim/field/double.sim +./test.sh -f tsim/field/float.sim +./test.sh -f tsim/field/int.sim +./test.sh -f tsim/field/single.sim +./test.sh -f tsim/field/smallint.sim +./test.sh -f tsim/field/tinyint.sim +./test.sh -f tsim/field/unsigined_bigint.sim + #======================b1-end=============== diff --git a/tests/script/general/field/2.sim b/tests/script/tsim/field/2.sim similarity index 96% rename from tests/script/general/field/2.sim rename to tests/script/tsim/field/2.sim index cc6889fd75..b5c501ceed 100644 --- a/tests/script/general/field/2.sim +++ b/tests/script/tsim/field/2.sim @@ -1,10 +1,8 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect + print ======================== dnode1 start $dbPrefix = db @@ -280,18 +278,18 @@ if $data00 
!= 25 then endi print =============== step12 -sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol2 = 1 interval(1d) group by tgcol order by tgcol desc +sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol2 = 1 group by tgcol order by tgcol desc print $db -print select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol2 = 1 interval(1d) group by tgcol +print select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol2 = 1 group by tgcol print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data01 != 100 then +if $data00 != 100 then return -1 endi print =============== clear sql drop database $db sql show databases -if $rows != 0 then +if $rows != 2 then return -1 endi diff --git a/tests/script/general/field/3.sim b/tests/script/tsim/field/3.sim similarity index 97% rename from tests/script/general/field/3.sim rename to tests/script/tsim/field/3.sim index cb3c6621ac..661bc6a85a 100644 --- a/tests/script/general/field/3.sim +++ b/tests/script/tsim/field/3.sim @@ -1,10 +1,8 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect + print ======================== dnode1 start $dbPrefix = db @@ -493,28 +491,28 @@ if $data00 != 25 then endi print =============== step19 -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 interval(1d) group by tgcol1 order by tgcol1 desc +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 group by tgcol1 order by tgcol1 desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data01 != 100 then +if $data00 != 100 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 interval(1d) group by tgcol2 order by tgcol2 desc +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 group by tgcol2 order by tgcol2 desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data01 != 100 then +if $data00 != 100 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 interval(1d) group by tgcol3 order by tgcol3 desc +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 group by tgcol3 order by tgcol3 desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data01 != 100 then +if $data00 != 100 then return -1 endi print =============== clear sql drop database $db sql show databases -if $rows != 0 then +if $rows != 2 then return -1 endi diff --git a/tests/script/general/field/4.sim b/tests/script/tsim/field/4.sim similarity index 97% rename from tests/script/general/field/4.sim rename to tests/script/tsim/field/4.sim index 2d893da777..734179c5bb 100644 --- a/tests/script/general/field/4.sim 
+++ b/tests/script/tsim/field/4.sim @@ -1,10 +1,8 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect + print ======================== dnode1 start $dbPrefix = db @@ -677,34 +675,34 @@ if $data00 != 25 then endi print =============== step24 -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 interval(1d) group by tgcol1 order by tgcol1 desc +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 group by tgcol1 order by tgcol1 desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data01 != 100 then +if $data00 != 100 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 interval(1d) group by tgcol2 order by tgcol2 desc +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 group by tgcol2 order by tgcol2 desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data01 != 100 then +if $data00 != 100 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 interval(1d) group by tgcol3 order by tgcol3 desc +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 group by tgcol3 order by tgcol3 desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data01 != 100 then +if $data00 != 100 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 and tbcol4 = 1 interval(1d) group by tgcol4 order by tgcol4 desc +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 and tbcol4 = 1 group by tgcol4 order by tgcol4 desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data01 != 100 then +if $data00 != 100 then return -1 endi print =============== clear sql drop database $db sql show databases -if $rows != 0 then +if $rows != 2 then return -1 endi diff --git a/tests/script/general/field/5.sim b/tests/script/tsim/field/5.sim similarity index 97% rename from tests/script/general/field/5.sim rename to tests/script/tsim/field/5.sim index e1421bdb4f..5185d8556e 100644 --- a/tests/script/general/field/5.sim +++ b/tests/script/tsim/field/5.sim @@ -1,10 +1,8 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect + print ======================== dnode1 start $dbPrefix = db @@ -794,40 +792,40 @@ endi print =============== step27 -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 interval(1d) group by tgcol1 order by tgcol1 desc +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), 
first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 group by tgcol1 order by tgcol1 desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data01 != 100 then +if $data00 != 100 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 interval(1d) group by tgcol2 order by tgcol2 desc +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 group by tgcol2 order by tgcol2 desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data01 != 100 then +if $data00 != 100 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 interval(1d) group by tgcol3 order by tgcol3 desc +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 group by tgcol3 order by tgcol3 desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data01 != 100 then +if $data00 != 100 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 and tbcol4 = 1 interval(1d) group by tgcol4 order by tgcol4 desc +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 and tbcol4 = 1 group by tgcol4 order by tgcol4 desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data01 != 100 then +if $data00 != 100 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 and tbcol4 = 1 and tbcol5 = 1 interval(1d) group by tgcol5 order by tgcol5 desc +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 and tbcol4 = 1 and tbcol5 = 1 group by tgcol5 order by tgcol5 desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data01 != 100 then +if $data00 != 100 then return -1 endi print =============== clear sql drop database $db sql show databases -if $rows != 0 then +if $rows != 2 then return -1 endi diff --git a/tests/script/general/field/6.sim b/tests/script/tsim/field/6.sim similarity index 97% rename from tests/script/general/field/6.sim rename to tests/script/tsim/field/6.sim index 27475d591f..8ceefae228 100644 --- a/tests/script/general/field/6.sim +++ b/tests/script/tsim/field/6.sim @@ -1,10 +1,8 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect + print ======================== dnode1 start $dbPrefix = db @@ -943,46 +941,46 @@ if $data00 != 25 then endi print =============== step31 -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 interval(1d) group by tgcol1 order by tgcol1 desc +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) 
from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 group by tgcol1 order by tgcol1 desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data01 != 100 then +if $data00 != 100 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 interval(1d) group by tgcol2 order by tgcol2 desc +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 group by tgcol2 order by tgcol2 desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data01 != 100 then +if $data00 != 100 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 interval(1d) group by tgcol3 order by tgcol3 desc +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 group by tgcol3 order by tgcol3 desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data01 != 100 then +if $data00 != 100 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 and tbcol4 = 1 interval(1d) group by tgcol4 order by tgcol4 desc +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 and tbcol4 = 1 group by tgcol4 order by tgcol4 desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data01 != 100 then +if $data00 != 100 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 and tbcol4 = 1 and tbcol5 = 1 interval(1d) group by tgcol5 order by tgcol5 desc +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 and tbcol4 = 1 and tbcol5 = 1 group by tgcol5 order by tgcol5 desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data01 != 100 then +if $data00 != 100 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 and tbcol4 = 1 and tbcol5 = 1 and tbcol6 = 1 interval(1d) group by tgcol6 order by tgcol6 desc +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 and tbcol4 = 1 and tbcol5 = 1 and tbcol6 = 1 group by tgcol6 order by tgcol6 desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data01 != 100 then +if $data00 != 100 then return -1 endi print =============== clear sql drop database $db sql show databases -if $rows != 0 then +if $rows != 2 then return -1 endi diff --git a/tests/script/general/field/bigint.sim b/tests/script/tsim/field/bigint.sim similarity index 94% rename from tests/script/general/field/bigint.sim rename to tests/script/tsim/field/bigint.sim index cfe8c561f0..c580a4df1c 100644 --- a/tests/script/general/field/bigint.sim +++ b/tests/script/tsim/field/bigint.sim @@ -1,9 +1,6 @@ 
system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect print ======================== dnode1 start @@ -146,16 +143,16 @@ if $data00 != 25 then endi print =============== step8 -sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = 1 interval(1d) group by tgcol order by tgcol desc +sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = 1 group by tgcol order by tgcol desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data01 != 100 then +if $data00 != 100 then return -1 endi print =============== clear sql drop database $db sql show databases -if $rows != 0 then +if $rows != 2 then return -1 endi diff --git a/tests/script/general/field/binary.sim b/tests/script/tsim/field/binary.sim similarity index 96% rename from tests/script/general/field/binary.sim rename to tests/script/tsim/field/binary.sim index 821dbc9a82..59005e1ef1 100644 --- a/tests/script/general/field/binary.sim +++ b/tests/script/tsim/field/binary.sim @@ -1,9 +1,6 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect print ======================== dnode1 start @@ -47,7 +44,6 @@ while $i < 10 $i = $i + 1 endw - print =============== step2 sql select * from $mt where tbcol = '0' @@ -70,7 +66,7 @@ sql_error select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), f print =============== clear sql drop database $db sql show databases -if $rows != 0 then +if $rows != 2 then return -1 endi diff --git a/tests/script/general/field/bool.sim b/tests/script/tsim/field/bool.sim similarity index 94% rename from tests/script/general/field/bool.sim rename to tests/script/tsim/field/bool.sim index d94071b328..37292e9758 100644 --- a/tests/script/general/field/bool.sim +++ b/tests/script/tsim/field/bool.sim @@ -1,9 +1,6 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect print ======================== dnode1 start @@ -144,8 +141,8 @@ if $data00 != 25 then endi print =============== step8 -sql select count(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = true interval(1d) group by tgcol order by tgcol desc -print select count(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = true interval(1d) group by tgcol order by tgcol desc +sql select count(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = true group by tgcol order by tgcol desc +print select count(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = true group by tgcol order by tgcol desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data01 != 100 then return -1 @@ -154,7 +151,7 @@ endi print =============== clear sql drop database $db sql show databases -if $rows != 0 then +if $rows != 2 then return -1 endi diff --git a/tests/script/general/field/double.sim b/tests/script/tsim/field/double.sim similarity index 94% rename from tests/script/general/field/double.sim rename to tests/script/tsim/field/double.sim index 0c9c23e304..e7b1c8e8af 100644 --- a/tests/script/general/field/double.sim +++ b/tests/script/tsim/field/double.sim @@ -1,9 +1,6 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c 
walLevel -v 1 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect print ======================== dnode1 start @@ -144,16 +141,16 @@ if $data00 != 25 then endi print =============== step8 -sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = 1 interval(1d) group by tgcol order by tgcol desc +sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = 1 group by tgcol order by tgcol desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data01 != 100 then +if $data00 != 100 then return -1 endi print =============== clear sql drop database $db sql show databases -if $rows != 0 then +if $rows != 2 then return -1 endi diff --git a/tests/script/general/field/float.sim b/tests/script/tsim/field/float.sim similarity index 94% rename from tests/script/general/field/float.sim rename to tests/script/tsim/field/float.sim index 00423c00b8..159a4b60ab 100644 --- a/tests/script/general/field/float.sim +++ b/tests/script/tsim/field/float.sim @@ -1,10 +1,8 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect + print ======================== dnode1 start $dbPrefix = db @@ -144,16 +142,16 @@ if $data00 != 25 then endi print =============== step8 -sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = 1 interval(1d) group by tgcol order by tgcol desc +sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = 1 group by tgcol order by tgcol desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data01 != 100 then +if $data00 != 100 then return -1 endi print =============== clear sql drop database $db sql show databases -if $rows != 0 then +if $rows != 2 then return -1 endi diff --git a/tests/script/general/field/int.sim b/tests/script/tsim/field/int.sim similarity index 94% rename from tests/script/general/field/int.sim rename to tests/script/tsim/field/int.sim index 0e322e4f12..2b5b70141a 100644 --- a/tests/script/general/field/int.sim +++ b/tests/script/tsim/field/int.sim @@ -1,10 +1,8 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect + print ======================== dnode1 start $dbPrefix = db @@ -144,16 +142,16 @@ if $data00 != 25 then endi print =============== step8 -sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = 1 interval(1d) group by tgcol order by tgcol desc +sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = 1 group by tgcol order by tgcol desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data01 != 100 then +if $data00 != 100 then return -1 endi print =============== clear sql drop database $db sql show databases -if $rows != 0 then +if $rows != 2 then return -1 endi diff --git a/tests/script/general/field/single.sim b/tests/script/tsim/field/single.sim similarity index 98% rename from tests/script/general/field/single.sim rename to tests/script/tsim/field/single.sim index 3f6bf4309f..115e76ffeb 100644 --- a/tests/script/general/field/single.sim +++ b/tests/script/tsim/field/single.sim @@ -1,10 +1,8 
@@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect + print ======================== dnode1 start $dbPrefix = db @@ -211,7 +209,7 @@ endi print =============== clear sql drop database $db sql show databases -if $rows != 0 then +if $rows != 2 then return -1 endi diff --git a/tests/script/general/field/smallint.sim b/tests/script/tsim/field/smallint.sim similarity index 94% rename from tests/script/general/field/smallint.sim rename to tests/script/tsim/field/smallint.sim index 78b2b998cf..975f02bf9b 100644 --- a/tests/script/general/field/smallint.sim +++ b/tests/script/tsim/field/smallint.sim @@ -1,10 +1,8 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect + print ======================== dnode1 start $dbPrefix = db @@ -144,16 +142,16 @@ if $data00 != 25 then endi print =============== step8 -sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = 1 interval(1d) group by tgcol order by tgcol desc +sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = 1 group by tgcol order by tgcol desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data01 != 100 then +if $data00 != 100 then return -1 endi print =============== clear sql drop database $db sql show databases -if $rows != 0 then +if $rows != 2 then return -1 endi diff --git a/tests/script/general/field/tinyint.sim b/tests/script/tsim/field/tinyint.sim similarity index 94% rename from tests/script/general/field/tinyint.sim rename to tests/script/tsim/field/tinyint.sim index 7e1a0c6e80..ff24e484a7 100644 --- a/tests/script/general/field/tinyint.sim +++ b/tests/script/tsim/field/tinyint.sim @@ -1,9 +1,6 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect print ======================== dnode1 start @@ -145,16 +142,16 @@ if $data00 != 25 then endi print =============== step8 -sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = 1 interval(1d) group by tgcol order by tgcol desc +sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = 1 group by tgcol order by tgcol desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data01 != 100 then +if $data00 != 100 then return -1 endi print =============== clear sql drop database $db sql show databases -if $rows != 0 then +if $rows != 2 then return -1 endi diff --git a/tests/script/general/field/unsigined_bigint.sim b/tests/script/tsim/field/unsigined_bigint.sim similarity index 94% rename from tests/script/general/field/unsigined_bigint.sim rename to tests/script/tsim/field/unsigined_bigint.sim index 260128b5c2..d8421e7626 100644 --- a/tests/script/general/field/unsigined_bigint.sim +++ b/tests/script/tsim/field/unsigined_bigint.sim @@ -1,12 +1,9 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect -print ======================== dnode1 start +print ======================== dnode1 start $dbPrefix = db $tbPrefix = tb $mtPrefix = st @@ -27,7 
+24,7 @@ $i = 0 while $i < 5 $tb = $tbPrefix . $i sql create table $tb using $mt tags( 0 ) - sql create table $tb using $mt tags( -111 ) + sql_error create table $tb using $mt tags( -111 ) $x = 0 while $x < $rowNum $ms = $x . m @@ -150,16 +147,16 @@ if $data00 != 25 then endi print =============== step8 -sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = 1 interval(1d) group by tgcol order by tgcol desc +sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = 1 group by tgcol order by tgcol desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data01 != 100 then +if $data00 != 100 then return -1 endi print =============== clear sql drop database $db sql show databases -if $rows != 0 then +if $rows != 2 then return -1 endi From a717042acf1da787baa793a857ef77b3f713f7c2 Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Wed, 13 Jul 2022 16:43:58 +0800 Subject: [PATCH 41/56] refactor(stream): simple batch --- include/libs/stream/tstream.h | 2 +- source/dnode/mgmt/mgmt_vnode/src/vmHandle.c | 16 ++++++++-------- source/libs/executor/src/timewindowoperator.c | 4 ++-- source/libs/stream/src/streamExec.c | 6 +++--- 4 files changed, 14 insertions(+), 14 deletions(-) diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index cae14a6d59..a08db7b8f8 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -307,7 +307,7 @@ static FORCE_INLINE int32_t streamTaskInput(SStreamTask* pTask, SStreamQueueItem atomic_store_8(&pTask->inputStatus, TASK_INPUT_STATUS__FAILED); return -1; } - qInfo("task %d %p submit enqueue %p %p %p", pTask->taskId, pTask, pItem, pSubmitClone, pSubmitClone->data); + qDebug("task %d %p submit enqueue %p %p %p", pTask->taskId, pTask, pItem, pSubmitClone, pSubmitClone->data); taosWriteQitem(pTask->inputQueue->queue, pSubmitClone); // qStreamInput(pTask->exec.executor, pSubmitClone); } else if (pItem->type == STREAM_INPUT__DATA_BLOCK || pItem->type == STREAM_INPUT__DATA_RETRIEVE) { diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c index 7e7139ba53..9571a83116 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c @@ -357,16 +357,16 @@ SArray *vmGetMsgHandles() { if (dmSetMgmtHandle(pArray, TDMT_VND_COMMIT, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_SCH_QUERY_HEARTBEAT, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER; - if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_TRIGGER, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER; + if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_TRIGGER, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_DROP, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_DEPLOY, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER; - if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_RUN, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER; - if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_DISPATCH, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER; - if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_DISPATCH_RSP, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER; - if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_RECOVER, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER; - if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_RECOVER_RSP, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER; - if (dmSetMgmtHandle(pArray, 
TDMT_STREAM_RETRIEVE, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER; - if (dmSetMgmtHandle(pArray, TDMT_STREAM_RETRIEVE_RSP, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER; + if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_RUN, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER; + if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_DISPATCH, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER; + if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_DISPATCH_RSP, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER; + if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_RECOVER, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER; + if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_RECOVER_RSP, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER; + if (dmSetMgmtHandle(pArray, TDMT_STREAM_RETRIEVE, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER; + if (dmSetMgmtHandle(pArray, TDMT_STREAM_RETRIEVE_RSP, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_REPLICA, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_CONFIG, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER; diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index da78480d3b..773484a9b3 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -653,11 +653,11 @@ static void doInterpUnclosedTimeWindow(SOperatorInfo* pOperatorInfo, int32_t num void printDataBlock(SSDataBlock* pBlock, const char* flag) { if (pBlock == NULL) { - qInfo("======printDataBlock Block is Null"); + qDebug("======printDataBlock Block is Null"); return; } char* pBuf = NULL; - qInfo("%s", dumpBlockData(pBlock, flag, &pBuf)); + qDebug("%s", dumpBlockData(pBlock, flag, &pBuf)); taosMemoryFree(pBuf); } diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index 9644d9eac6..d0d81e3343 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -80,7 +80,7 @@ static SArray* streamExecForQall(SStreamTask* pTask, SArray* pRes) { while (1) { SStreamQueueItem* qItem = streamQueueNextItem(pTask->inputQueue); if (qItem == NULL) { - qInfo("stream exec over, queue empty"); + qDebug("stream exec over, queue empty"); break; } if (data == NULL) { @@ -101,9 +101,9 @@ static SArray* streamExecForQall(SStreamTask* pTask, SArray* pRes) { } if (data == NULL) break; - qInfo("stream task %d exec begin, batch msg: %d", pTask->taskId, cnt); + qDebug("stream task %d exec begin, batch msg: %d", pTask->taskId, cnt); streamTaskExecImpl(pTask, data, pRes); - qInfo("stream task %d exec end", pTask->taskId); + qDebug("stream task %d exec end", pTask->taskId); if (pTask->taskStatus == TASK_STATUS__DROPPING) { taosArrayDestroyEx(pRes, (FDelete)tDeleteSSDataBlock); From 78abf57f1ad13cfd219bdfb5d78ee15bb83b7bf2 Mon Sep 17 00:00:00 2001 From: plum-lihui Date: Wed, 13 Jul 2022 17:35:34 +0800 Subject: [PATCH 42/56] test:modify case --- tests/system-test/7-tmq/tmqUpdate-1ctb.py | 2 +- tests/system-test/fulltest.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/system-test/7-tmq/tmqUpdate-1ctb.py b/tests/system-test/7-tmq/tmqUpdate-1ctb.py index 3cb364f91d..8513092be9 100644 --- a/tests/system-test/7-tmq/tmqUpdate-1ctb.py +++ b/tests/system-test/7-tmq/tmqUpdate-1ctb.py @@ -19,7 +19,7 @@ class TDTestCase: self.snapshot = 0 self.vgroups = 4 self.ctbNum = 1 - self.rowsPerTbl = 100000 + self.rowsPerTbl = 10000 def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") diff --git 
a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh index becb5db501..2c116113bc 100755 --- a/tests/system-test/fulltest.sh +++ b/tests/system-test/fulltest.sh @@ -148,7 +148,7 @@ python3 ./test.py -f 7-tmq/subscribeDb2.py python3 ./test.py -f 7-tmq/subscribeDb3.py #python3 ./test.py -f 7-tmq/subscribeDb4.py python3 ./test.py -f 7-tmq/subscribeStb.py -#python3 ./test.py -f 7-tmq/subscribeStb0.py +python3 ./test.py -f 7-tmq/subscribeStb0.py python3 ./test.py -f 7-tmq/subscribeStb1.py python3 ./test.py -f 7-tmq/subscribeStb2.py python3 ./test.py -f 7-tmq/subscribeStb3.py From 312dce4e212a96c1f73eace01a7ebc8fbdd38883 Mon Sep 17 00:00:00 2001 From: "slzhou@taodata.com" Date: Wed, 13 Jul 2022 17:57:59 +0800 Subject: [PATCH 43/56] fix: reset hasGroupId after outputing the previous group results --- source/libs/executor/src/sortoperator.c | 4 +++- source/libs/executor/src/timewindowoperator.c | 3 ++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/source/libs/executor/src/sortoperator.c b/source/libs/executor/src/sortoperator.c index 8d9cac3614..9795907404 100644 --- a/source/libs/executor/src/sortoperator.c +++ b/source/libs/executor/src/sortoperator.c @@ -621,7 +621,9 @@ SSDataBlock* getMultiwaySortedBlockData(SSortHandle* pHandle, SSDataBlock* pData break; } } - + if (pInfo->groupSort) { + pInfo->hasGroupId = false; + } if (p->info.rows > 0) { // todo extract method blockDataEnsureCapacity(pDataBlock, p->info.rows); int32_t numOfCols = taosArrayGetSize(pColMatchInfo); diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 773484a9b3..1be1488d11 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -4507,13 +4507,14 @@ static SSDataBlock* doMergeAlignedIntervalAgg(SOperatorInfo* pOperator) { setInputDataBlock(pOperator, pSup->pCtx, pBlock, iaInfo->order, scanFlag, true); doMergeAlignedIntervalAggImpl(pOperator, &iaInfo->binfo.resultRowInfo, pBlock, scanFlag, pRes); doFilter(miaInfo->pCondition, pRes); - if (pRes->info.rows > 0) { + if (pRes->info.rows >= pOperator->resultInfo.capacity) { break; } } pRes->info.groupId = miaInfo->groupId; } + miaInfo->hasGroupId = false; if (miaInfo->inputBlocksFinished) { doSetOperatorCompleted(pOperator); From 0ba9d225938e32eabfe6105f6700bc7bddd0ad59 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Wed, 13 Jul 2022 17:59:25 +0800 Subject: [PATCH 44/56] test: restore some 2.0 case --- tests/script/general/compute/testSuite.sim | 17 ---------- tests/script/jenkins/basic.txt | 20 +++++++++++ .../script/{general => tsim}/compute/avg.sim | 5 +-- .../{general => tsim}/compute/block_dist.sim | 5 +-- .../{general => tsim}/compute/bottom.sim | 5 +-- .../{general => tsim}/compute/count.sim | 5 +-- .../script/{general => tsim}/compute/diff.sim | 5 +-- .../{general => tsim}/compute/diff2.sim | 5 +-- .../{general => tsim}/compute/first.sim | 5 +-- .../{general => tsim}/compute/interval.sim | 5 +-- .../script/{general => tsim}/compute/last.sim | 5 +-- .../{general => tsim}/compute/last_row.sim | 5 +-- .../{general => tsim}/compute/leastsquare.sim | 5 +-- .../script/{general => tsim}/compute/max.sim | 5 +-- .../script/{general => tsim}/compute/min.sim | 32 ++++++++---------- .../script/{general => tsim}/compute/null.sim | 23 ++++++------- .../{general => tsim}/compute/percentile.sim | 6 +--- .../{general => tsim}/compute/stddev.sim | 17 ++++------ .../script/{general => tsim}/compute/sum.sim | 33 +++++++++---------- 
.../script/{general => tsim}/compute/top.sim | 29 ++++++++-------- 20 files changed, 94 insertions(+), 143 deletions(-) delete mode 100644 tests/script/general/compute/testSuite.sim rename tests/script/{general => tsim}/compute/avg.sim (97%) rename tests/script/{general => tsim}/compute/block_dist.sim (95%) rename tests/script/{general => tsim}/compute/bottom.sim (95%) rename tests/script/{general => tsim}/compute/count.sim (97%) rename tests/script/{general => tsim}/compute/diff.sim (95%) rename tests/script/{general => tsim}/compute/diff2.sim (97%) rename tests/script/{general => tsim}/compute/first.sim (97%) rename tests/script/{general => tsim}/compute/interval.sim (98%) rename tests/script/{general => tsim}/compute/last.sim (97%) rename tests/script/{general => tsim}/compute/last_row.sim (98%) rename tests/script/{general => tsim}/compute/leastsquare.sim (96%) rename tests/script/{general => tsim}/compute/max.sim (97%) rename tests/script/{general => tsim}/compute/min.sim (87%) rename tests/script/{general => tsim}/compute/null.sim (93%) rename tests/script/{general => tsim}/compute/percentile.sim (96%) rename tests/script/{general => tsim}/compute/stddev.sim (88%) rename tests/script/{general => tsim}/compute/sum.sim (84%) rename tests/script/{general => tsim}/compute/top.sim (81%) diff --git a/tests/script/general/compute/testSuite.sim b/tests/script/general/compute/testSuite.sim deleted file mode 100644 index 91bf4bf0cd..0000000000 --- a/tests/script/general/compute/testSuite.sim +++ /dev/null @@ -1,17 +0,0 @@ -run general/compute/avg.sim -run general/compute/bottom.sim -run general/compute/count.sim -run general/compute/diff.sim -run general/compute/diff2.sim -run general/compute/first.sim -run general/compute/interval.sim -run general/compute/last.sim -run general/compute/leastsquare.sim -run general/compute/max.sim -run general/compute/min.sim -run general/compute/null.sim -run general/compute/percentile.sim -run general/compute/stddev.sim -run general/compute/sum.sim -run general/compute/top.sim -run general/compute/block_dist.sim diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt index 0c3126bac7..ba94743df8 100644 --- a/tests/script/jenkins/basic.txt +++ b/tests/script/jenkins/basic.txt @@ -225,6 +225,26 @@ ./test.sh -f tsim/compress/compress.sim ./test.sh -f tsim/compress/uncompress.sim +# ---- compute +#./test.sh -f tsim/compute/avg.sim +#./test.sh -f tsim/compute/block_dist.sim +#./test.sh -f tsim/compute/bottom.sim +#./test.sh -f tsim/compute/count.sim +#./test.sh -f tsim/compute/diff.sim +#./test.sh -f tsim/compute/diff2.sim +#./test.sh -f tsim/compute/first.sim +#./test.sh -f tsim/compute/interval.sim +#./test.sh -f tsim/compute/last_row.sim +#./test.sh -f tsim/compute/last.sim +#./test.sh -f tsim/compute/leastsquare.sim +#./test.sh -f tsim/compute/max.sim +#./test.sh -f tsim/compute/min.sim +#./test.sh -f tsim/compute/null.sim +./test.sh -f tsim/compute/percentile.sim +./test.sh -f tsim/compute/stddev.sim +./test.sh -f tsim/compute/sum.sim +./test.sh -f tsim/compute/top.sim + # ---- field ./test.sh -f tsim/field/2.sim ./test.sh -f tsim/field/3.sim diff --git a/tests/script/general/compute/avg.sim b/tests/script/tsim/compute/avg.sim similarity index 97% rename from tests/script/general/compute/avg.sim rename to tests/script/tsim/compute/avg.sim index db452b0344..2805b65fff 100644 --- a/tests/script/general/compute/avg.sim +++ b/tests/script/tsim/compute/avg.sim @@ -1,9 +1,6 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 
-system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/exec.sh -n dnode1 -s start -sleep 2000 sql connect $dbPrefix = m_av_db @@ -163,7 +160,7 @@ endi print =============== clear sql drop database $db sql show databases -if $rows != 0 then +if $rows != 2 then return -1 endi diff --git a/tests/script/general/compute/block_dist.sim b/tests/script/tsim/compute/block_dist.sim similarity index 95% rename from tests/script/general/compute/block_dist.sim rename to tests/script/tsim/compute/block_dist.sim index 5343c1db28..201d222af7 100644 --- a/tests/script/general/compute/block_dist.sim +++ b/tests/script/tsim/compute/block_dist.sim @@ -1,9 +1,6 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/exec.sh -n dnode1 -s start -sleep 2000 sql connect $dbPrefix = m_di_db @@ -91,7 +88,7 @@ sql_error select _block_dist() from (select * from $mt) print =============== clear sql drop database $db sql show databases -if $rows != 0 then +if $rows != 2 then return -1 endi diff --git a/tests/script/general/compute/bottom.sim b/tests/script/tsim/compute/bottom.sim similarity index 95% rename from tests/script/general/compute/bottom.sim rename to tests/script/tsim/compute/bottom.sim index 7af67ecbf0..cfac02d6d5 100644 --- a/tests/script/general/compute/bottom.sim +++ b/tests/script/tsim/compute/bottom.sim @@ -1,9 +1,6 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/exec.sh -n dnode1 -s start -sleep 2000 sql connect $dbPrefix = m_bo_db @@ -98,7 +95,7 @@ step6: print =============== clear sql drop database $db sql show databases -if $rows != 0 then +if $rows != 2 then return -1 endi diff --git a/tests/script/general/compute/count.sim b/tests/script/tsim/compute/count.sim similarity index 97% rename from tests/script/general/compute/count.sim rename to tests/script/tsim/compute/count.sim index cf84918f5b..0a6ce93077 100644 --- a/tests/script/general/compute/count.sim +++ b/tests/script/tsim/compute/count.sim @@ -1,9 +1,6 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/exec.sh -n dnode1 -s start -sleep 2000 sql connect $dbPrefix = m_co_db @@ -192,7 +189,7 @@ endi print =============== clear sql drop database $db sql show databases -if $rows != 0 then +if $rows != 2 then return -1 endi diff --git a/tests/script/general/compute/diff.sim b/tests/script/tsim/compute/diff.sim similarity index 95% rename from tests/script/general/compute/diff.sim rename to tests/script/tsim/compute/diff.sim index bc303a9ca5..ba4b32ddbb 100644 --- a/tests/script/general/compute/diff.sim +++ b/tests/script/tsim/compute/diff.sim @@ -1,9 +1,6 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/exec.sh -n dnode1 -s start -sleep 2000 sql connect $dbPrefix = m_di_db @@ -91,7 +88,7 @@ step6: print =============== clear sql drop database $db sql show databases -if $rows != 0 then +if $rows != 2 then return -1 endi diff --git a/tests/script/general/compute/diff2.sim b/tests/script/tsim/compute/diff2.sim similarity index 97% rename from tests/script/general/compute/diff2.sim rename to tests/script/tsim/compute/diff2.sim index 55fa1daa95..08b52cb37b 100644 --- a/tests/script/general/compute/diff2.sim +++ b/tests/script/tsim/compute/diff2.sim @@ -1,9 +1,6 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 system 
sh/exec.sh -n dnode1 -s start -sleep 2000 sql connect $dbPrefix = m_di_db @@ -152,7 +149,7 @@ step6: print =============== clear #sql drop database $db #sql show databases -#if $rows != 0 then +#if $rows != 2 then # return -1 #endi diff --git a/tests/script/general/compute/first.sim b/tests/script/tsim/compute/first.sim similarity index 97% rename from tests/script/general/compute/first.sim rename to tests/script/tsim/compute/first.sim index fce334167b..cf1160dbdb 100644 --- a/tests/script/general/compute/first.sim +++ b/tests/script/tsim/compute/first.sim @@ -1,9 +1,6 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/exec.sh -n dnode1 -s start -sleep 2000 sql connect $dbPrefix = m_fi_db @@ -165,7 +162,7 @@ endi print =============== clear sql drop database $db sql show databases -if $rows != 0 then +if $rows != 2 then return -1 endi diff --git a/tests/script/general/compute/interval.sim b/tests/script/tsim/compute/interval.sim similarity index 98% rename from tests/script/general/compute/interval.sim rename to tests/script/tsim/compute/interval.sim index c21003a646..a8539701c7 100644 --- a/tests/script/general/compute/interval.sim +++ b/tests/script/tsim/compute/interval.sim @@ -1,9 +1,6 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/exec.sh -n dnode1 -s start -sleep 2000 sql connect $dbPrefix = m_in_db @@ -199,7 +196,7 @@ endi print =============== clear sql drop database $db sql show databases -if $rows != 0 then +if $rows != 2 then return -1 endi diff --git a/tests/script/general/compute/last.sim b/tests/script/tsim/compute/last.sim similarity index 97% rename from tests/script/general/compute/last.sim rename to tests/script/tsim/compute/last.sim index 9f20f8c5aa..aa9b041ca9 100644 --- a/tests/script/general/compute/last.sim +++ b/tests/script/tsim/compute/last.sim @@ -1,9 +1,6 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/exec.sh -n dnode1 -s start -sleep 2000 sql connect $dbPrefix = m_la_db @@ -169,7 +166,7 @@ endi print =============== clear sql drop database $db sql show databases -if $rows != 0 then +if $rows != 2 then return -1 endi diff --git a/tests/script/general/compute/last_row.sim b/tests/script/tsim/compute/last_row.sim similarity index 98% rename from tests/script/general/compute/last_row.sim rename to tests/script/tsim/compute/last_row.sim index 3b28b0baa5..867f64fa2e 100644 --- a/tests/script/general/compute/last_row.sim +++ b/tests/script/tsim/compute/last_row.sim @@ -1,9 +1,6 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/exec.sh -n dnode1 -s start -sleep 2000 sql connect $dbPrefix = m_la_db @@ -217,7 +214,7 @@ endi print =============== clear sql drop database $db sql show databases -if $rows != 0 then +if $rows != 2 then return -1 endi diff --git a/tests/script/general/compute/leastsquare.sim b/tests/script/tsim/compute/leastsquare.sim similarity index 96% rename from tests/script/general/compute/leastsquare.sim rename to tests/script/tsim/compute/leastsquare.sim index 1c8af7fe7f..aa83a4e14e 100644 --- a/tests/script/general/compute/leastsquare.sim +++ b/tests/script/tsim/compute/leastsquare.sim @@ -1,9 +1,6 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/exec.sh -n dnode1 -s start -sleep 2000 sql connect 
$dbPrefix = m_le_db @@ -93,7 +90,7 @@ endi print =============== clear sql drop database $db sql show databases -if $rows != 0 then +if $rows != 2 then return -1 endi diff --git a/tests/script/general/compute/max.sim b/tests/script/tsim/compute/max.sim similarity index 97% rename from tests/script/general/compute/max.sim rename to tests/script/tsim/compute/max.sim index f9665a823d..1b3fac5820 100644 --- a/tests/script/general/compute/max.sim +++ b/tests/script/tsim/compute/max.sim @@ -1,9 +1,6 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/exec.sh -n dnode1 -s start -sleep 2000 sql connect $dbPrefix = m_ma_db @@ -169,7 +166,7 @@ endi print =============== clear sql drop database $db sql show databases -if $rows != 0 then +if $rows != 2 then return -1 endi diff --git a/tests/script/general/compute/min.sim b/tests/script/tsim/compute/min.sim similarity index 87% rename from tests/script/general/compute/min.sim rename to tests/script/tsim/compute/min.sim index 4a9904e065..33e9eb0f3e 100644 --- a/tests/script/general/compute/min.sim +++ b/tests/script/tsim/compute/min.sim @@ -1,10 +1,6 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect $dbPrefix = m_mi_db @@ -73,14 +69,14 @@ endi print =============== step5 sql select min(tbcol) as b from $tb interval(1m) -print ===> $data11 -if $data11 != 1 then +print ===> $data10 +if $data10 != 1 then return -1 endi sql select min(tbcol) as b from $tb interval(1d) -print ===> $data01 -if $data01 != 0 then +print ===> $data00 +if $data00 != 0 then return -1 endi @@ -90,8 +86,8 @@ $ms = 1601481600000 + $cc sql select min(tbcol) as b from $tb where ts <= $ms interval(1m) print select min(tbcol) as b from $tb where ts <= $ms interval(1m) -print ===> $data11 -if $data11 != 1 then +print ===> $data10 +if $data10 != 1 then return -1 endi if $rows != 5 then @@ -130,14 +126,14 @@ endi print =============== step9 sql select min(tbcol) as b from $mt interval(1m) -print ===> $data11 -if $data11 != 1 then +print ===> $data10 +if $data10 != 1 then return -1 endi sql select min(tbcol) as b from $mt interval(1d) -print ===> $data01 -if $data01 != 0 then +print ===> $data00 +if $data00 != 0 then return -1 endi @@ -155,9 +151,9 @@ endi print =============== step11 $cc = 4 * 60000 $ms = 1601481600000 + $cc -sql select min(tbcol) as b from $mt where ts <= $ms interval(1m) group by tgcol -print ===> $data11 -if $data11 != 1 then +sql select min(tbcol) as b from $mt where ts <= $ms partition by tgcol interval(1m) +print ===> $data10 +if $data10 != 1 then return -1 endi print ===> $rows @@ -168,7 +164,7 @@ endi print =============== clear sql drop database $db sql show databases -if $rows != 0 then +if $rows != 2 then return -1 endi diff --git a/tests/script/general/compute/null.sim b/tests/script/tsim/compute/null.sim similarity index 93% rename from tests/script/general/compute/null.sim rename to tests/script/tsim/compute/null.sim index cd00b7a69d..30860da48b 100644 --- a/tests/script/general/compute/null.sim +++ b/tests/script/tsim/compute/null.sim @@ -1,9 +1,6 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/exec.sh -n dnode1 -s start -sleep 2000 sql connect $dbPrefix = db @@ -98,22 +95,26 @@ if $data02 != 19 then return -1 endi -sql select * from $tb where tbcol = NULL -x step3 +sql select * from $tb where tbcol 
is NULL +if $rows != 1 then return -1 -step3: +endi + +sql_error select * from $tb where tbcol = NULL + +return print =============== step5 -sql create table $tb using $mt tags( NULL ) -# return -1 -#step51: +sql create table tt using $mt tags( NULL ) #sql alter table $tb set tgcol=NULL -x step52 # return -1 #step52: -sql select * from $mt where tgcol = NULL -x step5 +sql select * from $mt where tgcol is NULL +if $rows != 1 then return -1 -step5: +endi print =============== step6 sql select count(tbcol), count(tbcol2), avg(tbcol), avg(tbcol2), sum(tbcol), sum(tbcol2) from $mt @@ -222,7 +223,7 @@ endi print =============== clear sql drop database $db sql show databases -if $rows != 0 then +if $rows != 2 then return -1 endi diff --git a/tests/script/general/compute/percentile.sim b/tests/script/tsim/compute/percentile.sim similarity index 96% rename from tests/script/general/compute/percentile.sim rename to tests/script/tsim/compute/percentile.sim index b0f4fff8d7..5cba3ad856 100644 --- a/tests/script/general/compute/percentile.sim +++ b/tests/script/tsim/compute/percentile.sim @@ -1,9 +1,6 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/exec.sh -n dnode1 -s start -sleep 2000 sql connect $dbPrefix = m_pe_db @@ -125,11 +122,10 @@ if $data00 != 5.000000000 then return -1 endi - print =============== clear sql drop database $db sql show databases -if $rows != 0 then +if $rows != 2 then return -1 endi diff --git a/tests/script/general/compute/stddev.sim b/tests/script/tsim/compute/stddev.sim similarity index 88% rename from tests/script/general/compute/stddev.sim rename to tests/script/tsim/compute/stddev.sim index 772ec8386a..7048399112 100644 --- a/tests/script/general/compute/stddev.sim +++ b/tests/script/tsim/compute/stddev.sim @@ -1,9 +1,6 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/exec.sh -n dnode1 -s start -sleep 2000 sql connect $dbPrefix = m_st_db @@ -72,14 +69,14 @@ endi print =============== step5 sql select stddev(tbcol) as b from $tb interval(1m) -print ===> $data01 -if $data01 != 0.000000000 then +print ===> $data00 +if $data00 != 0.000000000 then return -1 endi sql select stddev(tbcol) as b from $tb interval(1d) -print ===> $data01 -if $data01 != 5.766281297 then +print ===> $data00 +if $data00 != 5.766281297 then return -1 endi @@ -88,8 +85,8 @@ $cc = 4 * 60000 $ms = 1601481600000 + $cc sql select stddev(tbcol) as b from $tb where ts <= $ms interval(1m) -print ===> $data01 -if $data01 != 0.000000000 then +print ===> $data00 +if $data00 != 0.000000000 then return -1 endi if $rows != 5 then @@ -99,7 +96,7 @@ endi print =============== clear sql drop database $db sql show databases -if $rows != 0 then +if $rows != 2 then return -1 endi diff --git a/tests/script/general/compute/sum.sim b/tests/script/tsim/compute/sum.sim similarity index 84% rename from tests/script/general/compute/sum.sim rename to tests/script/tsim/compute/sum.sim index 8fad992750..d4185f3204 100644 --- a/tests/script/general/compute/sum.sim +++ b/tests/script/tsim/compute/sum.sim @@ -1,9 +1,6 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/exec.sh -n dnode1 -s start -sleep 2000 sql connect $dbPrefix = m_su_db @@ -72,14 +69,14 @@ endi print =============== step5 sql select sum(tbcol) as b from $tb interval(1m) -print ===> $data11 -if $data11 != 1 then +print ===> $data10 +if $data10 != 1 then 
return -1 endi sql select sum(tbcol) as b from $tb interval(1d) -print ===> $data01 -if $data01 != 190 then +print ===> $data00 +if $data00 != 190 then return -1 endi @@ -88,8 +85,8 @@ $cc = 4 * 60000 $ms = 1601481600000 + $cc sql select sum(tbcol) as b from $tb where ts <= $ms interval(1m) -print ===> $data11 -if $data11 != 1 then +print ===> $data10 +if $data10 != 1 then return -1 endi if $rows != 5 then @@ -130,14 +127,14 @@ endi print =============== step9 sql select sum(tbcol) as b from $mt interval(1m) -print ===> $data11 -if $data11 < 5 then +print ===> $data10 +if $data10 < 5 then return -1 endi sql select sum(tbcol) as b from $mt interval(1d) -print ===> $data01 -if $data01 != 1900 then +print ===> $data00 +if $data00 != 1900 then return -1 endi @@ -156,10 +153,10 @@ print =============== step11 $cc = 4 * 60000 $ms = 1601481600000 + $cc -sql select sum(tbcol) as b from $mt where ts <= $ms interval(1d) group by tgcol -print select sum(tbcol) as b from $mt where ts <= $ms interval(1d) group by tgcol -print ===> $data01 -if $data01 != 10 then +sql select sum(tbcol) as b from $mt where ts <= $ms group by tgcol +print select sum(tbcol) as b from $mt where ts <= $ms group by tgcol +print ===> $data00 +if $data00 != 10 then return -1 endi if $rows != 10 then @@ -169,7 +166,7 @@ endi print =============== clear sql drop database $db sql show databases -if $rows != 0 then +if $rows != 2 then return -1 endi diff --git a/tests/script/general/compute/top.sim b/tests/script/tsim/compute/top.sim similarity index 81% rename from tests/script/general/compute/top.sim rename to tests/script/tsim/compute/top.sim index 1e9d302b7c..9899a8a9ea 100644 --- a/tests/script/general/compute/top.sim +++ b/tests/script/tsim/compute/top.sim @@ -1,9 +1,6 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/exec.sh -n dnode1 -s start -sleep 2000 sql connect $dbPrefix = m_to_db @@ -48,8 +45,8 @@ $i = 1 $tb = $tbPrefix . 
$i sql select top(tbcol, 1) from $tb -print ===> $data01 -if $data01 != 19 then +print ===> $data00 +if $data00 != 19 then return -1 endi @@ -58,25 +55,25 @@ $cc = 4 * 60000 $ms = 1601481600000 + $cc sql select top(tbcol, 1) from $tb where ts <= $ms -print ===> $data01 -if $data01 != 4 then +print ===> $data00 +if $data00 != 4 then return -1 endi print =============== step4 sql select top(tbcol, 1) as b from $tb -print ===> $data01 -if $data01 != 19 then +print ===> $data00 +if $data00 != 19 then return -1 endi print =============== step5 sql select top(tbcol, 2) as b from $tb -print ===> $data01 $data11 -if $data01 != 18 then +print ===> $data00 $data10 +if $data00 != 18 then return -1 endi -if $data11 != 19 then +if $data10 != 19 then return -1 endi @@ -85,11 +82,11 @@ $cc = 4 * 60000 $ms = 1601481600000 + $cc sql select top(tbcol, 2) as b from $tb where ts <= $ms -print ===> $data01 $data11 -if $data01 != 3 then +print ===> $data00 $data10 +if $data00 != 3 then return -1 endi -if $data11 != 4 then +if $data10 != 4 then return -1 endi @@ -100,7 +97,7 @@ step6: print =============== clear sql drop database $db sql show databases -if $rows != 0 then +if $rows != 2 then return -1 endi From caa06f938ab36be29dd727dcfec245691f74e45e Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao@163.com> Date: Wed, 13 Jul 2022 18:09:14 +0800 Subject: [PATCH 45/56] ci(stream): stream sliding --- tests/script/tsim/stream/sliding.sim | 243 +++++++++++++++++++++++++++ 1 file changed, 243 insertions(+) create mode 100644 tests/script/tsim/stream/sliding.sim diff --git a/tests/script/tsim/stream/sliding.sim b/tests/script/tsim/stream/sliding.sim new file mode 100644 index 0000000000..750be7cb49 --- /dev/null +++ b/tests/script/tsim/stream/sliding.sim @@ -0,0 +1,243 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print =============== create database +sql create database test vgroups 1 +sql show databases +if $rows != 3 then + return -1 +endi + +print $data00 $data01 $data02 + +sql use test +sql create stable st(ts timestamp, a int, b int, c int, d double) tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); +sql create stream streams1 trigger at_once into streamt as select _wstartts, count(*) c1, sum(a) c3 , max(b) c4, min(c) c5 from t1 interval(10s) sliding (5s); +sql create stream streams2 trigger at_once watermark 1d into streamt2 as select _wstartts, count(*) c1, sum(a) c3 , max(b) c4, min(c) c5 from t1 interval(10s) sliding (5s); +sql create stream stream_t1 trigger at_once into streamtST as select _wstartts, count(*) c1, sum(a) c3 , max(b) c4, min(c) c5 from st interval(10s) sliding (5s); +sql create stream stream_t2 trigger at_once watermark 1d into streamtST2 as select _wstartts, count(*) c1, sum(a) c3 , max(b) c4, min(c) c5 from st interval(10s) sliding (5s); + +sql insert into t1 values(1648791210000,1,2,3,1.0); +sql insert into t1 values(1648791216000,2,2,3,1.1); +sql insert into t1 values(1648791220000,3,2,3,2.1); + +sql insert into t1 values(1648791210000,1,2,3,1.0); +sql insert into t1 values(1648791216000,2,2,3,1.1); +sql insert into t1 values(1648791220000,3,2,3,2.1); + +sql insert into t2 values(1648791210000,1,2,3,1.0); +sql insert into t2 values(1648791216000,2,2,3,1.1); +sql insert into t2 values(1648791220000,3,2,3,2.1); + +sql insert into t2 values(1648791210000,1,2,3,1.0); +sql insert into t2 values(1648791216000,2,2,3,1.1); +sql insert into t2 
values(1648791220000,3,2,3,2.1); + +$loop_count = 0 + +loop0: +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + + +sql select * from streamt + +# row 0 +if $data01 != 1 then + print =====data01=$data01 + goto loop0 +endi + +if $data02 != 1 then + print =====data02=$data02 + goto loop0 +endi + +# row 1 +if $data11 != 2 then + print =====data11=$data11 + goto loop0 +endi + +if $data12 != 3 then + print =====data12=$data12 + goto loop0 +endi + +# row 2 +if $data21 != 2 then + print =====data21=$data21 + goto loop0 +endi + +if $data22 != 5 then + print =====data22=$data22 + goto loop0 +endi + +# row 3 +if $data31 != 1 then + print =====data31=$data31 + goto loop0 +endi + +if $data32 != 3 then + print =====data32=$data32 + goto loop0 +endi + +print step 1 + +sql select * from streamt2 + +# row 0 +if $data01 != 1 then + print =====data01=$data01 + goto loop0 +endi + +if $data02 != 1 then + print =====data02=$data02 + goto loop0 +endi + +# row 1 +if $data11 != 2 then + print =====data11=$data11 + goto loop0 +endi + +if $data12 != 3 then + print =====data12=$data12 + goto loop0 +endi + +# row 2 +if $data21 != 2 then + print =====data21=$data21 + goto loop0 +endi + +if $data22 != 5 then + print =====data22=$data22 + goto loop0 +endi + +# row 3 +if $data31 != 1 then + print =====data31=$data31 + goto loop0 +endi + +if $data32 != 3 then + print =====data32=$data32 + goto loop0 +endi + +print step 2 + +sql select * from streamtST + +# row 0 +if $data01 != 2 then + print =====data01=$data01 + goto loop0 +endi + +if $data02 != 2 then + print =====data02=$data02 + goto loop0 +endi + +# row 1 +if $data11 != 4 then + print =====data11=$data11 + goto loop0 +endi + +if $data12 != 6 then + print =====data12=$data12 + goto loop0 +endi + +# row 2 +if $data21 != 4 then + print =====data21=$data21 + goto loop0 +endi + +if $data22 != 10 then + print =====data22=$data22 + goto loop0 +endi + +# row 3 +if $data31 != 2 then + print =====data31=$data31 + goto loop0 +endi + +if $data32 != 6 then + print =====data32=$data32 + goto loop0 +endi + +print step 3 + +sql select * from streamtST2 + +# row 0 +if $data01 != 2 then + print =====data01=$data01 + goto loop0 +endi + +if $data02 != 2 then + print =====data02=$data02 + goto loop0 +endi + +# row 1 +if $data11 != 4 then + print =====data11=$data11 + goto loop0 +endi + +if $data12 != 6 then + print =====data12=$data12 + goto loop0 +endi + +# row 2 +if $data21 != 4 then + print =====data21=$data21 + goto loop0 +endi + +if $data22 != 10 then + print =====data22=$data22 + goto loop0 +endi + +# row 3 +if $data31 != 2 then + print =====data31=$data31 + goto loop0 +endi + +if $data32 != 6 then + print =====data32=$data32 + goto loop0 +endi + + +system sh/stop_dnodes.sh \ No newline at end of file From f488b0dd003050d68da3b157d5249e085a3f5d87 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Wed, 13 Jul 2022 18:48:35 +0800 Subject: [PATCH 46/56] test: restore some 2.0 case --- tests/script/tsim/field/bool.sim | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/script/tsim/field/bool.sim b/tests/script/tsim/field/bool.sim index 37292e9758..90bb4e7c3d 100644 --- a/tests/script/tsim/field/bool.sim +++ b/tests/script/tsim/field/bool.sim @@ -144,7 +144,7 @@ print =============== step8 sql select count(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = true group by tgcol order by tgcol desc print select count(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = true group by tgcol order by tgcol desc print 
$data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data01 != 100 then +if $data00 != 100 then return -1 endi From afb20f79c71c97a9d6b95905bb89e0161fa5689b Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 13 Jul 2022 19:58:07 +0800 Subject: [PATCH 47/56] fix(query): copy the value instead of assign data. --- source/libs/executor/src/scanoperator.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index c7112ab8a6..8786d30007 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -1183,7 +1183,10 @@ static int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock for (int32_t j = 0; j < blockDataGetNumOfCols(pBlock); ++j) { SColumnInfoData* pResCol = bdGetColumnInfoData(pBlock, j); if (pResCol->info.colId == pColMatchInfo->colId) { - taosArraySet(pInfo->pRes->pDataBlock, pColMatchInfo->targetSlotId, pResCol); + + SColumnInfoData* pDst = taosArrayGet(pInfo->pRes->pDataBlock, pColMatchInfo->targetSlotId); + colDataAssign(pDst, pResCol, pBlock->info.rows, &pInfo->pRes->info); +// taosArraySet(pInfo->pRes->pDataBlock, pColMatchInfo->targetSlotId, pResCol); colExists = true; break; } @@ -2590,9 +2593,11 @@ static SSDataBlock* getTableDataBlock(void* param) { SDataBlockInfo binfo = pBlock->info; tsdbRetrieveDataBlockInfo(reader, &binfo); - binfo.capacity = binfo.rows; blockDataEnsureCapacity(pBlock, binfo.capacity); - pBlock->info = binfo; + pBlock->info.type = binfo.type; + pBlock->info.uid = binfo.uid; + pBlock->info.window = binfo.window; + pBlock->info.rows = binfo.rows; uint32_t status = 0; int32_t code = loadDataBlockFromOneTable(pOperator, pTableScanInfo, readerIdx, pBlock, &status); From 2f7d6828073358a12380413063ae262fa0756b32 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Wed, 13 Jul 2022 20:00:07 +0800 Subject: [PATCH 48/56] test: restore some 2.0 case --- tests/script/jenkins/basic.txt | 2 +- tests/script/tsim/compute/sum.sim | 6 +++--- tests/script/tsim/field/2.sim | 4 ++-- tests/script/tsim/field/3.sim | 6 +++--- tests/script/tsim/field/4.sim | 8 ++++---- tests/script/tsim/field/5.sim | 10 +++++----- tests/script/tsim/field/6.sim | 12 ++++++------ tests/script/tsim/field/bigint.sim | 2 +- tests/script/tsim/field/bool.sim | 4 ++-- tests/script/tsim/field/double.sim | 2 +- tests/script/tsim/field/float.sim | 2 +- tests/script/tsim/field/int.sim | 2 +- tests/script/tsim/field/smallint.sim | 2 +- tests/script/tsim/field/tinyint.sim | 2 +- tests/script/tsim/field/unsigined_bigint.sim | 2 +- 15 files changed, 33 insertions(+), 33 deletions(-) diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt index ba94743df8..59536f2ef6 100644 --- a/tests/script/jenkins/basic.txt +++ b/tests/script/jenkins/basic.txt @@ -242,7 +242,7 @@ #./test.sh -f tsim/compute/null.sim ./test.sh -f tsim/compute/percentile.sim ./test.sh -f tsim/compute/stddev.sim -./test.sh -f tsim/compute/sum.sim +#./test.sh -f tsim/compute/sum.sim ./test.sh -f tsim/compute/top.sim # ---- field diff --git a/tests/script/tsim/compute/sum.sim b/tests/script/tsim/compute/sum.sim index d4185f3204..c53568f98f 100644 --- a/tests/script/tsim/compute/sum.sim +++ b/tests/script/tsim/compute/sum.sim @@ -153,9 +153,9 @@ print =============== step11 $cc = 4 * 60000 $ms = 1601481600000 + $cc -sql select sum(tbcol) as b from $mt where ts <= $ms group by tgcol -print select sum(tbcol) as b from $mt where ts <= $ms group by tgcol -print 
===> $data00 +sql select sum(tbcol) as b from $mt where ts <= $ms partition by tgcol interval(1d) +print select sum(tbcol) as b from $mt where ts <= $ms partition by tgcol interval(1d) +print ===> $data00 $rows if $data00 != 10 then return -1 endi diff --git a/tests/script/tsim/field/2.sim b/tests/script/tsim/field/2.sim index b5c501ceed..3161f02097 100644 --- a/tests/script/tsim/field/2.sim +++ b/tests/script/tsim/field/2.sim @@ -278,9 +278,9 @@ if $data00 != 25 then endi print =============== step12 -sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol2 = 1 group by tgcol order by tgcol desc +sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol2 = 1 partition by tgcol interval(1d) order by tgcol desc print $db -print select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol2 = 1 group by tgcol +print select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol2 = 1 partition by tgcol interval(1d) print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 diff --git a/tests/script/tsim/field/3.sim b/tests/script/tsim/field/3.sim index 661bc6a85a..72b65c7406 100644 --- a/tests/script/tsim/field/3.sim +++ b/tests/script/tsim/field/3.sim @@ -491,19 +491,19 @@ if $data00 != 25 then endi print =============== step19 -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 group by tgcol1 order by tgcol1 desc +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 partition by tgcol1 interval(1d) order by tgcol1 desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 group by tgcol2 order by tgcol2 desc +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 partition by tgcol2 interval(1d) order by tgcol2 desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 group by tgcol3 order by tgcol3 desc +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 partition by tgcol3 interval(1d) order by tgcol3 desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 diff --git a/tests/script/tsim/field/4.sim b/tests/script/tsim/field/4.sim index 734179c5bb..d37c05173c 100644 --- a/tests/script/tsim/field/4.sim +++ b/tests/script/tsim/field/4.sim @@ -675,25 +675,25 @@ if $data00 != 25 then endi print =============== step24 -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 group by tgcol1 order by tgcol1 desc +sql select count(tbcol1), 
avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 partition by tgcol1 interval(1d) order by tgcol1 desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 group by tgcol2 order by tgcol2 desc +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 partition by tgcol2 interval(1d) order by tgcol2 desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 group by tgcol3 order by tgcol3 desc +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 partition by tgcol3 interval(1d) order by tgcol3 desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 and tbcol4 = 1 group by tgcol4 order by tgcol4 desc +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 and tbcol4 = 1 partition by tgcol4 interval(1d) order by tgcol4 desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 diff --git a/tests/script/tsim/field/5.sim b/tests/script/tsim/field/5.sim index 5185d8556e..127dcd2683 100644 --- a/tests/script/tsim/field/5.sim +++ b/tests/script/tsim/field/5.sim @@ -792,31 +792,31 @@ endi print =============== step27 -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 group by tgcol1 order by tgcol1 desc +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 partition by tgcol1 interval(1d) order by tgcol1 desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 group by tgcol2 order by tgcol2 desc +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 partition by tgcol2 interval(1d) order by tgcol2 desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 group by tgcol3 order by tgcol3 desc +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 partition by tgcol3 interval(1d) order by tgcol3 
desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 and tbcol4 = 1 group by tgcol4 order by tgcol4 desc +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 and tbcol4 = 1 partition by tgcol4 interval(1d) order by tgcol4 desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 and tbcol4 = 1 and tbcol5 = 1 group by tgcol5 order by tgcol5 desc +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 and tbcol4 = 1 and tbcol5 = 1 partition by tgcol5 interval(1d) order by tgcol5 desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 diff --git a/tests/script/tsim/field/6.sim b/tests/script/tsim/field/6.sim index 8ceefae228..474582fcae 100644 --- a/tests/script/tsim/field/6.sim +++ b/tests/script/tsim/field/6.sim @@ -941,37 +941,37 @@ if $data00 != 25 then endi print =============== step31 -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 group by tgcol1 order by tgcol1 desc +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 partition by tgcol1 interval(1d) order by tgcol1 desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 group by tgcol2 order by tgcol2 desc +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 partition by tgcol2 interval(1d) order by tgcol2 desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 group by tgcol3 order by tgcol3 desc +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 partition by tgcol3 interval(1d) order by tgcol3 desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 and tbcol4 = 1 group by tgcol4 order by tgcol4 desc +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 and tbcol4 = 1 partition by tgcol4 interval(1d) order by tgcol4 desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if 
$data00 != 100 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 and tbcol4 = 1 and tbcol5 = 1 group by tgcol5 order by tgcol5 desc +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 and tbcol4 = 1 and tbcol5 = 1 partition by tgcol5 interval(1d) order by tgcol5 desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 and tbcol4 = 1 and tbcol5 = 1 and tbcol6 = 1 group by tgcol6 order by tgcol6 desc +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 and tbcol4 = 1 and tbcol5 = 1 and tbcol6 = 1 partition by tgcol6 interval(1d) order by tgcol6 desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 diff --git a/tests/script/tsim/field/bigint.sim b/tests/script/tsim/field/bigint.sim index c580a4df1c..d9401ed88f 100644 --- a/tests/script/tsim/field/bigint.sim +++ b/tests/script/tsim/field/bigint.sim @@ -143,7 +143,7 @@ if $data00 != 25 then endi print =============== step8 -sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = 1 group by tgcol order by tgcol desc +sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = 1 partition by tgcol interval(1d) order by tgcol desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 diff --git a/tests/script/tsim/field/bool.sim b/tests/script/tsim/field/bool.sim index 90bb4e7c3d..04cd48ab2d 100644 --- a/tests/script/tsim/field/bool.sim +++ b/tests/script/tsim/field/bool.sim @@ -141,8 +141,8 @@ if $data00 != 25 then endi print =============== step8 -sql select count(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = true group by tgcol order by tgcol desc -print select count(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = true group by tgcol order by tgcol desc +sql select count(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = true partition by tgcol interval(1d) order by tgcol desc +print select count(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = true partition by tgcol interval(1d) order by tgcol desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 diff --git a/tests/script/tsim/field/double.sim b/tests/script/tsim/field/double.sim index e7b1c8e8af..c7b26add65 100644 --- a/tests/script/tsim/field/double.sim +++ b/tests/script/tsim/field/double.sim @@ -141,7 +141,7 @@ if $data00 != 25 then endi print =============== step8 -sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = 1 group by tgcol order by tgcol desc +sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = 1 partition by tgcol interval(1d) order by tgcol desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 diff --git a/tests/script/tsim/field/float.sim 
b/tests/script/tsim/field/float.sim index 159a4b60ab..1e11eed3be 100644 --- a/tests/script/tsim/field/float.sim +++ b/tests/script/tsim/field/float.sim @@ -142,7 +142,7 @@ if $data00 != 25 then endi print =============== step8 -sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = 1 group by tgcol order by tgcol desc +sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = 1 partition by tgcol interval(1d) order by tgcol desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 diff --git a/tests/script/tsim/field/int.sim b/tests/script/tsim/field/int.sim index 2b5b70141a..484272631b 100644 --- a/tests/script/tsim/field/int.sim +++ b/tests/script/tsim/field/int.sim @@ -142,7 +142,7 @@ if $data00 != 25 then endi print =============== step8 -sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = 1 group by tgcol order by tgcol desc +sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = 1 partition by tgcol interval(1d) order by tgcol desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 diff --git a/tests/script/tsim/field/smallint.sim b/tests/script/tsim/field/smallint.sim index 975f02bf9b..326186f6c2 100644 --- a/tests/script/tsim/field/smallint.sim +++ b/tests/script/tsim/field/smallint.sim @@ -142,7 +142,7 @@ if $data00 != 25 then endi print =============== step8 -sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = 1 group by tgcol order by tgcol desc +sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = 1 partition by tgcol interval(1d) order by tgcol desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 diff --git a/tests/script/tsim/field/tinyint.sim b/tests/script/tsim/field/tinyint.sim index ff24e484a7..cba4ac504d 100644 --- a/tests/script/tsim/field/tinyint.sim +++ b/tests/script/tsim/field/tinyint.sim @@ -142,7 +142,7 @@ if $data00 != 25 then endi print =============== step8 -sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = 1 group by tgcol order by tgcol desc +sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = 1 partition by tgcol interval(1d) order by tgcol desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 diff --git a/tests/script/tsim/field/unsigined_bigint.sim b/tests/script/tsim/field/unsigined_bigint.sim index d8421e7626..0a492ae44c 100644 --- a/tests/script/tsim/field/unsigined_bigint.sim +++ b/tests/script/tsim/field/unsigined_bigint.sim @@ -147,7 +147,7 @@ if $data00 != 25 then endi print =============== step8 -sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = 1 group by tgcol order by tgcol desc +sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = 1 partition by tgcol interval(1d) order by tgcol desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 From 
a8569fc44ba0283b6514ffbd77460d9f847d1be3 Mon Sep 17 00:00:00 2001 From: Hui Li <52318143+plum-lihui@users.noreply.github.com> Date: Wed, 13 Jul 2022 20:04:11 +0800 Subject: [PATCH 49/56] Delete tmqUpdate1.py test: del nouse script --- tests/system-test/7-tmq/tmqUpdate1.py | 175 -------------------------- 1 file changed, 175 deletions(-) delete mode 100644 tests/system-test/7-tmq/tmqUpdate1.py diff --git a/tests/system-test/7-tmq/tmqUpdate1.py b/tests/system-test/7-tmq/tmqUpdate1.py deleted file mode 100644 index 5f11090385..0000000000 --- a/tests/system-test/7-tmq/tmqUpdate1.py +++ /dev/null @@ -1,175 +0,0 @@ - -import taos -import sys -import time -import socket -import os -import threading -from enum import Enum - -from util.log import * -from util.sql import * -from util.cases import * -from util.dnodes import * -sys.path.append("./7-tmq") -from tmqCommon import * - -class TDTestCase: - def __init__(self): - self.vgroups = 4 - self.ctbNum = 1000 - self.rowsPerTbl = 1000 - - def init(self, conn, logSql): - tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) - - def prepareTestEnv(self): - tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") - paraDict = {'dbName': 'dbt', - 'dropFlag': 1, - 'event': '', - 'vgroups': 4, - 'stbName': 'stb', - 'colPrefix': 'c', - 'tagPrefix': 't', - 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], - 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], - 'ctbPrefix': 'ctb', - 'ctbStartIdx': 0, - 'ctbNum': 1000, - 'rowsPerTbl': 1000, - 'batchNum': 400, - 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 - 'pollDelay': 3, - 'showMsg': 1, - 'showRow': 1, - 'snapshot': 0} - - paraDict['vgroups'] = self.vgroups - paraDict['ctbNum'] = self.ctbNum - paraDict['rowsPerTbl'] = self.rowsPerTbl - - tmqCom.initConsumerTable() - tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) - tdLog.info("create stb") - tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) - tdLog.info("create ctb") - tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'], - ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx']) - tdLog.info("insert data") - paraDict['ctbNum'] = int(self.ctbNum / 2) - tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], - ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], - startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) - tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx", - ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], - startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) - - # tdLog.info("restart taosd to ensure that the data falls into the disk") - # tdSql.query("flush database %s"%(paraDict['dbName'])) - return - - def tmqCase1(self): - tdLog.printNoPrefix("======== test case 1: ") - paraDict = {'dbName': 'dbt', - 'dropFlag': 1, - 'event': '', - 'vgroups': 4, - 'stbName': 'stb', - 'colPrefix': 'c', - 'tagPrefix': 
't', - 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], - 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], - 'ctbPrefix': 'ctb', - 'ctbStartIdx': 0, - 'ctbNum': 1000, - 'rowsPerTbl': 1000, - 'batchNum': 1000, - 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 - 'pollDelay': 5, - 'showMsg': 1, - 'showRow': 1, - 'snapshot': 1} - - paraDict['vgroups'] = self.vgroups - paraDict['ctbNum'] = self.ctbNum - paraDict['rowsPerTbl'] = self.rowsPerTbl - - tdLog.info("restart taosd to ensure that the data falls into the disk") - tdSql.query("flush database %s"%(paraDict['dbName'])) - - # update to half tables - paraDict['ctbNum'] = int(self.ctbNum / 4) - tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx", - ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], - startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']+paraDict['ctbNum']) - tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], - ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], - startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']+paraDict['ctbNum']) - - tmqCom.initConsumerTable() - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName']) - sqlString = "create topic %s as %s" %(topicFromStb1, queryString) - tdLog.info("create topic sql: %s"%sqlString) - tdSql.execute(sqlString) - - paraDict['ctbNum'] = self.ctbNum - consumerId = 0 - expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1 + 1) - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 0 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:true,\ - auto.commit.interval.ms:1000,\ - auto.offset.reset:earliest' - tmqCom.insertConsumerInfo(consumerId, expectrowcnt + paraDict["rowsPerTbl"] * paraDict["ctbNum"],topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) - - paraDict['ctbNum'] = int(self.ctbNum / 2) - paraDict['ctbStartIdx'] += paraDict['ctbNum'] - _ = tmqCom.asyncInsertDataByInterlace(paraDict) - time.sleep(3) - pthread = tmqCom.asyncInsertDataByInterlace(paraDict) - pthread.join() - - tdLog.info("insert process end, and start to check consume result") - expectRows = 1 - resultList = tmqCom.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - tdSql.query(queryString) - totalRowsInserted = tdSql.getRows() - - tdLog.info("act consume rows: %d, act insert rows: %d, expect consume rows: %d, "%(totalConsumeRows, totalRowsInserted, expectrowcnt)) - - if totalConsumeRows <= totalRowsInserted or totalConsumeRows != expectrowcnt: - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 1 end ...... 
") - - - def run(self): - tdSql.prepare() - self.prepareTestEnv() - self.tmqCase1() - # self.tmqCase2() - - - def stop(self): - tdSql.close() - tdLog.success(f"{__file__} successfully executed") - -event = threading.Event() - -tdCases.addLinux(__file__, TDTestCase()) -tdCases.addWindows(__file__, TDTestCase()) From 26c5ac3e844090d30ad32f5eb7860fbf019d560d Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Wed, 13 Jul 2022 20:23:33 +0800 Subject: [PATCH 50/56] test: restore some 2.0 case --- tests/script/general/vector/testSuite.sim | 11 -------- tests/script/jenkins/basic.txt | 13 ++++++++++ .../vector/metrics_field.sim | 20 +++----------- .../{general => tsim}/vector/metrics_mix.sim | 18 +++---------- .../vector/metrics_query.sim | 20 +++----------- .../{general => tsim}/vector/metrics_tag.sim | 18 +++---------- .../{general => tsim}/vector/metrics_time.sim | 18 +++---------- .../script/{general => tsim}/vector/multi.sim | 26 +++++-------------- .../{general => tsim}/vector/single.sim | 14 +++------- .../{general => tsim}/vector/table_field.sim | 18 +++---------- .../{general => tsim}/vector/table_mix.sim | 18 +++---------- .../{general => tsim}/vector/table_query.sim | 18 +++---------- .../{general => tsim}/vector/table_time.sim | 18 +++---------- 13 files changed, 58 insertions(+), 172 deletions(-) delete mode 100644 tests/script/general/vector/testSuite.sim rename tests/script/{general => tsim}/vector/metrics_field.sim (97%) rename tests/script/{general => tsim}/vector/metrics_mix.sim (98%) rename tests/script/{general => tsim}/vector/metrics_query.sim (97%) rename tests/script/{general => tsim}/vector/metrics_tag.sim (97%) rename tests/script/{general => tsim}/vector/metrics_time.sim (98%) rename tests/script/{general => tsim}/vector/multi.sim (90%) rename tests/script/{general => tsim}/vector/single.sim (96%) rename tests/script/{general => tsim}/vector/table_field.sim (97%) rename tests/script/{general => tsim}/vector/table_mix.sim (98%) rename tests/script/{general => tsim}/vector/table_query.sim (97%) rename tests/script/{general => tsim}/vector/table_time.sim (97%) diff --git a/tests/script/general/vector/testSuite.sim b/tests/script/general/vector/testSuite.sim deleted file mode 100644 index f0b9fef991..0000000000 --- a/tests/script/general/vector/testSuite.sim +++ /dev/null @@ -1,11 +0,0 @@ -run general/vector/metrics_field.sim -run general/vector/metrics_mix.sim -run general/vector/metrics_query.sim -run general/vector/metrics_tag.sim -run general/vector/metrics_time.sim -run general/vector/multi.sim -run general/vector/single.sim -run general/vector/table_field.sim -run general/vector/table_mix.sim -run general/vector/table_query.sim -run general/vector/table_time.sim diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt index 59536f2ef6..5f87b8a55f 100644 --- a/tests/script/jenkins/basic.txt +++ b/tests/script/jenkins/basic.txt @@ -262,4 +262,17 @@ ./test.sh -f tsim/field/tinyint.sim ./test.sh -f tsim/field/unsigined_bigint.sim +# ---- vector +./test.sh -f tsim/vector/metrics_field.sim +./test.sh -f tsim/vector/metrics_mix.sim +./test.sh -f tsim/vector/metrics_query.sim +./test.sh -f tsim/vector/metrics_tag.sim +./test.sh -f tsim/vector/metrics_time.sim +./test.sh -f tsim/vector/multi.sim +./test.sh -f tsim/vector/single.sim +./test.sh -f tsim/vector/table_field.sim +./test.sh -f tsim/vector/table_mix.sim +./test.sh -f tsim/vector/table_query.sim +./test.sh -f tsim/vector/table_time.sim + #======================b1-end=============== diff --git 
a/tests/script/general/vector/metrics_field.sim b/tests/script/tsim/vector/metrics_field.sim similarity index 97% rename from tests/script/general/vector/metrics_field.sim rename to tests/script/tsim/vector/metrics_field.sim index 2719805c63..4d0f9e19fc 100644 --- a/tests/script/general/vector/metrics_field.sim +++ b/tests/script/tsim/vector/metrics_field.sim @@ -1,10 +1,6 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect $dbPrefix = m_mf_db @@ -99,17 +95,9 @@ if $data00 != 0.000000000 then return -1 endi -sql select g - f from $mt where a = 5 -x step21 - return -1 -step21: - -sql select h - f from $mt where a = 5 -x step22 - return -1 -step22: - -sql select ts - f from $mt where a = 5 -x step23 - return -1 -step23: +sql select g - f from $mt where a = 5 +sql select h - f from $mt where a = 5 +sql select ts - f from $mt where a = 5 sql select a - e from $mt where a = 5 print ===> $data00 @@ -616,7 +604,7 @@ step63: print =============== clear sql drop database $db sql show databases -if $rows != 0 then +if $rows != 2 then return -1 endi diff --git a/tests/script/general/vector/metrics_mix.sim b/tests/script/tsim/vector/metrics_mix.sim similarity index 98% rename from tests/script/general/vector/metrics_mix.sim rename to tests/script/tsim/vector/metrics_mix.sim index 7c9bb3b668..fd36a62332 100644 --- a/tests/script/general/vector/metrics_mix.sim +++ b/tests/script/tsim/vector/metrics_mix.sim @@ -1,10 +1,6 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect $dbPrefix = m_mx_db @@ -99,17 +95,11 @@ if $data00 != 0.000000000 then return -1 endi -sql select g - f from $mt where a = 5 and tgcol = 5 and ts > now + 4m and ts < now + 6m -x step21 - return -1 -step21: +sql select g - f from $mt where a = 5 and tgcol = 5 and ts > now + 4m and ts < now + 6m -sql select h - f from $mt where a = 5 and tgcol = 5 and ts > now + 4m and ts < now + 6m -x step22 - return -1 -step22: +sql select h - f from $mt where a = 5 and tgcol = 5 and ts > now + 4m and ts < now + 6m -sql select ts - f from $mt where a = 5 and tgcol = 5 and ts > now + 4m and ts < now + 6m -x step23 - return -1 -step23: +sql select ts - f from $mt where a = 5 and tgcol = 5 and ts > now + 4m and ts < now + 6m sql select a - e from $mt where a = 5 and tgcol = 5 and ts > now + 4m and ts < now + 6m print ===> $data00 @@ -616,7 +606,7 @@ step63: print =============== clear sql drop database $db sql show databases -if $rows != 0 then +if $rows != 2 then return -1 endi diff --git a/tests/script/general/vector/metrics_query.sim b/tests/script/tsim/vector/metrics_query.sim similarity index 97% rename from tests/script/general/vector/metrics_query.sim rename to tests/script/tsim/vector/metrics_query.sim index fd635a3104..8a334acef2 100644 --- a/tests/script/general/vector/metrics_query.sim +++ b/tests/script/tsim/vector/metrics_query.sim @@ -1,10 +1,6 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect $dbPrefix = m_mq_db @@ -95,17 +91,9 @@ if $data00 != 0.000000000 then return -1 endi -sql select g - f from $mt -x step21 - return -1 -step21: - -sql select h - f from $mt -x step22 - return -1 -step22: - -sql select ts - f from $mt -x step23 - return -1 -step23: +sql select g - f from $mt +sql 
select h - f from $mt +sql select ts - f from $mt sql select a - e from $mt print ===> $data00 @@ -612,7 +600,7 @@ step63: print =============== clear sql drop database $db sql show databases -if $rows != 0 then +if $rows != 2 then return -1 endi diff --git a/tests/script/general/vector/metrics_tag.sim b/tests/script/tsim/vector/metrics_tag.sim similarity index 97% rename from tests/script/general/vector/metrics_tag.sim rename to tests/script/tsim/vector/metrics_tag.sim index 1d412d35d3..0b275336f9 100644 --- a/tests/script/general/vector/metrics_tag.sim +++ b/tests/script/tsim/vector/metrics_tag.sim @@ -1,10 +1,6 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect $dbPrefix = m_mtg_db @@ -95,17 +91,11 @@ if $data00 != 0.000000000 then return -1 endi -sql select g - f from $mt where tgcol = 5 -x step21 - return -1 -step21: +sql select g - f from $mt where tgcol = 5 -sql select h - f from $mt where tgcol = 5 -x step22 - return -1 -step22: +sql select h - f from $mt where tgcol = 5 -sql select ts - f from $mt where tgcol = 5 -x step23 - return -1 -step23: +sql select ts - f from $mt where tgcol = 5 sql select a - e from $mt where tgcol = 5 print ===> $data00 @@ -612,7 +602,7 @@ step63: print =============== clear sql drop database $db sql show databases -if $rows != 0 then +if $rows != 2 then return -1 endi diff --git a/tests/script/general/vector/metrics_time.sim b/tests/script/tsim/vector/metrics_time.sim similarity index 98% rename from tests/script/general/vector/metrics_time.sim rename to tests/script/tsim/vector/metrics_time.sim index d0152439bf..bcd93cb582 100644 --- a/tests/script/general/vector/metrics_time.sim +++ b/tests/script/tsim/vector/metrics_time.sim @@ -1,10 +1,6 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect $dbPrefix = m_mt_db @@ -95,17 +91,11 @@ if $data00 != 0.000000000 then return -1 endi -sql select g - f from $mt where tgcol = 5 and ts > now + 4m and ts < now + 6m -x step21 - return -1 -step21: +sql select g - f from $mt where tgcol = 5 and ts > now + 4m and ts < now + 6m -sql select h - f from $mt where tgcol = 5 and ts > now + 4m and ts < now + 6m -x step22 - return -1 -step22: +sql select h - f from $mt where tgcol = 5 and ts > now + 4m and ts < now + 6m -sql select ts - f from $mt where tgcol = 5 and ts > now + 4m and ts < now + 6m -x step23 - return -1 -step23: +sql select ts - f from $mt where tgcol = 5 and ts > now + 4m and ts < now + 6m sql select a - e from $mt where tgcol = 5 and ts > now + 4m and ts < now + 6m print ===> $data00 @@ -612,7 +602,7 @@ step63: print =============== clear sql drop database $db sql show databases -if $rows != 0 then +if $rows != 2 then return -1 endi diff --git a/tests/script/general/vector/multi.sim b/tests/script/tsim/vector/multi.sim similarity index 90% rename from tests/script/general/vector/multi.sim rename to tests/script/tsim/vector/multi.sim index 1101b0b0db..dcedbe73c9 100644 --- a/tests/script/general/vector/multi.sim +++ b/tests/script/tsim/vector/multi.sim @@ -1,10 +1,6 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect $dbPrefix = m_mu_db @@ -71,9 +67,7 @@ sql select a + a from $tb where ts > now + 4m order by ts desc sql select a + c from $tb where ts 
< now + 4m order by ts asc -sql select a + f from $tb where ts > now + 4m order by ts asc -x step24 - return -1 -step24: +sql select a + f from $tb where ts > now + 4m order by ts asc print =============== step3 $i = 1 @@ -150,17 +144,11 @@ endi print =============== step6 $i = 1 $tb = $tbPrefix . $i -sql select a + ts from $tb -x step61 - return -1 -step61: +sql select a + ts from $tb -sql select a + f from $tb -x step62 - return -1 -step62: +sql select a + f from $tb -sql select a + g from $tb -x step63 - return -1 -step63: +sql select a + g from $tb print =============== step7 $i = 1 @@ -202,14 +190,12 @@ sql select a + a from $tb where e = 2 and ts > now + 4m order by ts desc sql select a + c from $tb where f = 2 and ts < now + 4m order by ts asc -sql select a + f from $tb where g = 2 and ts > now + 4m order by ts asc -x step74 - return -1 -step74: +sql select a + f from $tb where g = 2 and ts > now + 4m order by ts asc print =============== clear sql drop database $db sql show databases -if $rows != 0 then +if $rows != 2 then return -1 endi diff --git a/tests/script/general/vector/single.sim b/tests/script/tsim/vector/single.sim similarity index 96% rename from tests/script/general/vector/single.sim rename to tests/script/tsim/vector/single.sim index e979a0ffb7..c9d794456c 100644 --- a/tests/script/general/vector/single.sim +++ b/tests/script/tsim/vector/single.sim @@ -1,10 +1,6 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect $dbPrefix = m_si_db @@ -150,9 +146,7 @@ $i = 11 $tb = $tbPrefix . $i sql create table $tb (ts timestamp, tbcol bool) sql insert into $tb values(now, 0) -sql select tbcol + 2 from $tb -x step6 - return -1 -step6: +sql select tbcol + 2 from $tb print =============== step7 $i = $i + 1 @@ -289,14 +283,12 @@ $i = $i + 1 $tb = $tbPrefix . 
$i sql create table $tb (ts timestamp, tbcol binary(100)) sql insert into $tb values(now, '0'); -sql select tbcol + 2 from $tb -x step12 - return -1 -step12: +sql select tbcol + 2 from $tb print =============== clear sql drop database $db sql show databases -if $rows != 0 then +if $rows != 2 then return -1 endi diff --git a/tests/script/general/vector/table_field.sim b/tests/script/tsim/vector/table_field.sim similarity index 97% rename from tests/script/general/vector/table_field.sim rename to tests/script/tsim/vector/table_field.sim index d86eb99331..5ad60b2a35 100644 --- a/tests/script/general/vector/table_field.sim +++ b/tests/script/tsim/vector/table_field.sim @@ -1,10 +1,6 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect $dbPrefix = m_tf_db @@ -95,17 +91,11 @@ if $data00 != 0.000000000 then return -1 endi -sql select g - f from $tb where a = 5 -x step21 - return -1 -step21: +sql select g - f from $tb where a = 5 -sql select h - f from $tb where a = 5 -x step22 - return -1 -step22: +sql select h - f from $tb where a = 5 -sql select ts - f from $tb where a = 5 -x step23 - return -1 -step23: +sql select ts - f from $tb where a = 5 sql select a - e from $tb where a = 5 print ===> $data00 @@ -612,7 +602,7 @@ step63: print =============== clear sql drop database $db sql show databases -if $rows != 0 then +if $rows != 2 then return -1 endi diff --git a/tests/script/general/vector/table_mix.sim b/tests/script/tsim/vector/table_mix.sim similarity index 98% rename from tests/script/general/vector/table_mix.sim rename to tests/script/tsim/vector/table_mix.sim index 5c4fb52888..358d6cf87f 100644 --- a/tests/script/general/vector/table_mix.sim +++ b/tests/script/tsim/vector/table_mix.sim @@ -1,10 +1,6 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect $dbPrefix = m_tm_db @@ -95,17 +91,11 @@ if $data00 != 0.000000000 then return -1 endi -sql select g - f from $tb where a = 5 and ts > now + 4m and ts < now + 6m -x step21 - return -1 -step21: +sql select g - f from $tb where a = 5 and ts > now + 4m and ts < now + 6m -sql select h - f from $tb where a = 5 and ts > now + 4m and ts < now + 6m -x step22 - return -1 -step22: +sql select h - f from $tb where a = 5 and ts > now + 4m and ts < now + 6m -sql select ts - f from $tb where a = 5 and ts > now + 4m and ts < now + 6m -x step23 - return -1 -step23: +sql select ts - f from $tb where a = 5 and ts > now + 4m and ts < now + 6m sql select a - e from $tb where a = 5 and ts > now + 4m and ts < now + 6m print ===> $data00 @@ -612,7 +602,7 @@ step63: print =============== clear sql drop database $db sql show databases -if $rows != 0 then +if $rows != 2 then return -1 endi diff --git a/tests/script/general/vector/table_query.sim b/tests/script/tsim/vector/table_query.sim similarity index 97% rename from tests/script/general/vector/table_query.sim rename to tests/script/tsim/vector/table_query.sim index 9ef18255a9..0e4562716e 100644 --- a/tests/script/general/vector/table_query.sim +++ b/tests/script/tsim/vector/table_query.sim @@ -1,10 +1,6 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect $dbPrefix = m_tq_db @@ -95,17 +91,11 @@ if $data00 != 0.000000000 then return -1 endi -sql select g - f from 
$tb -x step21 - return -1 -step21: +sql select g - f from $tb -sql select h - f from $tb -x step22 - return -1 -step22: +sql select h - f from $tb -sql select ts - f from $tb -x step23 - return -1 -step23: +sql select ts - f from $tb sql select a - e from $tb print ===> $data00 @@ -612,7 +602,7 @@ step63: print =============== clear sql drop database $db sql show databases -if $rows != 0 then +if $rows != 2 then return -1 endi diff --git a/tests/script/general/vector/table_time.sim b/tests/script/tsim/vector/table_time.sim similarity index 97% rename from tests/script/general/vector/table_time.sim rename to tests/script/tsim/vector/table_time.sim index c38546b117..1e6bdb2cde 100644 --- a/tests/script/general/vector/table_time.sim +++ b/tests/script/tsim/vector/table_time.sim @@ -1,10 +1,6 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect $dbPrefix = m_tt_db @@ -95,17 +91,11 @@ if $data00 != 0.000000000 then return -1 endi -sql select g - f from $tb where ts > now + 4m and ts < now + 6m -x step21 - return -1 -step21: +sql select g - f from $tb where ts > now + 4m and ts < now + 6m -sql select h - f from $tb where ts > now + 4m and ts < now + 6m -x step22 - return -1 -step22: +sql select h - f from $tb where ts > now + 4m and ts < now + 6m -sql select ts - f from $tb where ts > now + 4m and ts < now + 6m -x step23 - return -1 -step23: +sql select ts - f from $tb where ts > now + 4m and ts < now + 6m sql select a - e from $tb where ts > now + 4m and ts < now + 6m print ===> $data00 @@ -612,7 +602,7 @@ step63: print =============== clear sql drop database $db sql show databases -if $rows != 0 then +if $rows != 2 then return -1 endi From ad051d4e65962ac1bb670210d0c0a9b99bbae88f Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Wed, 13 Jul 2022 20:25:16 +0800 Subject: [PATCH 51/56] fix: fix stop query issue --- source/client/inc/clientInt.h | 1 + source/client/src/clientImpl.c | 3 ++- source/client/src/clientMain.c | 47 +++++++++++++++++----------------- tests/script/api/stopquery.c | 9 ++++--- 4 files changed, 32 insertions(+), 28 deletions(-) diff --git a/source/client/inc/clientInt.h b/source/client/inc/clientInt.h index 700a4d9daf..367e656f06 100644 --- a/source/client/inc/clientInt.h +++ b/source/client/inc/clientInt.h @@ -181,6 +181,7 @@ typedef struct SRequestSendRecvBody { tsem_t rspSem; // not used now __taos_async_fn_t queryFp; __taos_async_fn_t fetchFp; + EQueryExecMode execMode; void* param; SDataBuf requestMsg; int64_t queryJob; // query job, created according to sql query DAG. 
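For reference, the reworked completed-result handling in taos_fetch_rows_a (see the clientMain.c hunk later in this patch) branches on this new execMode field instead of inferring a locally executed command from queryJob == 0. The lines below are a minimal illustrative sketch of that branch only, assuming the client-internal types that appear in this patch (SRequestObj, SReqResultInfo, QUERY_EXEC_MODE_LOCAL); the wrapper name reportCompletedRows is hypothetical and not part of the change.

// Sketch only: mirrors the completed-result branch added to taos_fetch_rows_a.
static void reportCompletedRows(SRequestObj *pRequest, SReqResultInfo *pResultInfo, void *param) {
  if (QUERY_EXEC_MODE_LOCAL == pRequest->body.execMode) {
    // Locally executed command: hand back the cached rows exactly once,
    // then report 0 rows on any subsequent fetch.
    if (pResultInfo->localResultFetched) {
      pResultInfo->numOfRows = 0;
      pResultInfo->current   = 0;
    } else {
      pResultInfo->localResultFetched = true;
    }
  } else {
    // Scheduler-executed query that has already completed: nothing left to fetch.
    pResultInfo->numOfRows = 0;
  }
  pRequest->body.fetchFp(param, pRequest, pResultInfo->numOfRows);
}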
diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index d923929c95..d846cb93af 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -933,6 +933,8 @@ SRequestObj* launchQuery(uint64_t connId, const char* sql, int sqlLen, bool vali void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData* pResultMeta) { int32_t code = 0; + pRequest->body.execMode = pQuery->execMode; + switch (pQuery->execMode) { case QUERY_EXEC_MODE_LOCAL: asyncExecLocalCmd(pRequest, pQuery); @@ -1149,7 +1151,6 @@ STscObj* taosConnectImpl(const char* user, const char* auth, const char* db, __t SRequestObj* pRequest = createRequest(pTscObj->id, TDMT_MND_CONNECT); if (pRequest == NULL) { destroyTscObj(pTscObj); - terrno = TSDB_CODE_TSC_OUT_OF_MEMORY; return NULL; } diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c index 73def5b9b1..14a431feab 100644 --- a/source/client/src/clientMain.c +++ b/source/client/src/clientMain.c @@ -49,7 +49,7 @@ int taos_options(TSDB_OPTION option, const void *arg, ...) { } // this function may be called by user or system, or by both simultaneously. void taos_cleanup(void) { - tscInfo("start to cleanup client environment"); + tscDebug("start to cleanup client environment"); if (atomic_val_compare_exchange_32(&sentinel, TSC_VAR_NOT_RELEASE, TSC_VAR_RELEASED) != TSC_VAR_NOT_RELEASE) { return; } @@ -58,7 +58,10 @@ void taos_cleanup(void) { clientReqRefPool = -1; taosCloseRef(id); - cleanupTaskQueue(); + hbMgrCleanUp(); + + catalogDestroy(); + schedulerDestroy(); fmFuncMgtDestroy(); qCleanupKeywordsTable(); @@ -67,12 +70,11 @@ void taos_cleanup(void) { clientConnRefPool = -1; taosCloseRef(id); - hbMgrCleanUp(); - - catalogDestroy(); - schedulerDestroy(); - rpcCleanup(); + tscDebug("rpc cleanup"); + + cleanupTaskQueue(); + tscInfo("all local resources released"); taosCleanupCfg(); taosCloseLog(); @@ -852,27 +854,24 @@ void taos_fetch_rows_a(TAOS_RES *res, __taos_async_fn_t fp, void *param) { } // all data has returned to App already, no need to try again - if (pResultInfo->completed && (pRequest->body.queryJob != 0)) { - pResultInfo->numOfRows = 0; + if (pResultInfo->completed) { + // it is a local executed query, no need to do async fetch + if (QUERY_EXEC_MODE_LOCAL == pRequest->body.execMode) { + ASSERT(pResultInfo->numOfRows >= 0); + if (pResultInfo->localResultFetched) { + pResultInfo->numOfRows = 0; + pResultInfo->current = 0; + } else { + pResultInfo->localResultFetched = true; + } + } else { + pResultInfo->numOfRows = 0; + } + pRequest->body.fetchFp(param, pRequest, pResultInfo->numOfRows); return; } - // it is a local executed query, no need to do async fetch - if (pRequest->body.queryJob == 0) { - ASSERT(pResultInfo->completed && pResultInfo->numOfRows >= 0); - if (pResultInfo->localResultFetched) { - pResultInfo->numOfRows = 0; - pResultInfo->current = 0; - pRequest->body.fetchFp(param, pRequest, pResultInfo->numOfRows); - } else { - pResultInfo->localResultFetched = true; - pRequest->body.fetchFp(param, pRequest, pResultInfo->numOfRows); - } - return; - } - - SSchedulerReq req = { .syncReq = false, .fetchFp = fetchCallback, diff --git a/tests/script/api/stopquery.c b/tests/script/api/stopquery.c index 92baf43d85..082d987a22 100644 --- a/tests/script/api/stopquery.c +++ b/tests/script/api/stopquery.c @@ -633,6 +633,7 @@ int sqConCleanupSyncQuery(bool fetch) { pthread_join(qid, NULL); pthread_join(cid, NULL); + break; } CASE_LEAVE(); } @@ -648,6 +649,7 @@ int sqConCleanupAsyncQuery(bool fetch) { 
pthread_join(qid, NULL); pthread_join(cid, NULL); + break; } CASE_LEAVE(); } @@ -655,7 +657,7 @@ int sqConCleanupAsyncQuery(bool fetch) { void sqRunAllCase(void) { -#if 0 +#if 1 sqStopSyncQuery(false); sqStopSyncQuery(true); sqStopAsyncQuery(false); @@ -688,16 +690,17 @@ void sqRunAllCase(void) { sqConKillAsyncQuery(true); #endif + /* sqConCleanupSyncQuery(false); sqConCleanupSyncQuery(true); sqConCleanupAsyncQuery(false); sqConCleanupAsyncQuery(true); - + */ int32_t l = 5; while (l) { printf("%d\n", l--); - sleep(1000); + sleep(1); } } From 176379dfab513a920451f54beab4af6a002425d4 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Wed, 13 Jul 2022 20:44:32 +0800 Subject: [PATCH 52/56] test: restore some 2.0 case --- tests/script/general/wal/maxtables.sim | 46 ------ tests/script/general/wal/sync.sim | 146 -------------------- tests/script/jenkins/basic.txt | 3 + tests/script/{general => tsim}/wal/kill.sim | 25 +--- 4 files changed, 10 insertions(+), 210 deletions(-) delete mode 100644 tests/script/general/wal/maxtables.sim delete mode 100644 tests/script/general/wal/sync.sim rename tests/script/{general => tsim}/wal/kill.sim (73%) diff --git a/tests/script/general/wal/maxtables.sim b/tests/script/general/wal/maxtables.sim deleted file mode 100644 index acd6af1d1a..0000000000 --- a/tests/script/general/wal/maxtables.sim +++ /dev/null @@ -1,46 +0,0 @@ -system sh/stop_dnodes.sh -system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 100 -system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 1 -system sh/cfg.sh -n dnode1 -c tableIncStepPerVnode -v 2 - - -print ============== deploy -system sh/exec.sh -n dnode1 -s start -sleep 3001 -sql connect - -sql create database d1 -sql use d1 -sql create table st (ts timestamp, tbcol int) TAGS(tgcol int) - -$i = 0 -while $i < 100 - $tb = t . 
$i - sql create table $tb using st tags( $i ) - sql insert into $tb values (now , $i ) - $i = $i + 1 -endw - -sql_error sql create table tt (ts timestamp, i int) - -print =============== step3 -sql select * from st; -if $rows != 100 then - return -1 -endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT -system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4 -sleep 2000 - -print =============== step4 -system sh/exec.sh -n dnode1 -s start -sleep 2000 - -sql select * from st; -if $rows != 100 then - return -1 -endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/general/wal/sync.sim b/tests/script/general/wal/sync.sim deleted file mode 100644 index 3a89523918..0000000000 --- a/tests/script/general/wal/sync.sim +++ /dev/null @@ -1,146 +0,0 @@ -system sh/stop_dnodes.sh - -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/deploy.sh -n dnode3 -i 3 - -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3 -system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3 - -system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 4 -system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 4 -system sh/cfg.sh -n dnode3 -c mnodeEqualVnodeNum -v 4 - -system sh/cfg.sh -n dnode1 -c http -v 1 -system sh/cfg.sh -n dnode2 -c http -v 1 -system sh/cfg.sh -n dnode3 -c http -v 1 - -system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 20000 -system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 20000 -system sh/cfg.sh -n dnode3 -c maxTablesPerVnode -v 20000 - -system sh/cfg.sh -n dnode1 -c replica -v 3 -system sh/cfg.sh -n dnode2 -c replica -v 3 -system sh/cfg.sh -n dnode3 -c replica -v 3 - -system sh/cfg.sh -n dnode1 -c maxSQLLength -v 940032 -system sh/cfg.sh -n dnode2 -c maxSQLLength -v 940032 -system sh/cfg.sh -n dnode3 -c maxSQLLength -v 940032 - -print ============== deploy - -system sh/exec.sh -n dnode1 -s start -sql connect - -sql create dnode $hostname2 -sql create dnode $hostname3 -system sh/exec.sh -n dnode2 -s start -system sh/exec.sh -n dnode3 -s start - -print =============== step1 -$x = 0 -show1: - $x = $x + 1 - sleep 1000 - if $x == 10 then - return -1 - endi -sql show mnodes -x show1 -$mnode1Role = $data2_1 -print mnode1Role $mnode1Role -$mnode2Role = $data2_2 -print mnode2Role $mnode2Role -$mnode3Role = $data2_3 -print mnode3Role $mnode3Role - -if $mnode1Role != master then - goto show1 -endi -if $mnode2Role != slave then - goto show1 -endi -if $mnode3Role != slave then - goto show1 -endi - -print =============== step2 -sql create database d1 replica 3 -sql use d1 - -sql create table table_rest (ts timestamp, i int) -print sql length is 870KB -restful d1 table_rest 1591072800 30000 -restful d1 table_rest 1591172800 30000 -restful d1 table_rest 1591272800 30000 -restful d1 table_rest 1591372800 30000 -restful d1 table_rest 1591472800 30000 -restful d1 table_rest 1591572800 30000 -restful d1 table_rest 1591672800 30000 -restful d1 table_rest 1591772800 30000 -restful d1 table_rest 1591872800 30000 -restful d1 table_rest 1591972800 30000 - -sleep 100 -sql select * from table_rest; -print rows: $rows -if $rows != 300000 then - return -1 -endi - -print =============== step3 -system sh/exec.sh -n dnode1 -s stop -x SIGINT -sql select * from table_rest; -print rows: $rows -if $rows != 300000 then - return -1 -endi -system sh/exec.sh -n dnode1 -s start -x SIGINT - -$x = 0 -a1: - $x = $x + 1 - sleep 1000 - if $x == 40 then - return -1 - endi - -sql show vgroups -print online vnodes $data03 -if $data03 != 3 then - goto a1 -endi - -print 
=============== step4 -system sh/exec.sh -n dnode2 -s stop -x SIGINT -sql select * from table_rest; -print rows: $rows -if $rows != 300000 then - return -1 -endi -system sh/exec.sh -n dnode2 -s start -x SIGINT -$x = 0 -a2: - $x = $x + 1 - sleep 1000 - if $x == 40 then - return -1 - endi - -sql show vgroups -print online vnodes $data03 -if $data03 != 3 then - goto a2 -endi - -print =============== step5 -system sh/exec.sh -n dnode3 -s stop -x SIGINT -sql select * from table_rest; -print rows: $rows -if $rows != 300000 then - return -1 -endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT -system sh/exec.sh -n dnode2 -s stop -x SIGINT -system sh/exec.sh -n dnode3 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt index 5f87b8a55f..c88b5e2b78 100644 --- a/tests/script/jenkins/basic.txt +++ b/tests/script/jenkins/basic.txt @@ -275,4 +275,7 @@ ./test.sh -f tsim/vector/table_query.sim ./test.sh -f tsim/vector/table_time.sim +# ---- wal +./test.sh -f tsim/wal/kill.sim + #======================b1-end=============== diff --git a/tests/script/general/wal/kill.sim b/tests/script/tsim/wal/kill.sim similarity index 73% rename from tests/script/general/wal/kill.sim rename to tests/script/tsim/wal/kill.sim index 94a35b636e..f8a732f59f 100644 --- a/tests/script/general/wal/kill.sim +++ b/tests/script/tsim/wal/kill.sim @@ -2,8 +2,7 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 print ============== deploy -system sh/exec.sh -n dnode1 -s start -sleep 3001 +system sh/exec.sh -n dnode1 -s start ] sql connect sql create database d1 @@ -13,65 +12,55 @@ sql create table t1 (ts timestamp, i int) sql insert into t1 values(now, 1); print =============== step3 -sleep 2000 sql select * from t1; print rows: $rows if $rows != 1 then return -1 endi system sh/exec.sh -n dnode1 -s stop -x SIGKILL -sleep 2000 print =============== step4 -system sh/exec.sh -n dnode1 -s start -x SIGKILL -sleep 2000 +system sh/exec.sh -n dnode1 -s start sql select * from t1; print rows: $rows if $rows != 1 then return -1 endi system sh/exec.sh -n dnode1 -s stop -x SIGKILL -sleep 2000 print =============== step5 -system sh/exec.sh -n dnode1 -s start -x SIGKILL -sleep 2000 +system sh/exec.sh -n dnode1 -s start sql select * from t1; print rows: $rows if $rows != 1 then return -1 endi system sh/exec.sh -n dnode1 -s stop -x SIGKILL -sleep 2000 print =============== step6 -system sh/exec.sh -n dnode1 -s start -x SIGKILL -sleep 2000 +system sh/exec.sh -n dnode1 -s start sql select * from t1; print rows: $rows if $rows != 1 then return -1 endi system sh/exec.sh -n dnode1 -s stop -x SIGKILL -sleep 2000 print =============== step7 -system sh/exec.sh -n dnode1 -s start -x SIGKILL -sleep 2000 +system sh/exec.sh -n dnode1 -s start sql select * from t1; print rows: $rows if $rows != 1 then return -1 endi system sh/exec.sh -n dnode1 -s stop -x SIGKILL -sleep 2000 print =============== step8 -system sh/exec.sh -n dnode1 -s start -x SIGKILL -sleep 2000 +system sh/exec.sh -n dnode1 -s start sql select * from t1; print rows: $rows if $rows != 1 then return -1 endi + system sh/exec.sh -n dnode1 -s stop -x SIGKILL From f7ea72b0a633d34bab06e56137ecff4fcc8ec7cd Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Wed, 13 Jul 2022 20:47:31 +0800 Subject: [PATCH 53/56] test: restore some 2.0 case --- tests/script/general/stream/agg_stream.sim | 316 ------------------ tests/script/general/stream/column_stream.sim | 200 ----------- tests/script/general/stream/metrics_del.sim | 95 
------ .../stream/metrics_replica1_vnoden.sim | 245 -------------- .../script/general/stream/restart_stream.sim | 176 ---------- tests/script/general/stream/stream_1970.sim | 73 ---- tests/script/general/stream/stream_3.sim | 201 ----------- .../script/general/stream/stream_restart.sim | 142 -------- tests/script/general/stream/table_del.sim | 90 ----- .../general/stream/table_replica1_vnoden.sim | 299 ----------------- tests/script/general/stream/testSuite.sim | 6 - 11 files changed, 1843 deletions(-) delete mode 100644 tests/script/general/stream/agg_stream.sim delete mode 100644 tests/script/general/stream/column_stream.sim delete mode 100644 tests/script/general/stream/metrics_del.sim delete mode 100644 tests/script/general/stream/metrics_replica1_vnoden.sim delete mode 100644 tests/script/general/stream/restart_stream.sim delete mode 100644 tests/script/general/stream/stream_1970.sim delete mode 100644 tests/script/general/stream/stream_3.sim delete mode 100644 tests/script/general/stream/stream_restart.sim delete mode 100644 tests/script/general/stream/table_del.sim delete mode 100644 tests/script/general/stream/table_replica1_vnoden.sim delete mode 100644 tests/script/general/stream/testSuite.sim diff --git a/tests/script/general/stream/agg_stream.sim b/tests/script/general/stream/agg_stream.sim deleted file mode 100644 index 548f59cab7..0000000000 --- a/tests/script/general/stream/agg_stream.sim +++ /dev/null @@ -1,316 +0,0 @@ -system sh/stop_dnodes.sh - - -system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c wallevel -v 0 - -print ========== step1 -system sh/cfg.sh -n dnode1 -c monitor -v 1 -system sh/cfg.sh -n dnode1 -c monitorInterval -v 1 -system sh/cfg.sh -n dnode1 -c maxVnodeConnections -v 30000 -system sh/cfg.sh -n dnode1 -c maxMgmtConnections -v 30000 -system sh/cfg.sh -n dnode1 -c maxMeterConnections -v 30000 -system sh/cfg.sh -n dnode1 -c maxShellConns -v 30000 -system sh/exec.sh -n dnode1 -s start - -sleep 2000 -sql connect - -print =============== step2 -sql create database d4 precision 'us' -sql use d4 -sql create table t1 (ts timestamp, i int) -sql insert into d4.t1 values(1626739200000, 1) - -sql create table d4.s001 as select count(cpu_taosd), count(cpu_system), avg(cpu_taosd), avg(cpu_system), sum(cpu_taosd), max(cpu_taosd), min(cpu_taosd) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s002 as select count(mem_taosd), count(mem_system), avg(mem_taosd), avg(mem_system), sum(mem_taosd), max(mem_taosd), min(mem_taosd) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s003 as select count(disk_used), count(disk_total), avg(disk_used), avg(disk_total), sum(disk_used), max(disk_used), min(disk_used) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s004 as select count(io_read), count(io_write), avg(io_read), avg(io_write), sum(io_read), max(io_write), min(io_read) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s005 as select count(band_speed), avg(band_speed), sum(band_speed), max(band_speed), min(band_speed) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s006 as select count(req_http), count(req_insert), avg(req_select), sum(req_insert), max(req_select), min(req_insert) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s007 as select count(cpu_taosd), count(cpu_system), avg(cpu_taosd), avg(cpu_system), sum(cpu_taosd), max(cpu_taosd), min(cpu_taosd) from log.dn interval(5s) -sql create table d4.s008 as select count(mem_taosd), count(mem_system), avg(mem_taosd), avg(mem_system), sum(mem_taosd), 
max(mem_taosd), min(mem_taosd) from log.dn interval(5s) -sql create table d4.s009 as select count(disk_used), count(disk_total), avg(disk_used), avg(disk_total), sum(disk_used), max(disk_used), min(disk_used) from log.dn interval(5s) -sql create table d4.s000 as select count(io_read), count(io_write), avg(io_read), avg(io_write), sum(io_read), max(io_write), min(io_read) from log.dn interval(5s) - -sql create table d4.s011 as select count(cpu_taosd), count(cpu_system), avg(cpu_taosd), avg(cpu_system), sum(cpu_taosd), max(cpu_taosd), min(cpu_taosd) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s012 as select count(mem_taosd), count(mem_system), avg(mem_taosd), avg(mem_system), sum(mem_taosd), max(mem_taosd), min(mem_taosd) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s013 as select count(disk_used), count(disk_total), avg(disk_used), avg(disk_total), sum(disk_used), max(disk_used), min(disk_used) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s014 as select count(io_read), count(io_write), avg(io_read), avg(io_write), sum(io_read), max(io_write), min(io_read) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s015 as select count(band_speed), avg(band_speed), sum(band_speed), max(band_speed), min(band_speed) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s016 as select count(req_http), count(req_insert), avg(req_select), sum(req_insert), max(req_select), min(req_insert) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s017 as select count(cpu_taosd), count(cpu_system), avg(cpu_taosd), avg(cpu_system), sum(cpu_taosd), max(cpu_taosd), min(cpu_taosd) from log.dn interval(5s) -sql create table d4.s018 as select count(mem_taosd), count(mem_system), avg(mem_taosd), avg(mem_system), sum(mem_taosd), max(mem_taosd), min(mem_taosd) from log.dn interval(5s) -sql create table d4.s019 as select count(disk_used), count(disk_total), avg(disk_used), avg(disk_total), sum(disk_used), max(disk_used), min(disk_used) from log.dn interval(5s) -sql create table d4.s010 as select count(io_read), count(io_write), avg(io_read), avg(io_write), sum(io_read), max(io_write), min(io_read) from log.dn interval(5s) - -sql create table d4.s021 as select count(cpu_taosd), count(cpu_system), avg(cpu_taosd), avg(cpu_system), sum(cpu_taosd), max(cpu_taosd), min(cpu_taosd) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s022 as select count(mem_taosd), count(mem_system), avg(mem_taosd), avg(mem_system), sum(mem_taosd), max(mem_taosd), min(mem_taosd) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s023 as select count(disk_used), count(disk_total), avg(disk_used), avg(disk_total), sum(disk_used), max(disk_used), min(disk_used) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s024 as select count(io_read), count(io_write), avg(io_read), avg(io_write), sum(io_read), max(io_write), min(io_read) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s025 as select count(band_speed), avg(band_speed), sum(band_speed), max(band_speed), min(band_speed) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s026 as select count(req_http), count(req_insert), avg(req_select), sum(req_insert), max(req_select), min(req_insert) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s027 as select count(cpu_taosd), count(cpu_system), avg(cpu_taosd), avg(cpu_system), sum(cpu_taosd), max(cpu_taosd), min(cpu_taosd) from log.dn interval(5s) -sql create table d4.s028 as select count(mem_taosd), count(mem_system), avg(mem_taosd), avg(mem_system), 
sum(mem_taosd), max(mem_taosd), min(mem_taosd) from log.dn interval(5s) -sql create table d4.s029 as select count(disk_used), count(disk_total), avg(disk_used), avg(disk_total), sum(disk_used), max(disk_used), min(disk_used) from log.dn interval(5s) -sql create table d4.s020 as select count(io_read), count(io_write), avg(io_read), avg(io_write), sum(io_read), max(io_write), min(io_read) from log.dn interval(5s) - -sql create table d4.s031 as select count(cpu_taosd), count(cpu_system), avg(cpu_taosd), avg(cpu_system), sum(cpu_taosd), max(cpu_taosd), min(cpu_taosd) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s032 as select count(mem_taosd), count(mem_system), avg(mem_taosd), avg(mem_system), sum(mem_taosd), max(mem_taosd), min(mem_taosd) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s033 as select count(disk_used), count(disk_total), avg(disk_used), avg(disk_total), sum(disk_used), max(disk_used), min(disk_used) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s034 as select count(io_read), count(io_write), avg(io_read), avg(io_write), sum(io_read), max(io_write), min(io_read) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s035 as select count(band_speed), avg(band_speed), sum(band_speed), max(band_speed), min(band_speed) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s036 as select count(req_http), count(req_insert), avg(req_select), sum(req_insert), max(req_select), min(req_insert) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s037 as select count(cpu_taosd), count(cpu_system), avg(cpu_taosd), avg(cpu_system), sum(cpu_taosd), max(cpu_taosd), min(cpu_taosd) from log.dn interval(5s) -sql create table d4.s038 as select count(mem_taosd), count(mem_system), avg(mem_taosd), avg(mem_system), sum(mem_taosd), max(mem_taosd), min(mem_taosd) from log.dn interval(5s) -sql create table d4.s039 as select count(disk_used), count(disk_total), avg(disk_used), avg(disk_total), sum(disk_used), max(disk_used), min(disk_used) from log.dn interval(5s) -sql create table d4.s030 as select count(io_read), count(io_write), avg(io_read), avg(io_write), sum(io_read), max(io_write), min(io_read) from log.dn interval(5s) - -sql create table d4.s041 as select count(cpu_taosd), count(cpu_system), avg(cpu_taosd), avg(cpu_system), sum(cpu_taosd), max(cpu_taosd), min(cpu_taosd) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s042 as select count(mem_taosd), count(mem_system), avg(mem_taosd), avg(mem_system), sum(mem_taosd), max(mem_taosd), min(mem_taosd) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s043 as select count(disk_used), count(disk_total), avg(disk_used), avg(disk_total), sum(disk_used), max(disk_used), min(disk_used) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s044 as select count(io_read), count(io_write), avg(io_read), avg(io_write), sum(io_read), max(io_write), min(io_read) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s045 as select count(band_speed), avg(band_speed), sum(band_speed), max(band_speed), min(band_speed) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s046 as select count(req_http), count(req_insert), avg(req_select), sum(req_insert), max(req_select), min(req_insert) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s047 as select count(cpu_taosd), count(cpu_system), avg(cpu_taosd), avg(cpu_system), sum(cpu_taosd), max(cpu_taosd), min(cpu_taosd) from log.dn interval(5s) -sql create table d4.s048 as select count(mem_taosd), count(mem_system), avg(mem_taosd), 
avg(mem_system), sum(mem_taosd), max(mem_taosd), min(mem_taosd) from log.dn interval(5s) -sql create table d4.s049 as select count(disk_used), count(disk_total), avg(disk_used), avg(disk_total), sum(disk_used), max(disk_used), min(disk_used) from log.dn interval(5s) -sql create table d4.s040 as select count(io_read), count(io_write), avg(io_read), avg(io_write), sum(io_read), max(io_write), min(io_read) from log.dn interval(5s) - -sql create table d4.s051 as select count(cpu_taosd), count(cpu_system), avg(cpu_taosd), avg(cpu_system), sum(cpu_taosd), max(cpu_taosd), min(cpu_taosd) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s052 as select count(mem_taosd), count(mem_system), avg(mem_taosd), avg(mem_system), sum(mem_taosd), max(mem_taosd), min(mem_taosd) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s053 as select count(disk_used), count(disk_total), avg(disk_used), avg(disk_total), sum(disk_used), max(disk_used), min(disk_used) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s054 as select count(io_read), count(io_write), avg(io_read), avg(io_write), sum(io_read), max(io_write), min(io_read) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s055 as select count(band_speed), avg(band_speed), sum(band_speed), max(band_speed), min(band_speed) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s056 as select count(req_http), count(req_insert), avg(req_select), sum(req_insert), max(req_select), min(req_insert) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s057 as select count(cpu_taosd), count(cpu_system), avg(cpu_taosd), avg(cpu_system), sum(cpu_taosd), max(cpu_taosd), min(cpu_taosd) from log.dn interval(5s) -sql create table d4.s058 as select count(mem_taosd), count(mem_system), avg(mem_taosd), avg(mem_system), sum(mem_taosd), max(mem_taosd), min(mem_taosd) from log.dn interval(5s) -sql create table d4.s059 as select count(disk_used), count(disk_total), avg(disk_used), avg(disk_total), sum(disk_used), max(disk_used), min(disk_used) from log.dn interval(5s) -sql create table d4.s050 as select count(io_read), count(io_write), avg(io_read), avg(io_write), sum(io_read), max(io_write), min(io_read) from log.dn interval(5s) - -sql create table d4.s061 as select count(cpu_taosd), count(cpu_system), avg(cpu_taosd), avg(cpu_system), sum(cpu_taosd), max(cpu_taosd), min(cpu_taosd) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s062 as select count(mem_taosd), count(mem_system), avg(mem_taosd), avg(mem_system), sum(mem_taosd), max(mem_taosd), min(mem_taosd) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s063 as select count(disk_used), count(disk_total), avg(disk_used), avg(disk_total), sum(disk_used), max(disk_used), min(disk_used) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s064 as select count(io_read), count(io_write), avg(io_read), avg(io_write), sum(io_read), max(io_write), min(io_read) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s065 as select count(band_speed), avg(band_speed), sum(band_speed), max(band_speed), min(band_speed) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s066 as select count(req_http), count(req_insert), avg(req_select), sum(req_insert), max(req_select), min(req_insert) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s067 as select count(cpu_taosd), count(cpu_system), avg(cpu_taosd), avg(cpu_system), sum(cpu_taosd), max(cpu_taosd), min(cpu_taosd) from log.dn interval(5s) -sql create table d4.s068 as select count(mem_taosd), count(mem_system), 
avg(mem_taosd), avg(mem_system), sum(mem_taosd), max(mem_taosd), min(mem_taosd) from log.dn interval(5s) -sql create table d4.s069 as select count(disk_used), count(disk_total), avg(disk_used), avg(disk_total), sum(disk_used), max(disk_used), min(disk_used) from log.dn interval(5s) -sql create table d4.s060 as select count(io_read), count(io_write), avg(io_read), avg(io_write), sum(io_read), max(io_write), min(io_read) from log.dn interval(5s) - -sql create table d4.s071 as select count(cpu_taosd), count(cpu_system), avg(cpu_taosd), avg(cpu_system), sum(cpu_taosd), max(cpu_taosd), min(cpu_taosd) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s072 as select count(mem_taosd), count(mem_system), avg(mem_taosd), avg(mem_system), sum(mem_taosd), max(mem_taosd), min(mem_taosd) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s073 as select count(disk_used), count(disk_total), avg(disk_used), avg(disk_total), sum(disk_used), max(disk_used), min(disk_used) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s074 as select count(io_read), count(io_write), avg(io_read), avg(io_write), sum(io_read), max(io_write), min(io_read) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s075 as select count(band_speed), avg(band_speed), sum(band_speed), max(band_speed), min(band_speed) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s076 as select count(req_http), count(req_insert), avg(req_select), sum(req_insert), max(req_select), min(req_insert) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s077 as select count(cpu_taosd), count(cpu_system), avg(cpu_taosd), avg(cpu_system), sum(cpu_taosd), max(cpu_taosd), min(cpu_taosd) from log.dn interval(5s) -sql create table d4.s078 as select count(mem_taosd), count(mem_system), avg(mem_taosd), avg(mem_system), sum(mem_taosd), max(mem_taosd), min(mem_taosd) from log.dn interval(5s) -sql create table d4.s079 as select count(disk_used), count(disk_total), avg(disk_used), avg(disk_total), sum(disk_used), max(disk_used), min(disk_used) from log.dn interval(5s) -sql create table d4.s070 as select count(io_read), count(io_write), avg(io_read), avg(io_write), sum(io_read), max(io_write), min(io_read) from log.dn interval(5s) - -sql create table d4.s081 as select count(cpu_taosd), count(cpu_system), avg(cpu_taosd), avg(cpu_system), sum(cpu_taosd), max(cpu_taosd), min(cpu_taosd) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s082 as select count(mem_taosd), count(mem_system), avg(mem_taosd), avg(mem_system), sum(mem_taosd), max(mem_taosd), min(mem_taosd) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s083 as select count(disk_used), count(disk_total), avg(disk_used), avg(disk_total), sum(disk_used), max(disk_used), min(disk_used) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s084 as select count(io_read), count(io_write), avg(io_read), avg(io_write), sum(io_read), max(io_write), min(io_read) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s085 as select count(band_speed), avg(band_speed), sum(band_speed), max(band_speed), min(band_speed) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s086 as select count(req_http), count(req_insert), avg(req_select), sum(req_insert), max(req_select), min(req_insert) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s087 as select count(cpu_taosd), count(cpu_system), avg(cpu_taosd), avg(cpu_system), sum(cpu_taosd), max(cpu_taosd), min(cpu_taosd) from log.dn interval(5s) -sql create table d4.s088 as select count(mem_taosd), 
count(mem_system), avg(mem_taosd), avg(mem_system), sum(mem_taosd), max(mem_taosd), min(mem_taosd) from log.dn interval(5s) -sql create table d4.s089 as select count(disk_used), count(disk_total), avg(disk_used), avg(disk_total), sum(disk_used), max(disk_used), min(disk_used) from log.dn interval(5s) -sql create table d4.s080 as select count(io_read), count(io_write), avg(io_read), avg(io_write), sum(io_read), max(io_write), min(io_read) from log.dn interval(5s) - -sql create table d4.s091 as select count(cpu_taosd), count(cpu_system), avg(cpu_taosd), avg(cpu_system), sum(cpu_taosd), max(cpu_taosd), min(cpu_taosd) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s092 as select count(mem_taosd), count(mem_system), avg(mem_taosd), avg(mem_system), sum(mem_taosd), max(mem_taosd), min(mem_taosd) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s093 as select count(disk_used), count(disk_total), avg(disk_used), avg(disk_total), sum(disk_used), max(disk_used), min(disk_used) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s094 as select count(io_read), count(io_write), avg(io_read), avg(io_write), sum(io_read), max(io_write), min(io_read) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s095 as select count(band_speed), avg(band_speed), sum(band_speed), max(band_speed), min(band_speed) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s096 as select count(req_http), count(req_insert) , sum(req_insert), max(req_select), min(req_insert) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s097 as select count(cpu_taosd), count(cpu_system), avg(cpu_taosd), avg(cpu_system), sum(cpu_taosd), max(cpu_taosd), min(cpu_taosd) from log.dn interval(5s) -sql create table d4.s098 as select count(mem_taosd), count(mem_system), avg(mem_taosd), avg(mem_system), sum(mem_taosd), max(mem_taosd), min(mem_taosd) from log.dn interval(5s) -sql create table d4.s099 as select count(disk_used), count(disk_total), avg(disk_used), avg(disk_total), sum(disk_used), max(disk_used), min(disk_used) from log.dn interval(5s) -sql create table d4.s090 as select count(io_read), count(io_write), avg(io_read), avg(io_write), sum(io_read), max(io_write), min(io_read) from log.dn interval(5s) - -print =============== step21 - -sql create table d4.s101 as select count(cpu_taosd), count(cpu_system), avg(cpu_taosd), avg(cpu_system), sum(cpu_taosd), max(cpu_taosd), min(cpu_taosd) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s102 as select count(mem_taosd), count(mem_system), avg(mem_taosd), avg(mem_system), sum(mem_taosd), max(mem_taosd), min(mem_taosd) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s103 as select count(disk_used), count(disk_total), avg(disk_used), avg(disk_total), sum(disk_used), max(disk_used), min(disk_used) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s104 as select count(io_read), count(io_write), avg(io_read), avg(io_write), sum(io_read), max(io_write), min(io_read) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s105 as select count(band_speed), avg(band_speed), sum(band_speed), max(band_speed), min(band_speed) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s106 as select count(req_http), count(req_insert) , sum(req_insert), max(req_select), min(req_insert) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s107 as select count(cpu_taosd), count(cpu_system), avg(cpu_taosd), avg(cpu_system), sum(cpu_taosd), max(cpu_taosd), min(cpu_taosd) from log.dn interval(5s) -sql create table d4.s108 as select 
count(mem_taosd), count(mem_system), avg(mem_taosd), avg(mem_system), sum(mem_taosd), max(mem_taosd), min(mem_taosd) from log.dn interval(5s) -sql create table d4.s109 as select count(disk_used), count(disk_total), avg(disk_used), avg(disk_total), sum(disk_used), max(disk_used), min(disk_used) from log.dn interval(5s) -sql create table d4.s100 as select count(io_read), count(io_write), avg(io_read), avg(io_write), sum(io_read), max(io_write), min(io_read) from log.dn interval(5s) - -sql create table d4.s111 as select count(cpu_taosd), count(cpu_system), avg(cpu_taosd), avg(cpu_system), sum(cpu_taosd), max(cpu_taosd), min(cpu_taosd) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s112 as select count(mem_taosd), count(mem_system), avg(mem_taosd), avg(mem_system), sum(mem_taosd), max(mem_taosd), min(mem_taosd) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s113 as select count(disk_used), count(disk_total), avg(disk_used), avg(disk_total), sum(disk_used), max(disk_used), min(disk_used) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s114 as select count(io_read), count(io_write), avg(io_read), avg(io_write), sum(io_read), max(io_write), min(io_read) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s115 as select count(band_speed), avg(band_speed), sum(band_speed), max(band_speed), min(band_speed) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s116 as select count(req_http), count(req_insert) , sum(req_insert), max(req_select), min(req_insert) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s117 as select count(cpu_taosd), count(cpu_system), avg(cpu_taosd), avg(cpu_system), sum(cpu_taosd), max(cpu_taosd), min(cpu_taosd) from log.dn interval(5s) -sql create table d4.s118 as select count(mem_taosd), count(mem_system), avg(mem_taosd), avg(mem_system), sum(mem_taosd), max(mem_taosd), min(mem_taosd) from log.dn interval(5s) -sql create table d4.s119 as select count(disk_used), count(disk_total), avg(disk_used), avg(disk_total), sum(disk_used), max(disk_used), min(disk_used) from log.dn interval(5s) -sql create table d4.s110 as select count(io_read), count(io_write), avg(io_read), avg(io_write), sum(io_read), max(io_write), min(io_read) from log.dn interval(5s) - -sql create table d4.s121 as select count(cpu_taosd), count(cpu_system), avg(cpu_taosd), avg(cpu_system), sum(cpu_taosd), max(cpu_taosd), min(cpu_taosd) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s122 as select count(mem_taosd), count(mem_system), avg(mem_taosd), avg(mem_system), sum(mem_taosd), max(mem_taosd), min(mem_taosd) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s123 as select count(disk_used), count(disk_total), avg(disk_used), avg(disk_total), sum(disk_used), max(disk_used), min(disk_used) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s124 as select count(io_read), count(io_write), avg(io_read), avg(io_write), sum(io_read), max(io_write), min(io_read) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s125 as select count(band_speed), avg(band_speed), sum(band_speed), max(band_speed), min(band_speed) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s126 as select count(req_http), count(req_insert) , sum(req_insert), max(req_select), min(req_insert) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s127 as select count(cpu_taosd), count(cpu_system), avg(cpu_taosd), avg(cpu_system), sum(cpu_taosd), max(cpu_taosd), min(cpu_taosd) from log.dn interval(5s) -sql create table d4.s128 as select count(mem_taosd), 
count(mem_system), avg(mem_taosd), avg(mem_system), sum(mem_taosd), max(mem_taosd), min(mem_taosd) from log.dn interval(5s) -sql create table d4.s129 as select count(disk_used), count(disk_total), avg(disk_used), avg(disk_total), sum(disk_used), max(disk_used), min(disk_used) from log.dn interval(5s) -sql create table d4.s120 as select count(io_read), count(io_write), avg(io_read), avg(io_write), sum(io_read), max(io_write), min(io_read) from log.dn interval(5s) - -sql create table d4.s131 as select count(cpu_taosd), count(cpu_system), avg(cpu_taosd), avg(cpu_system), sum(cpu_taosd), max(cpu_taosd), min(cpu_taosd) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s132 as select count(mem_taosd), count(mem_system), avg(mem_taosd), avg(mem_system), sum(mem_taosd), max(mem_taosd), min(mem_taosd) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s133 as select count(disk_used), count(disk_total), avg(disk_used), avg(disk_total), sum(disk_used), max(disk_used), min(disk_used) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s134 as select count(io_read), count(io_write), avg(io_read), avg(io_write), sum(io_read), max(io_write), min(io_read) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s135 as select count(band_speed), avg(band_speed), sum(band_speed), max(band_speed), min(band_speed) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s136 as select count(req_http), count(req_insert) , sum(req_insert), max(req_select), min(req_insert) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s137 as select count(cpu_taosd), count(cpu_system), avg(cpu_taosd), avg(cpu_system), sum(cpu_taosd), max(cpu_taosd), min(cpu_taosd) from log.dn interval(5s) -sql create table d4.s138 as select count(mem_taosd), count(mem_system), avg(mem_taosd), avg(mem_system), sum(mem_taosd), max(mem_taosd), min(mem_taosd) from log.dn interval(5s) -sql create table d4.s139 as select count(disk_used), count(disk_total), avg(disk_used), avg(disk_total), sum(disk_used), max(disk_used), min(disk_used) from log.dn interval(5s) -sql create table d4.s130 as select count(io_read), count(io_write), avg(io_read), avg(io_write), sum(io_read), max(io_write), min(io_read) from log.dn interval(5s) - -sql create table d4.s141 as select count(cpu_taosd), count(cpu_system), avg(cpu_taosd), avg(cpu_system), sum(cpu_taosd), max(cpu_taosd), min(cpu_taosd) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s142 as select count(mem_taosd), count(mem_system), avg(mem_taosd), avg(mem_system), sum(mem_taosd), max(mem_taosd), min(mem_taosd) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s143 as select count(disk_used), count(disk_total), avg(disk_used), avg(disk_total), sum(disk_used), max(disk_used), min(disk_used) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s144 as select count(io_read), count(io_write), avg(io_read), avg(io_write), sum(io_read), max(io_write), min(io_read) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s145 as select count(band_speed), avg(band_speed), sum(band_speed), max(band_speed), min(band_speed) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s146 as select count(req_http), count(req_insert) , sum(req_insert), max(req_select), min(req_insert) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s147 as select count(cpu_taosd), count(cpu_system), avg(cpu_taosd), avg(cpu_system), sum(cpu_taosd), max(cpu_taosd), min(cpu_taosd) from log.dn interval(5s) -sql create table d4.s148 as select count(mem_taosd), count(mem_system), 
avg(mem_taosd), avg(mem_system), sum(mem_taosd), max(mem_taosd), min(mem_taosd) from log.dn interval(5s) -sql create table d4.s149 as select count(disk_used), count(disk_total), avg(disk_used), avg(disk_total), sum(disk_used), max(disk_used), min(disk_used) from log.dn interval(5s) -sql create table d4.s140 as select count(io_read), count(io_write), avg(io_read), avg(io_write), sum(io_read), max(io_write), min(io_read) from log.dn interval(5s) - -sql create table d4.s151 as select count(cpu_taosd), count(cpu_system), avg(cpu_taosd), avg(cpu_system), sum(cpu_taosd), max(cpu_taosd), min(cpu_taosd) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s152 as select count(mem_taosd), count(mem_system), avg(mem_taosd), avg(mem_system), sum(mem_taosd), max(mem_taosd), min(mem_taosd) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s153 as select count(disk_used), count(disk_total), avg(disk_used), avg(disk_total), sum(disk_used), max(disk_used), min(disk_used) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s154 as select count(io_read), count(io_write), avg(io_read), avg(io_write), sum(io_read), max(io_write), min(io_read) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s155 as select count(band_speed), avg(band_speed), sum(band_speed), max(band_speed), min(band_speed) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s156 as select count(req_http), count(req_insert) , sum(req_insert), max(req_select), min(req_insert) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s157 as select count(cpu_taosd), count(cpu_system), avg(cpu_taosd), avg(cpu_system), sum(cpu_taosd), max(cpu_taosd), min(cpu_taosd) from log.dn interval(5s) -sql create table d4.s158 as select count(mem_taosd), count(mem_system), avg(mem_taosd), avg(mem_system), sum(mem_taosd), max(mem_taosd), min(mem_taosd) from log.dn interval(5s) -sql create table d4.s159 as select count(disk_used), count(disk_total), avg(disk_used), avg(disk_total), sum(disk_used), max(disk_used), min(disk_used) from log.dn interval(5s) -sql create table d4.s150 as select count(io_read), count(io_write), avg(io_read), avg(io_write), sum(io_read), max(io_write), min(io_read) from log.dn interval(5s) - -sql create table d4.s161 as select count(cpu_taosd), count(cpu_system), avg(cpu_taosd), avg(cpu_system), sum(cpu_taosd), max(cpu_taosd), min(cpu_taosd) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s162 as select count(mem_taosd), count(mem_system), avg(mem_taosd), avg(mem_system), sum(mem_taosd), max(mem_taosd), min(mem_taosd) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s163 as select count(disk_used), count(disk_total), avg(disk_used), avg(disk_total), sum(disk_used), max(disk_used), min(disk_used) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s164 as select count(io_read), count(io_write), avg(io_read), avg(io_write), sum(io_read), max(io_write), min(io_read) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s165 as select count(band_speed), avg(band_speed), sum(band_speed), max(band_speed), min(band_speed) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s166 as select count(req_http), count(req_insert) , sum(req_insert), max(req_select), min(req_insert) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s167 as select count(cpu_taosd), count(cpu_system), avg(cpu_taosd), avg(cpu_system), sum(cpu_taosd), max(cpu_taosd), min(cpu_taosd) from log.dn interval(5s) -sql create table d4.s168 as select count(mem_taosd), count(mem_system), avg(mem_taosd), 
avg(mem_system), sum(mem_taosd), max(mem_taosd), min(mem_taosd) from log.dn interval(5s) -sql create table d4.s169 as select count(disk_used), count(disk_total), avg(disk_used), avg(disk_total), sum(disk_used), max(disk_used), min(disk_used) from log.dn interval(5s) -sql create table d4.s160 as select count(io_read), count(io_write), avg(io_read), avg(io_write), sum(io_read), max(io_write), min(io_read) from log.dn interval(5s) - -sql create table d4.s171 as select count(cpu_taosd), count(cpu_system), avg(cpu_taosd), avg(cpu_system), sum(cpu_taosd), max(cpu_taosd), min(cpu_taosd) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s172 as select count(mem_taosd), count(mem_system), avg(mem_taosd), avg(mem_system), sum(mem_taosd), max(mem_taosd), min(mem_taosd) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s173 as select count(disk_used), count(disk_total), avg(disk_used), avg(disk_total), sum(disk_used), max(disk_used), min(disk_used) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s174 as select count(io_read), count(io_write), avg(io_read), avg(io_write), sum(io_read), max(io_write), min(io_read) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s175 as select count(band_speed), avg(band_speed), sum(band_speed), max(band_speed), min(band_speed) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s176 as select count(req_http), count(req_insert) , sum(req_insert), max(req_select), min(req_insert) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s177 as select count(cpu_taosd), count(cpu_system), avg(cpu_taosd), avg(cpu_system), sum(cpu_taosd), max(cpu_taosd), min(cpu_taosd) from log.dn interval(5s) -sql create table d4.s178 as select count(mem_taosd), count(mem_system), avg(mem_taosd), avg(mem_system), sum(mem_taosd), max(mem_taosd), min(mem_taosd) from log.dn interval(5s) -sql create table d4.s179 as select count(disk_used), count(disk_total), avg(disk_used), avg(disk_total), sum(disk_used), max(disk_used), min(disk_used) from log.dn interval(5s) -sql create table d4.s170 as select count(io_read), count(io_write), avg(io_read), avg(io_write), sum(io_read), max(io_write), min(io_read) from log.dn interval(5s) - -sql create table d4.s181 as select count(cpu_taosd), count(cpu_system), avg(cpu_taosd), avg(cpu_system), sum(cpu_taosd), max(cpu_taosd), min(cpu_taosd) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s182 as select count(mem_taosd), count(mem_system), avg(mem_taosd), avg(mem_system), sum(mem_taosd), max(mem_taosd), min(mem_taosd) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s183 as select count(disk_used), count(disk_total), avg(disk_used), avg(disk_total), sum(disk_used), max(disk_used), min(disk_used) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s184 as select count(io_read), count(io_write), avg(io_read), avg(io_write), sum(io_read), max(io_write), min(io_read) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s185 as select count(band_speed), avg(band_speed), sum(band_speed), max(band_speed), min(band_speed) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s186 as select count(req_http), count(req_insert) , sum(req_insert), max(req_select), min(req_insert) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s187 as select count(cpu_taosd), count(cpu_system), avg(cpu_taosd), avg(cpu_system), sum(cpu_taosd), max(cpu_taosd), min(cpu_taosd) from log.dn interval(5s) -sql create table d4.s188 as select count(mem_taosd), count(mem_system), avg(mem_taosd), avg(mem_system), 
sum(mem_taosd), max(mem_taosd), min(mem_taosd) from log.dn interval(5s) -sql create table d4.s189 as select count(disk_used), count(disk_total), avg(disk_used), avg(disk_total), sum(disk_used), max(disk_used), min(disk_used) from log.dn interval(5s) -sql create table d4.s180 as select count(io_read), count(io_write), avg(io_read), avg(io_write), sum(io_read), max(io_write), min(io_read) from log.dn interval(5s) - -sql create table d4.s191 as select count(cpu_taosd), count(cpu_system), avg(cpu_taosd), avg(cpu_system), sum(cpu_taosd), max(cpu_taosd), min(cpu_taosd) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s192 as select count(mem_taosd), count(mem_system), avg(mem_taosd), avg(mem_system), sum(mem_taosd), max(mem_taosd), min(mem_taosd) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s193 as select count(disk_used), count(disk_total), avg(disk_used), avg(disk_total), sum(disk_used), max(disk_used), min(disk_used) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s194 as select count(io_read), count(io_write), avg(io_read), avg(io_write), sum(io_read), max(io_write), min(io_read) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s195 as select count(band_speed), avg(band_speed), sum(band_speed), max(band_speed), min(band_speed) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s196 as select count(req_http), count(req_insert) , sum(req_insert), max(req_select), min(req_insert) from log.dn_192_168_0_1 interval(5s) -sql create table d4.s197 as select count(cpu_taosd), count(cpu_system), avg(cpu_taosd), avg(cpu_system), sum(cpu_taosd), max(cpu_taosd), min(cpu_taosd) from log.dn interval(5s) -sql create table d4.s198 as select count(mem_taosd), count(mem_system), avg(mem_taosd), avg(mem_system), sum(mem_taosd), max(mem_taosd), min(mem_taosd) from log.dn interval(5s) -sql create table d4.s199 as select count(disk_used), count(disk_total), avg(disk_used), avg(disk_total), sum(disk_used), max(disk_used), min(disk_used) from log.dn interval(5s) -sql create table d4.s190 as select count(io_read), count(io_write), avg(io_read), avg(io_write), sum(io_read), max(io_write), min(io_read) from log.dn interval(5s) - - -print =============== step3 -print sleep 22 seconds -sleep 50000 - -sql select * from d4.s001 -$s1 = $rows -print select * from d4.s001 ==> $s1 -if $rows <= 0 then - return -1 -endi - -sql select * from d4.s002 -if $rows <= 0 then - return -1 -endi - -sql select * from d4.s003 -if $rows <= 0 then - return -1 -endi - -sql select * from d4.s004 -if $rows <= 0 then - return -1 -endi - -sql select * from d4.s005 -$s5 = $rows -print select * from d4.s005 ==> $s5 -if $rows <= 0 then - return -1 -endi - -sql select * from d4.s006 -if $rows <= 0 then - return -1 -endi - -sql select * from d4.s007 -if $rows <= 0 then - return -1 -endi - -sql select * from d4.s008 -if $rows <= 0 then - return -1 -endi - -sql select * from d4.s009 -if $rows <= 0 then - return -1 -endi - -sql select * from d4.s010 -if $rows <= 0 then - return -1 -endi - -sql select * from d4.s101 -if $rows <= 0 then - return -1 -endi - -sql select * from d4.s102 -$s12 = $rows -print select * from d4.s102 ==> $s12 -if $rows <= 0 then - return -1 -endi diff --git a/tests/script/general/stream/column_stream.sim b/tests/script/general/stream/column_stream.sim deleted file mode 100644 index 59a65f0969..0000000000 --- a/tests/script/general/stream/column_stream.sim +++ /dev/null @@ -1,200 +0,0 @@ -system sh/stop_dnodes.sh - - -system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c 
wallevel -v 0 - -print ========== step1 -system sh/cfg.sh -n dnode1 -c monitor -v 1 -system sh/cfg.sh -n dnode1 -c monitorInterval -v 1 -system sh/exec.sh -n dnode1 -s start - -sleep 2000 -sql connect - -print =============== step1 -sleep 2000 -sql select * from log.dn -if $rows == 0 then - return -1 -endi - -print =============== step2 -sql create database d4 precision 'us' -sql use d4 -sql create table t1 (ts timestamp, i int) -sql insert into d4.t1 values(1626739200000, 1) - -sql create table d4.s1 as select count(cpu_taosd), count(cpu_system), avg(cpu_taosd), avg(cpu_system), sum(cpu_taosd), max(cpu_taosd), min(cpu_taosd), stddev(cpu_taosd), count(*) as c1, count(*) as c2, count(*) as c3, count(*) as c4, count(*) as c5, count(*) as c6, count(*) as c7, count(*) as c8, count(*) as c9, count(*) as c10, count(*) as c11, count(*) as c12, count(*) as c13, count(*) as c14, count(*) as c15, count(*) as c16, count(*) as c17, count(*) as c18, count(*) as c19, count(*) as c20, count(*) as c21, count(*) as c22, count(*) as c23, count(*) as c24, count(*) as c25, count(*) as c26, count(*) as c27, count(*) as c28, count(*) as c29, count(*) as c30 from log.dn_192_168_0_1 interval(5s) - -sql create table d4.s2 as select count(cpu_taosd), count(cpu_system), avg(cpu_taosd), avg(cpu_system), sum(cpu_taosd), max(cpu_taosd), min(cpu_taosd), stddev(cpu_taosd), count(*) as c1, count(*) as c2, count(*) as c3, count(*) as c4, count(*) as c5, count(*) as c6, count(*) as c7, count(*) as c8, count(*) as c9, count(*) as c10, count(*) as c11, count(*) as c12, count(*) as c13, count(*) as c14, count(*) as c15, count(*) as c16, count(*) as c17, count(*) as c18, count(*) as c19, count(*) as c20, count(*) as c21, count(*) as c22, count(*) as c23, count(*) as c24, count(*) as c25, count(*) as c26, count(*) as c27, count(*) as c28, count(*) as c29, count(*) as c30 from log.dn_192_168_0_1 interval(5s) - -sql create table d4.s3 as select count(disk_used), count(disk_total), avg(disk_used), avg(disk_total), sum(disk_used), max(disk_used), min(disk_used), stddev(disk_used), count(*) as c1, count(*) as c2, count(*) as c3, count(*) as c4, count(*) as c5, count(*) as c6, count(*) as c7, count(*) as c8, count(*) as c9, count(*) as c10, count(*) as c11, count(*) as c12, count(*) as c13, count(*) as c14, count(*) as c15, count(*) as c16, count(*) as c17, count(*) as c18, count(*) as c19, count(*) as c20, count(*) as c21, count(*) as c22, count(*) as c23, count(*) as c24, count(*) as c25, count(*) as c26, count(*) as c27, count(*) as c28, count(*) as c29, count(*) as c30 from log.dn_192_168_0_1 interval(5s) - -sql create table d4.s4 as select count(io_read), count(io_write), avg(io_read), avg(io_write), sum(io_read), max(io_write), min(io_read), stddev(io_write), count(*) as c1, count(*) as c2, count(*) as c3, count(*) as c4, count(*) as c5, count(*) as c6, count(*) as c7, count(*) as c8, count(*) as c9, count(*) as c10, count(*) as c11, count(*) as c12, count(*) as c13, count(*) as c14, count(*) as c15, count(*) as c16, count(*) as c17, count(*) as c18, count(*) as c19, count(*) as c20, count(*) as c21, count(*) as c22, count(*) as c23, count(*) as c24, count(*) as c25, count(*) as c26, count(*) as c27, count(*) as c28, count(*) as c29, count(*) as c30 from log.dn_192_168_0_1 interval(5s) - -sql create table d4.s5 as select count(band_speed), avg(band_speed), sum(band_speed), max(band_speed), min(band_speed), stddev(band_speed), count(*) as c1, count(*) as c2, count(*) as c3, count(*) as c4, count(*) as c5, count(*) as c6, count(*) 
as c7, count(*) as c8, count(*) as c9, count(*) as c10, count(*) as c11, count(*) as c12, count(*) as c13, count(*) as c14, count(*) as c15, count(*) as c16, count(*) as c17, count(*) as c18, count(*) as c19, count(*) as c20, count(*) as c21, count(*) as c22, count(*) as c23, count(*) as c24, count(*) as c25, count(*) as c26, count(*) as c27, count(*) as c28, count(*) as c29, count(*) as c30 from log.dn_192_168_0_1 interval(5s) - -sql create table d4.s6 as select count(req_http), count(req_insert), avg(req_http), avg(req_select), sum(req_insert), max(req_select), min(req_insert), stddev(req_select), count(*) as c1, count(*) as c2, count(*) as c3, count(*) as c4, count(*) as c5, count(*) as c6, count(*) as c7, count(*) as c8, count(*) as c9, count(*) as c10, count(*) as c11, count(*) as c12, count(*) as c13, count(*) as c14, count(*) as c15, count(*) as c16, count(*) as c17, count(*) as c18, count(*) as c19, count(*) as c20, count(*) as c21, count(*) as c22, count(*) as c23, count(*) as c24, count(*) as c25, count(*) as c26, count(*) as c27, count(*) as c28, count(*) as c29, count(*) as c30 from log.dn_192_168_0_1 interval(5s) - -sql create table d4.s7 as select count(cpu_taosd), count(cpu_system), avg(cpu_taosd), avg(cpu_system), sum(cpu_taosd), max(cpu_taosd), min(cpu_taosd), count(*) as c1, count(*) as c2, count(*) as c3, count(*) as c4, count(*) as c5, count(*) as c6, count(*) as c7, count(*) as c8, count(*) as c9, count(*) as c10, count(*) as c11, count(*) as c12, count(*) as c13, count(*) as c14, count(*) as c15, count(*) as c16, count(*) as c17, count(*) as c18, count(*) as c19, count(*) as c20, count(*) as c21, count(*) as c22, count(*) as c23, count(*) as c24, count(*) as c25, count(*) as c26, count(*) as c27, count(*) as c28, count(*) as c29, count(*) as c30 from log.dn interval(5s) - -sql create table d4.s8 as select count(cpu_taosd), count(cpu_system), avg(cpu_taosd), avg(cpu_system), sum(cpu_taosd), max(cpu_taosd), min(cpu_taosd), count(*) as c1, count(*) as c2, count(*) as c3, count(*) as c4, count(*) as c5, count(*) as c6, count(*) as c7, count(*) as c8, count(*) as c9, count(*) as c10, count(*) as c11, count(*) as c12, count(*) as c13, count(*) as c14, count(*) as c15, count(*) as c16, count(*) as c17, count(*) as c18, count(*) as c19, count(*) as c20, count(*) as c21, count(*) as c22, count(*) as c23, count(*) as c24, count(*) as c25, count(*) as c26, count(*) as c27, count(*) as c28, count(*) as c29, count(*) as c30 from log.dn interval(5s) - -sql create table d4.s9 as select count(disk_used), count(disk_total), avg(disk_used), avg(disk_total), sum(disk_used), max(disk_used), min(disk_used), count(*) as c1, count(*) as c2, count(*) as c3, count(*) as c4, count(*) as c5, count(*) as c6, count(*) as c7, count(*) as c8, count(*) as c9, count(*) as c10, count(*) as c11, count(*) as c12, count(*) as c13, count(*) as c14, count(*) as c15, count(*) as c16, count(*) as c17, count(*) as c18, count(*) as c19, count(*) as c20, count(*) as c21, count(*) as c22, count(*) as c23, count(*) as c24, count(*) as c25, count(*) as c26, count(*) as c27, count(*) as c28, count(*) as c29, count(*) as c30 from log.dn interval(5s) - -sql create table d4.s10 as select count(io_read), count(io_write), avg(io_read), avg(io_write), sum(io_read), max(io_write), min(io_read), count(*) as c1, count(*) as c2, count(*) as c3, count(*) as c4, count(*) as c5, count(*) as c6, count(*) as c7, count(*) as c8, count(*) as c9, count(*) as c10, count(*) as c11, count(*) as c12, count(*) as c13, count(*) as c14, 
count(*) as c15, count(*) as c16, count(*) as c17, count(*) as c18, count(*) as c19, count(*) as c20, count(*) as c21, count(*) as c22, count(*) as c23, count(*) as c24, count(*) as c25, count(*) as c26, count(*) as c27, count(*) as c28, count(*) as c29, count(*) as c30 from log.dn interval(5s) - -sql create table d4.s11 as select count(band_speed), avg(band_speed), sum(band_speed), max(band_speed), min(band_speed), count(*) as c1, count(*) as c2, count(*) as c3, count(*) as c4, count(*) as c5, count(*) as c6, count(*) as c7, count(*) as c8, count(*) as c9, count(*) as c10, count(*) as c11, count(*) as c12, count(*) as c13, count(*) as c14, count(*) as c15, count(*) as c16, count(*) as c17, count(*) as c18, count(*) as c19, count(*) as c20, count(*) as c21, count(*) as c22, count(*) as c23, count(*) as c24, count(*) as c25, count(*) as c26, count(*) as c27, count(*) as c28, count(*) as c29, count(*) as c30 from log.dn interval(5s) - -sql create table d4.s12 as select count(req_http), count(req_insert), avg(req_select), avg(req_insert), sum(req_insert), max(req_select), min(req_insert), count(*) as c1, count(*) as c2, count(*) as c3, count(*) as c4, count(*) as c5, count(*) as c6, count(*) as c7, count(*) as c8, count(*) as c9, count(*) as c10, count(*) as c11, count(*) as c12, count(*) as c13, count(*) as c14, count(*) as c15, count(*) as c16, count(*) as c17, count(*) as c18, count(*) as c19, count(*) as c20, count(*) as c21, count(*) as c22, count(*) as c23, count(*) as c24, count(*) as c25, count(*) as c26, count(*) as c27, count(*) as c28, count(*) as c29, count(*) as c30 from log.dn interval(5s) - -print =============== step3 -print sleep 22 seconds -sleep 22000 - -sql select * from d4.s1 -$s1 = $rows -print select * from d4.s1 ==> $s1 -if $rows <= 0 then - return -1 -endi - -sql select * from d4.s2 -if $rows <= 0 then - return -1 -endi - -sql select * from d4.s3 -if $rows <= 0 then - return -1 -endi - -sql select * from d4.s4 -if $rows <= 0 then - return -1 -endi - -sql select * from d4.s5 -$s5 = $rows -print select * from d4.s5 ==> $s5 -if $rows <= 0 then - return -1 -endi - -sql select * from d4.s6 -if $rows <= 0 then - return -1 -endi - -sql select * from d4.s7 -if $rows <= 0 then - return -1 -endi - -sql select * from d4.s8 -if $rows <= 0 then - return -1 -endi - -sql select * from d4.s9 -if $rows <= 0 then - return -1 -endi - -sql select * from d4.s10 -if $rows <= 0 then - return -1 -endi - -sql select * from d4.s11 -if $rows <= 0 then - return -1 -endi - -sql select * from d4.s12 -$s12 = $rows -print select * from d4.s12 ==> $s12 -if $rows <= 0 then - return -1 -endi - -print =============== step4 -system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 3000 -system sh/exec.sh -n dnode1 -s start -print sleep 22 seconds -sleep 22000 - -sql select * from d4.s1 -print select * from d4.s1 ==> $rows $s1 -if $rows <= 0 then - return -1 -endi -if $rows <= $s1 then - return -1 -endi - -sql select * from d4.s2 -if $rows <= 0 then - return -1 -endi - -sql select * from d4.s3 -if $rows <= 0 then - return -1 -endi - -sql select * from d4.s4 -if $rows <= 0 then - return -1 -endi - -sql select * from d4.s5 -print select * from d4.s5 ==> $rows $s5 -if $rows <= 0 then - return -1 -endi -if $rows <= $s5 then - return -1 -endi - - -sql select * from d4.s6 -if $rows <= 0 then - return -1 -endi - -sql select * from d4.s7 -if $rows <= 0 then - return -1 -endi - -sql select * from d4.s8 -if $rows <= 0 then - return -1 -endi - -sql select * from d4.s9 -if $rows <= 0 then - return -1 -endi - -sql select 
* from d4.s10 -if $rows <= 0 then - return -1 -endi - -sql select * from d4.s11 -if $rows <= 0 then - return -1 -endi - -sql select * from d4.s12 -print select * from d4.s12 ==> $rows $s12 -if $rows <= 0 then - return -1 -endi -if $rows <= $s12 then - return -1 -endi \ No newline at end of file diff --git a/tests/script/general/stream/metrics_del.sim b/tests/script/general/stream/metrics_del.sim deleted file mode 100644 index 6cc3da71e9..0000000000 --- a/tests/script/general/stream/metrics_del.sim +++ /dev/null @@ -1,95 +0,0 @@ -system sh/stop_dnodes.sh - -system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/exec.sh -n dnode1 -s start - -sleep 2000 -sql connect - -print ======================== dnode1 start - -$dbPrefix = md_db -$tbPrefix = md_tb -$mtPrefix = md_mt -$stPrefix = md_st -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print =============== step1 -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql drop databae $db -x step1 -step1: -sql create database $db -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = 0 - $y = 0 - while $y < $rowNum - $ts = 1626710400000 + $x - sql insert into $tb values ($ts , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step2 c3 - -sql select count(*), count(tbcol), count(tbcol2) from $mt interval(1d) -print select count(*), count(tbcol), count(tbcol2) from $mt interval(1d) ===> $data00 $data01 $data02, $data03 -if $data01 != 200 then - return -1 -endi -if $data02 != 200 then - return -1 -endi -if $data03 != 200 then - return -1 -endi - -$st = $stPrefix . c3 -sql create table $st as select count(*), count(tbcol), count(tbcol2) from $mt interval(1d) - -print =============== step3 - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql drop table $tb - $i = $i + 1 -endw -sql drop table $mt - -print =============== step4 -print sleep 120 seconds -sleep 120000 - -print =============== step5 -$st = $stPrefix . c3 -sql select * from $st -print ===> select * from $st -print ===> $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09 -if $data01 != null then - return -1 -endi -if $data02 != null then - return -1 -endi -if $data03 != null then - return -1 -endi diff --git a/tests/script/general/stream/metrics_replica1_vnoden.sim b/tests/script/general/stream/metrics_replica1_vnoden.sim deleted file mode 100644 index db1044a597..0000000000 --- a/tests/script/general/stream/metrics_replica1_vnoden.sim +++ /dev/null @@ -1,245 +0,0 @@ -system sh/stop_dnodes.sh - - -system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 1000 -system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 3 -system sh/exec.sh -n dnode1 -s start - -sleep 2000 -sql connect - -print ======================== dnode1 start - -$dbPrefix = m1v_db -$tbPrefix = m1v_tb -$mtPrefix = m1v_mt -$stPrefix = m1v_st -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print =============== step1 -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql drop databae $db -x step1 -step1: -sql create database $db -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . 
$i - sql create table $tb using $mt tags( $i ) - - $x = -400 - $y = 0 - while $y < $rowNum - $ms = $x . m - sql insert into $tb values (1626739200000 $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step2 c1 - -sql select count(*) from $mt interval(1d) -print select count(*) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c1 -sql create table $st as select count(*) from $mt interval(1d) - -print =============== step3 c2 -sql select count(tbcol) from $mt interval(1d) -print select count(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c2 -sql create table $st as select count(tbcol) from $mt interval(1d) - -print =============== step4 c3 -sql select count(tbcol2) from $mt interval(1d) -print select count(tbcol2) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c3 -sql create table $st as select count(tbcol2) from $mt interval(1d) - -print =============== step5 avg -sql select avg(tbcol) from $mt interval(1d) -print select avg(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . av -sql create table $st as select avg(tbcol) from $mt interval(1d) - -print =============== step6 su -sql select sum(tbcol) from $mt interval(1d) -print select sum(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 1900 then - return -1 -endi - -$st = $stPrefix . su -sql create table $st as select sum(tbcol) from $mt interval(1d) - -print =============== step7 mi -sql select min(tbcol) from $mt interval(1d) -print select min(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . mi -sql create table $st as select min(tbcol) from $mt interval(1d) - -print =============== step8 ma -sql select max(tbcol) from $mt interval(1d) -print select max(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . ma -sql create table $st as select max(tbcol) from $mt interval(1d) - -print =============== step9 fi -sql select first(tbcol) from $mt interval(1d) -print select first(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . fi -sql create table $st as select first(tbcol) from $mt interval(1d) - -print =============== step10 la -sql select last(tbcol) from $mt interval(1d) -print select last(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . la -sql create table $st as select last(tbcol) from $mt interval(1d) - -print =============== step11 wh -sql select count(tbcol) from $mt where ts < 1626739440001 interval(1d) -print select count(tbcol) from $mt where ts < 1626739440000 interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . wh -#sql create table $st as select count(tbcol) from $mt where ts < 1626739200000 + 4m interval(1d) - -print =============== step12 as -sql select count(tbcol) from $mt interval(1d) -print select count(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . as -sql create table $st as select count(tbcol) as c from $mt interval(1d) - -print =============== step13 -print sleep 120 seconds -sleep 120000 - -print =============== step14 -$st = $stPrefix . 
c1 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c2 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c3 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . av -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . su -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 1900 then - return -1 -endi - -$st = $stPrefix . mi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . ma -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . fi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . la -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . wh -#sql select * from $st -#print ===> select * from $st ===> $data00 $data01 -#if $data01 != 200 then -# return -1 -#endi - -$st = $stPrefix . as -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi diff --git a/tests/script/general/stream/restart_stream.sim b/tests/script/general/stream/restart_stream.sim deleted file mode 100644 index 62e47f9b3a..0000000000 --- a/tests/script/general/stream/restart_stream.sim +++ /dev/null @@ -1,176 +0,0 @@ -system sh/stop_dnodes.sh - - -system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/exec.sh -n dnode1 -s start - -sleep 2000 -sql connect -print ======================== dnode1 start - -$i = 0 -$dbPrefix = rs_db -$tbPrefix = rs_tb -$mtPrefix = rs_mt -$stPrefix = rs_st -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -$db = $dbPrefix -$tb = $tbPrefix . $i -$mt = $mtPrefix -$stm = $stPrefix . m -$stt = $stPrefix . t - -print =============== step1 -sql create database $db -sql use $db - -sql create table $mt (ts timestamp, tbcol int, tbcol2 int ) TAGS(tgcol bool) -$i = 0 -while $i < 10 - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( 0 ) - - $x = -400 - $y = 0 - while $y < $rowNum - $ms = $x . m - sql insert into $tb values (1626739200000 $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -print =============== step2 -$i = 0 -$tb = $tbPrefix . 
$i - -sql select count(*) from $tb interval(1d) -print ===>rows $rows, data $data01 -if $rows != 1 then - return -1 -endi -if $data01 != 20 then - return -1 -endi - -sql select count(*) from $mt interval(1d) -print ===>rows $rows, data $data01 -if $rows != 1 then - return -1 -endi -if $data01 != 200 then - return -1 -endi - -print =============== step3 -sql create table $stt as select count(*) from $tb interval(1d) -sql create table $stm as select count(*) from $mt interval(1d) - -print sleep 120 seconds -sleep 120000 - -sql select * from $stt -print select count(*) from $stt ===> $data00 $data01 -if $rows != 1 then - return -1 -endi -if $data01 != 20 then - return -1 -endi - -sql select * from $stm -print select * from $stm ===> $data00 $data01 -if $rows != 1 then - return -1 -endi -if $data01 != 200 then - return -1 -endi - -print =============== step4 -system sh/exec.sh -n dnode1 -s stop -system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/exec.sh -n dnode1 -s start - -print =============== step5 -print ==> renew cache -sql reset query cache -sleep 1000 - - -print =============== step6 -sql create database $db -sql use $db -sql create table $mt (ts timestamp, tbcol bigint, tbcol2 bigint ) TAGS(tgcol int) -$i = 0 -while $i < 5 - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( 0 ) - - $x = -400 - $y = 0 - while $y < $rowNum - $ms = $x . m - sql insert into $tb values (1626739200000 $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - - -print =============== step7 - -sql select count(*) from $tb interval(1d) -print ===>rows $rows, data $data01 -if $rows != 1 then - return -1 -endi -if $data01 != 20 then - return -1 -endi - -sql select count(*) from $mt interval(1d) -print ===>rows $rows, data $data01 -if $rows != 1 then - return -1 -endi -if $data01 != 100 then - return -1 -endi - -print =============== step8 -sql create table $stt as select count(*) from $tb interval(1d) -sql create table $stm as select count(*) from $mt interval(1d) - -print sleep 120 seconds -sleep 120000 - -sql select * from $stt -sleep 1000 -print ===>rows $rows, data $data01 -if $rows != 1 then - return -1 -endi -if $data01 != 20 then - return -1 -endi - -sql select * from $stm -print ===>rows $rows, data $data01 -if $rows != 1 then - return -1 -endi -if $data01 != 100 then - return -1 -endi - - diff --git a/tests/script/general/stream/stream_1970.sim b/tests/script/general/stream/stream_1970.sim deleted file mode 100644 index 30a733c08f..0000000000 --- a/tests/script/general/stream/stream_1970.sim +++ /dev/null @@ -1,73 +0,0 @@ -system sh/stop_dnodes.sh - -system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/exec.sh -n dnode1 -s start - -sleep 2000 -sql connect - -print ======================== dnode1 start - -$dbPrefix = s3_db -$tbPrefix = s3_tb -$mtPrefix = s3_mt -$stPrefix = s3_st -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print =============== step1 -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . 
$i - -sql drop databae $db -x step1 -step1: -sql create database $db keep 36500 -sql use $db -sql create stable $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -sql create table cq1 as select count(*) from $mt interval(10s); - -sleep 1000 - -sql create table $st using $mt tags(1); - -sql insert into $st values (-50000, 1, 1.0); -sql insert into $st values (-10000, 1, 1.0); -sql insert into $st values (10000, 1, 1.0); - - -$i = 0 -while $i < 12 - sql select * from cq1; - - if $rows != 3 then - sleep 10000 - else - if $data00 != @70-01-01 07:59:10.000@ then - return -1 - endi - if $data01 != 1 then - return -1 - endi - if $data10 != @70-01-01 07:59:50.000@ then - return -1 - endi - if $data11 != 1 then - return -1 - endi - if $data20 != @70-01-01 08:00:10.000@ then - return -1 - endi - if $data21 != 1 then - return -1 - endi - $i = 12 - endi - - $i = $i + 1 -endw - diff --git a/tests/script/general/stream/stream_3.sim b/tests/script/general/stream/stream_3.sim deleted file mode 100644 index b043993814..0000000000 --- a/tests/script/general/stream/stream_3.sim +++ /dev/null @@ -1,201 +0,0 @@ -system sh/stop_dnodes.sh - -system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/exec.sh -n dnode1 -s start - -sleep 2000 -sql connect - -print ======================== dnode1 start - -$dbPrefix = s3_db -$tbPrefix = s3_tb -$mtPrefix = s3_mt -$stPrefix = s3_st -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print =============== step1 -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql drop databae $db -x step1 -step1: -sql create database $db -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = -400 - $y = 0 - while $y < $rowNum - $ms = $x . m - sql insert into $tb values (1626739200000 $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step2 c1 -$i = 1 -$tb = $tbPrefix . $i - -sql select count(*) from $tb interval(1d) -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c1 -sql create table $st as select count(*) from $tb interval(1d) - -print =============== step3 c2 -sql select count(tbcol) from $tb interval(1d) -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c2 -sql create table $st as select count(tbcol) from $tb interval(1d) - -print =============== step4 c3 -sql select count(tbcol2) from $tb interval(1d) -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c3 -sql create table $st as select count(tbcol2) from $tb interval(1d) - -print =============== step5 -print sleep 120 seconds -sleep 120000 - -print =============== step6 -$st = $stPrefix . c1 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c2 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . 
c3 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -print =============== step7 - -system sh/exec.sh -n dnode1 -s stop -system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/exec.sh -n dnode1 -s start - -sleep 4000 -print ======================== dnode1 start - -$dbPrefix = stst3db -$tbPrefix = stst3tb -$mtPrefix = stst3mt -$stPrefix = stst3st -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print =============== step8 -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql drop databae $db -x step8 -step8: -sql create database $db -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = -400 - $y = 0 - while $y < $rowNum - $ms = $x . m - sql insert into $tb values (1626739200000 $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step9 c3 -$i = 1 -$tb = $tbPrefix . $i - -sql select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) -print select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) ===> $data00 $data01 $data02, $data03 -if $data01 != $rowNum then - return -1 -endi -if $data02 != $rowNum then - return -1 -endi -if $data03 != $rowNum then - return -1 -endi - -$st = $stPrefix . c3 -sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) - -print =============== step10 -print sleep 120 seconds -sleep 120000 - -print =============== step11 -#$st = $stPrefix . c3 -#sql select * from $st -x step11 -# print ===> select * from $st first time should be error -# return -1 -#step11: - -print =============== step12 -$st = $stPrefix . c3 -sql select * from $st -print ===> select * from $st -print ===> $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09 -if $data01 != $rowNum then - return -1 -endi -if $data02 != $rowNum then - return -1 -endi -if $data03 != $rowNum then - return -1 -endi - - diff --git a/tests/script/general/stream/stream_restart.sim b/tests/script/general/stream/stream_restart.sim deleted file mode 100644 index 54a60a0081..0000000000 --- a/tests/script/general/stream/stream_restart.sim +++ /dev/null @@ -1,142 +0,0 @@ -system sh/stop_dnodes.sh - -system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/exec.sh -n dnode1 -s start - -sleep 2000 -sql connect - -print ======================== dnode1 start - -$dbPrefix = sr_db -$tbPrefix = sr_tb -$mtPrefix = sr_mt -$stPrefix = sr_st -$tbNum = 10 -$rowNum = 200 -$totalNum = 200 - -print =============== step1 - -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql create database $db -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = 0 - $y = 0 - while $y < $rowNum - $ms = $x . s - sql insert into $tb values (1626739200000 + $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step2 - -$i = 1 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) -sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(10s) - -$i = 5 -$tb = $tbPrefix . $i -$st = $stPrefix . 
$i -sql select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) -sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(10s) - -$i = 8 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) -sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(10s) - -sql show tables -if $rows != 13 then - return -1 -endi - -print =============== step3 -sleep 2000 -system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 1000 -system sh/exec.sh -n dnode1 -s start - -print =============== step4 -print sleep 120 seconds -sleep 120000 - -print =============== step5 -$i = 1 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select * from $tb -print $tb ==> $rows $data00 $data01 -if $rows != $rowNum then - return -1 -endi - -$i = 5 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select * from $tb -print $tb ==> $rows $data00 $data01 -if $rows != $rowNum then - return -1 -endi - -$i = 8 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select * from $tb -print $tb ==> $rows $data00 $data01 -if $rows != $rowNum then - return -1 -endi - -print =============== step6 - -$i = 1 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select * from $st -print $st ==> $rows $data00 $data01 -if $rows <= 1 then - return -1 -endi - -$i = 5 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select * from $st -print $st ==> $rows $data00 $data01 -if $rows <= 1 then - return -1 -endi - -$i = 8 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select * from $st -print $st ==> $rows $data00 $data01 -if $rows <= 1 then - return -1 -endi - - diff --git a/tests/script/general/stream/table_del.sim b/tests/script/general/stream/table_del.sim deleted file mode 100644 index 34673605d6..0000000000 --- a/tests/script/general/stream/table_del.sim +++ /dev/null @@ -1,90 +0,0 @@ -system sh/stop_dnodes.sh - -system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/exec.sh -n dnode1 -s start - -sleep 2000 -sql connect - -print ======================== dnode1 start - -$dbPrefix = td_db -$tbPrefix = td_tb -$mtPrefix = td_mt -$stPrefix = td_st -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print =============== step1 -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql drop databae $db -x step1 -step1: -sql create database $db -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = -400 - $y = 0 - while $y < $rowNum - $ms = $x . m - sql insert into $tb values (1626739200000 $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step2 c3 -$i = 1 -$tb = $tbPrefix . $i - -sql select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) -print select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) ===> $data00 $data01 $data02, $data03 -if $data01 != $rowNum then - return -1 -endi -if $data02 != $rowNum then - return -1 -endi -if $data03 != $rowNum then - return -1 -endi - -$st = $stPrefix . c3 -sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) - -print =============== step3 -sql drop table $tb - -print =============== step4 -print sleep 120 seconds -sleep 120000 - -print =============== step5 -$st = $stPrefix . 
c3 -sql select * from $st -print ===> select * from $st -print ===> $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09 -if $data01 != null then - return -1 -endi -if $data02 != null then - return -1 -endi -if $data03 != null then - return -1 -endi diff --git a/tests/script/general/stream/table_replica1_vnoden.sim b/tests/script/general/stream/table_replica1_vnoden.sim deleted file mode 100644 index 4a6c4fe046..0000000000 --- a/tests/script/general/stream/table_replica1_vnoden.sim +++ /dev/null @@ -1,299 +0,0 @@ -system sh/stop_dnodes.sh - - -system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 1000 -system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 3 -system sh/exec.sh -n dnode1 -s start - -sleep 2000 -sql connect - -print ======================== dnode1 start - -$dbPrefix = t1v_db -$tbPrefix = t1v_tb -$mtPrefix = t1v_mt -$stPrefix = t1v_st -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print =============== step1 -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql drop databae $db -x step1 -step1: -sql create database $db -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = -400 - $y = 0 - while $y < $rowNum - $ms = $x . m - sql insert into $tb values (1626739200000 $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step2 c1 -$i = 1 -$tb = $tbPrefix . $i - -sql select count(*) from $tb interval(1d) -print select count(*) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c1 -sql create table $st as select count(*) from $tb interval(1d) - -print =============== step3 c2 -sql select count(tbcol) from $tb interval(1d) -print select count(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c2 -sql create table $st as select count(tbcol) from $tb interval(1d) - -print =============== step4 c3 -sql select count(tbcol2) from $tb interval(1d) -print select count(tbcol2) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c3 -sql create table $st as select count(tbcol2) from $tb interval(1d) - -print =============== step5 avg -sql select avg(tbcol) from $tb interval(1d) -print select avg(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . av -sql create table $st as select avg(tbcol) from $tb interval(1d) - -print =============== step6 su -sql select sum(tbcol) from $tb interval(1d) -print select sum(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 190 then - return -1 -endi - -$st = $stPrefix . su -sql create table $st as select sum(tbcol) from $tb interval(1d) - -print =============== step7 mi -sql select min(tbcol) from $tb interval(1d) -print select min(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . mi -sql create table $st as select min(tbcol) from $tb interval(1d) - -print =============== step8 ma -sql select max(tbcol) from $tb interval(1d) -print select max(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . 
ma -sql create table $st as select max(tbcol) from $tb interval(1d) - -print =============== step9 fi -sql select first(tbcol) from $tb interval(1d) -print select first(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . fi -sql create table $st as select first(tbcol) from $tb interval(1d) - -print =============== step10 la -sql select last(tbcol) from $tb interval(1d) -print select last(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . la -sql create table $st as select last(tbcol) from $tb interval(1d) - -print =============== step11 st -sql select stddev(tbcol) from $tb interval(1d) -print select stddev(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 5.766281297 then - return -1 -endi - -$st = $stPrefix . st -sql create table $st as select stddev(tbcol) from $tb interval(1d) - -print =============== step12 le -sql select leastsquares(tbcol, 1, 1) from $tb interval(1d) -print select leastsquares(tbcol, 1, 1) from $tb interval(1d) ===> $data00 $data01 -#if $data01 != @(0.000017, -25362055.126740)@ then -# return -1 -#endi - -$st = $stPrefix . le -sql create table $st as select leastsquares(tbcol, 1, 1) from $tb interval(1d) - -print =============== step13 pe - -sql select percentile(tbcol, 1) from $tb interval(1d) -print select percentile(tbcol, 1) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 0.190000000 then - return -1 -endi - -$st = $stPrefix . pe -sql create table $st as select percentile(tbcol, 1) from $tb interval(1d) - -print =============== step14 wh -sql select count(tbcol) from $tb where ts < 1626739440001 interval(1d) -print select count(tbcol) from $tb where ts < 1626739440001 interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . wh -#sql create table $st as select count(tbcol) from $tb where ts < 1626739200000 + 4m interval(1d) - -print =============== step15 as -sql select count(tbcol) from $tb interval(1d) -print select count(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . as -sql create table $st as select count(tbcol) as c from $tb interval(1d) - -print =============== step16 -print sleep 120 seconds -sleep 120000 - -print =============== step17 -$st = $stPrefix . c1 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c2 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c3 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . av -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . su -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 190 then - return -1 -endi - -$st = $stPrefix . mi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . ma -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . fi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . 
la -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . st -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 5.766281297 then - return -1 -endi - -$st = $stPrefix . le -sql select * from $st -#print ===> select * from $st ===> $data00 $data01 -#if $data01 != @(0.000017, -25270086.331047)@ then -# return -1 -#endi - -$st = $stPrefix . pe -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0.190000000 then - return -1 -endi - -$st = $stPrefix . wh -#sql select * from $st -#print ===> select * from $st ===> $data00 $data01 -#if $data01 != $rowNum then -# return -1 -#endi - -$st = $stPrefix . as -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi diff --git a/tests/script/general/stream/testSuite.sim b/tests/script/general/stream/testSuite.sim deleted file mode 100644 index 4a9912b848..0000000000 --- a/tests/script/general/stream/testSuite.sim +++ /dev/null @@ -1,6 +0,0 @@ -run general/stream/stream_3.sim -run general/stream/stream_restart.sim -run general/stream/table_del.sim -run general/stream/metrics_del.sim -run general/stream/table_replica1_vnoden.sim -run general/stream/metrics_replica1_vnoden.sim \ No newline at end of file From d40191b62c9daaefc6e98d1b8c1a11ffdec66b23 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 13 Jul 2022 20:58:14 +0800 Subject: [PATCH 54/56] fix(query): prepare the buffer before loading data. --- source/libs/executor/src/scanoperator.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 8786d30007..ba89592189 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -2593,7 +2593,7 @@ static SSDataBlock* getTableDataBlock(void* param) { SDataBlockInfo binfo = pBlock->info; tsdbRetrieveDataBlockInfo(reader, &binfo); - blockDataEnsureCapacity(pBlock, binfo.capacity); + blockDataEnsureCapacity(pBlock, binfo.rows); pBlock->info.type = binfo.type; pBlock->info.uid = binfo.uid; pBlock->info.window = binfo.window; From 083de75afcd1fd0a35ee16218374d65f97b53254 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 13 Jul 2022 23:15:58 +0800 Subject: [PATCH 55/56] fix(query):fix memory leak. 
--- source/common/src/tdatablock.c | 2 -- source/dnode/vnode/src/tsdb/tsdbRead.c | 25 ++++++++++++++++++------- source/libs/executor/src/executorimpl.c | 19 ++++++++++++++++--- source/libs/executor/src/scanoperator.c | 6 ------ source/libs/function/src/builtinsimpl.c | 12 ++++++++++-- 5 files changed, 44 insertions(+), 20 deletions(-) diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 9b65e08d29..f51023189d 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -1174,8 +1174,6 @@ int32_t colInfoDataEnsureCapacity(SColumnInfoData* pColumn, uint32_t numOfRows) int32_t blockDataEnsureCapacity(SSDataBlock* pDataBlock, uint32_t numOfRows) { int32_t code = 0; - // ASSERT(numOfRows > 0); - if (numOfRows == 0) { return TSDB_CODE_SUCCESS; } diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c index e86a14a30f..4aaa80d3ae 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead.c @@ -234,6 +234,7 @@ static void destroyBlockScanInfo(SHashObj* pTableMap) { } taosArrayDestroy(p->delSkyline); + taosArrayDestroy(p->pBlockList); p->delSkyline = NULL; } @@ -302,9 +303,9 @@ static bool filesetIteratorNext(SFilesetIter* pIter, STsdbReader* pReader) { STimeWindow win = {0}; while (1) { -// if (pReader->pFileReader != NULL) { -// tsdbDataFReaderClose(&pReader->pFileReader); -// } + if (pReader->pFileReader != NULL) { + tsdbDataFReaderClose(&pReader->pFileReader); + } pReader->status.pCurrentFileset = (SDFileSet*)taosArrayGet(pIter->pFileList, pIter->index); @@ -696,12 +697,14 @@ static int32_t doLoadFileBlock(STsdbReader* pReader, SArray* pIndexList, uint32_ void* p = taosArrayPush(pScanInfo->pBlockList, &block); if (p == NULL) { + tMapDataClear(&mapData); return TSDB_CODE_OUT_OF_MEMORY; } (*numOfBlocks) += 1; } + tMapDataClear(&mapData); if (pScanInfo->pBlockList != NULL && taosArrayGetSize(pScanInfo->pBlockList) > 0) { (*numOfValidTables) += 1; } @@ -1308,6 +1311,8 @@ static int32_t initBlockIterator(STsdbReader* pReader, SDataBlockIter* pBlockIte pReader->idStr); pBlockIter->index = asc ? 
0 : (numOfBlocks - 1); + + cleanupBlockOrderSupporter(&sup); return TSDB_CODE_SUCCESS; } @@ -1990,6 +1995,7 @@ static TSDBKEY getCurrentKeyInBuf(SDataBlockIter* pBlockIter, STsdbReader* pRead static int32_t moveToNextFile(STsdbReader* pReader, int32_t* numOfBlocks) { SReaderStatus* pStatus = &pReader->status; + SArray* pIndexList = taosArrayInit(4, sizeof(SBlockIdx)); while (1) { bool hasNext = filesetIteratorNext(&pStatus->fileIter, pReader); @@ -1997,9 +2003,10 @@ static int32_t moveToNextFile(STsdbReader* pReader, int32_t* numOfBlocks) { break; } - SArray* pIndexList = taosArrayInit(4, sizeof(SBlockIdx)); + taosArrayClear(pIndexList); int32_t code = doLoadBlockIndex(pReader, pReader->pFileReader, pIndexList); if (code != TSDB_CODE_SUCCESS) { + taosArrayDestroy(pIndexList); return code; } @@ -2007,6 +2014,7 @@ static int32_t moveToNextFile(STsdbReader* pReader, int32_t* numOfBlocks) { uint32_t numOfValidTable = 0; code = doLoadFileBlock(pReader, pIndexList, &numOfValidTable, numOfBlocks); if (code != TSDB_CODE_SUCCESS) { + taosArrayDestroy(pIndexList); return code; } @@ -2014,10 +2022,10 @@ static int32_t moveToNextFile(STsdbReader* pReader, int32_t* numOfBlocks) { break; } } - // no blocks in current file, try next files } + taosArrayDestroy(pIndexList); return TSDB_CODE_SUCCESS; } @@ -3081,10 +3089,13 @@ int32_t tsdbGetFileBlocksDistInfo(STsdbReader* pReader, STableBlockDistInfo* pTa SDataBlockIter* pBlockIter = &pStatus->blockIter; pTableBlockInfo->numOfFiles += pStatus->fileIter.numOfFiles; - pTableBlockInfo->numOfBlocks += pBlockIter->numOfBlocks; + + if (pBlockIter->numOfBlocks > 0) { + pTableBlockInfo->numOfBlocks += pBlockIter->numOfBlocks; + } pTableBlockInfo->numOfTables = numOfTables; - bool hasNext = true; + bool hasNext = (pBlockIter->numOfBlocks > 0); while (true) { if (hasNext) { diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index 760d7e55c8..4e69caae0c 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -3475,10 +3475,13 @@ static SSDataBlock* doFill(SOperatorInfo* pOperator) { static void destroyExprInfo(SExprInfo* pExpr, int32_t numOfExprs) { for (int32_t i = 0; i < numOfExprs; ++i) { SExprInfo* pExprInfo = &pExpr[i]; - if (pExprInfo->pExpr->nodeType == QUERY_NODE_COLUMN) { - taosMemoryFree(pExprInfo->base.pParam[0].pCol); + for(int32_t j = 0; j < pExprInfo->base.numOfParams; ++j) { + if (pExprInfo->base.pParam[j].type == FUNC_PARAM_TYPE_COLUMN) { + taosMemoryFreeClear(pExprInfo->base.pParam[j].pCol); + } + taosMemoryFree(pExprInfo->base.pParam); } - taosMemoryFree(pExprInfo->base.pParam); + taosMemoryFree(pExprInfo->pExpr); } } @@ -3685,10 +3688,20 @@ void destroyBasicOperatorInfo(void* param, int32_t numOfOutput) { taosMemoryFreeClear(param); } + +static void freeItem(void* pItem) { + void** p = pItem; + if (*p != NULL) { + taosMemoryFreeClear(*p); + } +} + void destroyAggOperatorInfo(void* param, int32_t numOfOutput) { SAggOperatorInfo* pInfo = (SAggOperatorInfo*)param; cleanupBasicInfo(&pInfo->binfo); + cleanupAggSup(&pInfo->aggSup); + taosArrayDestroyEx(pInfo->groupResInfo.pRows, freeItem); taosMemoryFreeClear(param); } diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index c7112ab8a6..2098e515df 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -196,12 +196,6 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanInfo* pTableSca pBlockInfo->window.skey, 
pBlockInfo->window.ekey, pBlockInfo->rows); pCost->skipBlocks += 1; - // clear all data in pBlock that are set when handing the previous block - for (int32_t i = 0; i < taosArrayGetSize(pBlock->pDataBlock); ++i) { - SColumnInfoData* pcol = taosArrayGet(pBlock->pDataBlock, i); - pcol->pData = NULL; - } - return TSDB_CODE_SUCCESS; } else if (*status == FUNC_DATA_REQUIRED_STATIS_LOAD) { pCost->loadBlockStatis += 1; diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index c1143020f0..e622c0c1af 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -5559,6 +5559,10 @@ int32_t blockDistFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { SColumnInfoData* pColInfo = taosArrayGet(pBlock->pDataBlock, 0); + if (pData->totalRows == 0) { + pData->minRows = 0; + } + int32_t row = 0; char st[256] = {0}; double totalRawSize = pData->totalRows * pData->rowSize; @@ -5570,10 +5574,14 @@ int32_t blockDistFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { varDataSetLen(st, len); colDataAppend(pColInfo, row++, st, false); + int64_t avgRows = 0; + if (pData->numOfBlocks > 0) { + avgRows = pData->totalRows / pData->numOfBlocks; + } + len = sprintf(st + VARSTR_HEADER_SIZE, "Total_Rows=[%" PRId64 "] Inmem_Rows=[%d] MinRows=[%d] MaxRows=[%d] Average_Rows=[%" PRId64 "]", - pData->totalRows, pData->numOfInmemRows, pData->minRows, pData->maxRows, - pData->totalRows / pData->numOfBlocks); + pData->totalRows, pData->numOfInmemRows, pData->minRows, pData->maxRows, avgRows); varDataSetLen(st, len); colDataAppend(pColInfo, row++, st, false); From b2b69f18689ca8c86b1e4ad1ed65647d781eeb42 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 13 Jul 2022 23:42:59 +0800 Subject: [PATCH 56/56] fix(query):fix invalid write. --- source/libs/executor/src/executorimpl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index 4e69caae0c..16f5e47efc 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -3479,9 +3479,9 @@ static void destroyExprInfo(SExprInfo* pExpr, int32_t numOfExprs) { if (pExprInfo->base.pParam[j].type == FUNC_PARAM_TYPE_COLUMN) { taosMemoryFreeClear(pExprInfo->base.pParam[j].pCol); } - taosMemoryFree(pExprInfo->base.pParam); } + taosMemoryFree(pExprInfo->base.pParam); taosMemoryFree(pExprInfo->pExpr); } }
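
Editor's note on the final change (PATCH 56/56, "fix invalid write"): the preceding commit freed pExprInfo->base.pParam inside the loop that still indexes pParam[j], so every iteration after the first touched freed memory and released the same pointer again; the fix moves the free of the backing array outside the loop, after all per-parameter cleanup. The sketch below is a minimal, standalone reproduction of that pattern using plain malloc/free and hypothetical simplified types (Param and ExprBase are illustrative stand-ins, not the actual TDengine SFunctParam/SExprBasicInfo structs), so the before/after behaviour can be checked in isolation, e.g. under AddressSanitizer:

#include <stdlib.h>

#define PARAM_TYPE_COLUMN 1   /* stand-in for FUNC_PARAM_TYPE_COLUMN */

/* Hypothetical, simplified stand-ins for the executor's expression structs. */
typedef struct {
    int   type;
    void *pCol;               /* owned only when type == PARAM_TYPE_COLUMN */
} Param;

typedef struct {
    Param *pParam;            /* heap array with numOfParams entries */
    int    numOfParams;
} ExprBase;

/* Buggy shape (as introduced in PATCH 55/56): the array is freed on every
 * iteration, so the next iteration reads freed memory and frees it again. */
static void destroyParamsBuggy(ExprBase *b) {
    for (int j = 0; j < b->numOfParams; ++j) {
        if (b->pParam[j].type == PARAM_TYPE_COLUMN) {
            free(b->pParam[j].pCol);
        }
        free(b->pParam);       /* use-after-free / double free when numOfParams > 1 */
    }
}

/* Fixed shape (as applied in PATCH 56/56): release each element first, then
 * free the backing array exactly once. */
static void destroyParamsFixed(ExprBase *b) {
    for (int j = 0; j < b->numOfParams; ++j) {
        if (b->pParam[j].type == PARAM_TYPE_COLUMN) {
            free(b->pParam[j].pCol);
            b->pParam[j].pCol = NULL;
        }
    }
    free(b->pParam);
    b->pParam = NULL;
}

Compiled with -fsanitize=address, destroyParamsBuggy reports a heap-use-after-free on the second iteration for any expression with more than one parameter, which is the class of error the last commit removes; destroyParamsFixed runs clean.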