From f70321ee53f2403e770acd2bda3a41038b437549 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 10 Oct 2024 10:52:48 +0800 Subject: [PATCH 01/51] fix(vnd): check return value. --- source/dnode/vnode/src/vnd/vnodeSvr.c | 27 +++++++++++++-------------- 1 file changed, 13 insertions(+), 14 deletions(-) diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index 371eaa0774..1b1bb9257d 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -640,40 +640,39 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t ver, SRpcMsg } } break; case TDMT_STREAM_TASK_DROP: { - if (tqProcessTaskDropReq(pVnode->pTq, pMsg->pCont, pMsg->contLen) < 0) { + if ((code = tqProcessTaskDropReq(pVnode->pTq, pMsg->pCont, pMsg->contLen)) < 0) { goto _err; } } break; case TDMT_STREAM_TASK_UPDATE_CHKPT: { - if (tqProcessTaskUpdateCheckpointReq(pVnode->pTq, pMsg->pCont, pMsg->contLen) < 0) { + if ((code = tqProcessTaskUpdateCheckpointReq(pVnode->pTq, pMsg->pCont, pMsg->contLen)) < 0) { goto _err; } } break; case TDMT_STREAM_CONSEN_CHKPT: { - if (pVnode->restored) { - if (tqProcessTaskConsenChkptIdReq(pVnode->pTq, pMsg) < 0) { - goto _err; - } + if (pVnode->restored && (code = tqProcessTaskConsenChkptIdReq(pVnode->pTq, pMsg)) < 0) { + goto _err; } + } break; case TDMT_STREAM_TASK_PAUSE: { if (pVnode->restored && vnodeIsLeader(pVnode) && - tqProcessTaskPauseReq(pVnode->pTq, ver, pMsg->pCont, pMsg->contLen) < 0) { + (code = tqProcessTaskPauseReq(pVnode->pTq, ver, pMsg->pCont, pMsg->contLen)) < 0) { goto _err; } } break; case TDMT_STREAM_TASK_RESUME: { if (pVnode->restored && vnodeIsLeader(pVnode) && - tqProcessTaskResumeReq(pVnode->pTq, ver, pMsg->pCont, pMsg->contLen) < 0) { + (code = tqProcessTaskResumeReq(pVnode->pTq, ver, pMsg->pCont, pMsg->contLen)) < 0) { goto _err; } } break; case TDMT_VND_STREAM_TASK_RESET: { - if (pVnode->restored && vnodeIsLeader(pVnode)) { - if (tqProcessTaskResetReq(pVnode->pTq, pMsg) < 0) { + if (pVnode->restored && vnodeIsLeader(pVnode) && + (code = tqProcessTaskResetReq(pVnode->pTq, pMsg)) < 0) { goto _err; } - } + } break; case TDMT_VND_ALTER_CONFIRM: needCommit = pVnode->config.hashChange; @@ -693,10 +692,10 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t ver, SRpcMsg case TDMT_VND_DROP_INDEX: vnodeProcessDropIndexReq(pVnode, ver, pReq, len, pRsp); break; - case TDMT_VND_STREAM_CHECK_POINT_SOURCE: + case TDMT_VND_STREAM_CHECK_POINT_SOURCE: // always return true tqProcessTaskCheckPointSourceReq(pVnode->pTq, pMsg, pRsp); break; - case TDMT_VND_STREAM_TASK_UPDATE: + case TDMT_VND_STREAM_TASK_UPDATE: // always return true tqProcessTaskUpdateReq(pVnode->pTq, pMsg); break; case TDMT_VND_COMPACT: @@ -752,7 +751,7 @@ _exit: _err: vError("vgId:%d, process %s request failed since %s, ver:%" PRId64, TD_VID(pVnode), TMSG_INFO(pMsg->msgType), - tstrerror(terrno), ver); + tstrerror(code), ver); return code; } From 3367f129daf04dbd3731c4481dd81a1a0dacff10 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 10 Oct 2024 11:27:37 +0800 Subject: [PATCH 02/51] fix(vnd): check return value. 
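Both this patch and PATCH 01 standardize the assign-and-check idiom: the handler's return value is captured into `code` inside the `if` condition, so the `_err` label always logs and returns the actual failure code instead of a possibly stale `terrno`. A minimal sketch of the idiom follows, using hypothetical handler and message names rather than the real vnode API:

#include <stdint.h>
#include <stdio.h>

/* hypothetical handler: returns 0 on success, a negative code on failure */
static int32_t demoProcessReq(const void *pCont, int32_t contLen) {
  (void)pCont;
  (void)contLen;
  return -1;
}

static int32_t demoProcessWriteMsg(const void *pCont, int32_t contLen) {
  int32_t code = 0;

  /* assign and test in one step, so `code` already holds the handler's
   * result when control jumps to the error label */
  if ((code = demoProcessReq(pCont, contLen)) < 0) {
    goto _err;
  }
  return 0;

_err:
  /* report the captured code, not a global that a later call may overwrite */
  printf("process request failed, code:%d\n", code);
  return code;
}

The payoff shows in the error path of PATCH 01 itself, where the final log line switches from tstrerror(terrno) to tstrerror(code).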
--- source/dnode/vnode/src/vnd/vnodeSvr.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index 1b1bb9257d..dd13c975cf 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -633,9 +633,7 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t ver, SRpcMsg } break; case TDMT_STREAM_TASK_DEPLOY: { - int32_t code = tqProcessTaskDeployReq(pVnode->pTq, ver, pReq, len); - if (code != TSDB_CODE_SUCCESS) { - terrno = code; + if ((code = tqProcessTaskDeployReq(pVnode->pTq, ver, pReq, len)) != TSDB_CODE_SUCCESS) { goto _err; } } break; From 5dc933f5f1b80048a114dcd2d3ae652d05779c3c Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 10 Oct 2024 14:52:51 +0800 Subject: [PATCH 03/51] refactor: add some logs. --- source/common/src/tdatablock.c | 1 + source/dnode/vnode/src/tsdb/tsdbRead2.c | 4 +++- source/util/src/tarray.c | 1 + 3 files changed, 5 insertions(+), 1 deletion(-) diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index d8a66f82bf..0d00a6a4c7 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -2529,6 +2529,7 @@ int32_t dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf SColumnInfoData* pColInfoData = taosArrayGet(pDataBlock->pDataBlock, k); if (pColInfoData == NULL) { code = terrno; + uError("invalid param, size of list:%d index k:%d", (int32_t) taosArrayGetSize(pDataBlock->pDataBlock), k) goto _exit; } diff --git a/source/dnode/vnode/src/tsdb/tsdbRead2.c b/source/dnode/vnode/src/tsdb/tsdbRead2.c index 5b6511a38e..4e253d7c2e 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead2.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead2.c @@ -855,6 +855,7 @@ static int32_t loadFileBlockBrinInfo(STsdbReader* pReader, SArray* pIndexList, S STableBlockScanInfo** p = taosArrayGetLast(pTableScanInfoList); if (p == NULL) { clearBrinBlockIter(&iter); + tsdbError("invalid param, empty in tablescanInfoList, %s", pReader->idStr); return TSDB_CODE_INVALID_PARA; } @@ -5256,7 +5257,7 @@ int32_t tsdbNextDataBlock2(STsdbReader* pReader, bool* hasNext) { // NOTE: the following codes is used to perform test for suspend/resume for tsdbReader when it blocks the commit // the data should be ingested in round-robin and all the child tables should be createted before ingesting data // the version range of query will be used to identify the correctness of suspend/resume functions. 
- // this function will blocked before loading the SECOND block from vnode-buffer, and restart itself from sst-files + // this function will be blocked before loading the SECOND block from vnode-buffer, and restart itself from sst-files #if SUSPEND_RESUME_TEST if (!pReader->status.suspendInvoked && !pReader->status.loadFromFile) { tsem_wait(&pReader->resumeAfterSuspend); @@ -5909,6 +5910,7 @@ int32_t tsdbGetTableSchema(SMeta* pMeta, int64_t uid, STSchema** pSchema, int64_ } else if (mr.me.type == TSDB_NORMAL_TABLE) { // do nothing } else { code = TSDB_CODE_INVALID_PARA; + tsdbError("invalid mr.me.type:%d %s, code:%s", mr.me.type, tstrerror(code)); metaReaderClear(&mr); return code; } diff --git a/source/util/src/tarray.c b/source/util/src/tarray.c index 7989a2468b..b94bb512e2 100644 --- a/source/util/src/tarray.c +++ b/source/util/src/tarray.c @@ -200,6 +200,7 @@ void* taosArrayPop(SArray* pArray) { void* taosArrayGet(const SArray* pArray, size_t index) { if (NULL == pArray) { terrno = TSDB_CODE_INVALID_PARA; + uError("failed to return value from array of null ptr"); return NULL; } From 5c1cffed692e27fcbc042bd2f2c571c3b5357a8d Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 10 Oct 2024 15:54:15 +0800 Subject: [PATCH 04/51] fix(stream): add some logs. --- source/dnode/vnode/src/tqCommon/tqCommon.c | 7 ++----- source/libs/stream/src/streamMeta.c | 4 ++++ source/libs/stream/src/streamSched.c | 6 +++++- 3 files changed, 11 insertions(+), 6 deletions(-) diff --git a/source/dnode/vnode/src/tqCommon/tqCommon.c b/source/dnode/vnode/src/tqCommon/tqCommon.c index 6b7e857120..3871011407 100644 --- a/source/dnode/vnode/src/tqCommon/tqCommon.c +++ b/source/dnode/vnode/src/tqCommon/tqCommon.c @@ -1119,10 +1119,6 @@ static int32_t tqProcessTaskResumeImpl(void* handle, SStreamTask* pTask, int64_t int32_t vgId = pMeta->vgId; int32_t code = 0; - if (pTask == NULL) { - return -1; - } - streamTaskResume(pTask); ETaskStatus status = streamTaskGetStatus(pTask).state; @@ -1150,7 +1146,6 @@ static int32_t tqProcessTaskResumeImpl(void* handle, SStreamTask* pTask, int64_t } } - streamMetaReleaseTask(pMeta, pTask); return code; } @@ -1173,6 +1168,7 @@ int32_t tqStreamTaskProcessTaskResumeReq(void* handle, int64_t sversion, char* m code = tqProcessTaskResumeImpl(handle, pTask, sversion, pReq->igUntreated, fromVnode); if (code != 0) { + streamMetaReleaseTask(pMeta, pTask); return code; } @@ -1186,6 +1182,7 @@ int32_t tqStreamTaskProcessTaskResumeReq(void* handle, int64_t sversion, char* m streamMutexUnlock(&pHTask->lock); code = tqProcessTaskResumeImpl(handle, pHTask, sversion, pReq->igUntreated, fromVnode); + streamMetaReleaseTask(pMeta, pHTask); } return code; diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index 44c9e76906..29152c6205 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -759,6 +759,10 @@ void streamMetaAcquireOneTask(SStreamTask* pTask) { } void streamMetaReleaseTask(SStreamMeta* UNUSED_PARAM(pMeta), SStreamTask* pTask) { + if (pTask == NULL) { + return; + } + int32_t taskId = pTask->id.taskId; int32_t ref = atomic_sub_fetch_32(&pTask->refCnt, 1); diff --git a/source/libs/stream/src/streamSched.c b/source/libs/stream/src/streamSched.c index 98920e6f70..095a5af6d4 100644 --- a/source/libs/stream/src/streamSched.c +++ b/source/libs/stream/src/streamSched.c @@ -63,7 +63,11 @@ int32_t streamTaskSchedTask(SMsgCb* pMsgCb, int32_t vgId, int64_t streamId, int3 pRunReq->reqType = execType; SRpcMsg msg = {.msgType = 
TDMT_STREAM_TASK_RUN, .pCont = pRunReq, .contLen = sizeof(SStreamTaskRunReq)}; - return tmsgPutToQueue(pMsgCb, STREAM_QUEUE, &msg); + int32_t code = tmsgPutToQueue(pMsgCb, STREAM_QUEUE, &msg); + if (code) { + stError("vgId:%d failed to put msg into stream queue, code:%s, %x", vgId, tstrerror(code), taskId); + } + return code; } void streamTaskClearSchedIdleInfo(SStreamTask* pTask) { pTask->status.schedIdleTime = 0; } From d2f2a931fb0cf58c27434a04b6e8ad9a53fb9916 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 10 Oct 2024 16:21:11 +0800 Subject: [PATCH 05/51] fix(util): reset the returned length value. --- source/common/src/tdatablock.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 2047573b74..98e58c8bd7 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -2530,7 +2530,6 @@ int32_t dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf SColumnInfoData* pColInfoData = taosArrayGet(pDataBlock->pDataBlock, k); if (pColInfoData == NULL) { code = terrno; - uError("invalid param, size of list:%d index k:%d", (int32_t) taosArrayGetSize(pDataBlock->pDataBlock), k) goto _exit; } @@ -2611,7 +2610,10 @@ int32_t dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf if (code < 0) { uError("func %s failed to convert to ucs charset since %s", __func__, tstrerror(code)); goto _exit; + } else { // reset the length value + code = TSDB_CODE_SUCCESS; } + len += snprintf(dumpBuf + len, size - len, " %15s |", pBuf); if (len >= size - 1) goto _exit; } break; From 6e43521ba9fb2e6d4b580ef09639a16bc3a5e9d8 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 10 Oct 2024 18:28:14 +0800 Subject: [PATCH 06/51] fix(stream): only keep the latest pause operation status. --- source/libs/stream/src/streamTaskSm.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/source/libs/stream/src/streamTaskSm.c b/source/libs/stream/src/streamTaskSm.c index 3501d30be4..a10c4c30d5 100644 --- a/source/libs/stream/src/streamTaskSm.c +++ b/source/libs/stream/src/streamTaskSm.c @@ -485,6 +485,11 @@ int32_t streamTaskHandleEventAsync(SStreamTaskSM* pSM, EStreamTaskEvent event, _ static void keepPrevInfo(SStreamTaskSM* pSM) { STaskStateTrans* pTrans = pSM->pActiveTrans; + // we only keep the latest pause state + if (pSM->prev.state.state == TASK_STATUS__PAUSE && pSM->current.state == TASK_STATUS__PAUSE) { + return; + } + pSM->prev.state = pSM->current; pSM->prev.evt = pTrans->event; } @@ -501,9 +506,10 @@ int32_t streamTaskOnHandleEventSuccess(SStreamTaskSM* pSM, EStreamTaskEvent even if (pTrans == NULL) { ETaskStatus s = pSM->current.state; - if (s != TASK_STATUS__DROPPING && s != TASK_STATUS__PAUSE && s != TASK_STATUS__STOP && - s != TASK_STATUS__UNINIT && s != TASK_STATUS__READY) { - stError("s-task:%s invalid task status:%s on handling event:%s success", id, pSM->current.name, GET_EVT_NAME(pSM->prev.evt)); + if (s != TASK_STATUS__DROPPING && s != TASK_STATUS__PAUSE && s != TASK_STATUS__STOP && s != TASK_STATUS__UNINIT && + s != TASK_STATUS__READY) { + stError("s-task:%s invalid task status:%s on handling event:%s success", id, pSM->current.name, + GET_EVT_NAME(pSM->prev.evt)); } // the pSM->prev.evt may be 0, so print string is not appropriate. 
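PATCH 06 above guards a specific invariant: when a task that is already paused receives another pause event, the two-slot state history must not collapse into PAUSE/PAUSE, or the task loses the state it should resume back into. A simplified sketch of that invariant, with stand-in types rather than the real SStreamTaskSM and TASK_STATUS__ enum:

typedef enum { DEMO_ST_READY, DEMO_ST_PAUSE, DEMO_ST_DROPPING } EDemoStatus;

typedef struct {
  EDemoStatus prev;     /* last distinct state, restored on resume */
  EDemoStatus current;  /* state the machine is in right now */
} SDemoTaskSM;

/* record the current state as "previous" before applying a transition, but
 * skip the bookkeeping when both the old and the new state are PAUSE, so a
 * burst of pause events cannot erase the pre-pause state */
static void demoKeepPrevInfo(SDemoTaskSM *pSM, EDemoStatus next) {
  if (pSM->current == DEMO_ST_PAUSE && next == DEMO_ST_PAUSE) {
    return;
  }
  pSM->prev = pSM->current;
}

PATCH 06 places this guard inside keepPrevInfo(); the next patch moves the same check out to the caller so that the current-state update is skipped as well.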
From 7e1c6b07392f21b29c221af1399b73754ba61617 Mon Sep 17 00:00:00 2001
From: Haojun Liao
Date: Thu, 10 Oct 2024 18:34:52 +0800
Subject: [PATCH 07/51] fix(stream): avoid a later pause overwriting the previous pause state.

---
 source/libs/stream/src/streamTaskSm.c | 14 ++++++--------
 1 file changed, 6 insertions(+), 8 deletions(-)

diff --git a/source/libs/stream/src/streamTaskSm.c b/source/libs/stream/src/streamTaskSm.c
index a10c4c30d5..17d5d884a7 100644
--- a/source/libs/stream/src/streamTaskSm.c
+++ b/source/libs/stream/src/streamTaskSm.c
@@ -485,11 +485,6 @@ int32_t streamTaskHandleEventAsync(SStreamTaskSM* pSM, EStreamTaskEvent event, _
 static void keepPrevInfo(SStreamTaskSM* pSM) {
   STaskStateTrans* pTrans = pSM->pActiveTrans;
 
-  // we only keep the latest pause state
-  if (pSM->prev.state.state == TASK_STATUS__PAUSE && pSM->current.state == TASK_STATUS__PAUSE) {
-    return;
-  }
-
   pSM->prev.state = pSM->current;
   pSM->prev.evt = pTrans->event;
 }
@@ -527,10 +522,13 @@ int32_t streamTaskOnHandleEventSuccess(SStreamTaskSM* pSM, EStreamTaskEvent even
     return TSDB_CODE_STREAM_INVALID_STATETRANS;
   }
 
-  keepPrevInfo(pSM);
+  // repeat pause will not overwrite the previous pause state
+  if (pSM->current.state != TASK_STATUS__PAUSE || pTrans->next.state != TASK_STATUS__PAUSE) {
+    keepPrevInfo(pSM);
 
-  pSM->current = pTrans->next;
-  pSM->pActiveTrans = NULL;
+    pSM->current = pTrans->next;
+    pSM->pActiveTrans = NULL;
+  }
 
   // todo remove it
   // todo: handle the error code

From a197b20466f111589d3822aa51f3e47eaf7fbf12 Mon Sep 17 00:00:00 2001
From: Haojun Liao
Date: Thu, 10 Oct 2024 19:28:08 +0800
Subject: [PATCH 08/51] other: update logs.

---
 source/libs/stream/src/streamTimer.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/source/libs/stream/src/streamTimer.c b/source/libs/stream/src/streamTimer.c
index 8b77fe7cb1..0da9acfd1d 100644
--- a/source/libs/stream/src/streamTimer.c
+++ b/source/libs/stream/src/streamTimer.c
@@ -56,7 +56,7 @@ void streamTmrStart(TAOS_TMR_CALLBACK fp, int32_t mseconds, void* pParam, void*
     }
   }
 
-  stDebug("vgId:%d start %s tmr succ", vgId, pMsg);
+  stTrace("vgId:%d start %s tmr succ", vgId, pMsg);
 }
 
 void streamTmrStop(tmr_h tmrId) {

From 50ceb19cbff69a3f96d7230e9f7b03667614f1bc Mon Sep 17 00:00:00 2001
From: Haojun Liao
Date: Fri, 11 Oct 2024 19:46:54 +0800
Subject: [PATCH 09/51] fix(stream): reset the activeTrans if pause is received repeatedly.

---
 source/libs/stream/src/streamTaskSm.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/source/libs/stream/src/streamTaskSm.c b/source/libs/stream/src/streamTaskSm.c
index 17d5d884a7..c3a2742aa2 100644
--- a/source/libs/stream/src/streamTaskSm.c
+++ b/source/libs/stream/src/streamTaskSm.c
@@ -525,11 +525,12 @@ int32_t streamTaskOnHandleEventSuccess(SStreamTaskSM* pSM, EStreamTaskEvent even
   // repeat pause will not overwrite the previous pause state
   if (pSM->current.state != TASK_STATUS__PAUSE || pTrans->next.state != TASK_STATUS__PAUSE) {
     keepPrevInfo(pSM);
-    pSM->current = pTrans->next;
-    pSM->pActiveTrans = NULL;
+    pSM->current = pTrans->next;
+  } else {
+    stDebug("s-task:%s repeat pause evt recv, not update prev status", id);
   }
 
+  pSM->pActiveTrans = NULL;
   // todo remove it
   // todo: handle the error code
   // on success callback, add into lock if necessary, or maybe we should add an option for this?
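Patches 07 and 09 together settle on one shape: the repeated-pause branch skips only the prev/current bookkeeping, while the pending-transition handle is released on every handled event. A condensed sketch of that final shape, again with stand-in names rather than the real stream task types:

#include <stdbool.h>
#include <stddef.h>

typedef enum { DEMO_ST_READY, DEMO_ST_PAUSE } EDemoSt;
typedef struct { EDemoSt next; } SDemoTrans;
typedef struct {
  EDemoSt     prev;
  EDemoSt     current;
  SDemoTrans *pActiveTrans;  /* transition currently being applied */
} SDemoSM;

static void demoOnEventSuccess(SDemoSM *pSM, SDemoTrans *pTrans) {
  bool repeatedPause = (pSM->current == DEMO_ST_PAUSE && pTrans->next == DEMO_ST_PAUSE);
  if (!repeatedPause) {
    pSM->prev = pSM->current;    /* keepPrevInfo() in the real code */
    pSM->current = pTrans->next;
  }
  /* always released: if this stayed set, the next event would appear to
   * race an in-flight transition and be rejected */
  pSM->pActiveTrans = NULL;
}

This is exactly the bug PATCH 09 corrects: PATCH 07 had left pActiveTrans inside the conditional, so a repeated pause left a dangling active transition behind.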
From 618ec35190d5fa9f86f227767f87db30002aa621 Mon Sep 17 00:00:00 2001 From: charles Date: Sat, 12 Oct 2024 15:15:15 +0800 Subject: [PATCH 10/51] update encrypt test case for ts-5507 by charles --- tests/army/db-encrypt/basic.py | 30 ++++++++++++++++++++++++++++++ tests/parallel_test/cases.task | 2 +- 2 files changed, 31 insertions(+), 1 deletion(-) diff --git a/tests/army/db-encrypt/basic.py b/tests/army/db-encrypt/basic.py index 8d30bbcfe2..ea648f1b8f 100644 --- a/tests/army/db-encrypt/basic.py +++ b/tests/army/db-encrypt/basic.py @@ -13,6 +13,7 @@ from frame.srvCtl import * from frame.caseBase import * from frame import * from frame.autogen import * +from frame import epath # from frame.server.dnodes import * # from frame.server.cluster import * @@ -20,7 +21,9 @@ from frame.autogen import * class TDTestCase(TBase): def init(self, conn, logSql, replicaVar=1): + updatecfgDict = {'dDebugFlag':131} super(TDTestCase, self).init(conn, logSql, replicaVar=1, checkColName="c1") + self.valgrind = 0 self.db = "test" self.stb = "meters" @@ -50,9 +53,36 @@ class TDTestCase(TBase): tdSql.error("create encrypt_key '12345678abcdefghi'") tdSql.error("create database test ENCRYPT_ALGORITHM 'sm4'") + def recreate_dndoe_encrypt_key(self): + """ + Description: From the jira TS-5507, the encrypt key can be recreated. + create: + 2024-09-23 created by Charles + update: + None + """ + # taosd path + taosd_path = epath.binPath() + tdLog.info(f"taosd_path: {taosd_path}") + # dnode2 path + dndoe2_path = tdDnodes.getDnodeDir(2) + dnode2_data_path = os.sep.join([dndoe2_path, "data"]) + dnode2_cfg_path = os.sep.join([dndoe2_path, "cfg"]) + tdLog.info(f"dnode2_path: {dnode2_data_path}") + # stop dnode2 + tdDnodes.stoptaosd(2) + tdLog.info("stop dndoe2") + # delete dndoe2 data + cmd = f"rm -rf {dnode2_data_path}" + os.system(cmd) + # recreate the encrypt key for dnode2 + os.system(f"{os.sep.join([taosd_path, "taosd"])} -y '1234567890' -c {dnode2_cfg_path}") + tdLog.info("test case: recreate the encrypt key for dnode2 passed") + def run(self): self.create_encrypt_db_error() self.create_encrypt_db() + self.recreate_dndoe_encrypt_key() def stop(self): tdSql.close() diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 0d3ed1f8e6..5d94c2a6b1 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -10,7 +10,7 @@ # army-test # ,,y,army,./pytest.sh python3 ./test.py -f multi-level/mlevel_basic.py -N 3 -L 3 -D 2 -,,y,army,./pytest.sh python3 ./test.py -f db-encrypt/basic.py +,,y,army,./pytest.sh python3 ./test.py -f db-encrypt/basic.py -N 3 -M 3 ,,n,army,python3 ./test.py -f storage/s3/s3Basic.py -N 3 ,,y,army,./pytest.sh python3 ./test.py -f cluster/snapshot.py -N 3 -L 3 -D 2 ,,y,army,./pytest.sh python3 ./test.py -f query/function/test_func_elapsed.py From 1980866fd94c7b2e8676a45b6666a3aae50258ab Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Sat, 12 Oct 2024 17:03:40 +0800 Subject: [PATCH 11/51] test:Add a function to verify the consistency between the expected result file and the actual output file --- tests/army/frame/common.py | 48 +++++++++ tests/army/query/function/ans/pi_1.csv | 114 +++++++++++++++++++++ tests/army/query/function/in/pi.in | 41 ++++---- tests/army/query/function/test_function.py | 15 ++- tests/system-test/7-tmq/ts-4674.py | 50 ++------- 5 files changed, 201 insertions(+), 67 deletions(-) create mode 100644 tests/army/query/function/ans/pi_1.csv diff --git a/tests/army/frame/common.py b/tests/army/frame/common.py index 913e88a7ad..a91866c0e1 
100644 --- a/tests/army/frame/common.py +++ b/tests/army/frame/common.py @@ -18,6 +18,7 @@ import time import socket import json import toml +import subprocess from frame.boundary import DataBoundary import taos from frame.log import * @@ -1830,6 +1831,51 @@ class TDCom: if i == 1: self.record_history_ts = ts_value + def generate_query_result(self, inputfile, test_case): + if not os.path.exists(inputfile): + tdLog.exit(f"Input file '{inputfile}' does not exist.") + else: + self.query_result_file = f"./temp_{test_case}.result" + os.system(f"taos -f {inputfile} | grep -v 'Query OK'|grep -v 'Copyright'| grep -v 'Welcome to the TDengine Command' > {self.query_result_file} ") + return self.query_result_file + + def compare_result_files(self, file1, file2): + + try: + # 使用 subprocess.run 来执行 diff/fc 命令 + # print(file1, file2) + if platform.system().lower() != 'windows': + cmd='diff' + result = subprocess.run([cmd, "-u", "--color", file1, file2], text=True, capture_output=True) + else: + cmd='fc' + result = subprocess.run([cmd, file1, file2], text=True, capture_output=True) + # 如果输出不为空,则打印差异 + if result.stdout: + tdLog.debug(f"Differences between {file1} and {file2}") + tdLog.notice(f"\r\n{result.stdout}") + return False + else: + return True + except FileNotFoundError: + tdLog.debug("The 'diff' command is not found. Please make sure it's installed and available in your PATH.") + except Exception as e: + tdLog.debug(f"An error occurred: {e}") + + + def compare_testcase_result(self, inputfile,expected_file,test_case): + test_reulst_file = self.generate_query_result(inputfile,test_case) + + if self.compare_result_files(expected_file, test_reulst_file ): + tdLog.info("Test passed: Result files are identical.") + os.system(f"rm -f {test_reulst_file}") + else: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + tdLog.exit(f"{caller.lineno}(line:{caller.lineno}) failed: sqlfile is {inputfile}, expect_file:{expected_file} != reult_file:{test_reulst_file} ") + + tdLog.exit("Test failed: Result files are different.") + + def is_json(msg): if isinstance(msg, str): try: @@ -1864,4 +1910,6 @@ def dict2toml(in_dict: dict, file:str): with open(file, 'w') as f: toml.dump(in_dict, f) + + tdCom = TDCom() diff --git a/tests/army/query/function/ans/pi_1.csv b/tests/army/query/function/ans/pi_1.csv new file mode 100644 index 0000000000..6f9baf1a71 --- /dev/null +++ b/tests/army/query/function/ans/pi_1.csv @@ -0,0 +1,114 @@ + +taos> select pi() + pi() | +============================ + 3.141592653589793 | + +taos> select pi() + 1 + pi() + 1 | +============================ + 4.141592653589793 | + +taos> select pi() - 1 + pi() - 1 | +============================ + 2.141592653589793 | + +taos> select pi() * 2 + pi() * 2 | +============================ + 6.283185307179586 | + +taos> select pi() / 2 + pi() / 2 | +============================ + 1.570796326794897 | + +taos> select pi() from ts_4893.meters limit 5 + pi() | +============================ + 3.141592653589793 | + 3.141592653589793 | + 3.141592653589793 | + 3.141592653589793 | + 3.141592653589793 | + +taos> select pi() + 1 from ts_4893.meters limit 1 + pi() + 1 | +============================ + 4.141592653589793 | + +taos> select pi() - 1 from ts_4893.meters limit 1 + pi() - 1 | +============================ + 2.141592653589793 | + +taos> select pi() * 2 from ts_4893.meters limit 1 + pi() * 2 | +============================ + 6.283185307179586 | + +taos> select pi() / 2 from ts_4893.meters limit 1 + pi() / 2 | +============================ + 1.570796326794897 
| + +taos> select pi() + pi() from ts_4893.meters limit 1 + pi() + pi() | +============================ + 6.283185307179586 | + +taos> select pi() - pi() from ts_4893.meters limit 1 + pi() - pi() | +============================ + 0.000000000000000 | + +taos> select pi() * pi() from ts_4893.meters limit 1 + pi() * pi() | +============================ + 9.869604401089358 | + +taos> select pi() / pi() from ts_4893.meters limit 1 + pi() / pi() | +============================ + 1.000000000000000 | + +taos> select pi() + id from ts_4893.meters order by ts limit 5 + pi() + id | +============================ + 3.141592653589793 | + 4.141592653589793 | + 5.141592653589793 | + 6.141592653589793 | + 7.141592653589793 | + +taos> select abs(pi()) + abs(pi()) | +============================ + 3.141592653589793 | + +taos> select pow(pi(), 2) + pow(pi(), 2) | +============================ + 9.869604401089358 | + +taos> select sqrt(pi()) + sqrt(pi()) | +============================ + 1.772453850905516 | + +taos> select cast(pi() as int) + cast(pi() as int) | +==================== + 3 | + +taos> select pi() + pi() | +============================ + 3.141592653589793 | + +taos> select substring_index(null, '.', 2) + substring_index(null, '.', 2) | +================================ + NULL | + diff --git a/tests/army/query/function/in/pi.in b/tests/army/query/function/in/pi.in index c0ccc0b079..dc5d24b655 100644 --- a/tests/army/query/function/in/pi.in +++ b/tests/army/query/function/in/pi.in @@ -1,20 +1,21 @@ -select pi(); -select pi() + 1; -select pi() - 1; -select pi() * 2; -select pi() / 2; -select pi() from ts_4893.meters limit 5; -select pi() + 1 from ts_4893.meters limit 1; -select pi() - 1 from ts_4893.meters limit 1; -select pi() * 2 from ts_4893.meters limit 1; -select pi() / 2 from ts_4893.meters limit 1; -select pi() + pi() from ts_4893.meters limit 1; -select pi() - pi() from ts_4893.meters limit 1; -select pi() * pi() from ts_4893.meters limit 1; -select pi() / pi() from ts_4893.meters limit 1; -select pi() + id from ts_4893.meters order by ts limit 5; -select abs(pi()); -select pow(pi(), 2); -select sqrt(pi()); -select cast(pi() as int); -select pi(); +select pi() +select pi() + 1 +select pi() - 1 +select pi() * 2 +select pi() / 2 +select pi() from ts_4893.meters limit 5 +select pi() + 1 from ts_4893.meters limit 1 +select pi() - 1 from ts_4893.meters limit 1 +select pi() * 2 from ts_4893.meters limit 1 +select pi() / 2 from ts_4893.meters limit 1 +select pi() + pi() from ts_4893.meters limit 1 +select pi() - pi() from ts_4893.meters limit 1 +select pi() * pi() from ts_4893.meters limit 1 +select pi() / pi() from ts_4893.meters limit 1 +select pi() + id from ts_4893.meters order by ts limit 5 +select abs(pi()) +select pow(pi(), 2) +select sqrt(pi()) +select cast(pi() as int) +select pi() +select substring_index(null, '.', 2) diff --git a/tests/army/query/function/test_function.py b/tests/army/query/function/test_function.py index 18a0d46711..aae0cf6eee 100644 --- a/tests/army/query/function/test_function.py +++ b/tests/army/query/function/test_function.py @@ -17,14 +17,15 @@ import random import taos import frame -import frame.etool +from frame.etool import * from frame.log import * from frame.cases import * from frame.sql import * from frame.caseBase import * -from frame import * +from frame import etool +from frame.common import * class TDTestCase(TBase): updatecfgDict = { @@ -84,8 +85,16 @@ class TDTestCase(TBase): tdSql.error(err_statement) err_statement = '' + def test_normal_query_new(self, 
testCase): + # read sql from .sql file and execute + tdLog.info(f"test normal query.") + self.sqlFile = etool.curFile(__file__, f"in/{testCase}.in") + self.ansFile = etool.curFile(__file__, f"ans/{testCase}_1.csv") + + tdCom.compare_testcase_result(self.sqlFile, self.ansFile, testCase) + def test_pi(self): - self.test_normal_query("pi") + self.test_normal_query_new("pi") def test_round(self): self.test_normal_query("round") diff --git a/tests/system-test/7-tmq/ts-4674.py b/tests/system-test/7-tmq/ts-4674.py index 709debaef1..0b3dc1b077 100644 --- a/tests/system-test/7-tmq/ts-4674.py +++ b/tests/system-test/7-tmq/ts-4674.py @@ -24,45 +24,6 @@ class TDTestCase: tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor()) - #tdSql.init(conn.cursor(), logSql) # output sql.txt file - - # def consume_TS_4674_Test(self): - # - # os.system("nohup taosBenchmark -y -B 1 -t 4 -S 1000 -n 1000000 -i 1000 -v 1 -a 3 > /dev/null 2>&1 &") - # time.sleep() - # tdSql.execute(f'create topic topic_all with meta as database test') - # consumer_dict = { - # "group.id": "g1", - # "td.connect.user": "root", - # "td.connect.pass": "taosdata", - # "auto.offset.reset": "earliest", - # } - # consumer = Consumer(consumer_dict) - # - # try: - # consumer.subscribe(["topic_all"]) - # except TmqError: - # tdLog.exit(f"subscribe error") - # - # try: - # while True: - # res = consumer.poll(5) - # if not res: - # print(f"null") - # continue - # val = res.value() - # if val is None: - # print(f"null") - # continue - # cnt = 0; - # for block in val: - # cnt += len(block.fetchall()) - # - # print(f"block {cnt} rows") - # - # finally: - # consumer.close() - def get_leader(self): tdLog.debug("get leader") tdSql.query("show vnodes") @@ -74,19 +35,20 @@ class TDTestCase: def balance_vnode(self): leader_before = self.get_leader() - + tdSql.query("balance vgroup leader") while True: leader_after = -1 - tdSql.query("balance vgroup leader") + tdLog.debug("balancing vgroup leader") while True: + tdLog.debug("get new vgroup leader") leader_after = self.get_leader() if leader_after != -1 : - break; + break else: time.sleep(1) if leader_after != leader_before: tdLog.debug("leader changed") - break; + break else : time.sleep(1) @@ -115,7 +77,7 @@ class TDTestCase: except TmqError: tdLog.exit(f"subscribe error") - cnt = 0; + cnt = 0 balance = False try: while True: From e7b5c72ff23d3f75f4332c758cf06240efaac7a4 Mon Sep 17 00:00:00 2001 From: Feng Chao Date: Sat, 12 Oct 2024 18:13:11 +0800 Subject: [PATCH 12/51] Update basic.py --- tests/army/db-encrypt/basic.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/army/db-encrypt/basic.py b/tests/army/db-encrypt/basic.py index ea648f1b8f..a93b85f0da 100644 --- a/tests/army/db-encrypt/basic.py +++ b/tests/army/db-encrypt/basic.py @@ -76,7 +76,7 @@ class TDTestCase(TBase): cmd = f"rm -rf {dnode2_data_path}" os.system(cmd) # recreate the encrypt key for dnode2 - os.system(f"{os.sep.join([taosd_path, "taosd"])} -y '1234567890' -c {dnode2_cfg_path}") + os.system(f"{os.sep.join([taosd_path, 'taosd'])} -y '1234567890' -c {dnode2_cfg_path}") tdLog.info("test case: recreate the encrypt key for dnode2 passed") def run(self): From 418319ef29a134003adae60091e8d7018dd38f2e Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Mon, 14 Oct 2024 14:23:25 +0800 Subject: [PATCH 13/51] tetst:modify comments --- tests/army/frame/common.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/army/frame/common.py b/tests/army/frame/common.py index a91866c0e1..b816095817 
100644 --- a/tests/army/frame/common.py +++ b/tests/army/frame/common.py @@ -1842,7 +1842,7 @@ class TDCom: def compare_result_files(self, file1, file2): try: - # 使用 subprocess.run 来执行 diff/fc 命令 + # use subprocess.run to execute diff/fc commands # print(file1, file2) if platform.system().lower() != 'windows': cmd='diff' @@ -1850,7 +1850,7 @@ class TDCom: else: cmd='fc' result = subprocess.run([cmd, file1, file2], text=True, capture_output=True) - # 如果输出不为空,则打印差异 + # if result is not empty, print the differences and files name. Otherwise, the files are identical. if result.stdout: tdLog.debug(f"Differences between {file1} and {file2}") tdLog.notice(f"\r\n{result.stdout}") From 8663779def87d90d1702924f5d1c81a34633e73c Mon Sep 17 00:00:00 2001 From: dmchen Date: Tue, 15 Oct 2024 03:56:47 +0000 Subject: [PATCH 14/51] fix/wal-load-file-set --- source/libs/wal/src/walMeta.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/source/libs/wal/src/walMeta.c b/source/libs/wal/src/walMeta.c index 9ade5e5638..cb9f6e2dfe 100644 --- a/source/libs/wal/src/walMeta.c +++ b/source/libs/wal/src/walMeta.c @@ -253,6 +253,7 @@ static int32_t walRebuildFileInfoSet(SArray* metaLogList, SArray* actualLogList) int j = 0; // both of the lists in asc order + /* for (int i = 0; i < actualFileNum; i++) { SWalFileInfo* pLogInfo = taosArrayGet(actualLogList, i); while (j < metaFileNum) { @@ -268,6 +269,7 @@ static int32_t walRebuildFileInfoSet(SArray* metaLogList, SArray* actualLogList) } } } + */ taosArrayClear(metaLogList); @@ -400,6 +402,17 @@ static int32_t walTrimIdxFile(SWal* pWal, int32_t fileIdx) { TAOS_RETURN(TSDB_CODE_SUCCESS); } +void printFileSet(SArray* fileSet) { + int32_t sz = taosArrayGetSize(fileSet); + for (int32_t i = 0; i < sz; i++) { + SWalFileInfo* pFileInfo = taosArrayGet(fileSet, i); + wInfo("firstVer:%" PRId64 ", lastVer:%" PRId64 ", fileSize:%" PRId64 ", syncedOffset:%" PRId64 ", createTs:%" PRId64 + ", closeTs:%" PRId64, + pFileInfo->firstVer, pFileInfo->lastVer, pFileInfo->fileSize, pFileInfo->syncedOffset, pFileInfo->createTs, + pFileInfo->closeTs); + } +} + int32_t walCheckAndRepairMeta(SWal* pWal) { // load log files, get first/snapshot/last version info int32_t code = 0; @@ -460,6 +473,9 @@ int32_t walCheckAndRepairMeta(SWal* pWal) { taosArraySort(actualLog, compareWalFileInfo); + wInfo("vgId:%d, wal path:%s, actual log file num:%" PRId64, pWal->cfg.vgId, pWal->path, taosArrayGetSize(actualLog)); + printFileSet(actualLog); + int metaFileNum = taosArrayGetSize(pWal->fileInfoSet); int actualFileNum = taosArrayGetSize(actualLog); int64_t firstVerPrev = pWal->vers.firstVer; @@ -474,6 +490,10 @@ int32_t walCheckAndRepairMeta(SWal* pWal) { TAOS_RETURN(code); } + wInfo("vgId:%d, wal path:%s, meta log file num:%" PRId64, pWal->cfg.vgId, pWal->path, + taosArrayGetSize(pWal->fileInfoSet)); + printFileSet(pWal->fileInfoSet); + int32_t sz = taosArrayGetSize(pWal->fileInfoSet); // scan and determine the lastVer @@ -1124,6 +1144,10 @@ int32_t walLoadMeta(SWal* pWal) { (void)taosCloseFile(&pFile); taosMemoryFree(buf); + wInfo("vgId:%d, load meta file: %s, fileInfoSet size:%" PRId64, pWal->cfg.vgId, fnameStr, + taosArrayGetSize(pWal->fileInfoSet)); + printFileSet(pWal->fileInfoSet); + TAOS_RETURN(code); } From ca7f490e6d6fbf07aa400c1c37d292180985924c Mon Sep 17 00:00:00 2001 From: xiao-77 Date: Tue, 15 Oct 2024 16:53:15 +0800 Subject: [PATCH 15/51] fix invaild snapshotVer while repair wal meta file --- include/libs/wal/wal.h | 1 + source/dnode/mnode/impl/src/mndMain.c | 1 + 
 source/dnode/vnode/src/vnd/vnodeCfg.c    | 1 +
 source/dnode/vnode/src/vnd/vnodeCommit.c | 1 +
 source/libs/wal/src/walMeta.c            | 6 ++++++
 5 files changed, 10 insertions(+)

diff --git a/include/libs/wal/wal.h b/include/libs/wal/wal.h
index 74ab0bf484..f95b3f20ca 100644
--- a/include/libs/wal/wal.h
+++ b/include/libs/wal/wal.h
@@ -50,6 +50,7 @@ typedef struct {
   int32_t rollPeriod;  // secs
   int64_t retentionSize;
   int64_t segSize;
+  int64_t committed;
   EWalType level;  // wal level
   int32_t encryptAlgorithm;
   char    encryptKey[ENCRYPT_KEY_LEN + 1];
diff --git a/source/dnode/mnode/impl/src/mndMain.c b/source/dnode/mnode/impl/src/mndMain.c
index bee971b966..685ad2b7a5 100644
--- a/source/dnode/mnode/impl/src/mndMain.c
+++ b/source/dnode/mnode/impl/src/mndMain.c
@@ -515,6 +515,7 @@ static int32_t mndInitWal(SMnode *pMnode) {
       .fsyncPeriod = 0,
       .rollPeriod = -1,
       .segSize = -1,
+      .committed = -1,
       .retentionPeriod = 0,
       .retentionSize = 0,
       .level = TAOS_WAL_FSYNC,
diff --git a/source/dnode/vnode/src/vnd/vnodeCfg.c b/source/dnode/vnode/src/vnd/vnodeCfg.c
index d3acea4766..7c789e84ae 100644
--- a/source/dnode/vnode/src/vnd/vnodeCfg.c
+++ b/source/dnode/vnode/src/vnd/vnodeCfg.c
@@ -45,6 +45,7 @@ const SVnodeCfg vnodeCfgDefault = {.vgId = -1,
                                    .retentionPeriod = -1,
                                    .rollPeriod = 0,
                                    .segSize = 0,
+                                   .committed = 0,
                                    .retentionSize = -1,
                                    .level = TAOS_WAL_WRITE,
                                    .clearFiles = 0,
diff --git a/source/dnode/vnode/src/vnd/vnodeCommit.c b/source/dnode/vnode/src/vnd/vnodeCommit.c
index 4a4d305f25..dae2b3a5ec 100644
--- a/source/dnode/vnode/src/vnd/vnodeCommit.c
+++ b/source/dnode/vnode/src/vnd/vnodeCommit.c
@@ -257,6 +257,7 @@ int vnodeLoadInfo(const char *dir, SVnodeInfo *pInfo) {
   code = vnodeDecodeInfo(pData, pInfo);
   TSDB_CHECK_CODE(code, lino, _exit);
 
+  pInfo->config.walCfg.committed = pInfo->state.committed;
 _exit:
   if (code) {
     if (pFile) {
diff --git a/source/libs/wal/src/walMeta.c b/source/libs/wal/src/walMeta.c
index 9ade5e5638..8649581d5d 100644
--- a/source/libs/wal/src/walMeta.c
+++ b/source/libs/wal/src/walMeta.c
@@ -282,6 +282,12 @@ static int32_t walRebuildFileInfoSet(SArray* metaLogList, SArray* actualLogList)
 }
 
 static void walAlignVersions(SWal* pWal) {
+  if (pWal->cfg.committed > 0 && pWal->cfg.committed != pWal->vers.snapshotVer) {
+    wWarn("vgId:%d, snapshotVer:%" PRId64 " in wal is different from committed:%" PRId64
+          " in vnode/mnode, align with it.",
+          pWal->cfg.vgId, pWal->vers.snapshotVer, pWal->cfg.committed);
+    pWal->vers.snapshotVer = pWal->cfg.committed;
+  }
   if (pWal->vers.firstVer > pWal->vers.snapshotVer + 1) {
     wWarn("vgId:%d, firstVer:%" PRId64 " is larger than snapshotVer:%" PRId64 " + 1.
align with it.", pWal->cfg.vgId, pWal->vers.firstVer, pWal->vers.snapshotVer); From 5c3283a6de8485fba5200f63edaa957c172c54b8 Mon Sep 17 00:00:00 2001 From: xsren <285808407@qq.com> Date: Tue, 15 Oct 2024 17:00:25 +0800 Subject: [PATCH 16/51] fix: not condition --- source/libs/scalar/src/filter.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/source/libs/scalar/src/filter.c b/source/libs/scalar/src/filter.c index e07ef69990..802bec00f8 100644 --- a/source/libs/scalar/src/filter.c +++ b/source/libs/scalar/src/filter.c @@ -4679,6 +4679,9 @@ EDealRes fltReviseRewriter(SNode **pNode, void *pContext) { cell = cell->pNext; } + if (node->condType == LOGIC_COND_TYPE_NOT) { + stat->scalarMode = true; + } return DEAL_RES_CONTINUE; } From e4373116b2e566ff0834b3b65f76abc1ab61abf3 Mon Sep 17 00:00:00 2001 From: dmchen Date: Tue, 15 Oct 2024 09:04:20 +0000 Subject: [PATCH 17/51] fix/wal-load-file-set --- source/libs/wal/src/walMeta.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/source/libs/wal/src/walMeta.c b/source/libs/wal/src/walMeta.c index cb9f6e2dfe..f84496eee2 100644 --- a/source/libs/wal/src/walMeta.c +++ b/source/libs/wal/src/walMeta.c @@ -253,7 +253,6 @@ static int32_t walRebuildFileInfoSet(SArray* metaLogList, SArray* actualLogList) int j = 0; // both of the lists in asc order - /* for (int i = 0; i < actualFileNum; i++) { SWalFileInfo* pLogInfo = taosArrayGet(actualLogList, i); while (j < metaFileNum) { @@ -269,7 +268,6 @@ static int32_t walRebuildFileInfoSet(SArray* metaLogList, SArray* actualLogList) } } } - */ taosArrayClear(metaLogList); @@ -553,6 +551,7 @@ int32_t walCheckAndRepairMeta(SWal* pWal) { // repair ts of files TAOS_CHECK_RETURN(walRepairLogFileTs(pWal, &updateMeta)); + printFileSet(pWal->fileInfoSet); // update meta file if (updateMeta) { TAOS_CHECK_RETURN(walSaveMeta(pWal)); From 47d39c4ab8d5295750992b7373f57bb4943ec4fd Mon Sep 17 00:00:00 2001 From: xiao-77 Date: Tue, 15 Oct 2024 17:23:28 +0800 Subject: [PATCH 18/51] fix wal test in ci --- source/libs/wal/test/walMetaTest.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/source/libs/wal/test/walMetaTest.cpp b/source/libs/wal/test/walMetaTest.cpp index 8bd4de0a89..a0285f1363 100644 --- a/source/libs/wal/test/walMetaTest.cpp +++ b/source/libs/wal/test/walMetaTest.cpp @@ -127,6 +127,7 @@ class WalRetentionEnv : public ::testing::Test { SWalCfg cfg; cfg.rollPeriod = -1; cfg.segSize = -1; + cfg.committed =-1; cfg.retentionPeriod = -1; cfg.retentionSize = 0; cfg.rollPeriod = 0; From 39c429182e04a034fbd1f5dd8a95ff88056a79b2 Mon Sep 17 00:00:00 2001 From: dmchen Date: Tue, 15 Oct 2024 10:46:25 +0000 Subject: [PATCH 19/51] fix/wal-load-file-set-fix-case --- source/libs/wal/src/walMeta.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/source/libs/wal/src/walMeta.c b/source/libs/wal/src/walMeta.c index f84496eee2..17830ff200 100644 --- a/source/libs/wal/src/walMeta.c +++ b/source/libs/wal/src/walMeta.c @@ -471,7 +471,8 @@ int32_t walCheckAndRepairMeta(SWal* pWal) { taosArraySort(actualLog, compareWalFileInfo); - wInfo("vgId:%d, wal path:%s, actual log file num:%" PRId64, pWal->cfg.vgId, pWal->path, taosArrayGetSize(actualLog)); + wInfo("vgId:%d, wal path:%s, actual log file num:%d", pWal->cfg.vgId, pWal->path, + (int32_t)taosArrayGetSize(actualLog)); printFileSet(actualLog); int metaFileNum = taosArrayGetSize(pWal->fileInfoSet); @@ -488,8 +489,8 @@ int32_t walCheckAndRepairMeta(SWal* pWal) { TAOS_RETURN(code); } - wInfo("vgId:%d, wal path:%s, meta log file num:%" PRId64, 
pWal->cfg.vgId, pWal->path, - taosArrayGetSize(pWal->fileInfoSet)); + wInfo("vgId:%d, wal path:%s, meta log file num:%d", pWal->cfg.vgId, pWal->path, + (int32_t)taosArrayGetSize(pWal->fileInfoSet)); printFileSet(pWal->fileInfoSet); int32_t sz = taosArrayGetSize(pWal->fileInfoSet); @@ -1143,8 +1144,8 @@ int32_t walLoadMeta(SWal* pWal) { (void)taosCloseFile(&pFile); taosMemoryFree(buf); - wInfo("vgId:%d, load meta file: %s, fileInfoSet size:%" PRId64, pWal->cfg.vgId, fnameStr, - taosArrayGetSize(pWal->fileInfoSet)); + wInfo("vgId:%d, load meta file: %s, fileInfoSet size:%d", pWal->cfg.vgId, fnameStr, + (int32_t)taosArrayGetSize(pWal->fileInfoSet)); printFileSet(pWal->fileInfoSet); TAOS_RETURN(code); From 200ca2cb10cca5c10f189310e54ee35791f91155 Mon Sep 17 00:00:00 2001 From: xsren <285808407@qq.com> Date: Tue, 15 Oct 2024 19:40:04 +0800 Subject: [PATCH 20/51] not test case --- tests/system-test/2-query/not.py | 133 +++++++++++++++++++++++++++++++ 1 file changed, 133 insertions(+) create mode 100644 tests/system-test/2-query/not.py diff --git a/tests/system-test/2-query/not.py b/tests/system-test/2-query/not.py new file mode 100644 index 0000000000..a0bd1d4e1d --- /dev/null +++ b/tests/system-test/2-query/not.py @@ -0,0 +1,133 @@ +from wsgiref.headers import tspecials +from util.log import * +from util.cases import * +from util.sql import * +from util.common import tdCom +import numpy as np + + +class TDTestCase: + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + self.dbname = "db" + self.rowNum = 10 + self.ts = 1537146000000 + + def notConditionTest(self): + dbname = "nottest" + stbname = "st1" + + tdsql = tdCom.newTdSql() + tdsql.execute(f"create database if not exists {dbname}") + + stype = ["INT", "INT UNSIGNED", "BIGINT", "BIGINT UNSIGNED", "DOUBLE", "FLOAT", "SMALLINT", "SMALLINT UNSIGNED", "TINYINT", "TINYINT UNSIGNED"] + + for type_name in stype: + tdsql.execute(f"drop table if exists {dbname}.{stbname}") + tdsql.execute(f"create table if not exists {dbname}.{stbname} (ts timestamp, v1 {type_name}) tags(t1 {type_name})") + tdsql.execute(f"insert into {dbname}.sub_1 using {dbname}.{stbname} tags(1) values({self.ts}, 10)") + tdsql.execute(f"insert into {dbname}.sub_2 using {dbname}.{stbname} tags(2) values({self.ts + 1000}, 20)") + tdsql.execute(f"insert into {dbname}.sub_3 using {dbname}.{stbname} tags(3) values({self.ts + 2000}, 30)") + + # Test case 1: NOT IN + tdsql.query(f"select t1, * from {dbname}.{stbname} where t1 not in (1, 2) order by t1") + tdsql.checkRows(1) + tdsql.checkData(0, 0, 3) + + # Test case 2: NOT BETWEEN + tdsql.query(f"select * from {dbname}.{stbname} where v1 not between 10 and 20 order by t1") + tdsql.checkRows(1) + tdsql.checkData(0, 1, 30) + tdsql.query(f"select * from {dbname}.{stbname} where not(v1 not between 10 and 20) order by t1") + tdsql.checkRows(2) + + # Test case 4: NOT EQUAL + tdsql.query(f"select * from {dbname}.{stbname} where v1 != 20 order by t1") + tdsql.checkRows(2) + tdsql.checkData(0, 1, 10) + tdsql.checkData(1, 1, 30) + + # Test case 8: NOT (v1 < 20 OR v1 > 30) + tdsql.query(f"select * from {dbname}.{stbname} where not (v1 < 20 or v1 > 30) order by t1") + tdsql.checkRows(2) + tdsql.checkData(0, 1, 20) + tdsql.checkData(1, 1, 30) + + tdsql.query(f"select * from {dbname}.{stbname} where not (v1 < 20 or v1 >= 30) order by t1") + tdsql.checkRows(1) + + # Test case 9: NOT (t1 != 1) + tdsql.query(f"select * from {dbname}.{stbname} 
where not (t1 != 1) order by t1") + tdsql.checkRows(1) + tdsql.checkData(0, 1, 10) + + tdsql.query(f"select * from {dbname}.{stbname} where (t1 != 1) or not (v1 == 20) order by t1") + tdsql.checkRows(3) + tdsql.checkData(0, 1, 10) + tdsql.checkData(1, 1, 20) + tdsql.checkData(2, 1, 30) + + tdsql.query(f"select * from {dbname}.{stbname} where not((t1 != 1) or not (v1 == 20)) order by t1") + tdsql.checkRows(0) + + tdsql.query(f"select * from {dbname}.{stbname} where not (t1 != 1) and not (v1 != 20) order by t1") + tdsql.checkRows(0) + + tdsql.query(f"select * from {dbname}.{stbname} where not(not (t1 != 1) and not (v1 != 20)) order by t1") + tdsql.checkRows(3) + + tdsql.query(f"select * from {dbname}.{stbname} where not (t1 != 1) and not (v1 != 10) order by t1") + tdsql.checkRows(1) + tdsql.checkData(0, 1, 10) + + tdsql.query(f"select * from {dbname}.{stbname} where not (t1 > 2) order by t1") + tdsql.checkRows(2) + tdsql.checkData(0, 1, 10) + tdsql.checkData(1, 1, 20) + + tdsql.query(f"select * from {dbname}.{stbname} where not (t1 == 2) order by t1") + tdsql.checkRows(2) + tdsql.checkData(0, 1, 10) + tdsql.checkData(1, 1, 30) + + tdsql.query(f"select * from {dbname}.{stbname} where not (v1 > 10 and v1 < 30) order by t1") + tdsql.checkRows(2) + tdsql.checkData(0, 1, 10) + tdsql.checkData(1, 1, 30) + + # tdsql.query(f"select * from {dbname}.{stbname} where not(not (v1 < 20 or v1 > 30)) order by t1") + # tdsql.checkRows(1) + # + # tdsql.checkData(0, 1, 20) + # tdsql.query(f"select * from {dbname}.{stbname} where not(not (v1 < 20 or v1 >= 30)) order by t1") + # tdsql.checkRows(2) + # + # tdsql.query(f"select * from {dbname}.{stbname} where not(not (t1 != 1)) order by t1") + # tdsql.checkRows(2) + # + # tdsql.query(f"select * from {dbname}.{stbname} where not(not (t1 > 2)) order by t1") + # tdsql.checkRows(1) + # + # tdsql.query(f"select * from {dbname}.{stbname} where not(not (t1 == 2)) order by t1") + # tdsql.checkRows(1) + # + # tdsql.query(f"select * from {dbname}.{stbname} where not(not (v1 > 10 and v1 < 30)) order by t1") + # tdsql.checkRows(1) + + def run(self): + dbname = "db" + tdSql.prepare() + + self.notConditionTest() + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) + +tdCases.addLinux(__file__, TDTestCase()) From 2b83a20c69008dcbc532e88215dc96a4390e9caa Mon Sep 17 00:00:00 2001 From: Jing Sima Date: Mon, 14 Oct 2024 17:19:29 +0800 Subject: [PATCH 21/51] Revert "fix:[TD-32334] Generate correct time window when using interp with fill next and linear." This reverts commit 77e63d0922f6d230a314d28863744185faab8aa5. 
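The behavior this revert restores can be summarized as: extrapolating rows after the last observed timestamp is only possible for fill modes whose value does not depend on a future sample. A simplified sketch of that rule, not the operator's actual control flow (the real code gates genInterpAfterDataBlock on the fill type, as the diff below shows):

#include <stdbool.h>

typedef enum { DEMO_FILL_NULL, DEMO_FILL_VALUE, DEMO_FILL_PREV, DEMO_FILL_NEXT, DEMO_FILL_LINEAR } EDemoFill;

/* NEXT and LINEAR both need a sample at or after the target timestamp,
 * which cannot exist past the end of the data, so no trailing rows are
 * produced for them; PREV can carry the last seen value forward, and
 * NULL/VALUE fills do not depend on the data at all */
static bool demoCanFillAfterLastRow(EDemoFill fill, bool hasPrevValue) {
  switch (fill) {
    case DEMO_FILL_NEXT:
    case DEMO_FILL_LINEAR: return false;
    case DEMO_FILL_PREV:   return hasPrevValue;
    case DEMO_FILL_NULL:
    case DEMO_FILL_VALUE:  return true;
  }
  return false;
}

This is why the updated tests below expect fewer result rows for FILL(NEXT) and FILL(LINEAR) whenever the query range extends past the last sample.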
--- source/libs/executor/src/timesliceoperator.c | 20 ++-- tests/system-test/2-query/interp.py | 120 +++++++------------ 2 files changed, 50 insertions(+), 90 deletions(-) diff --git a/source/libs/executor/src/timesliceoperator.c b/source/libs/executor/src/timesliceoperator.c index 70bf26405e..2ea300ace8 100644 --- a/source/libs/executor/src/timesliceoperator.c +++ b/source/libs/executor/src/timesliceoperator.c @@ -278,7 +278,7 @@ static bool checkNullRow(SExprSupp* pExprSup, SSDataBlock* pSrcBlock, int32_t in } static bool genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp* pExprSup, SSDataBlock* pResBlock, - SSDataBlock* pSrcBlock, int32_t index, bool beforeTs, SExecTaskInfo* pTaskInfo, bool genAfterBlock) { + SSDataBlock* pSrcBlock, int32_t index, bool beforeTs, SExecTaskInfo* pTaskInfo) { int32_t code = TSDB_CODE_SUCCESS; int32_t lino = 0; int32_t rows = pResBlock->info.rows; @@ -427,7 +427,7 @@ static bool genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp break; } - if (start.key == INT64_MIN || end.key == INT64_MIN || genAfterBlock) { + if (start.key == INT64_MIN || end.key == INT64_MIN) { colDataSetNULL(pDst, rows); break; } @@ -463,13 +463,8 @@ static bool genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp break; } - if (genAfterBlock && rows == 0) { - hasInterp = false; - break; - } - SGroupKeys* pkey = taosArrayGet(pSliceInfo->pNextRow, srcSlot); - if (pkey->isNull == false && !genAfterBlock) { + if (pkey->isNull == false) { code = colDataSetVal(pDst, rows, pkey->pData, false); QUERY_CHECK_CODE(code, lino, _end); } else { @@ -841,7 +836,7 @@ static void doTimesliceImpl(SOperatorInfo* pOperator, STimeSliceOperatorInfo* pS int64_t nextTs = *(int64_t*)colDataGetData(pTsCol, i + 1); if (nextTs > pSliceInfo->current) { while (pSliceInfo->current < nextTs && pSliceInfo->current <= pSliceInfo->win.ekey) { - if (!genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock, pBlock, i, false, pTaskInfo, false) && + if (!genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock, pBlock, i, false, pTaskInfo) && pSliceInfo->fillType == TSDB_FILL_LINEAR) { break; } else { @@ -869,7 +864,7 @@ static void doTimesliceImpl(SOperatorInfo* pOperator, STimeSliceOperatorInfo* pS doKeepLinearInfo(pSliceInfo, pBlock, i); while (pSliceInfo->current < ts && pSliceInfo->current <= pSliceInfo->win.ekey) { - if (!genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock, pBlock, i, true, pTaskInfo, false) && + if (!genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock, pBlock, i, true, pTaskInfo) && pSliceInfo->fillType == TSDB_FILL_LINEAR) { break; } else { @@ -914,12 +909,13 @@ static void genInterpAfterDataBlock(STimeSliceOperatorInfo* pSliceInfo, SOperato SSDataBlock* pResBlock = pSliceInfo->pRes; SInterval* pInterval = &pSliceInfo->interval; - if (pSliceInfo->pPrevGroupKey == NULL) { + if (pSliceInfo->fillType == TSDB_FILL_NEXT || pSliceInfo->fillType == TSDB_FILL_LINEAR || + pSliceInfo->pPrevGroupKey == NULL) { return; } while (pSliceInfo->current <= pSliceInfo->win.ekey) { - (void)genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock, NULL, index, false, pOperator->pTaskInfo, true); + (void)genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock, NULL, index, false, pOperator->pTaskInfo); pSliceInfo->current = taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision); } diff --git a/tests/system-test/2-query/interp.py 
b/tests/system-test/2-query/interp.py index 3cdf52725a..bcfc389d7b 100644 --- a/tests/system-test/2-query/interp.py +++ b/tests/system-test/2-query/interp.py @@ -907,7 +907,7 @@ class TDTestCase: ## {. . .} tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(next)") - tdSql.checkRows(13) + tdSql.checkRows(12) tdSql.checkData(0, 0, 5) tdSql.checkData(1, 0, 5) tdSql.checkData(2, 0, 10) @@ -920,7 +920,6 @@ class TDTestCase: tdSql.checkData(9, 0, 15) tdSql.checkData(10, 0, 15) tdSql.checkData(11, 0, 15) - tdSql.checkData(12, 0, None) ## {} ... tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:01', '2020-02-01 00:00:04') every(1s) fill(next)") @@ -958,12 +957,10 @@ class TDTestCase: ## ..{.} tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:13', '2020-02-01 00:00:17') every(1s) fill(next)") - tdSql.checkRows(5) + tdSql.checkRows(3) tdSql.checkData(0, 0, 15) tdSql.checkData(1, 0, 15) tdSql.checkData(2, 0, 15) - tdSql.checkData(3, 0, None) - tdSql.checkData(4, 0, None) ## ... {} tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(next)") @@ -1275,7 +1272,7 @@ class TDTestCase: tdSql.checkData(8, 1, True) tdSql.query(f"select _irowts,_isfilled,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(next)") - tdSql.checkRows(13) + tdSql.checkRows(12) tdSql.checkCols(3) tdSql.checkData(0, 0, '2020-02-01 00:00:04.000') @@ -1290,7 +1287,6 @@ class TDTestCase: tdSql.checkData(9, 0, '2020-02-01 00:00:13.000') tdSql.checkData(10, 0, '2020-02-01 00:00:14.000') tdSql.checkData(11, 0, '2020-02-01 00:00:15.000') - tdSql.checkData(12, 0, '2020-02-01 00:00:16.000') tdSql.checkData(0, 1, True) tdSql.checkData(1, 1, False) @@ -1304,7 +1300,6 @@ class TDTestCase: tdSql.checkData(9, 1, True) tdSql.checkData(10, 1, True) tdSql.checkData(11, 1, False) - tdSql.checkData(12, 1, True) tdSql.query(f"select _irowts,_isfilled,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:05', '2020-02-01 00:00:15') every(2s) fill(next)") tdSql.checkRows(6) @@ -1682,13 +1677,9 @@ class TDTestCase: ## | . 
| { | .} | tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(next)") - tdSql.checkRows(6) + tdSql.checkRows(2) tdSql.checkData(0, 0, 15) tdSql.checkData(1, 0, 15) - tdSql.checkData(2, 0, None) - tdSql.checkData(3, 0, None) - tdSql.checkData(4, 0, None) - tdSql.checkData(5, 0, None) # test fill linear @@ -2741,7 +2732,7 @@ class TDTestCase: tdSql.checkData(4, i, 15) tdSql.query(f"select interp(c0),interp(c1),interp(c2),interp(c3) from {dbname}.{tbname} range('2020-02-09 00:00:05', '2020-02-13 00:00:05') every(1d) fill(next)") - tdSql.checkRows(5) + tdSql.checkRows(3) tdSql.checkCols(4) for i in range (tdSql.queryCols): @@ -2837,7 +2828,7 @@ class TDTestCase: # test fill next tdSql.query(f"select _irowts,_isfilled,interp(c0) from {dbname}.{tbname2} range('2020-02-02 00:00:00', '2020-02-02 00:00:18') every(1s) fill(next)") - tdSql.checkRows(19) + tdSql.checkRows(18) tdSql.checkCols(3) tdSql.checkData(0, 0, '2020-02-02 00:00:00.000') @@ -2860,7 +2851,6 @@ class TDTestCase: tdSql.checkData(15, 2, None) tdSql.checkData(16, 2, None) tdSql.checkData(17, 2, None) - tdSql.checkData(18, 2, None) tdSql.checkData(17, 0, '2020-02-02 00:00:17.000') @@ -3091,7 +3081,7 @@ class TDTestCase: # test fill linear tdSql.query(f"select _irowts,_isfilled,interp(c0) from {dbname}.{tbname2} range('2020-02-02 00:00:00', '2020-02-02 00:00:18') every(1s) fill(linear)") - tdSql.checkRows(18) + tdSql.checkRows(17) tdSql.checkCols(3) tdSql.checkData(0, 0, '2020-02-02 00:00:01.000') @@ -3113,9 +3103,8 @@ class TDTestCase: tdSql.checkData(14, 2, None) tdSql.checkData(15, 2, None) tdSql.checkData(16, 2, None) - tdSql.checkData(17, 2, None) - tdSql.checkData(17, 0, '2020-02-02 00:00:18.000') + tdSql.checkData(16, 0, '2020-02-02 00:00:17.000') tdLog.printNoPrefix("==========step13:test error cases") @@ -3231,7 +3220,7 @@ class TDTestCase: tdSql.checkData(17, 1, True) tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname} range('2020-02-01 00:00:00', '2020-02-01 00:00:18') every(1s) fill(next)") - tdSql.checkRows(19) + tdSql.checkRows(18) tdSql.checkData(0, 0, '2020-02-01 00:00:00.000') tdSql.checkData(0, 1, True) @@ -3254,12 +3243,9 @@ class TDTestCase: tdSql.checkData(15, 2, 15) tdSql.checkData(16, 2, 17) tdSql.checkData(17, 2, 17) - tdSql.checkData(18, 2, None) tdSql.checkData(17, 0, '2020-02-01 00:00:17.000') tdSql.checkData(17, 1, False) - tdSql.checkData(18, 0, '2020-02-01 00:00:18.000') - tdSql.checkData(18, 1, True) tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname} range('2020-02-01 00:00:00', '2020-02-01 00:00:18') every(1s) fill(linear)") tdSql.checkRows(17) @@ -3376,24 +3362,24 @@ class TDTestCase: tdSql.query(f"select tbname, _irowts, _isfilled, interp(c0) from {dbname}.{stbname} partition by tbname range('2020-02-01 00:00:00', '2020-02-01 00:00:18') every(1s) fill(next)") - tdSql.checkRows(57) - for i in range(0, 19): + tdSql.checkRows(48) + for i in range(0, 14): tdSql.checkData(i, 0, 'ctb1') - for i in range(19, 38): + for i in range(14, 30): tdSql.checkData(i, 0, 'ctb2') - for i in range(38, 57): + for i in range(30, 48): tdSql.checkData(i, 0, 'ctb3') tdSql.checkData(0, 1, '2020-02-01 00:00:00.000') - tdSql.checkData(18, 1, '2020-02-01 00:00:18.000') + tdSql.checkData(13, 1, '2020-02-01 00:00:13.000') - tdSql.checkData(19, 1, '2020-02-01 00:00:00.000') - tdSql.checkData(37, 1, '2020-02-01 00:00:18.000') + tdSql.checkData(14, 1, '2020-02-01 00:00:00.000') + tdSql.checkData(29, 1, '2020-02-01 
00:00:15.000') - tdSql.checkData(38, 1, '2020-02-01 00:00:00.000') - tdSql.checkData(56, 1, '2020-02-01 00:00:18.000') + tdSql.checkData(30, 1, '2020-02-01 00:00:00.000') + tdSql.checkData(47, 1, '2020-02-01 00:00:17.000') for i in range(0, 2): tdSql.checkData(i, 3, 1) @@ -3404,33 +3390,24 @@ class TDTestCase: for i in range(8, 14): tdSql.checkData(i, 3, 13) - for i in range(14, 19): - tdSql.checkData(i, 3, None) - - for i in range(19, 23): + for i in range(14, 18): tdSql.checkData(i, 3, 3) - for i in range(23, 29): + for i in range(18, 24): tdSql.checkData(i, 3, 9) - for i in range(29, 35): + for i in range(24, 30): tdSql.checkData(i, 3, 15) - for i in range(35, 38): - tdSql.checkData(i, 3, None) - - for i in range(38, 44): + for i in range(30, 36): tdSql.checkData(i, 3, 5) - for i in range(44, 50): + for i in range(36, 42): tdSql.checkData(i, 3, 11) - for i in range(50, 56): + for i in range(42, 48): tdSql.checkData(i, 3, 17) - for i in range(56, 57): - tdSql.checkData(i, 3, None) - tdSql.query(f"select tbname, _irowts, _isfilled, interp(c0) from {dbname}.{stbname} partition by tbname range('2020-02-01 00:00:00', '2020-02-01 00:00:18') every(1s) fill(linear)") tdSql.checkRows(39) @@ -3473,7 +3450,7 @@ class TDTestCase: tdSql.checkRows(90) tdSql.query(f"select c0, _irowts, _isfilled, interp(c0) from {dbname}.{stbname} partition by c0 range('2020-02-01 00:00:00', '2020-02-01 00:00:18') every(1s) fill(next)") - tdSql.checkRows(171) + tdSql.checkRows(90) tdSql.query(f"select c0, _irowts, _isfilled, interp(c0) from {dbname}.{stbname} partition by c0 range('2020-02-01 00:00:00', '2020-02-01 00:00:18') every(1s) fill(linear)") tdSql.checkRows(9) @@ -3490,7 +3467,7 @@ class TDTestCase: tdSql.checkRows(48) tdSql.query(f"select t1, _irowts, _isfilled, interp(c0) from {dbname}.{stbname} partition by t1 range('2020-02-01 00:00:00', '2020-02-01 00:00:18') every(1s) fill(next)") - tdSql.checkRows(57) + tdSql.checkRows(48) tdSql.query(f"select t1, _irowts, _isfilled, interp(c0) from {dbname}.{stbname} partition by t1 range('2020-02-01 00:00:00', '2020-02-01 00:00:18') every(1s) fill(linear)") tdSql.checkRows(39) @@ -4386,7 +4363,7 @@ class TDTestCase: tdSql.query(f"select _irowts, _isfilled, interp(c0, 1) from {dbname}.{tbname_null} range('2020-02-02 00:00:01', '2020-02-02 00:00:11') every(1s) fill(next)") - tdSql.checkRows(11) + tdSql.checkRows(9) tdSql.checkData(0, 1, False) tdSql.checkData(1, 1, True) tdSql.checkData(2, 1, False) @@ -4396,8 +4373,6 @@ class TDTestCase: tdSql.checkData(6, 1, True) tdSql.checkData(7, 1, False) tdSql.checkData(8, 1, False) - tdSql.checkData(9, 1, True) - tdSql.checkData(10, 1, True) tdSql.checkData(0, 2, 1) tdSql.checkData(1, 2, 3) @@ -4408,13 +4383,11 @@ class TDTestCase: tdSql.checkData(6, 2, 8) tdSql.checkData(7, 2, 8) tdSql.checkData(8, 2, 9) - tdSql.checkData(9, 2, None) - tdSql.checkData(10, 2, None) tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_null} where c0 is not null range('2020-02-02 00:00:01', '2020-02-02 00:00:11') every(1s) fill(next)") - tdSql.checkRows(11) + tdSql.checkRows(9) tdSql.checkData(0, 1, False) tdSql.checkData(1, 1, True) tdSql.checkData(2, 1, False) @@ -4424,9 +4397,6 @@ class TDTestCase: tdSql.checkData(6, 1, True) tdSql.checkData(7, 1, False) tdSql.checkData(8, 1, False) - tdSql.checkData(9, 1, True) - tdSql.checkData(10, 1, True) - tdSql.checkData(0, 2, 1) tdSql.checkData(1, 2, 3) @@ -4437,8 +4407,6 @@ class TDTestCase: tdSql.checkData(6, 2, 8) tdSql.checkData(7, 2, 8) tdSql.checkData(8, 2, 9) - 
tdSql.checkData(9, 2, None) - tdSql.checkData(10, 2, None) # super table tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_null} range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(next)") @@ -4475,7 +4443,7 @@ class TDTestCase: tdSql.query(f"select _irowts, _isfilled, interp(c0, 1) from {dbname}.{stbname_null} range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(next)") - tdSql.checkRows(9) + tdSql.checkRows(8) tdSql.checkData(0, 1, False) tdSql.checkData(1, 1, True) tdSql.checkData(2, 1, True) @@ -4484,7 +4452,6 @@ class TDTestCase: tdSql.checkData(5, 1, True) tdSql.checkData(6, 1, False) tdSql.checkData(7, 1, False) - tdSql.checkData(8, 1, True) tdSql.checkData(0, 2, 1) tdSql.checkData(1, 2, 9) @@ -4494,12 +4461,11 @@ class TDTestCase: tdSql.checkData(5, 2, 13) tdSql.checkData(6, 2, 13) tdSql.checkData(7, 2, 15) - tdSql.checkData(8, 2, None) tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_null} where c0 is not null range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(next)") - tdSql.checkRows(9) + tdSql.checkRows(8) tdSql.checkData(0, 1, False) tdSql.checkData(1, 1, True) tdSql.checkData(2, 1, True) @@ -4508,7 +4474,6 @@ class TDTestCase: tdSql.checkData(5, 1, True) tdSql.checkData(6, 1, False) tdSql.checkData(7, 1, False) - tdSql.checkData(8, 1, True) tdSql.checkData(0, 2, 1) tdSql.checkData(1, 2, 9) @@ -4518,37 +4483,36 @@ class TDTestCase: tdSql.checkData(5, 2, 13) tdSql.checkData(6, 2, 13) tdSql.checkData(7, 2, 15) - tdSql.checkData(8, 2, None) tdSql.query(f"select tbname, _irowts, _isfilled, interp(c0, 1) from {dbname}.{stbname_null} partition by tbname range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(next)") - tdSql.checkRows(18) - for i in range(0, 9): + tdSql.checkRows(15) + for i in range(0, 7): tdSql.checkData(i, 0, 'ctb1_null') - for i in range(9, 18): + for i in range(7, 15): tdSql.checkData(i, 0, 'ctb2_null') tdSql.checkData(0, 1, '2020-02-01 00:00:01.000') - tdSql.checkData(8, 1, '2020-02-01 00:00:17.000') + tdSql.checkData(6, 1, '2020-02-01 00:00:13.000') - tdSql.checkData(9, 1, '2020-02-01 00:00:01.000') - tdSql.checkData(17, 1, '2020-02-01 00:00:17.000') + tdSql.checkData(7, 1, '2020-02-01 00:00:01.000') + tdSql.checkData(14, 1, '2020-02-01 00:00:15.000') tdSql.query(f"select tbname, _irowts, _isfilled, interp(c0) from {dbname}.{stbname_null} where c0 is not null partition by tbname range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(next)") - tdSql.checkRows(18) - for i in range(0, 9): + tdSql.checkRows(15) + for i in range(0, 7): tdSql.checkData(i, 0, 'ctb1_null') - for i in range(9, 18): + for i in range(7, 15): tdSql.checkData(i, 0, 'ctb2_null') tdSql.checkData(0, 1, '2020-02-01 00:00:01.000') - tdSql.checkData(8, 1, '2020-02-01 00:00:17.000') + tdSql.checkData(6, 1, '2020-02-01 00:00:13.000') - tdSql.checkData(9, 1, '2020-02-01 00:00:01.000') - tdSql.checkData(17, 1, '2020-02-01 00:00:17.000') + tdSql.checkData(7, 1, '2020-02-01 00:00:01.000') + tdSql.checkData(14, 1, '2020-02-01 00:00:15.000') # fill linear # normal table From 3310e8145620ff06332d262ad2411ada14bb36ab Mon Sep 17 00:00:00 2001 From: xiao-77 Date: Wed, 16 Oct 2024 10:15:12 +0800 Subject: [PATCH 22/51] make sure mnode can be started --- source/libs/wal/src/walMeta.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/source/libs/wal/src/walMeta.c b/source/libs/wal/src/walMeta.c index 8649581d5d..042024284c 100644 --- a/source/libs/wal/src/walMeta.c +++ 
b/source/libs/wal/src/walMeta.c @@ -288,6 +288,11 @@ static void walAlignVersions(SWal* pWal) { pWal->cfg.vgId, pWal->vers.snapshotVer, pWal->cfg.committed); pWal->vers.snapshotVer = pWal->cfg.committed; } + if (pWal->vers.snapshotVer < 0) { + wWarn("vgId:%d, snapshotVer:%" PRId64 " in wal is an invalid value. align it with firstVer:%" PRId64 ".", + pWal->cfg.vgId, pWal->vers.snapshotVer, pWal->vers.firstVer); + pWal->vers.snapshotVer = pWal->vers.firstVer; + } if (pWal->vers.firstVer > pWal->vers.snapshotVer + 1) { wWarn("vgId:%d, firstVer:%" PRId64 " is larger than snapshotVer:%" PRId64 " + 1. align with it.", pWal->cfg.vgId, pWal->vers.firstVer, pWal->vers.snapshotVer); From d0a0d578bd149e82f0623f35a52378393e229391 Mon Sep 17 00:00:00 2001 From: dmchen Date: Wed, 16 Oct 2024 02:16:31 +0000 Subject: [PATCH 23/51] fix/TS-5533-update-os-info-when-monitor --- source/dnode/mgmt/node_util/src/dmUtil.c | 1 + 1 file changed, 1 insertion(+) diff --git a/source/dnode/mgmt/node_util/src/dmUtil.c b/source/dnode/mgmt/node_util/src/dmUtil.c index b50c746c92..f8c0955745 100644 --- a/source/dnode/mgmt/node_util/src/dmUtil.c +++ b/source/dnode/mgmt/node_util/src/dmUtil.c @@ -74,6 +74,7 @@ void dmGetMonitorSystemInfo(SMonSysInfo *pInfo) { } pInfo->mem_total = tsTotalMemoryKB; pInfo->disk_engine = 0; + osUpdate(); pInfo->disk_used = tsDataSpace.size.used; pInfo->disk_total = tsDataSpace.size.total; code = taosGetCardInfoDelta(&pInfo->net_in, &pInfo->net_out); From f206837d48f86fb3102c2e40626a7cbe5cdc6c5f Mon Sep 17 00:00:00 2001 From: xiao-77 Date: Wed, 16 Oct 2024 10:30:27 +0800 Subject: [PATCH 24/51] modify log level while walLoadMeta failed --- source/libs/wal/src/walMgmt.c | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/source/libs/wal/src/walMgmt.c b/source/libs/wal/src/walMgmt.c index 3b23a2db80..d8a58efe4e 100644 --- a/source/libs/wal/src/walMgmt.c +++ b/source/libs/wal/src/walMgmt.c @@ -91,7 +91,8 @@ static int32_t walInitLock(SWal *pWal) { } SWal *walOpen(const char *path, SWalCfg *pCfg) { - SWal *pWal = taosMemoryCalloc(1, sizeof(SWal)); + int32_t code = 0; + SWal *pWal = taosMemoryCalloc(1, sizeof(SWal)); if (pWal == NULL) { terrno = TAOS_SYSTEM_ERROR(errno); return NULL; @@ -160,17 +161,20 @@ SWal *walOpen(const char *path, SWalCfg *pCfg) { pWal->writeHead.magic = WAL_MAGIC; // load meta - if (walLoadMeta(pWal) < 0) { - wInfo("vgId:%d, failed to load meta since %s", pWal->cfg.vgId, tstrerror(terrno)); + code = walLoadMeta(pWal); + if (code < 0) { + wWarn("vgId:%d, failed to load meta since %s", pWal->cfg.vgId, tstrerror(code)); } - if (walCheckAndRepairMeta(pWal) < 0) { - wError("vgId:%d, cannot open wal since repair meta file failed", pWal->cfg.vgId); + code = walCheckAndRepairMeta(pWal); + if (code < 0) { + wError("vgId:%d, cannot open wal since repair meta file failed since %s", pWal->cfg.vgId, tstrerror(code)); goto _err; } - if (walCheckAndRepairIdx(pWal) < 0) { - wError("vgId:%d, cannot open wal since repair idx file failed", pWal->cfg.vgId); + code = walCheckAndRepairIdx(pWal); + if (code < 0) { + wError("vgId:%d, cannot open wal since repair idx file failed since %s", pWal->cfg.vgId, tstrerror(code)); goto _err; } From dca0822b5d424ebadbbe52c0570dee7349812e74 Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Wed, 16 Oct 2024 10:47:02 +0800 Subject: [PATCH 25/51] feat: support query-QPS new feature --- tools/auto/testCompression/testCompression.py | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git 
a/tools/auto/testCompression/testCompression.py b/tools/auto/testCompression/testCompression.py index 281a097f8a..ee922a1a23 100644 --- a/tools/auto/testCompression/testCompression.py +++ b/tools/auto/testCompression/testCompression.py @@ -134,8 +134,6 @@ def getMatch(datatype, algo): def generateJsonFile(algo): - print(f"doTest algo: {algo} \n") - # replace datatype context = readFileContext(templateFile) # replace compress @@ -192,8 +190,6 @@ def findContextValue(context, label): ends = [',','}',']', 0] while context[end] not in ends: end += 1 - - print(f"start = {start} end={end}\n") return context[start:end] @@ -281,10 +277,10 @@ def testQuery(): # INFO: Spend 6.7350 second completed total queries: 10, the QPS of all threads: 1.485 speed = None - for i in range(20, len(lines)): + for i in range(0, len(lines)): # find second real + context = lines[i] pos = context.find("the QPS of all threads:") - context = lines[26] if pos == -1 : continue pos += 24 @@ -302,7 +298,6 @@ def doTest(algo, resultFile): print(f"doTest algo: {algo} \n") #cleanAndStartTaosd() - # json jsonFile = generateJsonFile(algo) From de7006743627a2e561f833c2117f17b8afce0791 Mon Sep 17 00:00:00 2001 From: Jinqing Kuang Date: Wed, 16 Oct 2024 10:48:41 +0800 Subject: [PATCH 26/51] fix(query)[TD-32564]. Fix memory leak in exceptional cases In function tsdbTFileSetInitRef, clear all FileObj stored in the variable lvl when an error occurs, and release the memory allocated for lvl itself. --- source/dnode/vnode/src/tsdb/tsdbFSet2.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbFSet2.c b/source/dnode/vnode/src/tsdb/tsdbFSet2.c index fc681f9753..a0ae58ac96 100644 --- a/source/dnode/vnode/src/tsdb/tsdbFSet2.c +++ b/source/dnode/vnode/src/tsdb/tsdbFSet2.c @@ -602,14 +602,14 @@ int32_t tsdbTFileSetInitRef(STsdb *pTsdb, const STFileSet *fset1, STFileSet **fs SSttLvl *lvl; code = tsdbSttLvlInitRef(pTsdb, lvl1, &lvl); if (code) { - taosMemoryFree(lvl); + tsdbSttLvlClear(&lvl); tsdbTFileSetClear(fset); return code; } code = TARRAY2_APPEND(fset[0]->lvlArr, lvl); if (code) { - taosMemoryFree(lvl); + tsdbSttLvlClear(&lvl); tsdbTFileSetClear(fset); return code; } From 2520eead4842472e3f03690041d02bcaca273269 Mon Sep 17 00:00:00 2001 From: dmchen Date: Wed, 16 Oct 2024 04:59:02 +0000 Subject: [PATCH 27/51] fix/TS-5533-update-os-info-when-monitor-fix-check --- source/dnode/mgmt/node_util/src/dmUtil.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/source/dnode/mgmt/node_util/src/dmUtil.c b/source/dnode/mgmt/node_util/src/dmUtil.c index f8c0955745..3a6c73a1bc 100644 --- a/source/dnode/mgmt/node_util/src/dmUtil.c +++ b/source/dnode/mgmt/node_util/src/dmUtil.c @@ -74,7 +74,10 @@ void dmGetMonitorSystemInfo(SMonSysInfo *pInfo) { } pInfo->mem_total = tsTotalMemoryKB; pInfo->disk_engine = 0; - osUpdate(); + code = osUpdate(); + if (code != 0) { + dError("failed to update os info since %s", tstrerror(code)); + } pInfo->disk_used = tsDataSpace.size.used; pInfo->disk_total = tsDataSpace.size.total; code = taosGetCardInfoDelta(&pInfo->net_in, &pInfo->net_out); From 48d9f2da65e8aea47a03e44f30084bbac478a06a Mon Sep 17 00:00:00 2001 From: xiao-77 Date: Wed, 16 Oct 2024 13:38:45 +0800 Subject: [PATCH 28/51] fix ci walTest --- source/libs/wal/src/walMeta.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/wal/src/walMeta.c b/source/libs/wal/src/walMeta.c index 042024284c..92ad760a20 100644 --- a/source/libs/wal/src/walMeta.c +++ 
b/source/libs/wal/src/walMeta.c @@ -288,7 +288,7 @@ static void walAlignVersions(SWal* pWal) { pWal->cfg.vgId, pWal->vers.snapshotVer, pWal->cfg.committed); pWal->vers.snapshotVer = pWal->cfg.committed; } - if (pWal->vers.snapshotVer < 0) { + if (pWal->vers.snapshotVer < 0 && pWal->vers.firstVer > 0) { wWarn("vgId:%d, snapshotVer:%" PRId64 " in wal is an invalid value. align it with firstVer:%" PRId64 ".", pWal->cfg.vgId, pWal->vers.snapshotVer, pWal->vers.firstVer); pWal->vers.snapshotVer = pWal->vers.firstVer; From 544a1828e6c094d4ed874c36893e44afc2986f29 Mon Sep 17 00:00:00 2001 From: dmchen Date: Wed, 16 Oct 2024 07:00:28 +0000 Subject: [PATCH 29/51] fix/TS-5532-add-more-log-status-msg --- source/dnode/mgmt/mgmt_dnode/src/dmHandle.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c b/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c index f1f3a3bee7..87b1ae0efa 100644 --- a/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c +++ b/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c @@ -123,6 +123,7 @@ void dmSendStatusReq(SDnodeMgmt *pMgmt) { int32_t code = 0; SStatusReq req = {0}; + dDebug("send status req to mnode, statusSeq:%d, begin to mgnt lock", pMgmt->statusSeq); (void)taosThreadRwlockRdlock(&pMgmt->pData->lock); req.sver = tsVersion; req.dnodeVer = pMgmt->pData->dnodeVer; @@ -161,14 +162,17 @@ void dmSendStatusReq(SDnodeMgmt *pMgmt) { memcpy(req.clusterCfg.charset, tsCharset, TD_LOCALE_LEN); (void)taosThreadRwlockUnlock(&pMgmt->pData->lock); + dDebug("send status req to mnode, statusSeq:%d, begin to get vnode loads", pMgmt->statusSeq); SMonVloadInfo vinfo = {0}; (*pMgmt->getVnodeLoadsFp)(&vinfo); req.pVloads = vinfo.pVloads; + dDebug("send status req to mnode, statusSeq:%d, begin to get mnode loads", pMgmt->statusSeq); SMonMloadInfo minfo = {0}; (*pMgmt->getMnodeLoadsFp)(&minfo); req.mload = minfo.load; + dDebug("send status req to mnode, statusSeq:%d, begin to get qnode loads", pMgmt->statusSeq); (*pMgmt->getQnodeLoadsFp)(&req.qload); pMgmt->statusSeq++; @@ -206,6 +210,7 @@ void dmSendStatusReq(SDnodeMgmt *pMgmt) { int8_t epUpdated = 0; (void)dmGetMnodeEpSet(pMgmt->pData, &epSet); + dDebug("send status req to mnode, statusSeq:%d, begin to send rpc msg", pMgmt->statusSeq); code = rpcSendRecvWithTimeout(pMgmt->msgCb.statusRpc, &epSet, &rpcMsg, &rpcRsp, &epUpdated, tsStatusInterval * 5 * 1000); if (code != 0) { From 9bf2c61d9357faae785e78fc7209a1b686c03745 Mon Sep 17 00:00:00 2001 From: factosea <285808407@qq.com> Date: Wed, 16 Oct 2024 16:15:45 +0800 Subject: [PATCH 30/51] not nest --- source/libs/parser/src/parAstCreater.c | 3 ++- tests/pytest/fulltest.sh | 1 + tests/pytest/regressiontest.sh | 1 + tests/system-test/2-query/not.py | 35 +++++++++++++------------- tests/system-test/runAllOne.sh | 5 ++++ 5 files changed, 26 insertions(+), 19 deletions(-) diff --git a/source/libs/parser/src/parAstCreater.c b/source/libs/parser/src/parAstCreater.c index 5db7e18fc5..3bb9e15182 100644 --- a/source/libs/parser/src/parAstCreater.c +++ b/source/libs/parser/src/parAstCreater.c @@ -887,7 +887,8 @@ _err: } static int32_t addParamToLogicConditionNode(SLogicConditionNode* pCond, SNode* pParam) { - if (QUERY_NODE_LOGIC_CONDITION == nodeType(pParam) && pCond->condType == ((SLogicConditionNode*)pParam)->condType) { + if (QUERY_NODE_LOGIC_CONDITION == nodeType(pParam) && pCond->condType == ((SLogicConditionNode*)pParam)->condType && + ((SLogicConditionNode*)pParam)->condType != LOGIC_COND_TYPE_NOT) { int32_t code = nodesListAppendList(pCond->pParameterList, 
((SLogicConditionNode*)pParam)->pParameterList); ((SLogicConditionNode*)pParam)->pParameterList = NULL; nodesDestroyNode(pParam); diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index 3df42cbf33..eb975ec46f 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -225,6 +225,7 @@ python3 test.py -f query/distinctOneColTb.py python3 ./test.py -f query/filter.py python3 ./test.py -f query/filterCombo.py python3 ./test.py -f query/queryNormal.py +python3 ./test.py -f query/not.py python3 ./test.py -f query/queryError.py python3 ./test.py -f query/filterAllIntTypes.py python3 ./test.py -f query/filterFloatAndDouble.py diff --git a/tests/pytest/regressiontest.sh b/tests/pytest/regressiontest.sh index b69ee37a55..e42d53ded1 100755 --- a/tests/pytest/regressiontest.sh +++ b/tests/pytest/regressiontest.sh @@ -139,6 +139,7 @@ python3 ./test.py -f query/querySort.py python3 ./test.py -f query/queryJoin.py python3 ./test.py -f query/filterCombo.py python3 ./test.py -f query/queryNormal.py +python3 ./test.py -f query/not.py python3 ./test.py -f query/select_last_crash.py python3 ./test.py -f query/queryNullValueTest.py python3 ./test.py -f query/queryInsertValue.py diff --git a/tests/system-test/2-query/not.py b/tests/system-test/2-query/not.py index a0bd1d4e1d..1254226db3 100644 --- a/tests/system-test/2-query/not.py +++ b/tests/system-test/2-query/not.py @@ -98,24 +98,23 @@ class TDTestCase: tdsql.checkData(0, 1, 10) tdsql.checkData(1, 1, 30) - # tdsql.query(f"select * from {dbname}.{stbname} where not(not (v1 < 20 or v1 > 30)) order by t1") - # tdsql.checkRows(1) - # - # tdsql.checkData(0, 1, 20) - # tdsql.query(f"select * from {dbname}.{stbname} where not(not (v1 < 20 or v1 >= 30)) order by t1") - # tdsql.checkRows(2) - # - # tdsql.query(f"select * from {dbname}.{stbname} where not(not (t1 != 1)) order by t1") - # tdsql.checkRows(2) - # - # tdsql.query(f"select * from {dbname}.{stbname} where not(not (t1 > 2)) order by t1") - # tdsql.checkRows(1) - # - # tdsql.query(f"select * from {dbname}.{stbname} where not(not (t1 == 2)) order by t1") - # tdsql.checkRows(1) - # - # tdsql.query(f"select * from {dbname}.{stbname} where not(not (v1 > 10 and v1 < 30)) order by t1") - # tdsql.checkRows(1) + tdsql.query(f"select * from {dbname}.{stbname} where not(not (v1 < 20 or v1 > 30)) order by t1") + tdsql.checkRows(1) + + tdsql.query(f"select * from {dbname}.{stbname} where not(not (v1 < 20 or v1 >= 30)) order by t1") + tdsql.checkRows(2) + + tdsql.query(f"select * from {dbname}.{stbname} where not(not (t1 != 1)) order by t1") + tdsql.checkRows(2) + + tdsql.query(f"select * from {dbname}.{stbname} where not(not (t1 > 2)) order by t1") + tdsql.checkRows(1) + + tdsql.query(f"select * from {dbname}.{stbname} where not(not (t1 == 2)) order by t1") + tdsql.checkRows(1) + + tdsql.query(f"select * from {dbname}.{stbname} where not(not (v1 > 10 and v1 < 30)) order by t1") + tdsql.checkRows(1) def run(self): dbname = "db" diff --git a/tests/system-test/runAllOne.sh b/tests/system-test/runAllOne.sh index 3bb128ea28..0d65fd616b 100644 --- a/tests/system-test/runAllOne.sh +++ b/tests/system-test/runAllOne.sh @@ -245,6 +245,8 @@ python3 ./test.py -f 2-query/min.py -P python3 ./test.py -f 2-query/min.py -P -R python3 ./test.py -f 2-query/normal.py -P python3 ./test.py -f 2-query/normal.py -P -R +python3 ./test.py -f 2-query/not.py -P +python3 ./test.py -f 2-query/not.py -P -R python3 ./test.py -f 2-query/mode.py -P python3 ./test.py -f 2-query/mode.py -P -R python3 ./test.py -f 2-query/Now.py 
-P @@ -427,6 +429,7 @@ python3 ./test.py -f 2-query/Today.py -P -Q 2 python3 ./test.py -f 2-query/max.py -P -Q 2 python3 ./test.py -f 2-query/min.py -P -Q 2 python3 ./test.py -f 2-query/normal.py -P -Q 2 +python3 ./test.py -f 2-query/not.py -P -Q 2 python3 ./test.py -f 2-query/mode.py -P -Q 2 python3 ./test.py -f 2-query/count.py -P -Q 2 python3 ./test.py -f 2-query/countAlwaysReturnValue.py -P -Q 2 @@ -526,6 +529,7 @@ python3 ./test.py -f 2-query/Today.py -P -Q 3 python3 ./test.py -f 2-query/max.py -P -Q 3 python3 ./test.py -f 2-query/min.py -P -Q 3 python3 ./test.py -f 2-query/normal.py -P -Q 3 +python3 ./test.py -f 2-query/not.py -P -Q 3 python3 ./test.py -f 2-query/mode.py -P -Q 3 python3 ./test.py -f 2-query/count.py -P -Q 3 python3 ./test.py -f 2-query/countAlwaysReturnValue.py -P -Q 3 @@ -624,6 +628,7 @@ python3 ./test.py -f 2-query/Today.py -P -Q 4 python3 ./test.py -f 2-query/max.py -P -Q 4 python3 ./test.py -f 2-query/min.py -P -Q 4 python3 ./test.py -f 2-query/normal.py -P -Q 4 +python3 ./test.py -f 2-query/not.py -P -Q 4 python3 ./test.py -f 2-query/mode.py -P -Q 4 python3 ./test.py -f 2-query/count.py -P -Q 4 python3 ./test.py -f 2-query/countAlwaysReturnValue.py -P -Q 4 From b56701e05c9e940346dde1463ebf42bad7b7d38a Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Wed, 16 Oct 2024 16:31:30 +0800 Subject: [PATCH 31/51] fix: remove invalid error code check and add meta data recover and compact function --- source/dnode/mgmt/exe/dmMain.c | 3 + source/dnode/vnode/src/inc/vnodeInt.h | 3 + source/dnode/vnode/src/meta/metaOpen.c | 171 +++++++++++++++++++++++- source/dnode/vnode/src/meta/metaTable.c | 3 - 4 files changed, 175 insertions(+), 5 deletions(-) diff --git a/source/dnode/mgmt/exe/dmMain.c b/source/dnode/mgmt/exe/dmMain.c index ba162bd84f..1089b0eced 100644 --- a/source/dnode/mgmt/exe/dmMain.c +++ b/source/dnode/mgmt/exe/dmMain.c @@ -182,6 +182,7 @@ static void dmSetSignalHandle() { } #endif } +extern bool generateNewMeta; static int32_t dmParseArgs(int32_t argc, char const *argv[]) { global.startTime = taosGetTimestampMs(); @@ -221,6 +222,8 @@ static int32_t dmParseArgs(int32_t argc, char const *argv[]) { global.dumpSdb = true; } else if (strcmp(argv[i], "-dTxn") == 0) { global.deleteTrans = true; + } else if (strcmp(argv[i], "-r") == 0) { + generateNewMeta = true; } else if (strcmp(argv[i], "-E") == 0) { if (i < argc - 1) { if (strlen(argv[++i]) >= PATH_MAX) { diff --git a/source/dnode/vnode/src/inc/vnodeInt.h b/source/dnode/vnode/src/inc/vnodeInt.h index 1bd4317234..fc98d6578b 100644 --- a/source/dnode/vnode/src/inc/vnodeInt.h +++ b/source/dnode/vnode/src/inc/vnodeInt.h @@ -81,6 +81,9 @@ typedef struct SCommitInfo SCommitInfo; typedef struct SCompactInfo SCompactInfo; typedef struct SQueryNode SQueryNode; +#define VNODE_META_TMP_DIR "meta.tmp" +#define VNODE_META_BACKUP_DIR "meta.backup" + #define VNODE_META_DIR "meta" #define VNODE_TSDB_DIR "tsdb" #define VNODE_TQ_DIR "tq" diff --git a/source/dnode/vnode/src/meta/metaOpen.c b/source/dnode/vnode/src/meta/metaOpen.c index f062505ac7..ef36521879 100644 --- a/source/dnode/vnode/src/meta/metaOpen.c +++ b/source/dnode/vnode/src/meta/metaOpen.c @@ -133,7 +133,7 @@ static void doScan(SMeta *pMeta) { } } -int32_t metaOpen(SVnode *pVnode, SMeta **ppMeta, int8_t rollback) { +static int32_t metaOpenImpl(SVnode *pVnode, SMeta **ppMeta, const char *metaDir, int8_t rollback) { SMeta *pMeta = NULL; int32_t code = 0; int32_t lino; @@ -144,7 +144,11 @@ int32_t metaOpen(SVnode *pVnode, SMeta **ppMeta, int8_t rollback) { // create handle 
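/* metaOpenImpl: the open path is parameterized by metaDir, so the same code
 * serves both the live "meta" dir and the scratch "meta.tmp" dir used during
 * regeneration; a requested scratch dir is wiped first (taosRemoveDir below),
 * so a half-written tmp dir left over from a crash is never read back as
 * valid metadata. */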
vnodeGetPrimaryDir(pVnode->path, pVnode->diskPrimary, pVnode->pTfs, path, TSDB_FILENAME_LEN); offset = strlen(path); - snprintf(path + offset, TSDB_FILENAME_LEN - offset - 1, "%s%s", TD_DIRSEP, VNODE_META_DIR); + snprintf(path + offset, TSDB_FILENAME_LEN - offset - 1, "%s%s", TD_DIRSEP, metaDir); + + if (strncmp(metaDir, VNODE_META_TMP_DIR, strlen(VNODE_META_TMP_DIR)) == 0) { + taosRemoveDir(path); + } if ((pMeta = taosMemoryCalloc(1, sizeof(*pMeta) + strlen(path) + 1)) == NULL) { TSDB_CHECK_CODE(code = terrno, lino, _exit); @@ -245,6 +249,169 @@ _exit: return code; } +bool generateNewMeta = false; + +static int32_t metaGenerateNewMeta(SMeta **ppMeta) { + SMeta *pNewMeta = NULL; + SMeta *pMeta = *ppMeta; + SVnode *pVnode = pMeta->pVnode; + + metaInfo("vgId:%d start to generate new meta", TD_VID(pMeta->pVnode)); + + // Open a new meta for orgainzation + int32_t code = metaOpenImpl(pMeta->pVnode, &pNewMeta, VNODE_META_TMP_DIR, false); + if (code) { + return code; + } + + code = metaBegin(pNewMeta, META_BEGIN_HEAP_NIL); + if (code) { + return code; + } + + // i == 0, scan super table + // i == 1, scan normal table and child table + for (int i = 0; i < 2; i++) { + TBC *uidCursor = NULL; + int32_t counter = 0; + + code = tdbTbcOpen(pMeta->pUidIdx, &uidCursor, NULL); + if (code) { + metaError("vgId:%d failed to open uid index cursor, reason:%s", TD_VID(pVnode), tstrerror(code)); + return code; + } + + code = tdbTbcMoveToFirst(uidCursor); + if (code) { + metaError("vgId:%d failed to move to first, reason:%s", TD_VID(pVnode), tstrerror(code)); + tdbTbcClose(uidCursor); + return code; + } + + for (;;) { + const void *pKey; + int kLen; + const void *pVal; + int vLen; + + if (tdbTbcGet(uidCursor, &pKey, &kLen, &pVal, &vLen) < 0) { + break; + } + + tb_uid_t uid = *(tb_uid_t *)pKey; + SUidIdxVal *pUidIdxVal = (SUidIdxVal *)pVal; + if ((i == 0 && (pUidIdxVal->suid && pUidIdxVal->suid == uid)) // super table + || (i == 1 && (pUidIdxVal->suid == 0 || pUidIdxVal->suid != uid)) // normal table and child table + ) { + counter++; + if (i == 0) { + metaInfo("vgId:%d counter:%d new meta handle %s table uid:%" PRId64, TD_VID(pVnode), counter, "super", uid); + } else { + metaInfo("vgId:%d counter:%d new meta handle %s table uid:%" PRId64, TD_VID(pVnode), counter, + pUidIdxVal->suid == 0 ? 
"normal" : "child", uid); + } + + // fetch table entry + void *value = NULL; + int valueSize = 0; + if (tdbTbGet(pMeta->pTbDb, + &(STbDbKey){ + .version = pUidIdxVal->version, + .uid = uid, + }, + sizeof(uid), &value, &valueSize) == 0) { + SDecoder dc = {0}; + SMetaEntry me = {0}; + tDecoderInit(&dc, value, valueSize); + if (metaDecodeEntry(&dc, &me) == 0) { + if (metaHandleEntry(pNewMeta, &me) != 0) { + metaError("vgId:%d failed to handle entry, uid:%" PRId64, TD_VID(pVnode), uid); + } + } + tDecoderClear(&dc); + } + tdbFree(value); + } + + code = tdbTbcMoveToNext(uidCursor); + if (code) { + metaError("vgId:%d failed to move to next, reason:%s", TD_VID(pVnode), tstrerror(code)); + return code; + } + } + + tdbTbcClose(uidCursor); + } + + code = metaCommit(pNewMeta, pNewMeta->txn); + if (code) { + metaError("vgId:%d failed to commit, reason:%s", TD_VID(pVnode), tstrerror(code)); + return code; + } + + code = metaFinishCommit(pNewMeta, pNewMeta->txn); + if (code) { + metaError("vgId:%d failed to finish commit, reason:%s", TD_VID(pVnode), tstrerror(code)); + return code; + } + + if ((code = metaBegin(pNewMeta, META_BEGIN_HEAP_NIL)) != 0) { + metaError("vgId:%d failed to begin new meta, reason:%s", TD_VID(pVnode), tstrerror(code)); + } + metaClose(&pNewMeta); + metaInfo("vgId:%d finish to generate new meta", TD_VID(pVnode)); + return 0; +} + +int32_t metaOpen(SVnode *pVnode, SMeta **ppMeta, int8_t rollback) { + int32_t code = metaOpenImpl(pVnode, ppMeta, VNODE_META_DIR, rollback); + if (code) { + return code; + } + + if (generateNewMeta) { + // backup the old meta + char path[TSDB_FILENAME_LEN] = {0}; + char oldMetaPath[TSDB_FILENAME_LEN] = {0}; + char newMetaPath[TSDB_FILENAME_LEN] = {0}; + char backupMetaPath[TSDB_FILENAME_LEN] = {0}; + + vnodeGetPrimaryDir(pVnode->path, pVnode->diskPrimary, pVnode->pTfs, path, TSDB_FILENAME_LEN); + snprintf(oldMetaPath, sizeof(oldMetaPath) - 1, "%s%s%s", path, TD_DIRSEP, VNODE_META_DIR); + snprintf(newMetaPath, sizeof(newMetaPath) - 1, "%s%s%s", path, TD_DIRSEP, VNODE_META_TMP_DIR); + snprintf(backupMetaPath, sizeof(backupMetaPath) - 1, "%s%s%s", path, TD_DIRSEP, VNODE_META_BACKUP_DIR); + + if (taosCheckExistFile(backupMetaPath)) { + metaError("vgId:%d backup meta already exists, please check", TD_VID(pVnode)); + return TSDB_CODE_FAILED; + } + + code = metaGenerateNewMeta(ppMeta); + if (code) { + metaError("vgId:%d failed to generate new meta, reason:%s", TD_VID(pVnode), tstrerror(code)); + } + + metaClose(ppMeta); + if (taosRenameFile(oldMetaPath, backupMetaPath) != 0) { + metaError("vgId:%d failed to rename old meta to backup, reason:%s", TD_VID(pVnode), tstrerror(terrno)); + return terrno; + } + + // rename the new meta to old meta + if (taosRenameFile(newMetaPath, oldMetaPath) != 0) { + metaError("vgId:%d failed to rename new meta to old meta, reason:%s", TD_VID(pVnode), tstrerror(terrno)); + return terrno; + } + code = metaOpenImpl(pVnode, ppMeta, VNODE_META_DIR, false); + if (code) { + metaError("vgId:%d failed to open new meta, reason:%s", TD_VID(pVnode), tstrerror(code)); + return code; + } + } + + return TSDB_CODE_SUCCESS; +} + int32_t metaUpgrade(SVnode *pVnode, SMeta **ppMeta) { int32_t code = TSDB_CODE_SUCCESS; int32_t lino; diff --git a/source/dnode/vnode/src/meta/metaTable.c b/source/dnode/vnode/src/meta/metaTable.c index 08ee422126..21d12ef77d 100644 --- a/source/dnode/vnode/src/meta/metaTable.c +++ b/source/dnode/vnode/src/meta/metaTable.c @@ -2985,9 +2985,6 @@ static int metaUpdateTagIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry) { } } end: 
- if (terrno != 0) { - ret = terrno; - } tDecoderClear(&dc); tdbFree(pData); return ret; From 75650908ba622638ce98415b894d322769349934 Mon Sep 17 00:00:00 2001 From: dmchen Date: Wed, 16 Oct 2024 09:17:21 +0000 Subject: [PATCH 32/51] fix/TS-5533-revert-add-osupdate-when-monitor --- source/dnode/mgmt/node_util/src/dmUtil.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/source/dnode/mgmt/node_util/src/dmUtil.c b/source/dnode/mgmt/node_util/src/dmUtil.c index 3a6c73a1bc..b50c746c92 100644 --- a/source/dnode/mgmt/node_util/src/dmUtil.c +++ b/source/dnode/mgmt/node_util/src/dmUtil.c @@ -74,10 +74,6 @@ void dmGetMonitorSystemInfo(SMonSysInfo *pInfo) { } pInfo->mem_total = tsTotalMemoryKB; pInfo->disk_engine = 0; - code = osUpdate(); - if (code != 0) { - dError("failed to update os info since %s", tstrerror(code)); - } pInfo->disk_used = tsDataSpace.size.used; pInfo->disk_total = tsDataSpace.size.total; code = taosGetCardInfoDelta(&pInfo->net_in, &pInfo->net_out); From 1d018d0d287cc428cdf3656e964e6c492784fd1e Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Wed, 16 Oct 2024 18:05:35 +0800 Subject: [PATCH 33/51] enh: stmt2Perf add p90 p95 min max --- .../auto/stmt2Performance/json/template.json | 6 +- tools/auto/stmt2Performance/stmt2Perf.py | 110 +++++++++++++----- 2 files changed, 86 insertions(+), 30 deletions(-) diff --git a/tools/auto/stmt2Performance/json/template.json b/tools/auto/stmt2Performance/json/template.json index 659c5966a4..8c54c5be22 100644 --- a/tools/auto/stmt2Performance/json/template.json +++ b/tools/auto/stmt2Performance/json/template.json @@ -16,14 +16,14 @@ { "dbinfo": { "name": "dbrate", - "drop": "yes", - "vgroups": 2 + "vgroups": 1, + "drop": "yes" }, "super_tables": [ { "name": "meters", "child_table_exists": "no", - "childtable_count": 10, + "childtable_count": 1, "childtable_prefix": "d", "insert_mode": "@STMT_MODE", "interlace_rows": @INTERLACE_MODE, diff --git a/tools/auto/stmt2Performance/stmt2Perf.py b/tools/auto/stmt2Performance/stmt2Perf.py index e7a4d5ecbe..4d99f2483d 100644 --- a/tools/auto/stmt2Performance/stmt2Perf.py +++ b/tools/auto/stmt2Performance/stmt2Perf.py @@ -198,16 +198,20 @@ def findContextValue(context, label): def writeTemplateInfo(resultFile): # create info - context = readFileContext(templateFile) + context = readFileContext(templateFile) vgroups = findContextValue(context, "vgroups") childCount = findContextValue(context, "childtable_count") insertRows = findContextValue(context, "insert_rows") - line = f"vgroups = {vgroups}\nchildtable_count = {childCount}\ninsert_rows = {insertRows}\n\n" + bindVGroup = findContextValue(context, "thread_bind_vgroup") + nThread = findContextValue(context, "thread_count") + if bindVGroup.lower().find("yes") != -1: + nThread = vgroups + line = f"thread_bind_vgroup = {bindVGroup}\nvgroups = {vgroups}\nchildtable_count = {childCount}\ninsert_rows = {insertRows}\ninsertThreads = {nThread} \n\n" print(line) appendFileContext(resultFile, line) -def totalCompressRate(stmt, interlace, resultFile, writeSpeed, querySpeed): +def totalCompressRate(stmt, interlace, resultFile, spent, spentReal, writeSpeed, writeReal, min, avg, p90, p99, max, querySpeed): global Number # flush command = 'taos -s "flush database dbrate;"' @@ -220,7 +224,7 @@ def totalCompressRate(stmt, interlace, resultFile, writeSpeed, querySpeed): # read compress rate command = 'taos -s "show table distributed dbrate.meters\G;"' rets = runRetList(command) - print(rets) + #print(rets) str1 = rets[5] arr = str1.split(" ") @@ -234,46 
+238,88 @@ def totalCompressRate(stmt, interlace, resultFile, writeSpeed, querySpeed): str2 = arr[6] pos = str2.find("=[") rate = str2[pos+2:] - print("rate =" + rate) # total data file size #dataSize = getFolderSize(f"{dataDir}/vnode/") #dataSizeMB = int(dataSize/1024/1024) # appand to file - + + # %("No", "stmtMode", "interlaceRows", "spent", "spent-real", "writeSpeed", "write-real", "query-QPS", "dataSize", "rate") Number += 1 - context = "%10s %10s %15s %10s %10s %30s %15s\n"%( Number, stmt, interlace, str(totalSize)+" MB", rate+"%", writeSpeed + " Records/second", querySpeed) + ''' + context = "%2s %6s %10s %10s %10s %15s %15s %16s %16s %16s %16s %16s %8s %8s %8s\n"%( + Number, stmt, interlace, spent + "s", spentReal + "s", writeSpeed + " rows/s", writeReal + " rows/s", + min, avg, p90, p99, max, + querySpeed, str(totalSize) + " MB", rate + "%") + ''' + context = "%2s %8s %10s %10s %16s %16s %12s %12s %12s %12s %12s %12s %10s %10s %10s\n"%( + Number, stmt, interlace, spent + "s", spentReal + "s", writeSpeed + "r/s", writeReal + "r/s", + min, avg, p90, p99, max + "ms", + querySpeed, str(totalSize) + " MB", rate + "%") + showLog(context) appendFileContext(resultFile, context) +def cutEnd(line, start, endChar): + pos = line.find(endChar, start) + if pos == -1: + return line[start:] + return line[start : pos] + +def findValue(context, pos, key, endChar,command): + pos = context.find(key, pos) + if pos == -1: + print(f"error, run command={command} output not found \"{key}\" keyword. context={context}") + exit(1) + pos += len(key) + value = cutEnd(context, pos, endChar) + return (value, pos) + def testWrite(jsonFile): command = f"taosBenchmark -f {jsonFile}" output, context = run(command, 60000) + print(context) + # SUCC: Spent 0.960248 (real 0.947154) seconds to insert rows: 100000 with 1 thread(s) into dbrate 104139.76 (real 105579.45) records/second - # find second real - pos = context.find("(real ") + # spent + key = "Spent " + pos = -1 + pos1 = 0 + while pos1 != -1: # find last "Spent " + pos1 = context.find(key, pos1) + if pos1 != -1: + pos = pos1 # update last found + pos1 += len(key) if pos == -1: - print(f"error, run command={command} output not found first \"(real\" keyword. error={context}") + print(f"error, run command={command} output not found \"{key}\" keyword. context={context}") exit(1) - pos = context.find("(real ", pos + 5) + pos += len(key) + spent = cutEnd(context, pos, ".") + + # spent-real + spentReal, pos = findValue(context, pos, "(real ", ".", command) + + # writeSpeed + key = "into " + pos = context.find(key, pos) if pos == -1: - print(f"error, run command={command} output not found second \"(real\" keyword. error={context}") - exit(1) - - pos += 5 - length = len(context) - while pos < length and context[pos] == ' ': - pos += 1 - end = context.find(".", pos) - if end == -1: - print(f"error, run command={command} output not found second \".\" keyword. error={context}") + print(f"error, run command={command} output not found \"{key}\" keyword. 
context={context}") exit(1) + pos += len(key) + writeSpeed, pos = findValue(context, pos, " ", ".", command) + # writeReal + writeReal, pos = findValue(context, pos, "(real ", ".", command) - speed = context[pos: end] - #print(f"write pos ={pos} end={end} speed={speed}\n output={context} \n") - return speed + # delay + min, pos = findValue(context, pos, "min: ", ",", command) + avg, pos = findValue(context, pos, "avg: ", ",", command) + p90, pos = findValue(context, pos, "p90: ", ",", command) + p99, pos = findValue(context, pos, "p99: ", ",", command) + max, pos = findValue(context, pos, "max: ", "ms", command) + + return (spent, spentReal, writeSpeed, writeReal, min, avg, p90, p99, max) def testQuery(): command = f"taosBenchmark -f json/query.json" @@ -308,13 +354,13 @@ def doTest(stmt, interlace, resultFile): # run taosBenchmark t1 = time.time() - writeSpeed = testWrite(jsonFile) + spent, spentReal, writeSpeed, writeReal, min, avg, p90, p99, max = testWrite(jsonFile) t2 = time.time() # total write speed querySpeed = testQuery() # total compress rate - totalCompressRate(stmt, interlace, resultFile, writeSpeed, querySpeed) + totalCompressRate(stmt, interlace, resultFile, spent, spentReal, writeSpeed, writeReal, min, avg, p90, p99, max, querySpeed) def main(): @@ -333,7 +379,17 @@ def main(): # json info writeTemplateInfo(resultFile) # head - context = "\n%10s %10s %15s %10s %10s %30s %15s\n"%("No", "stmtMode", "interlaceRows", "dataSize", "rate", "writeSpeed", "query-QPS") + ''' + context = "%3s %8s %10s %10s %10s %15s %15s %10s %10s %10s %10s %10s %8s %8s %8s\n"%( + "No", "stmtMode", "interlace", "spent", "spent-real", "writeSpeed", "write-real", + "min", "avg", "p90", "p99", "max", + "query-QPS", "dataSize", "rate") + ''' + context = "%2s %8s %10s %10s %16s %16s %12s %12s %12s %12s %12s %12s %10s %10s %10s\n"%( + "No", "stmtMode", "interlace", "spent", "spent-real", "writeSpeed", "write-real", + "min", "avg", "p90", "p99", "max", + "query-QPS", "dataSize", "rate") + appendFileContext(resultFile, context) From 27c087e9aec5bd1a499a4c2e545c283d7536b727 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 16 Oct 2024 18:36:49 +0800 Subject: [PATCH 34/51] refactor: do some internal refactor. 
--- source/dnode/mnode/impl/src/mndStream.c | 3 ++- source/dnode/mnode/impl/src/mndStreamTransAct.c | 9 +++------ source/dnode/vnode/src/tsdb/tsdbRead2.c | 2 +- 3 files changed, 6 insertions(+), 8 deletions(-) diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index 69d3de25fc..a4327b777f 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -1783,7 +1783,7 @@ static int32_t mndProcessPauseStreamReq(SRpcMsg *pReq) { static int32_t mndProcessResumeStreamReq(SRpcMsg *pReq) { SMnode *pMnode = pReq->info.node; SStreamObj *pStream = NULL; - int32_t code = 0; + int32_t code = 0; if ((code = grantCheckExpire(TSDB_GRANT_STREAMS)) < 0) { return code; @@ -1811,6 +1811,7 @@ static int32_t mndProcessResumeStreamReq(SRpcMsg *pReq) { return 0; } + mInfo("stream:%s,%" PRId64 " start to resume stream from pause", resumeReq.name, pStream->uid); if (mndCheckDbPrivilegeByName(pMnode, pReq->info.conn.user, MND_OPER_WRITE_DB, pStream->targetDb) != 0) { sdbRelease(pMnode->pSdb, pStream); return -1; diff --git a/source/dnode/mnode/impl/src/mndStreamTransAct.c b/source/dnode/mnode/impl/src/mndStreamTransAct.c index 4e0bf97587..139ea4f147 100644 --- a/source/dnode/mnode/impl/src/mndStreamTransAct.c +++ b/source/dnode/mnode/impl/src/mndStreamTransAct.c @@ -61,7 +61,6 @@ static int32_t doSetPauseAction(SMnode *pMnode, STrans *pTrans, SStreamTask *pTa static int32_t doSetDropAction(SMnode *pMnode, STrans *pTrans, SStreamTask *pTask) { SVDropStreamTaskReq *pReq = taosMemoryCalloc(1, sizeof(SVDropStreamTaskReq)); if (pReq == NULL) { - // terrno = TSDB_CODE_OUT_OF_MEMORY; return terrno; } @@ -93,7 +92,6 @@ static int32_t doSetResumeAction(STrans *pTrans, SMnode *pMnode, SStreamTask *pT if (pReq == NULL) { mError("failed to malloc in resume stream, size:%" PRIzu ", code:%s", sizeof(SVResumeStreamTaskReq), tstrerror(TSDB_CODE_OUT_OF_MEMORY)); - // terrno = TSDB_CODE_OUT_OF_MEMORY; return terrno; } @@ -106,19 +104,18 @@ static int32_t doSetResumeAction(STrans *pTrans, SMnode *pMnode, SStreamTask *pT bool hasEpset = false; int32_t code = extractNodeEpset(pMnode, &epset, &hasEpset, pTask->id.taskId, pTask->info.nodeId); if (code != TSDB_CODE_SUCCESS || (!hasEpset)) { - terrno = code; taosMemoryFree(pReq); - return terrno; + return code; } code = setTransAction(pTrans, pReq, sizeof(SVResumeStreamTaskReq), TDMT_STREAM_TASK_RESUME, &epset, 0, TSDB_CODE_VND_INVALID_VGROUP_ID); if (code != 0) { taosMemoryFree(pReq); - return terrno; + return code; } mDebug("set the resume action for trans:%d", pTrans->id); - return 0; + return code; } static int32_t doSetDropActionFromId(SMnode *pMnode, STrans *pTrans, SOrphanTask* pTask) { diff --git a/source/dnode/vnode/src/tsdb/tsdbRead2.c b/source/dnode/vnode/src/tsdb/tsdbRead2.c index 44a39f4328..c4971e27cf 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead2.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead2.c @@ -5910,7 +5910,7 @@ int32_t tsdbGetTableSchema(SMeta* pMeta, int64_t uid, STSchema** pSchema, int64_ } else if (mr.me.type == TSDB_NORMAL_TABLE) { // do nothing } else { code = TSDB_CODE_INVALID_PARA; - tsdbError("invalid mr.me.type:%d %s, code:%s", mr.me.type, tstrerror(code)); + tsdbError("invalid mr.me.type:%d, code:%s", mr.me.type, tstrerror(code)); metaReaderClear(&mr); return code; } From e893547c1471325b9d93a273d23f36d55c346a3f Mon Sep 17 00:00:00 2001 From: dmchen Date: Wed, 16 Oct 2024 10:58:26 +0000 Subject: [PATCH 35/51] fix/TD-32583-remove-useless-timer-execution --- 
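Note: dropping HEARTBEAT_TICK_NUM changes how often the peer heartbeat timer actually runs. It used to be re-armed at timerMS / 20, so the callback woke 20 times per heartbeat period although the scheduled execTime is only reached once per period, which is what the subject calls useless executions; it is now re-armed at timerMS, so each wake-up does real work. A self-contained sketch of the wake-up arithmetic, with an illustrative period (the real value comes from the sync timer config):

    #include <stdio.h>

    int main(void) {
      const int timerMS = 1000;  /* illustrative heartbeat period */
      const int tickNum = 20;    /* the HEARTBEAT_TICK_NUM this patch removes */
      printf("before: re-armed every %d ms -> %d wake-ups per period\n",
             timerMS / tickNum, tickNum);
      printf("after : re-armed every %d ms -> 1 wake-up per period\n", timerMS);
      return 0;
    }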
source/libs/sync/inc/syncEnv.h | 1 - source/libs/sync/src/syncMain.c | 15 ++++++++------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/source/libs/sync/inc/syncEnv.h b/source/libs/sync/inc/syncEnv.h index 0376920e8a..caf0e88457 100644 --- a/source/libs/sync/inc/syncEnv.h +++ b/source/libs/sync/inc/syncEnv.h @@ -24,7 +24,6 @@ extern "C" { #define TIMER_MAX_MS 0x7FFFFFFF #define PING_TIMER_MS 5000 -#define HEARTBEAT_TICK_NUM 20 typedef struct SSyncEnv { uint8_t isStart; diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c index 451e82c7d4..3d37cdb560 100644 --- a/source/libs/sync/src/syncMain.c +++ b/source/libs/sync/src/syncMain.c @@ -977,9 +977,10 @@ static int32_t syncHbTimerStart(SSyncNode* pSyncNode, SSyncTimer* pSyncTimer) { pData->logicClock = pSyncTimer->logicClock; pData->execTime = tsNow + pSyncTimer->timerMS; - sTrace("vgId:%d, start hb timer, rid:%" PRId64 " addr:%" PRId64, pSyncNode->vgId, pData->rid, pData->destId.addr); + sTrace("vgId:%d, start hb timer, rid:%" PRId64 " addr:%" PRId64 " at %d", pSyncNode->vgId, pData->rid, + pData->destId.addr, pSyncTimer->timerMS); - TAOS_CHECK_RETURN(taosTmrReset(pSyncTimer->timerCb, pSyncTimer->timerMS / HEARTBEAT_TICK_NUM, (void*)(pData->rid), + TAOS_CHECK_RETURN(taosTmrReset(pSyncTimer->timerCb, pSyncTimer->timerMS, (void*)(pData->rid), syncEnv()->pTimerManager, &pSyncTimer->pTimer)); } else { code = TSDB_CODE_SYN_INTERNAL_ERROR; @@ -2711,7 +2712,8 @@ static void syncNodeEqPeerHeartbeatTimer(void* param, void* tmrId) { return; } - sTrace("vgId:%d, eq peer hb timer, rid:%" PRId64 " addr:%" PRId64, pSyncNode->vgId, hbDataRid, pData->destId.addr); + sTrace("vgId:%d, peer hb timer execution, rid:%" PRId64 " addr:%" PRId64, pSyncNode->vgId, hbDataRid, + pData->destId.addr); if (pSyncNode->totalReplicaNum > 1) { int64_t timerLogicClock = atomic_load_64(&pSyncTimer->logicClock); @@ -2753,13 +2755,12 @@ static void syncNodeEqPeerHeartbeatTimer(void* param, void* tmrId) { if (ret != 0) { sError("vgId:%d, failed to send heartbeat since %s", pSyncNode->vgId, tstrerror(ret)); } - } else { } if (syncIsInit()) { - // sTrace("vgId:%d, reset peer hb timer", pSyncNode->vgId); - if ((code = taosTmrReset(syncNodeEqPeerHeartbeatTimer, pSyncTimer->timerMS / HEARTBEAT_TICK_NUM, - (void*)hbDataRid, syncEnv()->pTimerManager, &pSyncTimer->pTimer)) != 0) { + sTrace("vgId:%d, reset peer hb timer at %d", pSyncNode->vgId, pSyncTimer->timerMS); + if ((code = taosTmrReset(syncNodeEqPeerHeartbeatTimer, pSyncTimer->timerMS, (void*)hbDataRid, + syncEnv()->pTimerManager, &pSyncTimer->pTimer)) != 0) { sError("vgId:%d, reset peer hb timer error, %s", pSyncNode->vgId, tstrerror(code)); syncNodeRelease(pSyncNode); syncHbTimerDataRelease(pData); From 71a762db7526de763b615044ff7ffb893bacb4a3 Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Wed, 16 Oct 2024 19:32:41 +0800 Subject: [PATCH 36/51] add more error handle --- source/dnode/vnode/src/meta/metaOpen.c | 77 ++++++++++++++++---------- source/libs/tdb/src/db/tdbBtree.c | 3 + 2 files changed, 51 insertions(+), 29 deletions(-) diff --git a/source/dnode/vnode/src/meta/metaOpen.c b/source/dnode/vnode/src/meta/metaOpen.c index ef36521879..8f2c0b5a5e 100644 --- a/source/dnode/vnode/src/meta/metaOpen.c +++ b/source/dnode/vnode/src/meta/metaOpen.c @@ -364,13 +364,7 @@ static int32_t metaGenerateNewMeta(SMeta **ppMeta) { } int32_t metaOpen(SVnode *pVnode, SMeta **ppMeta, int8_t rollback) { - int32_t code = metaOpenImpl(pVnode, ppMeta, VNODE_META_DIR, rollback); - if (code) { - return code; - 
} - if (generateNewMeta) { - // backup the old meta char path[TSDB_FILENAME_LEN] = {0}; char oldMetaPath[TSDB_FILENAME_LEN] = {0}; char newMetaPath[TSDB_FILENAME_LEN] = {0}; @@ -381,32 +375,57 @@ int32_t metaOpen(SVnode *pVnode, SMeta **ppMeta, int8_t rollback) { snprintf(newMetaPath, sizeof(newMetaPath) - 1, "%s%s%s", path, TD_DIRSEP, VNODE_META_TMP_DIR); snprintf(backupMetaPath, sizeof(backupMetaPath) - 1, "%s%s%s", path, TD_DIRSEP, VNODE_META_BACKUP_DIR); - if (taosCheckExistFile(backupMetaPath)) { - metaError("vgId:%d backup meta already exists, please check", TD_VID(pVnode)); + bool oldMetaExist = taosCheckExistFile(oldMetaPath); + bool newMetaExist = taosCheckExistFile(newMetaPath); + bool backupMetaExist = taosCheckExistFile(backupMetaPath); + + if ((!backupMetaExist && !oldMetaExist && newMetaExist) // case 2 + || (backupMetaExist && !oldMetaExist && !newMetaExist) // case 4 + || (backupMetaExist && oldMetaExist && newMetaExist) // case 8 + ) { + metaError("vgId:%d invalid meta state, please check", TD_VID(pVnode)); return TSDB_CODE_FAILED; + } else if ((backupMetaExist && oldMetaExist && !newMetaExist) // case 7 + || (!backupMetaExist && !oldMetaExist && !newMetaExist) // case 1 + ) { + return metaOpenImpl(pVnode, ppMeta, VNODE_META_DIR, rollback); + } else if (backupMetaExist && !oldMetaExist && newMetaExist) { + if (taosRenameFile(newMetaPath, oldMetaPath) != 0) { + metaError("vgId:%d failed to rename new meta to old meta, reason:%s", TD_VID(pVnode), tstrerror(terrno)); + return terrno; + } + return metaOpenImpl(pVnode, ppMeta, VNODE_META_DIR, rollback); + } else { + int32_t code = metaOpenImpl(pVnode, ppMeta, VNODE_META_DIR, rollback); + if (code) { + return code; + } + + code = metaGenerateNewMeta(ppMeta); + if (code) { + metaError("vgId:%d failed to generate new meta, reason:%s", TD_VID(pVnode), tstrerror(code)); + } + + metaClose(ppMeta); + if (taosRenameFile(oldMetaPath, backupMetaPath) != 0) { + metaError("vgId:%d failed to rename old meta to backup, reason:%s", TD_VID(pVnode), tstrerror(terrno)); + return terrno; + } + + // rename the new meta to old meta + if (taosRenameFile(newMetaPath, oldMetaPath) != 0) { + metaError("vgId:%d failed to rename new meta to old meta, reason:%s", TD_VID(pVnode), tstrerror(terrno)); + return terrno; + } + code = metaOpenImpl(pVnode, ppMeta, VNODE_META_DIR, false); + if (code) { + metaError("vgId:%d failed to open new meta, reason:%s", TD_VID(pVnode), tstrerror(code)); + return code; + } } - code = metaGenerateNewMeta(ppMeta); - if (code) { - metaError("vgId:%d failed to generate new meta, reason:%s", TD_VID(pVnode), tstrerror(code)); - } - - metaClose(ppMeta); - if (taosRenameFile(oldMetaPath, backupMetaPath) != 0) { - metaError("vgId:%d failed to rename old meta to backup, reason:%s", TD_VID(pVnode), tstrerror(terrno)); - return terrno; - } - - // rename the new meta to old meta - if (taosRenameFile(newMetaPath, oldMetaPath) != 0) { - metaError("vgId:%d failed to rename new meta to old meta, reason:%s", TD_VID(pVnode), tstrerror(terrno)); - return terrno; - } - code = metaOpenImpl(pVnode, ppMeta, VNODE_META_DIR, false); - if (code) { - metaError("vgId:%d failed to open new meta, reason:%s", TD_VID(pVnode), tstrerror(code)); - return code; - } + } else { + return metaOpenImpl(pVnode, ppMeta, VNODE_META_DIR, rollback); } return TSDB_CODE_SUCCESS; diff --git a/source/libs/tdb/src/db/tdbBtree.c b/source/libs/tdb/src/db/tdbBtree.c index c688a6cc6a..2333a4a6a2 100644 --- a/source/libs/tdb/src/db/tdbBtree.c +++ 
b/source/libs/tdb/src/db/tdbBtree.c @@ -1446,6 +1446,9 @@ static int tdbBtreeDecodePayload(SPage *pPage, const SCell *pCell, int nHeader, return ret; } ofpCell = tdbPageGetCell(ofp, 0); + if (ofpCell == NULL) { + return TSDB_CODE_INVALID_DATA_FMT; + } if (nLeft <= ofp->maxLocal - sizeof(SPgno)) { bytes = nLeft; From d63040f2f77e48cd67593e29059009f36b731c34 Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Wed, 16 Oct 2024 19:41:29 +0800 Subject: [PATCH 37/51] TS-5459: add test case --- tests/system-test/2-query/slow_query_basic.py | 66 +++++++++++++++++++ tests/system-test/win-test-file | 1 + 2 files changed, 67 insertions(+) create mode 100644 tests/system-test/2-query/slow_query_basic.py diff --git a/tests/system-test/2-query/slow_query_basic.py b/tests/system-test/2-query/slow_query_basic.py new file mode 100644 index 0000000000..10595028a7 --- /dev/null +++ b/tests/system-test/2-query/slow_query_basic.py @@ -0,0 +1,66 @@ +import random +import string +from util.log import * +from util.cases import * +from util.sql import * +from util.common import * +from util.sqlset import * +import numpy as np + + +class TDTestCase: + updatecfgDict = {'slowLogThresholdTest': ''} + updatecfgDict["slowLogThresholdTest"] = 0 + + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + def getPath(self, tool="taosBenchmark"): + if (platform.system().lower() == 'windows'): + tool = tool + ".exe" + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + paths = [] + for root, dirs, files in os.walk(projPath): + if ((tool) in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + paths.append(os.path.join(root, tool)) + break + if (len(paths) == 0): + tdLog.exit("taosBenchmark not found!") + return + else: + tdLog.info("taosBenchmark found in %s" % paths[0]) + return paths[0] + + def taosBenchmark(self, param): + binPath = self.getPath() + cmd = f"{binPath} {param}" + tdLog.info(cmd) + os.system(cmd) + + def testSlowQuery(self): + self.taosBenchmark(" -d db -t 2 -v 2 -n 1000000 -y") + sql = "select count(*) from db.meters" + for i in range(10): + tdSql.query(sql) + tdSql.checkData(0, 0, 2 * 1000000) + + def run(self): + self.testSlowQuery() + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/system-test/win-test-file b/tests/system-test/win-test-file index e86047bca8..c3047efdd7 100644 --- a/tests/system-test/win-test-file +++ b/tests/system-test/win-test-file @@ -925,3 +925,4 @@ python3 ./test.py -f 99-TDcase/TD-20582.py python3 ./test.py -f 5-taos-tools/taosbenchmark/insertMix.py -N 3 python3 ./test.py -f 5-taos-tools/taosbenchmark/stt.py -N 3 python3 ./test.py -f eco-system/meta/database/keep_time_offset.py +python3 ./test.py -f 2-query/slow_query_basic.py From dd05353b74e2b7b84fdaf42adaacb164b53a0fd2 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 16 Oct 2024 22:07:37 +0800 Subject: [PATCH 38/51] refactor: do some internal refactor. 
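Two changes are visible here: streamMetaAcquireOneTask now returns the post-increment reference count, so call sites either use it for the log line or bind it to an explicit unusedRetRef instead of silently discarding a void, and tFreeStreamTask clears each handle right after closing it, so a repeated or partially interleaved teardown cannot close the same queue or WAL reader twice. A minimal, self-contained sketch of that close-and-NULL idiom; the types are illustrative, not the real stream structs:

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct { void *inputq; void *walReader; } TaskSketch;

    static void closeHandle(void **h) {
      if (*h != NULL) {
        free(*h);
        *h = NULL;  /* clearing the pointer makes a second close a no-op */
      }
    }

    int main(void) {
      TaskSketch t = { malloc(16), malloc(32) };
      closeHandle(&t.inputq);
      closeHandle(&t.inputq);  /* safe: already NULL */
      closeHandle(&t.walReader);
      printf("inputq=%p walReader=%p\n", t.inputq, t.walReader);
      return 0;
    }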
--- include/libs/stream/tstream.h | 2 +- source/dnode/vnode/src/tqCommon/tqCommon.c | 2 +- source/libs/stream/src/streamCheckStatus.c | 2 +- source/libs/stream/src/streamCheckpoint.c | 3 ++- source/libs/stream/src/streamDispatch.c | 2 +- source/libs/stream/src/streamMeta.c | 5 +++-- source/libs/stream/src/streamSched.c | 4 ++-- source/libs/stream/src/streamTask.c | 3 +++ 8 files changed, 14 insertions(+), 9 deletions(-) diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index e6d750468e..58c1707e1f 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -754,7 +754,7 @@ int32_t streamMetaGetNumOfTasks(SStreamMeta* pMeta); int32_t streamMetaAcquireTaskNoLock(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, SStreamTask** pTask); int32_t streamMetaAcquireTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, SStreamTask** pTask); void streamMetaReleaseTask(SStreamMeta* pMeta, SStreamTask* pTask); -void streamMetaAcquireOneTask(SStreamTask* pTask); +int32_t streamMetaAcquireOneTask(SStreamTask* pTask); void streamMetaClear(SStreamMeta* pMeta); void streamMetaInitBackend(SStreamMeta* pMeta); int32_t streamMetaCommit(SStreamMeta* pMeta); diff --git a/source/dnode/vnode/src/tqCommon/tqCommon.c b/source/dnode/vnode/src/tqCommon/tqCommon.c index 3871011407..a00e92997c 100644 --- a/source/dnode/vnode/src/tqCommon/tqCommon.c +++ b/source/dnode/vnode/src/tqCommon/tqCommon.c @@ -692,7 +692,7 @@ int32_t tqStreamTaskProcessDropReq(SStreamMeta* pMeta, char* msg, int32_t msgLen STaskId id = {.streamId = pReq->streamId, .taskId = pReq->taskId}; SStreamTask** ppTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &id, sizeof(id)); if ((ppTask != NULL) && ((*ppTask) != NULL)) { - streamMetaAcquireOneTask(*ppTask); + int32_t unusedRetRef = streamMetaAcquireOneTask(*ppTask); SStreamTask* pTask = *ppTask; if (HAS_RELATED_FILLHISTORY_TASK(pTask)) { diff --git a/source/libs/stream/src/streamCheckStatus.c b/source/libs/stream/src/streamCheckStatus.c index 75bcc326b3..c1c54b3c0b 100644 --- a/source/libs/stream/src/streamCheckStatus.c +++ b/source/libs/stream/src/streamCheckStatus.c @@ -299,7 +299,7 @@ void streamTaskStartMonitorCheckRsp(SStreamTask* pTask) { return; } - /*SStreamTask* p = */ streamMetaAcquireOneTask(pTask); // add task ref here + int32_t unusedRetRef = streamMetaAcquireOneTask(pTask); // add task ref here streamTaskInitTaskCheckInfo(pInfo, &pTask->outputInfo, taosGetTimestampMs()); int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1); diff --git a/source/libs/stream/src/streamCheckpoint.c b/source/libs/stream/src/streamCheckpoint.c index e44bca123b..be914d9746 100644 --- a/source/libs/stream/src/streamCheckpoint.c +++ b/source/libs/stream/src/streamCheckpoint.c @@ -347,7 +347,8 @@ int32_t streamProcessCheckpointTriggerBlock(SStreamTask* pTask, SStreamDataBlock if (old == 0) { int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1); stDebug("s-task:%s start checkpoint-trigger monitor in 10s, ref:%d ", pTask->id.idStr, ref); - streamMetaAcquireOneTask(pTask); + + int32_t unusedRetRef = streamMetaAcquireOneTask(pTask); streamTmrStart(checkpointTriggerMonitorFn, 200, pTask, streamTimer, &pTmrInfo->tmrHandle, vgId, "trigger-recv-monitor"); pTmrInfo->launchChkptId = pActiveInfo->activeId; diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index 133663ac28..62d60ff664 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -1162,7 +1162,7 
@@ int32_t streamTaskSendCheckpointReadyMsg(SStreamTask* pTask) { if (old == 0) { int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1); stDebug("s-task:%s start checkpoint-ready monitor in 10s, ref:%d ", pTask->id.idStr, ref); - streamMetaAcquireOneTask(pTask); + int32_t unusedRetRef = streamMetaAcquireOneTask(pTask); streamTmrStart(chkptReadyMsgSendMonitorFn, 200, pTask, streamTimer, &pTmrInfo->tmrHandle, vgId, "chkpt-ready-monitor"); diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index 29152c6205..7e9b60b61a 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -753,9 +753,10 @@ int32_t streamMetaAcquireTask(SStreamMeta* pMeta, int64_t streamId, int32_t task return code; } -void streamMetaAcquireOneTask(SStreamTask* pTask) { +int32_t streamMetaAcquireOneTask(SStreamTask* pTask) { int32_t ref = atomic_add_fetch_32(&pTask->refCnt, 1); stTrace("s-task:%s acquire task, ref:%d", pTask->id.idStr, ref); + return ref; } void streamMetaReleaseTask(SStreamMeta* UNUSED_PARAM(pMeta), SStreamTask* pTask) { @@ -866,7 +867,7 @@ int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t t ppTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &id, sizeof(id)); if (ppTask) { pTask = *ppTask; - // it is an fill-history task, remove the related stream task's id that points to it + // it is a fill-history task, remove the related stream task's id that points to it if (pTask->info.fillHistory == 0) { int32_t ret = atomic_sub_fetch_32(&pMeta->numOfStreamTasks, 1); } diff --git a/source/libs/stream/src/streamSched.c b/source/libs/stream/src/streamSched.c index 095a5af6d4..cdaa603e38 100644 --- a/source/libs/stream/src/streamSched.c +++ b/source/libs/stream/src/streamSched.c @@ -22,7 +22,7 @@ static void streamTaskSchedHelper(void* param, void* tmrId); void streamSetupScheduleTrigger(SStreamTask* pTask) { int64_t delaySchema = pTask->info.delaySchedParam; if (delaySchema != 0 && pTask->info.fillHistory == 0) { - int32_t ref = atomic_add_fetch_32(&pTask->refCnt, 1); + int32_t ref = streamMetaAcquireOneTask(pTask); stDebug("s-task:%s setup scheduler trigger, ref:%d delay:%" PRId64 " ms", pTask->id.idStr, ref, pTask->info.delaySchedParam); @@ -80,7 +80,7 @@ void streamTaskResumeInFuture(SStreamTask* pTask) { pTask->status.schedIdleTime, ref); // add one ref count for task - streamMetaAcquireOneTask(pTask); + int32_t unusedRetRef = streamMetaAcquireOneTask(pTask); streamTmrStart(streamTaskResumeHelper, pTask->status.schedIdleTime, pTask, streamTimer, &pTask->schedInfo.pIdleTimer, pTask->pMeta->vgId, "resume-task-tmr"); } diff --git a/source/libs/stream/src/streamTask.c b/source/libs/stream/src/streamTask.c index 71a2ed3e4a..727701e03e 100644 --- a/source/libs/stream/src/streamTask.c +++ b/source/libs/stream/src/streamTask.c @@ -258,10 +258,12 @@ void tFreeStreamTask(SStreamTask* pTask) { if (pTask->inputq.queue) { streamQueueClose(pTask->inputq.queue, pTask->id.taskId); + pTask->inputq.queue = NULL; } if (pTask->outputq.queue) { streamQueueClose(pTask->outputq.queue, pTask->id.taskId); + pTask->outputq.queue = NULL; } if (pTask->exec.qmsg) { @@ -275,6 +277,7 @@ void tFreeStreamTask(SStreamTask* pTask) { if (pTask->exec.pWalReader != NULL) { walCloseReader(pTask->exec.pWalReader); + pTask->exec.pWalReader = NULL; } streamClearChkptReadyMsg(pTask->chkInfo.pActiveInfo); From 9c7b925613367bc795a30169e48ff6aa2c84014f Mon Sep 17 00:00:00 2001 From: Jinqing Kuang Date: Wed, 16 Oct 2024 18:20:19 +0800 Subject: 
[PATCH 39/51] fix(query)[TD-30667]. Check hardware support for AVX instructions Modify the CMake script to check for hardware support of AVX instructions directly, instead of relying on compiler flags for the verification. --- cmake/cmake.define | 41 +++++++++++++++++++++++++++++++++++++++-- 1 file changed, 39 insertions(+), 2 deletions(-) diff --git a/cmake/cmake.define b/cmake/cmake.define index 9fae397363..7bcd400fa6 100644 --- a/cmake/cmake.define +++ b/cmake/cmake.define @@ -169,11 +169,48 @@ ELSE () SET(COMPILER_SUPPORT_AVX512VL false) ELSE() CHECK_C_COMPILER_FLAG("-mfma" COMPILER_SUPPORT_FMA) - CHECK_C_COMPILER_FLAG("-mavx" COMPILER_SUPPORT_AVX) - CHECK_C_COMPILER_FLAG("-mavx2" COMPILER_SUPPORT_AVX2) CHECK_C_COMPILER_FLAG("-mavx512f" COMPILER_SUPPORT_AVX512F) CHECK_C_COMPILER_FLAG("-mavx512vbmi" COMPILER_SUPPORT_AVX512BMI) CHECK_C_COMPILER_FLAG("-mavx512vl" COMPILER_SUPPORT_AVX512VL) + + INCLUDE(CheckCSourceRuns) + SET(CMAKE_REQUIRED_FLAGS "-mavx") + check_c_source_runs(" + #include <immintrin.h> + int main() { + __m256d a, b, c; + double buf[4] = {0}; + a = _mm256_loadu_pd(buf); + b = _mm256_loadu_pd(buf); + c = _mm256_add_pd(a, b); + _mm256_storeu_pd(buf, c); + for (int i = 0; i < sizeof(buf) / sizeof(buf[0]); ++i) { + if (buf[i] != 0) { + return 1; + } + } + return 0; + } + " COMPILER_SUPPORT_AVX) + + SET(CMAKE_REQUIRED_FLAGS "-mavx2") + check_c_source_runs(" + #include <immintrin.h> + int main() { + __m256i a, b, c; + int buf[8] = {0}; + a = _mm256_loadu_si256((__m256i *)buf); + b = _mm256_loadu_si256((__m256i *)buf); + c = _mm256_and_si256(a, b); + _mm256_storeu_si256((__m256i *)buf, c); + for (int i = 0; i < sizeof(buf) / sizeof(buf[0]); ++i) { + if (buf[i] != 0) { + return 1; + } + } + return 0; + } + " COMPILER_SUPPORT_AVX2) ENDIF() IF (COMPILER_SUPPORT_SSE42) From 9bc38af7edf5dab39006537f034dd4338c91ab0d Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Thu, 17 Oct 2024 09:42:36 +0800 Subject: [PATCH 40/51] fix:[TD-32585] remove cleanup of app info in taos_cleanup --- source/client/src/clientMain.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c index a35c7c7a4c..1a66266000 100644 --- a/source/client/src/clientMain.c +++ b/source/client/src/clientMain.c @@ -84,7 +84,7 @@ void taos_cleanup(void) { taosCloseRef(id); nodesDestroyAllocatorSet(); - cleanupAppInfo(); +// cleanupAppInfo(); rpcCleanup(); tscDebug("rpc cleanup"); From 9aaab9c3b0b8b70dd393df4590482f3a22f11f07 Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao@163.com> Date: Thu, 17 Oct 2024 10:39:59 +0800 Subject: [PATCH 41/51] feat(stream):add max delay check --- source/libs/parser/src/parTranslater.c | 13 +++++++++++++ source/libs/stream/src/streamState.c | 1 + tests/script/tsim/stream/basic2.sim | 13 +++++++++++++ 3 files changed, 27 insertions(+) diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 5c6f619397..4c9c559457 100755 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -10609,6 +10609,19 @@ static int32_t checkStreamQuery(STranslateContext* pCxt, SCreateStreamStmt* pStm "Non window query only support scalar function, aggregate function is not allowed"); } + if (NULL != pStmt->pOptions->pDelay) { + SValueNode* pVal = (SValueNode*)pStmt->pOptions->pDelay; + int64_t minDelay = 0; + char* str = "5s"; + if (DEAL_RES_ERROR != translateValue(pCxt, pVal) && TSDB_CODE_SUCCESS == + parseNatualDuration(str, strlen(str), &minDelay, &pVal->unit, pVal->node.resType.precision, false))
{ + if (pVal->datum.i < minDelay) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, + "stream max delay must be bigger than 5 seconds"); + } + } + } + + return TSDB_CODE_SUCCESS; } diff --git a/source/libs/stream/src/streamState.c b/source/libs/stream/src/streamState.c index 1994c882aa..45a36bd451 100644 --- a/source/libs/stream/src/streamState.c +++ b/source/libs/stream/src/streamState.c @@ -477,6 +477,7 @@ int32_t streamStateGetParName(SStreamState* pState, int64_t groupId, void** pVal if (!pStr) { if (onlyCache && tSimpleHashGetSize(pState->parNameMap) < MAX_TABLE_NAME_NUM) { (*pWinCode) = TSDB_CODE_FAILED; + goto _end; } (*pWinCode) = streamStateGetParName_rocksdb(pState, groupId, pVal); if ((*pWinCode) == TSDB_CODE_SUCCESS && tSimpleHashGetSize(pState->parNameMap) < MAX_TABLE_NAME_NUM) { diff --git a/tests/script/tsim/stream/basic2.sim b/tests/script/tsim/stream/basic2.sim index ad655f2d16..2bef1c5c4c 100644 --- a/tests/script/tsim/stream/basic2.sim +++ b/tests/script/tsim/stream/basic2.sim @@ -133,4 +133,17 @@ if $data13 != -111 then goto loop1 endi +print step 2==================== + +sql create database test vgroups 1 ; +sql use test; +sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); + +sql_error create stream streams1 trigger max_delay 4000a ignore update 0 ignore expired 0 into streamtST1 as select _wstart, count(*) from st interval(5s); +sql_error create stream streams2 trigger max_delay 4s ignore update 0 ignore expired 0 into streamtST2 as select _wstart, count(*) from st interval(5s); +sql create stream streams3 trigger max_delay 5000a ignore update 0 ignore expired 0 into streamtST3 as select _wstart, count(*) from st interval(5s); +sql create stream streams4 trigger max_delay 5s ignore update 0 ignore expired 0 into streamtST4 as select _wstart, count(*) from st interval(5s); + system sh/exec.sh -n dnode1 -s stop -x SIGINT From 8930252f97cab2342d1342c0f18af5a914c020fb Mon Sep 17 00:00:00 2001 From: xsren <285808407@qq.com> Date: Thu, 17 Oct 2024 11:22:24 +0800 Subject: [PATCH 42/51] fix: tag field snprintf length error --- source/libs/command/src/command.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/command/src/command.c b/source/libs/command/src/command.c index 27a43f7523..95c73763bf 100644 --- a/source/libs/command/src/command.c +++ b/source/libs/command/src/command.c @@ -551,7 +551,7 @@ void appendTagFields(char* buf, int32_t* len, STableCfg* pCfg) { (int32_t)((pSchema->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE)); } - *len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, sizeof(type) - (VARSTR_HEADER_SIZE + *len), "%s`%s` %s", + *len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), "%s`%s` %s", ((i > 0) ? ", " : ""), pSchema->name, type); } } From c8e8cb06976734e7894a9dddd5ed9c811d5bd7e2 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 17 Oct 2024 11:48:00 +0800 Subject: [PATCH 43/51] refactor: do some internal refactor.
--- include/libs/stream/tstream.h | 2 +- source/dnode/vnode/src/tq/tq.c | 6 +- source/dnode/vnode/src/tq/tqSink.c | 528 +++++++++++++++------------- source/libs/stream/src/streamTask.c | 2 +- 4 files changed, 296 insertions(+), 242 deletions(-) diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index 58c1707e1f..a189cee0bb 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -236,7 +236,7 @@ typedef struct { void* vnode; // not available to encoder and decoder FTbSink* tbSinkFunc; STSchema* pTSchema; - SSHashObj* pTblInfo; + SSHashObj* pTbInfo; } STaskSinkTb; typedef struct { diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index a37a9787c9..b75baea08d 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -746,13 +746,13 @@ int32_t tqBuildStreamTask(void* pTqObj, SStreamTask* pTask, int64_t nextProcessV return terrno; } - pOutputInfo->tbSink.pTblInfo = tSimpleHashInit(10240, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT)); - if (pOutputInfo->tbSink.pTblInfo == NULL) { + pOutputInfo->tbSink.pTbInfo = tSimpleHashInit(10240, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT)); + if (pOutputInfo->tbSink.pTbInfo == NULL) { tqError("vgId:%d failed init sink tableInfo, code:%s", vgId, tstrerror(terrno)); return terrno; } - tSimpleHashSetFreeFp(pOutputInfo->tbSink.pTblInfo, freePtr); + tSimpleHashSetFreeFp(pOutputInfo->tbSink.pTbInfo, freePtr); } if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) { diff --git a/source/dnode/vnode/src/tq/tqSink.c b/source/dnode/vnode/src/tq/tqSink.c index 6daa9213aa..be41f7e99e 100644 --- a/source/dnode/vnode/src/tq/tqSink.c +++ b/source/dnode/vnode/src/tq/tqSink.c @@ -18,6 +18,8 @@ #include "tmsg.h" #include "tq.h" +#define IS_NEW_SUBTB_RULE(_t) (((_t)->ver >= SSTREAM_TASK_SUBTABLE_CHANGED_VER) && ((_t)->subtableWithoutMd5 != 1)) + typedef struct STableSinkInfo { uint64_t uid; tstr name; @@ -35,16 +37,22 @@ static int32_t doConvertRows(SSubmitTbData* pTableData, const STSchema* pTSchema int64_t earlyTs, const char* id); static int32_t doWaitForDstTableCreated(SVnode* pVnode, SStreamTask* pTask, STableSinkInfo* pTableSinkInfo, const char* dstTableName, int64_t* uid); -static int32_t doPutIntoCache(SSHashObj* pSinkTableMap, STableSinkInfo* pTableSinkInfo, uint64_t groupId, - const char* id); -static int32_t doRemoveFromCache(SSHashObj* pSinkTableMap, uint64_t groupId, const char* id); + static bool isValidDstChildTable(SMetaReader* pReader, int32_t vgId, const char* ctbName, int64_t suid); static int32_t initCreateTableMsg(SVCreateTbReq* pCreateTableReq, uint64_t suid, const char* stbFullName, int32_t numOfTags); static int32_t createDefaultTagColName(SArray** pColNameList); -static int32_t setCreateTableMsgTableName(SVCreateTbReq* pCreateTableReq, SSDataBlock* pDataBlock, const char* stbFullName, - int64_t gid, bool newSubTableRule); -static int32_t doCreateSinkInfo(const char* pDstTableName, STableSinkInfo** pInfo); +static int32_t setCreateTableMsgTableName(SVCreateTbReq* pCreateTableReq, SSDataBlock* pDataBlock, + const char* stbFullName, int64_t gid, bool newSubTableRule); +static int32_t doCreateSinkTableInfo(const char* pDstTableName, STableSinkInfo** pInfo); +static int32_t doPutSinkTableInfoIntoCache(SSHashObj* pSinkTableMap, STableSinkInfo* pTableSinkInfo, uint64_t groupId, + const char* id); +static bool doGetSinkTableInfoFromCache(SSHashObj* pTableInfoMap, uint64_t groupId, STableSinkInfo** pInfo); +static int32_t 
doRemoveSinkTableInfoInCache(SSHashObj* pSinkTableMap, uint64_t groupId, const char* id); +static int32_t checkTagSchema(SStreamTask* pTask, SVnode* pVnode); +static void reubuildAndSendMultiResBlock(SStreamTask* pTask, const SArray* pBlocks, SVnode* pVnode, int64_t earlyTs); +static int32_t handleResultBlockMsg(SStreamTask* pTask, SSDataBlock* pDataBlock, int32_t index, SVnode* pVnode, + int64_t earlyTs); int32_t tqBuildDeleteReq(STQ* pTq, const char* stbFullName, const SSDataBlock* pDataBlock, SBatchDeleteReq* deleteReq, const char* pIdStr, bool newSubTableRule) { @@ -81,7 +89,8 @@ int32_t tqBuildDeleteReq(STQ* pTq, const char* stbFullName, const SSDataBlock* p memcpy(name, varDataVal(varTbName), varDataLen(varTbName)); name[varDataLen(varTbName)] = '\0'; - if (newSubTableRule && !isAutoTableName(name) && !alreadyAddGroupId(name, groupId) && groupId != 0 && stbFullName) { + if (newSubTableRule && !isAutoTableName(name) && !alreadyAddGroupId(name, groupId) && groupId != 0 && + stbFullName) { int32_t code = buildCtbNameAddGroupId(stbFullName, name, groupId, cap); if (code != TSDB_CODE_SUCCESS) { return code; @@ -161,16 +170,6 @@ end: return ret; } -static bool tqGetTableInfo(SSHashObj* pTableInfoMap, uint64_t groupId, STableSinkInfo** pInfo) { - void* pVal = tSimpleHashGet(pTableInfoMap, &groupId, sizeof(uint64_t)); - if (pVal) { - *pInfo = *(STableSinkInfo**)pVal; - return true; - } - - return false; -} - static int32_t tqPutReqToQueue(SVnode* pVnode, SVCreateTbBatchReq* pReqs) { void* buf = NULL; int32_t tlen = 0; @@ -201,7 +200,7 @@ int32_t initCreateTableMsg(SVCreateTbReq* pCreateTableReq, uint64_t suid, const int32_t code = tNameFromString(&name, stbFullName, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE); if (code == 0) { pCreateTableReq->ctb.stbName = taosStrdup((char*)tNameGetTableName(&name)); - if (pCreateTableReq->ctb.stbName == NULL) { // ignore this error code + if (pCreateTableReq->ctb.stbName == NULL) { // ignore this error code tqError("failed to duplicate the stb name:%s, failed to init create-table msg and create req table", stbFullName); code = terrno; } @@ -231,7 +230,7 @@ int32_t createDefaultTagColName(SArray** pColNameList) { } int32_t setCreateTableMsgTableName(SVCreateTbReq* pCreateTableReq, SSDataBlock* pDataBlock, const char* stbFullName, - int64_t gid, bool newSubTableRule) { + int64_t gid, bool newSubTableRule) { if (pDataBlock->info.parTbName[0]) { if (newSubTableRule && !isAutoTableName(pDataBlock->info.parTbName) && !alreadyAddGroupId(pDataBlock->info.parTbName, gid) && gid != 0 && stbFullName) { @@ -245,18 +244,17 @@ int32_t setCreateTableMsgTableName(SVCreateTbReq* pCreateTableReq, SSDataBlock* if (code != TSDB_CODE_SUCCESS) { return code; } -// tqDebug("gen name from:%s", pDataBlock->info.parTbName); + // tqDebug("gen name from:%s", pDataBlock->info.parTbName); } else { pCreateTableReq->name = taosStrdup(pDataBlock->info.parTbName); if (pCreateTableReq->name == NULL) { return terrno; } -// tqDebug("copy name:%s", pDataBlock->info.parTbName); + // tqDebug("copy name:%s", pDataBlock->info.parTbName); } } else { int32_t code = buildCtbNameByGroupId(stbFullName, gid, &pCreateTableReq->name); return code; -// tqDebug("gen name from stbFullName:%s gid:%"PRId64, stbFullName, gid); } return 0; @@ -264,16 +262,20 @@ int32_t setCreateTableMsgTableName(SVCreateTbReq* pCreateTableReq, SSDataBlock* static int32_t doBuildAndSendCreateTableMsg(SVnode* pVnode, char* stbFullName, SSDataBlock* pDataBlock, SStreamTask* pTask, int64_t suid) { - STSchema* pTSchema = 
pTask->outputInfo.tbSink.pTSchema; - int32_t rows = pDataBlock->info.rows; - SArray* tagArray = taosArrayInit(4, sizeof(STagVal)); - const char* id = pTask->id.idStr; - int32_t vgId = pTask->pMeta->vgId; - int32_t code = 0; + STSchema* pTSchema = pTask->outputInfo.tbSink.pTSchema; + int32_t rows = pDataBlock->info.rows; + SArray* tagArray = NULL; + const char* id = pTask->id.idStr; + int32_t vgId = pTask->pMeta->vgId; + int32_t code = 0; + STableSinkInfo* pInfo = NULL; + SVCreateTbBatchReq reqs = {0}; + SArray* crTblArray = NULL; tqDebug("s-task:%s build create %d table(s) msg", id, rows); - SVCreateTbBatchReq reqs = {0}; - SArray* crTblArray = reqs.pArray = taosArrayInit(1, sizeof(SVCreateTbReq)); + + tagArray = taosArrayInit(4, sizeof(STagVal)); + crTblArray = reqs.pArray = taosArrayInit(1, sizeof(SVCreateTbReq)); if ((NULL == reqs.pArray) || (tagArray == NULL)) { tqError("s-task:%s failed to init create table msg, code:%s", id, tstrerror(terrno)); code = terrno; @@ -291,6 +293,7 @@ static int32_t doBuildAndSendCreateTableMsg(SVnode* pVnode, char* stbFullName, S tqError("s-task:%s vgId:%d failed to init create table msg", id, vgId); continue; } + taosArrayClear(tagArray); if (size == 2) { @@ -356,8 +359,7 @@ static int32_t doBuildAndSendCreateTableMsg(SVnode* pVnode, char* stbFullName, S } } - code = setCreateTableMsgTableName(pCreateTbReq, pDataBlock, stbFullName, gid, - pTask->ver >= SSTREAM_TASK_SUBTABLE_CHANGED_VER && pTask->subtableWithoutMd5 != 1); + code = setCreateTableMsgTableName(pCreateTbReq, pDataBlock, stbFullName, gid, IS_NEW_SUBTB_RULE(pTask)); if (code) { goto _end; } @@ -368,16 +370,15 @@ static int32_t doBuildAndSendCreateTableMsg(SVnode* pVnode, char* stbFullName, S goto _end; } - STableSinkInfo* pInfo = NULL; - bool alreadyCached = tqGetTableInfo(pTask->outputInfo.tbSink.pTblInfo, gid, &pInfo); + bool alreadyCached = doGetSinkTableInfoFromCache(pTask->outputInfo.tbSink.pTbInfo, gid, &pInfo); if (!alreadyCached) { - code = doCreateSinkInfo(pCreateTbReq->name, &pInfo); + code = doCreateSinkTableInfo(pCreateTbReq->name, &pInfo); if (code) { tqError("vgId:%d failed to create sink tableInfo for table:%s, s-task:%s", vgId, pCreateTbReq->name, id); continue; } - code = doPutIntoCache(pTask->outputInfo.tbSink.pTblInfo, pInfo, gid, id); + code = doPutSinkTableInfoIntoCache(pTask->outputInfo.tbSink.pTbInfo, pInfo, gid, id); if (code) { tqError("vgId:%d failed to put sink tableInfo:%s into cache, s-task:%s", vgId, pCreateTbReq->name, id); } @@ -465,45 +466,45 @@ int32_t doMergeExistedRows(SSubmitTbData* pExisted, const SSubmitTbData* pNew, c k += 1; } else { - // check for the existance of primary key - if (pNewRow->numOfPKs == 0) { + // check for the existance of primary key + if (pNewRow->numOfPKs == 0) { + void* p = taosArrayPush(pFinal, &pNewRow); + if (p == NULL) { + return terrno; + } + + k += 1; + j += 1; + tRowDestroy(pOldRow); + } else { + numOfPk = pNewRow->numOfPKs; + + SRowKey kNew, kOld; + tRowGetKey(pNewRow, &kNew); + tRowGetKey(pOldRow, &kOld); + + int32_t ret = tRowKeyCompare(&kNew, &kOld); + if (ret <= 0) { void* p = taosArrayPush(pFinal, &pNewRow); if (p == NULL) { return terrno; } - k += 1; j += 1; - tRowDestroy(pOldRow); - } else { - numOfPk = pNewRow->numOfPKs; - - SRowKey kNew, kOld; - tRowGetKey(pNewRow, &kNew); - tRowGetKey(pOldRow, &kOld); - - int32_t ret = tRowKeyCompare(&kNew, &kOld); - if (ret <= 0) { - void* p = taosArrayPush(pFinal, &pNewRow); - if (p == NULL) { - return terrno; - } - - j += 1; - - if (ret == 0) { - k += 1; - tRowDestroy(pOldRow); - 
} - } else { - void* p = taosArrayPush(pFinal, &pOldRow); - if (p == NULL) { - return terrno; - } + if (ret == 0) { k += 1; + tRowDestroy(pOldRow); } + } else { + void* p = taosArrayPush(pFinal, &pOldRow); + if (p == NULL) { + return terrno; + } + + k += 1; } + } } } @@ -527,8 +528,8 @@ int32_t doMergeExistedRows(SSubmitTbData* pExisted, const SSubmitTbData* pNew, c taosArrayDestroy(pExisted->aRowP); pExisted->aRowP = pFinal; - tqTrace("s-task:%s rows merged, final rows:%d, pk:%d uid:%" PRId64 ", existed auto-create table:%d, new-block:%d", - id, (int32_t)taosArrayGetSize(pFinal), numOfPk, pExisted->uid, (pExisted->pCreateTbReq != NULL), + tqTrace("s-task:%s rows merged, final rows:%d, pk:%d uid:%" PRId64 ", existed auto-create table:%d, new-block:%d", id, + (int32_t)taosArrayGetSize(pFinal), numOfPk, pExisted->uid, (pExisted->pCreateTbReq != NULL), (pNew->pCreateTbReq != NULL)); tdDestroySVCreateTbReq(pNew->pCreateTbReq); @@ -727,7 +728,7 @@ int32_t doConvertRows(SSubmitTbData* pTableData, const STSchema* pTSchema, SSDat dataIndex++; } else { void* colData = colDataGetData(pColData, j); - if (IS_VAR_DATA_TYPE(pCol->type)) { // address copy, no value + if (IS_VAR_DATA_TYPE(pCol->type)) { // address copy, no value SValue sv = (SValue){.type = pCol->type, .nData = varDataLen(colData), .pData = (uint8_t*)varDataVal(colData)}; SColVal cv = COL_VAL_VALUE(pCol->colId, sv); @@ -806,7 +807,7 @@ int32_t doWaitForDstTableCreated(SVnode* pVnode, SStreamTask* pTask, STableSinkI return TSDB_CODE_SUCCESS; } -int32_t doCreateSinkInfo(const char* pDstTableName, STableSinkInfo** pInfo) { +int32_t doCreateSinkTableInfo(const char* pDstTableName, STableSinkInfo** pInfo) { int32_t nameLen = strlen(pDstTableName); (*pInfo) = taosMemoryCalloc(1, sizeof(STableSinkInfo) + nameLen + 1); if (*pInfo == NULL) { @@ -830,7 +831,7 @@ int32_t setDstTableDataUid(SVnode* pVnode, SStreamTask* pTask, SSDataBlock* pDat STableSinkInfo* pTableSinkInfo = NULL; int32_t code = 0; - bool alreadyCached = tqGetTableInfo(pTask->outputInfo.tbSink.pTblInfo, groupId, &pTableSinkInfo); + bool alreadyCached = doGetSinkTableInfoFromCache(pTask->outputInfo.tbSink.pTbInfo, groupId, &pTableSinkInfo); if (alreadyCached) { if (dstTableName[0] == 0) { // data block does not set the destination table name @@ -870,7 +871,7 @@ int32_t setDstTableDataUid(SVnode* pVnode, SStreamTask* pTask, SSDataBlock* pDat } } - code = doCreateSinkInfo(dstTableName, &pTableSinkInfo); + code = doCreateSinkTableInfo(dstTableName, &pTableSinkInfo); if (code == 0) { tqDebug("s-task:%s build new sinkTableInfo to add cache, dstTable:%s", id, dstTableName); } else { @@ -906,14 +907,14 @@ int32_t setDstTableDataUid(SVnode* pVnode, SStreamTask* pTask, SSDataBlock* pDat SArray* pTagArray = taosArrayInit(pTSchema->numOfCols + 1, sizeof(STagVal)); if (pTagArray == NULL) { + tqError("s-task:%s failed to build auto create submit msg in sink, vgId:%d, due to %s", id, vgId, + tstrerror(terrno)); return terrno; } pTableData->flags = SUBMIT_REQ_AUTO_CREATE_TABLE; - code = - buildAutoCreateTableReq(stbFullName, suid, pTSchema->numOfCols + 1, pDataBlock, pTagArray, - (pTask->ver >= SSTREAM_TASK_SUBTABLE_CHANGED_VER && pTask->subtableWithoutMd5 != 1), - &pTableData->pCreateTbReq); + code = buildAutoCreateTableReq(stbFullName, suid, pTSchema->numOfCols + 1, pDataBlock, pTagArray, + IS_NEW_SUBTB_RULE(pTask), &pTableData->pCreateTbReq); taosArrayDestroy(pTagArray); if (code) { @@ -923,12 +924,12 @@ int32_t setDstTableDataUid(SVnode* pVnode, SStreamTask* pTask, SSDataBlock* pDat } 
pTableSinkInfo->uid = 0; - code = doPutIntoCache(pTask->outputInfo.tbSink.pTblInfo, pTableSinkInfo, groupId, id); + code = doPutSinkTableInfoIntoCache(pTask->outputInfo.tbSink.pTbInfo, pTableSinkInfo, groupId, id); } else { metaReaderClear(&mr); - tqError("s-task:%s vgId:%d dst-table:%s not auto-created, and not create in tsdb, discard data", id, - vgId, dstTableName); + tqError("s-task:%s vgId:%d dst-table:%s not auto-created, and not create in tsdb, discard data", id, vgId, + dstTableName); return TSDB_CODE_TDB_TABLE_NOT_EXIST; } } else { @@ -944,7 +945,7 @@ int32_t setDstTableDataUid(SVnode* pVnode, SStreamTask* pTask, SSDataBlock* pDat pTableSinkInfo->uid = mr.me.uid; metaReaderClear(&mr); - code = doPutIntoCache(pTask->outputInfo.tbSink.pTblInfo, pTableSinkInfo, groupId, id); + code = doPutSinkTableInfoIntoCache(pTask->outputInfo.tbSink.pTbInfo, pTableSinkInfo, groupId, id); } } } @@ -952,8 +953,8 @@ int32_t setDstTableDataUid(SVnode* pVnode, SStreamTask* pTask, SSDataBlock* pDat return code; } -int32_t tqSetDstTableDataPayload(uint64_t suid, const STSchema *pTSchema, int32_t blockIndex, SSDataBlock* pDataBlock, - SSubmitTbData* pTableData, int64_t earlyTs, const char* id) { +int32_t tqSetDstTableDataPayload(uint64_t suid, const STSchema* pTSchema, int32_t blockIndex, SSDataBlock* pDataBlock, + SSubmitTbData* pTableData, int64_t earlyTs, const char* id) { int32_t numOfRows = pDataBlock->info.rows; char* dstTableName = pDataBlock->info.parTbName; @@ -975,6 +976,43 @@ int32_t tqSetDstTableDataPayload(uint64_t suid, const STSchema *pTSchema, int32_ return code; } +int32_t checkTagSchema(SStreamTask* pTask, SVnode* pVnode) { + int32_t code = TSDB_CODE_SUCCESS; + const char* id = pTask->id.idStr; + STaskOutputInfo* pOutputInfo = &pTask->outputInfo; + int32_t vgId = pTask->pMeta->vgId; + + if (pTask->outputInfo.tbSink.pTagSchema == NULL) { + SMetaReader mer1 = {0}; + metaReaderDoInit(&mer1, pVnode->pMeta, META_READER_LOCK); + + code = metaReaderGetTableEntryByUid(&mer1, pOutputInfo->tbSink.stbUid); + if (code != TSDB_CODE_SUCCESS) { + tqError("s-task:%s vgId:%d failed to get the dst stable, failed to sink results", id, vgId); + metaReaderClear(&mer1); + return code; + } + + pOutputInfo->tbSink.pTagSchema = tCloneSSchemaWrapper(&mer1.me.stbEntry.schemaTag); + metaReaderClear(&mer1); + + if (pOutputInfo->tbSink.pTagSchema == NULL) { + tqError("s-task:%s failed to clone tag schema, code:%s, failed to sink results", id, tstrerror(terrno)); + return terrno; + } + + SSchemaWrapper* pTagSchema = pOutputInfo->tbSink.pTagSchema; + SSchema* pCol1 = &pTagSchema->pSchema[0]; + if (pTagSchema->nCols == 1 && pCol1->type == TSDB_DATA_TYPE_UBIGINT && strcmp(pCol1->name, "group_id") == 0) { + pOutputInfo->tbSink.autoCreateCtb = true; + } else { + pOutputInfo->tbSink.autoCreateCtb = false; + } + } + + return code; +} + void tqSinkDataIntoDstTable(SStreamTask* pTask, void* vnode, void* data) { const SArray* pBlocks = (const SArray*)data; SVnode* pVnode = (SVnode*)vnode; @@ -988,27 +1026,9 @@ void tqSinkDataIntoDstTable(SStreamTask* pTask, void* vnode, void* data) { int64_t earlyTs = tsdbGetEarliestTs(pVnode->pTsdb); STaskOutputInfo* pOutputInfo = &pTask->outputInfo; - if (pTask->outputInfo.tbSink.pTagSchema == NULL) { - SMetaReader mer1 = {0}; - metaReaderDoInit(&mer1, pVnode->pMeta, META_READER_LOCK); - - code = metaReaderGetTableEntryByUid(&mer1, pOutputInfo->tbSink.stbUid); - if (code != TSDB_CODE_SUCCESS) { - tqError("s-task:%s vgId:%d failed to get the dst stable, failed to sink results", id, vgId); - 
metaReaderClear(&mer1); - return; - } - - pOutputInfo->tbSink.pTagSchema = tCloneSSchemaWrapper(&mer1.me.stbEntry.schemaTag); - metaReaderClear(&mer1); - - SSchemaWrapper* pTagSchema = pOutputInfo->tbSink.pTagSchema; - SSchema* pCol1 = &pTagSchema->pSchema[0]; - if (pTagSchema->nCols == 1 && pCol1->type == TSDB_DATA_TYPE_UBIGINT && strcmp(pCol1->name, "group_id") == 0) { - pOutputInfo->tbSink.autoCreateCtb = true; - } else { - pOutputInfo->tbSink.autoCreateCtb = false; - } + code = checkTagSchema(pTask, pVnode); + if (code != TSDB_CODE_SUCCESS) { + return; } bool onlySubmitData = hasOnlySubmitData(pBlocks, numOfBlocks); @@ -1033,144 +1053,16 @@ void tqSinkDataIntoDstTable(SStreamTask* pTask, void* vnode, void* data) { } else if (pDataBlock->info.type == STREAM_CHECKPOINT) { continue; } else { - pTask->execInfo.sink.numOfBlocks += 1; - - SSubmitReq2 submitReq = {.aSubmitTbData = taosArrayInit(1, sizeof(SSubmitTbData))}; - if (submitReq.aSubmitTbData == NULL) { - code = terrno; - tqError("s-task:%s vgId:%d failed to prepare submit msg in sink task, code:%s", id, vgId, tstrerror(code)); - return; - } - - SSubmitTbData tbData = {.suid = suid, .uid = 0, .sver = pTSchema->version, .flags = TD_REQ_FROM_APP}; - code = setDstTableDataUid(pVnode, pTask, pDataBlock, stbFullName, &tbData); - if (code != TSDB_CODE_SUCCESS) { - tqError("vgId:%d s-task:%s dst-table not exist, stb:%s discard stream results", vgId, id, stbFullName); - continue; - } - - code = tqSetDstTableDataPayload(suid, pTSchema, i, pDataBlock, &tbData, earlyTs, id); - if (code != TSDB_CODE_SUCCESS || tbData.aRowP == NULL) { - if (tbData.pCreateTbReq != NULL) { - tdDestroySVCreateTbReq(tbData.pCreateTbReq); - (void) doRemoveFromCache(pTask->outputInfo.tbSink.pTblInfo, pDataBlock->info.id.groupId, id); - tbData.pCreateTbReq = NULL; - } - continue; - } - - void* p = taosArrayPush(submitReq.aSubmitTbData, &tbData); - if (p == NULL) { - tqDebug("vgId:%d, s-task:%s failed to build submit msg, data lost", vgId, id); - } - - code = doBuildAndSendSubmitMsg(pVnode, pTask, &submitReq, 1); - if (code) { // failed and continue - tqDebug("vgId:%d, s-task:%s submit msg failed, data lost", vgId, id); - } + code = handleResultBlockMsg(pTask, pDataBlock, i, pVnode, earlyTs); } } } else { tqDebug("vgId:%d, s-task:%s write %d stream resBlock(s) into table, merge submit msg", vgId, id, numOfBlocks); - SHashObj* pTableIndexMap = - taosHashInit(numOfBlocks, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); - - SSubmitReq2 submitReq = {.aSubmitTbData = taosArrayInit(1, sizeof(SSubmitTbData))}; - if (submitReq.aSubmitTbData == NULL) { - code = terrno; - tqError("s-task:%s vgId:%d failed to prepare submit msg in sink task, code:%s", id, vgId, tstrerror(code)); - taosHashCleanup(pTableIndexMap); + if (streamTaskShouldStop(pTask)) { return; } - bool hasSubmit = false; - for (int32_t i = 0; i < numOfBlocks; i++) { - if (streamTaskShouldStop(pTask)) { - taosHashCleanup(pTableIndexMap); - tDestroySubmitReq(&submitReq, TSDB_MSG_FLG_ENCODE); - return; - } - - SSDataBlock* pDataBlock = taosArrayGet(pBlocks, i); - if (pDataBlock == NULL) { - continue; - } - - if (pDataBlock->info.type == STREAM_CHECKPOINT) { - continue; - } - - hasSubmit = true; - pTask->execInfo.sink.numOfBlocks += 1; - uint64_t groupId = pDataBlock->info.id.groupId; - - SSubmitTbData tbData = {.suid = suid, .uid = 0, .sver = pTSchema->version, .flags = TD_REQ_FROM_APP}; - - int32_t* index = taosHashGet(pTableIndexMap, &groupId, sizeof(groupId)); - if (index == NULL) { // no data 
yet, append it - code = setDstTableDataUid(pVnode, pTask, pDataBlock, stbFullName, &tbData); - if (code != TSDB_CODE_SUCCESS) { - tqError("vgId:%d dst-table gid:%" PRId64 " not exist, discard stream results", vgId, groupId); - continue; - } - - code = tqSetDstTableDataPayload(suid, pTSchema, i, pDataBlock, &tbData, earlyTs, id); - if (code != TSDB_CODE_SUCCESS || tbData.aRowP == NULL) { - if (tbData.pCreateTbReq != NULL) { - tdDestroySVCreateTbReq(tbData.pCreateTbReq); - (void) doRemoveFromCache(pTask->outputInfo.tbSink.pTblInfo, groupId, id); - tbData.pCreateTbReq = NULL; - } - continue; - } - - void* p = taosArrayPush(submitReq.aSubmitTbData, &tbData); - if (p == NULL) { - tqError("vgId:%d, s-task:%s failed to build submit msg, data lost", vgId, id); - continue; - } - - int32_t size = (int32_t)taosArrayGetSize(submitReq.aSubmitTbData) - 1; - code = taosHashPut(pTableIndexMap, &groupId, sizeof(groupId), &size, sizeof(size)); - if (code) { - tqError("vgId:%d, s-task:%s failed to put group into index map, code:%s", vgId, id, tstrerror(code)); - continue; - } - } else { - code = tqSetDstTableDataPayload(suid, pTSchema, i, pDataBlock, &tbData, earlyTs, id); - if (code != TSDB_CODE_SUCCESS || tbData.aRowP == NULL) { - if (tbData.pCreateTbReq != NULL) { - tdDestroySVCreateTbReq(tbData.pCreateTbReq); - tbData.pCreateTbReq = NULL; - } - continue; - } - - SSubmitTbData* pExisted = taosArrayGet(submitReq.aSubmitTbData, *index); - if (pExisted == NULL) { - continue; - } - - code = doMergeExistedRows(pExisted, &tbData, id); - if (code != TSDB_CODE_SUCCESS) { - continue; - } - } - - pTask->execInfo.sink.numOfRows += pDataBlock->info.rows; - } - - taosHashCleanup(pTableIndexMap); - - if (hasSubmit) { - code = doBuildAndSendSubmitMsg(pVnode, pTask, &submitReq, numOfBlocks); - if (code) { // failed and continue - tqError("vgId:%d failed to build and send submit msg", vgId); - } - } else { - tDestroySubmitReq(&submitReq, TSDB_MSG_FLG_ENCODE); - tqDebug("vgId:%d, s-task:%s write results completed", vgId, id); - } + reubuildAndSendMultiResBlock(pTask, pBlocks, pVnode, earlyTs); } } @@ -1190,7 +1082,7 @@ bool hasOnlySubmitData(const SArray* pBlocks, int32_t numOfBlocks) { return true; } -int32_t doPutIntoCache(SSHashObj* pSinkTableMap, STableSinkInfo* pTableSinkInfo, uint64_t groupId, const char* id) { +int32_t doPutSinkTableInfoIntoCache(SSHashObj* pSinkTableMap, STableSinkInfo* pTableSinkInfo, uint64_t groupId, const char* id) { int32_t code = tSimpleHashPut(pSinkTableMap, &groupId, sizeof(uint64_t), &pTableSinkInfo, POINTER_BYTES); if (code != TSDB_CODE_SUCCESS) { taosMemoryFreeClear(pTableSinkInfo); @@ -1202,7 +1094,17 @@ int32_t doPutIntoCache(SSHashObj* pSinkTableMap, STableSinkInfo* pTableSinkInfo, return code; } -int32_t doRemoveFromCache(SSHashObj* pSinkTableMap, uint64_t groupId, const char* id) { +bool doGetSinkTableInfoFromCache(SSHashObj* pTableInfoMap, uint64_t groupId, STableSinkInfo** pInfo) { + void* pVal = tSimpleHashGet(pTableInfoMap, &groupId, sizeof(uint64_t)); + if (pVal) { + *pInfo = *(STableSinkInfo**)pVal; + return true; + } + + return false; +} + +int32_t doRemoveSinkTableInfoInCache(SSHashObj* pSinkTableMap, uint64_t groupId, const char* id) { if (tSimpleHashGetSize(pSinkTableMap) == 0) { return TSDB_CODE_SUCCESS; } @@ -1223,8 +1125,8 @@ int32_t doBuildAndSendDeleteMsg(SVnode* pVnode, char* stbFullName, SSDataBlock* return terrno; } - int32_t code = tqBuildDeleteReq(pVnode->pTq, stbFullName, pDataBlock, &deleteReq, pTask->id.idStr, - pTask->ver >= SSTREAM_TASK_SUBTABLE_CHANGED_VER 
&& pTask->subtableWithoutMd5 != 1); + int32_t code = + tqBuildDeleteReq(pVnode->pTq, stbFullName, pDataBlock, &deleteReq, pTask->id.idStr, IS_NEW_SUBTB_RULE(pTask)); if (code != TSDB_CODE_SUCCESS) { return code; } @@ -1262,3 +1164,155 @@ int32_t doBuildAndSendDeleteMsg(SVnode* pVnode, char* stbFullName, SSDataBlock* return TSDB_CODE_SUCCESS; } + +void reubuildAndSendMultiResBlock(SStreamTask* pTask, const SArray* pBlocks, SVnode* pVnode, int64_t earlyTs) { + int32_t code = 0; + const char* id = pTask->id.idStr; + int32_t vgId = pTask->pMeta->vgId; + int32_t numOfBlocks = taosArrayGetSize(pBlocks); + int64_t suid = pTask->outputInfo.tbSink.stbUid; + STSchema* pTSchema = pTask->outputInfo.tbSink.pTSchema; + char* stbFullName = pTask->outputInfo.tbSink.stbFullName; + + SHashObj* pTableIndexMap = + taosHashInit(numOfBlocks, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); + + SSubmitReq2 submitReq = {.aSubmitTbData = taosArrayInit(1, sizeof(SSubmitTbData))}; + if (submitReq.aSubmitTbData == NULL) { + code = terrno; + tqError("s-task:%s vgId:%d failed to prepare submit msg in sink task, code:%s", id, vgId, tstrerror(code)); + taosHashCleanup(pTableIndexMap); + return; + } + + bool hasSubmit = false; + for (int32_t i = 0; i < numOfBlocks; i++) { + SSDataBlock* pDataBlock = taosArrayGet(pBlocks, i); + if (pDataBlock == NULL) { + continue; + } + + if (pDataBlock->info.type == STREAM_CHECKPOINT) { + continue; + } + + hasSubmit = true; + pTask->execInfo.sink.numOfBlocks += 1; + uint64_t groupId = pDataBlock->info.id.groupId; + + SSubmitTbData tbData = {.suid = suid, .uid = 0, .sver = pTSchema->version, .flags = TD_REQ_FROM_APP}; + + int32_t* index = taosHashGet(pTableIndexMap, &groupId, sizeof(groupId)); + if (index == NULL) { // no data yet, append it + code = setDstTableDataUid(pVnode, pTask, pDataBlock, stbFullName, &tbData); + if (code != TSDB_CODE_SUCCESS) { + tqError("vgId:%d dst-table gid:%" PRId64 " not exist, discard stream results", vgId, groupId); + continue; + } + + code = tqSetDstTableDataPayload(suid, pTSchema, i, pDataBlock, &tbData, earlyTs, id); + if (code != TSDB_CODE_SUCCESS || tbData.aRowP == NULL) { + if (tbData.pCreateTbReq != NULL) { + tdDestroySVCreateTbReq(tbData.pCreateTbReq); + (void)doRemoveSinkTableInfoInCache(pTask->outputInfo.tbSink.pTbInfo, groupId, id); + tbData.pCreateTbReq = NULL; + } + continue; + } + + void* p = taosArrayPush(submitReq.aSubmitTbData, &tbData); + if (p == NULL) { + tqError("vgId:%d, s-task:%s failed to build submit msg, data lost", vgId, id); + continue; + } + + int32_t size = (int32_t)taosArrayGetSize(submitReq.aSubmitTbData) - 1; + code = taosHashPut(pTableIndexMap, &groupId, sizeof(groupId), &size, sizeof(size)); + if (code) { + tqError("vgId:%d, s-task:%s failed to put group into index map, code:%s", vgId, id, tstrerror(code)); + continue; + } + } else { + code = tqSetDstTableDataPayload(suid, pTSchema, i, pDataBlock, &tbData, earlyTs, id); + if (code != TSDB_CODE_SUCCESS || tbData.aRowP == NULL) { + if (tbData.pCreateTbReq != NULL) { + tdDestroySVCreateTbReq(tbData.pCreateTbReq); + tbData.pCreateTbReq = NULL; + } + continue; + } + + SSubmitTbData* pExisted = taosArrayGet(submitReq.aSubmitTbData, *index); + if (pExisted == NULL) { + continue; + } + + code = doMergeExistedRows(pExisted, &tbData, id); + if (code != TSDB_CODE_SUCCESS) { + continue; + } + } + + pTask->execInfo.sink.numOfRows += pDataBlock->info.rows; + } + + taosHashCleanup(pTableIndexMap); + + if (hasSubmit) { + code = doBuildAndSendSubmitMsg(pVnode, 
pTask, &submitReq, numOfBlocks); + if (code) { // failed and continue + tqError("vgId:%d failed to build and send submit msg", vgId); + } + } else { + tDestroySubmitReq(&submitReq, TSDB_MSG_FLG_ENCODE); + tqDebug("vgId:%d, s-task:%s write results completed", vgId, id); + } +} + +int32_t handleResultBlockMsg(SStreamTask* pTask, SSDataBlock* pDataBlock, int32_t index, SVnode* pVnode, int64_t earlyTs) { + int32_t code = 0; + STSchema* pTSchema = pTask->outputInfo.tbSink.pTSchema; + int64_t suid = pTask->outputInfo.tbSink.stbUid; + const char* id = pTask->id.idStr; + int32_t vgId = TD_VID(pVnode); + char* stbFullName = pTask->outputInfo.tbSink.stbFullName; + + pTask->execInfo.sink.numOfBlocks += 1; + + SSubmitReq2 submitReq = {.aSubmitTbData = taosArrayInit(1, sizeof(SSubmitTbData))}; + if (submitReq.aSubmitTbData == NULL) { + tqError("s-task:%s vgId:%d failed to prepare submit msg in sink task, code:%s", id, vgId, tstrerror(terrno)); + return terrno; + } + + SSubmitTbData tbData = {.suid = suid, .uid = 0, .sver = pTSchema->version, .flags = TD_REQ_FROM_APP}; + code = setDstTableDataUid(pVnode, pTask, pDataBlock, stbFullName, &tbData); + if (code != TSDB_CODE_SUCCESS) { + tqError("vgId:%d s-task:%s dst-table not exist, stb:%s discard stream results", vgId, id, stbFullName); + return code; + } + + code = tqSetDstTableDataPayload(suid, pTSchema, index, pDataBlock, &tbData, earlyTs, id); + if (code != TSDB_CODE_SUCCESS || tbData.aRowP == NULL) { + if (tbData.pCreateTbReq != NULL) { + tdDestroySVCreateTbReq(tbData.pCreateTbReq); + (void)doRemoveSinkTableInfoInCache(pTask->outputInfo.tbSink.pTbInfo, pDataBlock->info.id.groupId, id); + tbData.pCreateTbReq = NULL; + } + + return code; + } + + void* p = taosArrayPush(submitReq.aSubmitTbData, &tbData); + if (p == NULL) { + tqDebug("vgId:%d, s-task:%s failed to build submit msg, code:%s, data lost", vgId, id, tstrerror(terrno)); + return terrno; + } + + code = doBuildAndSendSubmitMsg(pVnode, pTask, &submitReq, 1); + if (code) { // failed and continue + tqDebug("vgId:%d, s-task:%s submit msg failed, code:%s data lost", vgId, id, tstrerror(code)); + } + + return code; +} diff --git a/source/libs/stream/src/streamTask.c b/source/libs/stream/src/streamTask.c index 727701e03e..b359cdfc81 100644 --- a/source/libs/stream/src/streamTask.c +++ b/source/libs/stream/src/streamTask.c @@ -289,7 +289,7 @@ void tFreeStreamTask(SStreamTask* pTask) { if (pTask->outputInfo.type == TASK_OUTPUT__TABLE) { tDeleteSchemaWrapper(pTask->outputInfo.tbSink.pSchemaWrapper); taosMemoryFree(pTask->outputInfo.tbSink.pTSchema); - tSimpleHashCleanup(pTask->outputInfo.tbSink.pTblInfo); + tSimpleHashCleanup(pTask->outputInfo.tbSink.pTbInfo); tDeleteSchemaWrapper(pTask->outputInfo.tbSink.pTagSchema); } else if (pTask->outputInfo.type == TASK_OUTPUT__SHUFFLE_DISPATCH) { taosArrayDestroy(pTask->outputInfo.shuffleDispatcher.dbInfo.pVgroupInfos); From 9d40a6d68bc37ac5c30e50d228ae54245d9de322 Mon Sep 17 00:00:00 2001 From: wade zhang <95411902+gccgdb1234@users.noreply.github.com> Date: Thu, 17 Oct 2024 12:04:05 +0800 Subject: [PATCH 44/51] Update 3.3.3.0.md --- docs/zh/28-releases/03-notes/3.3.3.0.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/28-releases/03-notes/3.3.3.0.md b/docs/zh/28-releases/03-notes/3.3.3.0.md index 405ca83d71..bb7bc0f831 100644 --- a/docs/zh/28-releases/03-notes/3.3.3.0.md +++ b/docs/zh/28-releases/03-notes/3.3.3.0.md @@ -10,7 +10,7 @@ description: 3.3.3.0 版本说明 4. TDengine支持macOS企业版客户端 [企业版] 5. taosX日志默认不写入syslog [企业版] 6. 
服务端记录所有慢查询信息到log库 -7. show cluster machines 查询结果中添加服务端版本号 +7. show cluster machines 查询结果中添加服务端版本号 [企业版] 8. 删除保留关键字LEVEL/ENCODE/COMPRESS, 可以作为列名/表名/数据库名等使用 9. 禁止动态修改临时目录 10. round 函数:支持四舍五入的精度 From d0618b6e8553a5f073266f82ab650dc63721399e Mon Sep 17 00:00:00 2001 From: Jing Sima Date: Wed, 16 Oct 2024 23:35:53 +0800 Subject: [PATCH 45/51] fix:[TD-32592] fix bug when percentile split bucket. --- source/libs/function/src/tpercentile.c | 39 ++++++++++++++------------ 1 file changed, 21 insertions(+), 18 deletions(-) diff --git a/source/libs/function/src/tpercentile.c b/source/libs/function/src/tpercentile.c index 29c48460c0..429ab52a8d 100644 --- a/source/libs/function/src/tpercentile.c +++ b/source/libs/function/src/tpercentile.c @@ -224,19 +224,18 @@ int32_t tBucketDoubleHash(tMemBucket *pBucket, const void *value, int32_t *index *index = -1; - if (v > pBucket->range.dMaxVal || v < pBucket->range.dMinVal) { + if (v > pBucket->range.dMaxVal || v < pBucket->range.dMinVal || isnan(v)) { return TSDB_CODE_SUCCESS; } // divide a range of [dMinVal, dMaxVal] into 1024 buckets double span = pBucket->range.dMaxVal - pBucket->range.dMinVal; - if (span < pBucket->numOfSlots) { - int32_t delta = (int32_t)(v - pBucket->range.dMinVal); - *index = (delta % pBucket->numOfSlots); + if (fabs(span) < DBL_EPSILON) { + *index = 0; } else { double slotSpan = span / pBucket->numOfSlots; *index = (int32_t)((v - pBucket->range.dMinVal) / slotSpan); - if (v == pBucket->range.dMaxVal) { + if (fabs(v - pBucket->range.dMaxVal) < DBL_EPSILON) { *index -= 1; } } @@ -583,48 +582,52 @@ int32_t getPercentileImpl(tMemBucket *pMemBucket, int32_t count, double fraction *result = getIdenticalDataVal(pMemBucket, i); return TSDB_CODE_SUCCESS; } - // try next round - pMemBucket->times += 1; - // qDebug("MemBucket:%p, start next round data bucketing, time:%d", pMemBucket, pMemBucket->times); - - pMemBucket->range = pSlot->range; - pMemBucket->total = 0; - - resetSlotInfo(pMemBucket); - - int32_t groupId = getGroupId(pMemBucket->numOfSlots, i, pMemBucket->times - 1); + tMemBucket *tmpBucket = NULL; + int32_t code = tMemBucketCreate(pMemBucket->bytes, pMemBucket->type, pSlot->range.dMinVal, pSlot->range.dMaxVal, + false, &tmpBucket); + if (TSDB_CODE_SUCCESS != code) { + tMemBucketDestroy(&tmpBucket); + return code; + } + int32_t groupId = getGroupId(pMemBucket->numOfSlots, i, pMemBucket->times); SArray* list; void *p = taosHashGet(pMemBucket->groupPagesMap, &groupId, sizeof(groupId)); if (p != NULL) { list = *(SArray **)p; if (list == NULL || list->size <= 0) { + tMemBucketDestroy(&tmpBucket); return -1; } } else { + tMemBucketDestroy(&tmpBucket); return -1; } for (int32_t f = 0; f < list->size; ++f) { int32_t *pageId = taosArrayGet(list, f); if (NULL == pageId) { + tMemBucketDestroy(&tmpBucket); return TSDB_CODE_OUT_OF_RANGE; } SFilePage *pg = getBufPage(pMemBucket->pBuffer, *pageId); if (pg == NULL) { + tMemBucketDestroy(&tmpBucket); return terrno; } - int32_t code = tMemBucketPut(pMemBucket, pg->data, (int32_t)pg->num); + code = tMemBucketPut(tmpBucket, pg->data, (int32_t)pg->num); if (code != TSDB_CODE_SUCCESS) { + tMemBucketDestroy(&tmpBucket); return code; } setBufPageDirty(pg, true); releaseBufPage(pMemBucket->pBuffer, pg); } - - return getPercentileImpl(pMemBucket, count - num, fraction, result); + code = getPercentileImpl(tmpBucket, count - num, fraction, result); + tMemBucketDestroy(&tmpBucket); + return code; } } else { num += pSlot->info.size; From 9017474e221541e8aba64b491d5318744ee53720 Mon Sep 17 00:00:00 2001 From: 
54liuyao <54liuyao@163.com> Date: Thu, 17 Oct 2024 15:35:32 +0800 Subject: [PATCH 46/51] adj ci --- tests/script/tsim/stream/windowClose.sim | 10 +++++----- tests/system-test/8-stream/max_delay_session.py | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/script/tsim/stream/windowClose.sim b/tests/script/tsim/stream/windowClose.sim index a9c55bd9de..8e4e666dc9 100644 --- a/tests/script/tsim/stream/windowClose.sim +++ b/tests/script/tsim/stream/windowClose.sim @@ -48,15 +48,15 @@ sql create table t1 using st tags(1); sql create table t2 using st tags(2); sql create stream stream2 trigger window_close into streamt2 as select _wstart, sum(a) from st interval(10s); -sql create stream stream3 trigger max_delay 1s into streamt3 as select _wstart, sum(a) from st interval(10s); +sql create stream stream3 trigger max_delay 5s into streamt3 as select _wstart, sum(a) from st interval(10s); sql create stream stream4 trigger window_close into streamt4 as select _wstart, sum(a) from t1 interval(10s); -sql create stream stream5 trigger max_delay 1s into streamt5 as select _wstart, sum(a) from t1 interval(10s); +sql create stream stream5 trigger max_delay 5s into streamt5 as select _wstart, sum(a) from t1 interval(10s); sql create stream stream6 trigger window_close into streamt6 as select _wstart, sum(a) from st session(ts, 10s); -sql create stream stream7 trigger max_delay 1s into streamt7 as select _wstart, sum(a) from st session(ts, 10s); +sql create stream stream7 trigger max_delay 5s into streamt7 as select _wstart, sum(a) from st session(ts, 10s); sql create stream stream8 trigger window_close into streamt8 as select _wstart, sum(a) from t1 session(ts, 10s); -sql create stream stream9 trigger max_delay 1s into streamt9 as select _wstart, sum(a) from t1 session(ts, 10s); +sql create stream stream9 trigger max_delay 5s into streamt9 as select _wstart, sum(a) from t1 session(ts, 10s); sql create stream stream10 trigger window_close into streamt10 as select _wstart, sum(a) from t1 state_window(b); -sql create stream stream11 trigger max_delay 1s into streamt11 as select _wstart, sum(a) from t1 state_window(b); +sql create stream stream11 trigger max_delay 5s into streamt11 as select _wstart, sum(a) from t1 state_window(b); run tsim/stream/checkTaskStatus.sim diff --git a/tests/system-test/8-stream/max_delay_session.py b/tests/system-test/8-stream/max_delay_session.py index 934fbbcac2..71827f03b4 100644 --- a/tests/system-test/8-stream/max_delay_session.py +++ b/tests/system-test/8-stream/max_delay_session.py @@ -92,7 +92,7 @@ class TDTestCase: def run(self): for fill_history_value in [None, 1]: for watermark in [None, random.randint(20, 30)]: - self.watermark_max_delay_session(session=random.randint(10, 15), watermark=watermark, max_delay=f"{random.randint(1, 3)}s", fill_history_value=fill_history_value) + self.watermark_max_delay_session(session=random.randint(10, 15), watermark=watermark, max_delay=f"{random.randint(5, 8)}s", fill_history_value=fill_history_value) def stop(self): tdSql.close() From e64f55f5b1252ed27b872721869efee8eabbe6fd Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao@163.com> Date: Thu, 17 Oct 2024 18:06:16 +0800 Subject: [PATCH 47/51] adj ci --- tests/script/tsim/stream/windowClose.sim | 56 ++++++++++++------------ 1 file changed, 28 insertions(+), 28 deletions(-) diff --git a/tests/script/tsim/stream/windowClose.sim b/tests/script/tsim/stream/windowClose.sim index 8e4e666dc9..2d4e6d7ea3 100644 --- a/tests/script/tsim/stream/windowClose.sim +++ 
b/tests/script/tsim/stream/windowClose.sim @@ -138,12 +138,12 @@ if $rows != 2 then goto loop1 endi -print step 1 max delay 2s +print step 1 max delay 5s sql create database test3 vgroups 4; sql use test3; sql create table t1(ts timestamp, a int, b int , c int, d double); -sql create stream stream13 trigger max_delay 2s into streamt13 as select _wstart, sum(a), now from t1 interval(10s); +sql create stream stream13 trigger max_delay 5s into streamt13 as select _wstart, sum(a), now from t1 interval(10s); run tsim/stream/checkTaskStatus.sim @@ -172,8 +172,8 @@ $now02 = $data02 $now12 = $data12 -print step1 max delay 2s......... sleep 3s -sleep 3000 +print step1 max delay 5s......... sleep 6s +sleep 6000 sql select * from streamt13; @@ -188,7 +188,7 @@ if $data12 != $now12 then return -1 endi -print step 2 max delay 2s +print step 2 max delay 5s sql create database test4 vgroups 4; sql use test4; @@ -197,7 +197,7 @@ sql create stable st(ts timestamp, a int, b int , c int, d double) tags(ta int,t sql create table t1 using st tags(1,1,1); sql create table t2 using st tags(2,2,2); -sql create stream stream14 trigger max_delay 2s into streamt14 as select _wstart, sum(a), now from st partition by tbname interval(10s); +sql create stream stream14 trigger max_delay 5s into streamt14 as select _wstart, sum(a), now from st partition by tbname interval(10s); run tsim/stream/checkTaskStatus.sim @@ -234,8 +234,8 @@ $now12 = $data12 $now22 = $data22 $now32 = $data32 -print step2 max delay 2s......... sleep 3s -sleep 3000 +print step2 max delay 5s......... sleep 6s +sleep 6000 sql select * from streamt14 order by 2; print $data00 $data01 $data02 @@ -264,8 +264,8 @@ if $data32 != $now32 then return -1 endi -print step2 max delay 2s......... sleep 3s -sleep 3000 +print step2 max delay 5s......... sleep 6s +sleep 6000 sql select * from streamt14 order by 2; print $data00 $data01 $data02 @@ -294,12 +294,12 @@ if $data32 != $now32 then return -1 endi -print step 2 max delay 2s +print step 2 max delay 5s sql create database test15 vgroups 4; sql use test15; sql create table t1(ts timestamp, a int, b int , c int, d double); -sql create stream stream15 trigger max_delay 2s into streamt13 as select _wstart, sum(a), now from t1 session(ts, 10s); +sql create stream stream15 trigger max_delay 5s into streamt13 as select _wstart, sum(a), now from t1 session(ts, 10s); run tsim/stream/checkTaskStatus.sim @@ -328,8 +328,8 @@ $now02 = $data02 $now12 = $data12 -print step1 max delay 2s......... sleep 3s -sleep 3000 +print step1 max delay 5s......... sleep 6s +sleep 6000 sql select * from streamt13; @@ -344,8 +344,8 @@ if $data12 != $now12 then return -1 endi -print step1 max delay 2s......... sleep 3s -sleep 3000 +print step1 max delay 5s......... sleep 6s +sleep 6000 sql select * from streamt13; @@ -362,12 +362,12 @@ endi print session max delay over -print step 3 max delay 2s +print step 3 max delay 5s sql create database test16 vgroups 4; sql use test16; sql create table t1(ts timestamp, a int, b int , c int, d double); -sql create stream stream16 trigger max_delay 2s into streamt13 as select _wstart, sum(a), now from t1 state_window(a); +sql create stream stream16 trigger max_delay 5s into streamt13 as select _wstart, sum(a), now from t1 state_window(a); run tsim/stream/checkTaskStatus.sim @@ -396,8 +396,8 @@ $now02 = $data02 $now12 = $data12 -print step1 max delay 2s......... sleep 3s -sleep 3000 +print step1 max delay 5s......... 
sleep 6s +sleep 6000 sql select * from streamt13; @@ -412,8 +412,8 @@ if $data12 != $now12 then return -1 endi -print step1 max delay 2s......... sleep 3s -sleep 3000 +print step1 max delay 5s......... sleep 6s +sleep 6000 sql select * from streamt13; @@ -430,12 +430,12 @@ endi print state max delay over -print step 4 max delay 2s +print step 4 max delay 5s sql create database test17 vgroups 4; sql use test17; sql create table t1(ts timestamp, a int, b int , c int, d double); -sql create stream stream17 trigger max_delay 2s into streamt13 as select _wstart, sum(a), now from t1 event_window start with a = 1 end with a = 9; +sql create stream stream17 trigger max_delay 5s into streamt13 as select _wstart, sum(a), now from t1 event_window start with a = 1 end with a = 9; run tsim/stream/checkTaskStatus.sim @@ -467,8 +467,8 @@ $now02 = $data02 $now12 = $data12 -print step1 max delay 2s......... sleep 3s -sleep 3000 +print step1 max delay 5s......... sleep 6s +sleep 6000 sql select * from streamt13; @@ -483,8 +483,8 @@ if $data12 != $now12 then return -1 endi -print step1 max delay 2s......... sleep 3s -sleep 3000 +print step1 max delay 5s......... sleep 6s +sleep 6000 sql select * from streamt13; From 8eacb58d9451999fc62d4434a24aa3ca2f456e48 Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao@163.com> Date: Fri, 18 Oct 2024 09:32:13 +0800 Subject: [PATCH 48/51] feat(stream):modify stream doc --- docs/zh/14-reference/03-taos-sql/14-stream.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/14-reference/03-taos-sql/14-stream.md b/docs/zh/14-reference/03-taos-sql/14-stream.md index d995c2a09b..3af8fa6921 100644 --- a/docs/zh/14-reference/03-taos-sql/14-stream.md +++ b/docs/zh/14-reference/03-taos-sql/14-stream.md @@ -153,7 +153,7 @@ SELECT * from information_schema.`ins_streams`; 由于窗口关闭是由事件时间决定的，如事件流中断、或持续延迟，则事件时间无法更新，可能导致无法得到最新的计算结果。 -因此，流式计算提供了以事件时间结合处理时间计算的 MAX_DELAY 触发模式。 +因此，流式计算提供了以事件时间结合处理时间计算的 MAX_DELAY 触发模式。MAX_DELAY最小时间是5s,如果低于5s,创建流计算时会报错。 MAX_DELAY 模式在窗口关闭时会立即触发计算。此外，当数据写入后，计算触发的时间超过 max delay 指定的时间，则立即触发计算 From dbd8d6891d0940a507a2a7e1e2fe714ba7ba8cf3 Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Fri, 18 Oct 2024 12:39:49 +0800 Subject: [PATCH 49/51] fix: subprocess.Popen redirected to PIPE can deadlock when the pipe buffer fills up --- tools/auto/stmt2Performance/json/query.json | 5 +- .../auto/stmt2Performance/json/template.json | 6 +- tools/auto/stmt2Performance/stmt2Perf.py | 64 +++++++++---------- tools/auto/testCompression/json/query.json | 5 +- tools/auto/testCompression/testCompression.py | 45 +++++++------ 5 files changed, 61 insertions(+), 64 deletions(-) diff --git a/tools/auto/stmt2Performance/json/query.json b/tools/auto/stmt2Performance/json/query.json index 70f1d90edc..a6e50daae2 100644 --- a/tools/auto/stmt2Performance/json/query.json +++ b/tools/auto/stmt2Performance/json/query.json @@ -12,11 +12,10 @@ "query_mode": "taosc", "specified_table_query": { "query_interval": 0, - "concurrent": 10, + "threads": 10, "sqls": [ { - "sql": "select count(*) from meters", - "result": "./query_result.txt" + "sql": "select count(*) from meters" } ] } diff --git a/tools/auto/stmt2Performance/json/template.json b/tools/auto/stmt2Performance/json/template.json index 8c54c5be22..6d015370e9 100644 --- a/tools/auto/stmt2Performance/json/template.json +++ b/tools/auto/stmt2Performance/json/template.json @@ -17,7 +17,9 @@ "dbinfo": { "name": "dbrate", "vgroups": 1, - "drop": "yes" + "drop": "yes", + "wal_retention_size": 1, +
"wal_retention_period": 1 }, "super_tables": [ { @@ -27,7 +29,7 @@ "childtable_prefix": "d", "insert_mode": "@STMT_MODE", "interlace_rows": @INTERLACE_MODE, - "insert_rows": 100000, + "insert_rows": 10000, "timestamp_step": 1, "start_timestamp": "2020-10-01 00:00:00.000", "auto_create_table": "no", diff --git a/tools/auto/stmt2Performance/stmt2Perf.py b/tools/auto/stmt2Performance/stmt2Perf.py index 4d99f2483d..ae66504c8a 100644 --- a/tools/auto/stmt2Performance/stmt2Perf.py +++ b/tools/auto/stmt2Performance/stmt2Perf.py @@ -34,28 +34,6 @@ def exec(command, show=True): print(f"exec {command}\n") return os.system(command) -# run return output and error -def run(command, timeout = 60, show=True): - if(show): - print(f"run {command} timeout={timeout}s\n") - - process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - process.wait(timeout) - - output = process.stdout.read().decode(encoding="gbk") - error = process.stderr.read().decode(encoding="gbk") - - return output, error - -# return list after run -def runRetList(command, timeout=10, first=True): - output,error = run(command, timeout) - if first: - return output.splitlines() - else: - return error.splitlines() - - def readFileContext(filename): file = open(filename) context = file.read() @@ -78,6 +56,27 @@ def appendFileContext(filename, context): except: print(f"appand file error context={context} .") +# run return output and error +def run(command, show=True): + # out to file + out = "out.txt" + err = "err.txt" + ret = exec(command + f" 1>{out} 2>{err}", True) + + # read from file + output = readFileContext(out) + error = readFileContext(err) + + return output, error + +# return list after run +def runRetList(command, first=True): + output,error = run(command) + if first: + return output.splitlines() + else: + return error.splitlines() + def getFolderSize(folder): total_size = 0 for dirpath, dirnames, filenames in os.walk(folder): @@ -134,8 +133,6 @@ def getMatch(datatype, algo): def generateJsonFile(stmt, interlace): - print(f"doTest stmt: {stmt} interlace_rows={interlace}\n") - # replace datatype context = readFileContext(templateFile) # replace compress @@ -204,9 +201,16 @@ def writeTemplateInfo(resultFile): insertRows = findContextValue(context, "insert_rows") bindVGroup = findContextValue(context, "thread_bind_vgroup") nThread = findContextValue(context, "thread_count") + batch = findContextValue(context, "num_of_records_per_req") + if bindVGroup.lower().find("yes") != -1: nThread = vgroups - line = f"thread_bind_vgroup = {bindVGroup}\nvgroups = {vgroups}\nchildtable_count = {childCount}\ninsert_rows = {insertRows}\ninsertThreads = {nThread} \n\n" + line = f"thread_bind_vgroup = {bindVGroup}\n" + line += f"vgroups = {vgroups}\n" + line += f"childtable_count = {childCount}\n" + line += f"insert_rows = {insertRows}\n" + line += f"insertThreads = {nThread}\n" + line += f"batchSize = {batch}\n\n" print(line) appendFileContext(resultFile, line) @@ -247,14 +251,8 @@ def totalCompressRate(stmt, interlace, resultFile, spent, spentReal, writeSpeed, # %("No", "stmtMode", "interlaceRows", "spent", "spent-real", "writeSpeed", "write-real", "query-QPS", "dataSize", "rate") Number += 1 - ''' - context = "%2s %6s %10s %10s %10s %15s %15s %16s %16s %16s %16s %16s %8s %8s %8s\n"%( - Number, stmt, interlace, spent + "s", spentReal + "s", writeSpeed + " rows/s", writeReal + " rows/s", - min, avg, p90, p99, max, - querySpeed, str(totalSize) + " MB", rate + "%") - ''' context = "%2s %8s %10s %10s %16s %16s %12s 
%12s %12s %12s %12s %12s %10s %10s %10s\n"%( - Number, stmt, interlace, spent + "s", spentReal + "s", writeSpeed + "r/s", writeReal + "r/s", + Number, stmt, interlace, spent + "s", spentReal + "s", writeSpeed + " r/s", writeReal + " r/s", min, avg, p90, p99, max + "ms", querySpeed, str(totalSize) + " MB", rate + "%") @@ -323,7 +321,7 @@ def testWrite(jsonFile): def testQuery(): command = f"taosBenchmark -f json/query.json" - lines = runRetList(command, 60000) + lines = runRetList(command) # INFO: Spend 6.7350 second completed total queries: 10, the QPS of all threads: 1.485 speed = None diff --git a/tools/auto/testCompression/json/query.json b/tools/auto/testCompression/json/query.json index e810c1009f..35c39e831c 100644 --- a/tools/auto/testCompression/json/query.json +++ b/tools/auto/testCompression/json/query.json @@ -12,11 +12,10 @@ "query_mode": "taosc", "specified_table_query": { "query_interval": 0, - "concurrent": 10, + "threads": 10, "sqls": [ { - "sql": "select * from meters", - "result": "./query_res0.txt" + "sql": "select * from meters" } ] } diff --git a/tools/auto/testCompression/testCompression.py b/tools/auto/testCompression/testCompression.py index ee922a1a23..1a0d714c44 100644 --- a/tools/auto/testCompression/testCompression.py +++ b/tools/auto/testCompression/testCompression.py @@ -34,28 +34,6 @@ def exec(command, show=True): print(f"exec {command}\n") return os.system(command) -# run return output and error -def run(command, timeout = 60, show=True): - if(show): - print(f"run {command} timeout={timeout}s\n") - - process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - process.wait(timeout) - - output = process.stdout.read().decode(encoding="gbk") - error = process.stderr.read().decode(encoding="gbk") - - return output, error - -# return list after run -def runRetList(command, timeout=10, first=True): - output,error = run(command, timeout) - if first: - return output.splitlines() - else: - return error.splitlines() - - def readFileContext(filename): file = open(filename) context = file.read() @@ -78,6 +56,27 @@ def appendFileContext(filename, context): except: print(f"appand file error context={context} .") +# run return output and error +def run(command, show=True): + # out to file + out = "out.txt" + err = "err.txt" + ret = exec(command + f" 1>{out} 2>{err}", True) + + # read from file + output = readFileContext(out) + error = readFileContext(err) + + return output, error + +# return list after run +def runRetList(command, first=True): + output,error = run(command) + if first: + return output.splitlines() + else: + return error.splitlines() + def getFolderSize(folder): total_size = 0 for dirpath, dirnames, filenames in os.walk(folder): @@ -273,7 +272,7 @@ def testWrite(jsonFile): def testQuery(): command = f"taosBenchmark -f json/query.json" - lines = runRetList(command, 60000) + lines = runRetList(command) # INFO: Spend 6.7350 second completed total queries: 10, the QPS of all threads: 1.485 speed = None From 528465ae215fc6c53ed38916bde6229dbf8e0555 Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Sat, 19 Oct 2024 17:14:19 +0800 Subject: [PATCH 50/51] fix: flush database put to write end --- tools/auto/testCompression/testCompression.py | 64 +++++++++++-------- 1 file changed, 37 insertions(+), 27 deletions(-) diff --git a/tools/auto/testCompression/testCompression.py b/tools/auto/testCompression/testCompression.py index 1a0d714c44..4314817067 100644 --- a/tools/auto/testCompression/testCompression.py +++ 
From 528465ae215fc6c53ed38916bde6229dbf8e0555 Mon Sep 17 00:00:00 2001
From: Alex Duan <417921451@qq.com>
Date: Sat, 19 Oct 2024 17:14:19 +0800
Subject: [PATCH 50/51] fix: move database flush to the end of the write phase
---
 tools/auto/testCompression/testCompression.py | 64 +++++++++++--------
 1 file changed, 37 insertions(+), 27 deletions(-)

diff --git a/tools/auto/testCompression/testCompression.py b/tools/auto/testCompression/testCompression.py
index 1a0d714c44..4314817067 100644
--- a/tools/auto/testCompression/testCompression.py
+++ b/tools/auto/testCompression/testCompression.py
@@ -195,48 +195,55 @@ def findContextValue(context, label):
 def writeTemplateInfo(resultFile):
     # create info
     context = readFileContext(templateFile)
+    dbname = findContextValue(context, "name")
     vgroups = findContextValue(context, "vgroups")
     childCount = findContextValue(context, "childtable_count")
     insertRows = findContextValue(context, "insert_rows")
     line = f"vgroups = {vgroups}\nchildtable_count = {childCount}\ninsert_rows = {insertRows}\n\n"
     print(line)
     appendFileContext(resultFile, line)
+    return dbname
 
 def totalCompressRate(algo, resultFile, writeSpeed, querySpeed):
     global Number
-    # flush
-    command = 'taos -s "flush database dbrate;"'
-    rets = exec(command)
-    command = 'taos -s "compact database dbrate;"'
-    rets = exec(command)
-    waitCompactFinish(60)
+    loop = 30
 
-    # read compress rate
-    command = 'taos -s "show table distributed dbrate.meters\G;"'
-    rets = runRetList(command)
-    print(rets)
-    str1 = rets[5]
-    arr = str1.split(" ")
+    while loop > 0:
+        loop -= 1
 
-    # Total_Size KB
-    str2 = arr[2]
-    pos = str2.find("=[")
-    totalSize = int(float(str2[pos+2:])/1024)
+        # flush database
+        command = 'taos -s "flush database dbrate;"'
+        exec(command)
+        time.sleep(1)
 
-    # Compression_Ratio
-    str2 = arr[6]
-    pos = str2.find("=[")
-    rate = str2[pos+2:]
-    print("rate =" + rate)
+        # read compress rate
+        command = 'taos -s "show table distributed dbrate.meters\G;"'
+        rets = runRetList(command)
+        print(rets)
 
-    # total data file size
-    #dataSize = getFolderSize(f"{dataDir}/vnode/")
-    #dataSizeMB = int(dataSize/1024/1024)
+        str1 = rets[5]
+        arr = str1.split(" ")
 
-    # appand to file
-
+        # Total_Size KB
+        str2 = arr[2]
+        pos = str2.find("=[")
+        totalSize = int(float(str2[pos+2:])/1024)
+
+        # Compression_Ratio
+        str2 = arr[6]
+        pos = str2.find("=[")
+        rate = str2[pos+2:]
+        print("rate =" + rate)
+        if rate != "0.00":
+            break
+
+    # total data file size
+    #dataSize = getFolderSize(f"{dataDir}/vnode/")
+    #dataSizeMB = int(dataSize/1024/1024)
+
+    # append to file
     Number += 1
     context = "%10s %10s %10s %10s %30s %15s\n"%(
         Number, algo, str(totalSize)+" MB", rate+"%", writeSpeed + " Records/second", querySpeed)
     showLog(context)
@@ -268,6 +275,10 @@ def testWrite(jsonFile):
     speed = context[pos: end]
     #print(f"write pos ={pos} end={end} speed={speed}\n output={context} \n")
+
+    # flush database
+    command = 'taos -s "flush database dbrate;"'
+    exec(command)
     return speed
 
 def testQuery():
@@ -295,7 +306,6 @@
 
 def doTest(algo, resultFile):
     print(f"doTest algo: {algo} \n")
-    #cleanAndStartTaosd()
 
     # json
     jsonFile = generateJsonFile(algo)
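Patch 50's loop in totalCompressRate() is a poll-until-ready pattern: Compression_Ratio reads 0.00 until the flushed data has actually reached the data files, so the script flushes, sleeps, and re-checks up to 30 times instead of compacting and waiting a fixed 60 seconds. A generic sketch of the same idea; the command and the readiness predicate below are placeholders, not the exact taos -s invocation:

import time
import subprocess

# sketch: retry a shell command until its output satisfies a predicate,
# bounded like the 30-iteration loop above; both limits are tunable
def poll_until(command, ready, tries=30, pause=1.0):
    for _ in range(tries):
        out = subprocess.run(command, shell=True,
                             capture_output=True, text=True).stdout
        if ready(out):
            return out
        time.sleep(pause)
    return None  # gave up; the caller decides how to treat a timeout

# hypothetical usage: wait for a non-zero ratio field
print(poll_until("echo 'Compression_Ratio=[3.21]'",
                 lambda out: "=[0.00]" not in out))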
From 98702d8fb87750aa5f8bf8a8be32c3d7cf0e5d48 Mon Sep 17 00:00:00 2001
From: Alex Duan <417921451@qq.com>
Date: Sun, 20 Oct 2024 20:36:24 +0800
Subject: [PATCH 51/51] fix: reduce query_times from 20 to 5
---
 tools/auto/stmt2Performance/json/query.json | 2 +-
 tools/auto/testCompression/json/query.json  | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/tools/auto/stmt2Performance/json/query.json b/tools/auto/stmt2Performance/json/query.json
index a6e50daae2..3b6750be13 100644
--- a/tools/auto/stmt2Performance/json/query.json
+++ b/tools/auto/stmt2Performance/json/query.json
@@ -8,7 +8,7 @@
         "confirm_parameter_prompt": "no",
         "continue_if_fail": "yes",
         "databases": "dbrate",
-        "query_times": 20,
+        "query_times": 5,
         "query_mode": "taosc",
         "specified_table_query": {
             "query_interval": 0,
diff --git a/tools/auto/testCompression/json/query.json b/tools/auto/testCompression/json/query.json
index 35c39e831c..12bba6af9b 100644
--- a/tools/auto/testCompression/json/query.json
+++ b/tools/auto/testCompression/json/query.json
@@ -8,7 +8,7 @@
         "confirm_parameter_prompt": "no",
         "continue_if_fail": "yes",
         "databases": "dbrate",
-        "query_times": 20,
+        "query_times": 5,
         "query_mode": "taosc",
         "specified_table_query": {
             "query_interval": 0,