From f70321ee53f2403e770acd2bda3a41038b437549 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 10 Oct 2024 10:52:48 +0800 Subject: [PATCH 01/37] fix(vnd): check return value. --- source/dnode/vnode/src/vnd/vnodeSvr.c | 27 +++++++++++++-------------- 1 file changed, 13 insertions(+), 14 deletions(-) diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index 371eaa0774..1b1bb9257d 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -640,40 +640,39 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t ver, SRpcMsg } } break; case TDMT_STREAM_TASK_DROP: { - if (tqProcessTaskDropReq(pVnode->pTq, pMsg->pCont, pMsg->contLen) < 0) { + if ((code = tqProcessTaskDropReq(pVnode->pTq, pMsg->pCont, pMsg->contLen)) < 0) { goto _err; } } break; case TDMT_STREAM_TASK_UPDATE_CHKPT: { - if (tqProcessTaskUpdateCheckpointReq(pVnode->pTq, pMsg->pCont, pMsg->contLen) < 0) { + if ((code = tqProcessTaskUpdateCheckpointReq(pVnode->pTq, pMsg->pCont, pMsg->contLen)) < 0) { goto _err; } } break; case TDMT_STREAM_CONSEN_CHKPT: { - if (pVnode->restored) { - if (tqProcessTaskConsenChkptIdReq(pVnode->pTq, pMsg) < 0) { - goto _err; - } + if (pVnode->restored && (code = tqProcessTaskConsenChkptIdReq(pVnode->pTq, pMsg)) < 0) { + goto _err; } + } break; case TDMT_STREAM_TASK_PAUSE: { if (pVnode->restored && vnodeIsLeader(pVnode) && - tqProcessTaskPauseReq(pVnode->pTq, ver, pMsg->pCont, pMsg->contLen) < 0) { + (code = tqProcessTaskPauseReq(pVnode->pTq, ver, pMsg->pCont, pMsg->contLen)) < 0) { goto _err; } } break; case TDMT_STREAM_TASK_RESUME: { if (pVnode->restored && vnodeIsLeader(pVnode) && - tqProcessTaskResumeReq(pVnode->pTq, ver, pMsg->pCont, pMsg->contLen) < 0) { + (code = tqProcessTaskResumeReq(pVnode->pTq, ver, pMsg->pCont, pMsg->contLen)) < 0) { goto _err; } } break; case TDMT_VND_STREAM_TASK_RESET: { - if (pVnode->restored && vnodeIsLeader(pVnode)) { - if (tqProcessTaskResetReq(pVnode->pTq, pMsg) < 0) { + if (pVnode->restored && vnodeIsLeader(pVnode) && + (code = tqProcessTaskResetReq(pVnode->pTq, pMsg)) < 0) { goto _err; } - } + } break; case TDMT_VND_ALTER_CONFIRM: needCommit = pVnode->config.hashChange; @@ -693,10 +692,10 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t ver, SRpcMsg case TDMT_VND_DROP_INDEX: vnodeProcessDropIndexReq(pVnode, ver, pReq, len, pRsp); break; - case TDMT_VND_STREAM_CHECK_POINT_SOURCE: + case TDMT_VND_STREAM_CHECK_POINT_SOURCE: // always return true tqProcessTaskCheckPointSourceReq(pVnode->pTq, pMsg, pRsp); break; - case TDMT_VND_STREAM_TASK_UPDATE: + case TDMT_VND_STREAM_TASK_UPDATE: // always return true tqProcessTaskUpdateReq(pVnode->pTq, pMsg); break; case TDMT_VND_COMPACT: @@ -752,7 +751,7 @@ _exit: _err: vError("vgId:%d, process %s request failed since %s, ver:%" PRId64, TD_VID(pVnode), TMSG_INFO(pMsg->msgType), - tstrerror(terrno), ver); + tstrerror(code), ver); return code; } From 3367f129daf04dbd3731c4481dd81a1a0dacff10 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 10 Oct 2024 11:27:37 +0800 Subject: [PATCH 02/37] fix(vnd): check return value. 
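This applies the same pattern as the previous patch: the callee's return value is captured into code before jumping to _err, so the failure path logs tstrerror(code) instead of a possibly stale terrno. A condensed sketch of the pattern, reusing names from the diff above (it assumes the surrounding declarations in vnodeProcessWriteMsg and is not a standalone program):

    int32_t code = 0;

    switch (pMsg->msgType) {
      case TDMT_STREAM_TASK_DROP:
        /* capture the return code so the error path can report it */
        if ((code = tqProcessTaskDropReq(pVnode->pTq, pMsg->pCont, pMsg->contLen)) < 0) {
          goto _err;
        }
        break;
      default:
        break;
    }

    return 0;

  _err:
    vError("vgId:%d, process %s request failed since %s, ver:%" PRId64, TD_VID(pVnode),
           TMSG_INFO(pMsg->msgType), tstrerror(code), ver);
    return code;
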
--- source/dnode/vnode/src/vnd/vnodeSvr.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index 1b1bb9257d..dd13c975cf 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -633,9 +633,7 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t ver, SRpcMsg } break; case TDMT_STREAM_TASK_DEPLOY: { - int32_t code = tqProcessTaskDeployReq(pVnode->pTq, ver, pReq, len); - if (code != TSDB_CODE_SUCCESS) { - terrno = code; + if ((code = tqProcessTaskDeployReq(pVnode->pTq, ver, pReq, len)) != TSDB_CODE_SUCCESS) { goto _err; } } break; From 5dc933f5f1b80048a114dcd2d3ae652d05779c3c Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 10 Oct 2024 14:52:51 +0800 Subject: [PATCH 03/37] refactor: add some logs. --- source/common/src/tdatablock.c | 1 + source/dnode/vnode/src/tsdb/tsdbRead2.c | 4 +++- source/util/src/tarray.c | 1 + 3 files changed, 5 insertions(+), 1 deletion(-) diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index d8a66f82bf..0d00a6a4c7 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -2529,6 +2529,7 @@ int32_t dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf SColumnInfoData* pColInfoData = taosArrayGet(pDataBlock->pDataBlock, k); if (pColInfoData == NULL) { code = terrno; + uError("invalid param, size of list:%d index k:%d", (int32_t) taosArrayGetSize(pDataBlock->pDataBlock), k) goto _exit; } diff --git a/source/dnode/vnode/src/tsdb/tsdbRead2.c b/source/dnode/vnode/src/tsdb/tsdbRead2.c index 5b6511a38e..4e253d7c2e 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead2.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead2.c @@ -855,6 +855,7 @@ static int32_t loadFileBlockBrinInfo(STsdbReader* pReader, SArray* pIndexList, S STableBlockScanInfo** p = taosArrayGetLast(pTableScanInfoList); if (p == NULL) { clearBrinBlockIter(&iter); + tsdbError("invalid param, empty in tablescanInfoList, %s", pReader->idStr); return TSDB_CODE_INVALID_PARA; } @@ -5256,7 +5257,7 @@ int32_t tsdbNextDataBlock2(STsdbReader* pReader, bool* hasNext) { // NOTE: the following codes is used to perform test for suspend/resume for tsdbReader when it blocks the commit // the data should be ingested in round-robin and all the child tables should be createted before ingesting data // the version range of query will be used to identify the correctness of suspend/resume functions. 
- // this function will blocked before loading the SECOND block from vnode-buffer, and restart itself from sst-files + // this function will be blocked before loading the SECOND block from vnode-buffer, and restart itself from sst-files #if SUSPEND_RESUME_TEST if (!pReader->status.suspendInvoked && !pReader->status.loadFromFile) { tsem_wait(&pReader->resumeAfterSuspend); @@ -5909,6 +5910,7 @@ int32_t tsdbGetTableSchema(SMeta* pMeta, int64_t uid, STSchema** pSchema, int64_ } else if (mr.me.type == TSDB_NORMAL_TABLE) { // do nothing } else { code = TSDB_CODE_INVALID_PARA; + tsdbError("invalid mr.me.type:%d %s, code:%s", mr.me.type, tstrerror(code)); metaReaderClear(&mr); return code; } diff --git a/source/util/src/tarray.c b/source/util/src/tarray.c index 7989a2468b..b94bb512e2 100644 --- a/source/util/src/tarray.c +++ b/source/util/src/tarray.c @@ -200,6 +200,7 @@ void* taosArrayPop(SArray* pArray) { void* taosArrayGet(const SArray* pArray, size_t index) { if (NULL == pArray) { terrno = TSDB_CODE_INVALID_PARA; + uError("failed to return value from array of null ptr"); return NULL; } From 5c1cffed692e27fcbc042bd2f2c571c3b5357a8d Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 10 Oct 2024 15:54:15 +0800 Subject: [PATCH 04/37] fix(stream): add some logs. --- source/dnode/vnode/src/tqCommon/tqCommon.c | 7 ++----- source/libs/stream/src/streamMeta.c | 4 ++++ source/libs/stream/src/streamSched.c | 6 +++++- 3 files changed, 11 insertions(+), 6 deletions(-) diff --git a/source/dnode/vnode/src/tqCommon/tqCommon.c b/source/dnode/vnode/src/tqCommon/tqCommon.c index 6b7e857120..3871011407 100644 --- a/source/dnode/vnode/src/tqCommon/tqCommon.c +++ b/source/dnode/vnode/src/tqCommon/tqCommon.c @@ -1119,10 +1119,6 @@ static int32_t tqProcessTaskResumeImpl(void* handle, SStreamTask* pTask, int64_t int32_t vgId = pMeta->vgId; int32_t code = 0; - if (pTask == NULL) { - return -1; - } - streamTaskResume(pTask); ETaskStatus status = streamTaskGetStatus(pTask).state; @@ -1150,7 +1146,6 @@ static int32_t tqProcessTaskResumeImpl(void* handle, SStreamTask* pTask, int64_t } } - streamMetaReleaseTask(pMeta, pTask); return code; } @@ -1173,6 +1168,7 @@ int32_t tqStreamTaskProcessTaskResumeReq(void* handle, int64_t sversion, char* m code = tqProcessTaskResumeImpl(handle, pTask, sversion, pReq->igUntreated, fromVnode); if (code != 0) { + streamMetaReleaseTask(pMeta, pTask); return code; } @@ -1186,6 +1182,7 @@ int32_t tqStreamTaskProcessTaskResumeReq(void* handle, int64_t sversion, char* m streamMutexUnlock(&pHTask->lock); code = tqProcessTaskResumeImpl(handle, pHTask, sversion, pReq->igUntreated, fromVnode); + streamMetaReleaseTask(pMeta, pHTask); } return code; diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index 44c9e76906..29152c6205 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -759,6 +759,10 @@ void streamMetaAcquireOneTask(SStreamTask* pTask) { } void streamMetaReleaseTask(SStreamMeta* UNUSED_PARAM(pMeta), SStreamTask* pTask) { + if (pTask == NULL) { + return; + } + int32_t taskId = pTask->id.taskId; int32_t ref = atomic_sub_fetch_32(&pTask->refCnt, 1); diff --git a/source/libs/stream/src/streamSched.c b/source/libs/stream/src/streamSched.c index 98920e6f70..095a5af6d4 100644 --- a/source/libs/stream/src/streamSched.c +++ b/source/libs/stream/src/streamSched.c @@ -63,7 +63,11 @@ int32_t streamTaskSchedTask(SMsgCb* pMsgCb, int32_t vgId, int64_t streamId, int3 pRunReq->reqType = execType; SRpcMsg msg = {.msgType = 
TDMT_STREAM_TASK_RUN, .pCont = pRunReq, .contLen = sizeof(SStreamTaskRunReq)}; - return tmsgPutToQueue(pMsgCb, STREAM_QUEUE, &msg); + int32_t code = tmsgPutToQueue(pMsgCb, STREAM_QUEUE, &msg); + if (code) { + stError("vgId:%d failed to put msg into stream queue, code:%s, %x", vgId, tstrerror(code), taskId); + } + return code; } void streamTaskClearSchedIdleInfo(SStreamTask* pTask) { pTask->status.schedIdleTime = 0; } From d2f2a931fb0cf58c27434a04b6e8ad9a53fb9916 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 10 Oct 2024 16:21:11 +0800 Subject: [PATCH 05/37] fix(util): reset the returned length value. --- source/common/src/tdatablock.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 2047573b74..98e58c8bd7 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -2530,7 +2530,6 @@ int32_t dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf SColumnInfoData* pColInfoData = taosArrayGet(pDataBlock->pDataBlock, k); if (pColInfoData == NULL) { code = terrno; - uError("invalid param, size of list:%d index k:%d", (int32_t) taosArrayGetSize(pDataBlock->pDataBlock), k) goto _exit; } @@ -2611,7 +2610,10 @@ int32_t dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf if (code < 0) { uError("func %s failed to convert to ucs charset since %s", __func__, tstrerror(code)); goto _exit; + } else { // reset the length value + code = TSDB_CODE_SUCCESS; } + len += snprintf(dumpBuf + len, size - len, " %15s |", pBuf); if (len >= size - 1) goto _exit; } break; From 6e43521ba9fb2e6d4b580ef09639a16bc3a5e9d8 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 10 Oct 2024 18:28:14 +0800 Subject: [PATCH 06/37] fix(stream): only keep the latest pause operation status. --- source/libs/stream/src/streamTaskSm.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/source/libs/stream/src/streamTaskSm.c b/source/libs/stream/src/streamTaskSm.c index 3501d30be4..a10c4c30d5 100644 --- a/source/libs/stream/src/streamTaskSm.c +++ b/source/libs/stream/src/streamTaskSm.c @@ -485,6 +485,11 @@ int32_t streamTaskHandleEventAsync(SStreamTaskSM* pSM, EStreamTaskEvent event, _ static void keepPrevInfo(SStreamTaskSM* pSM) { STaskStateTrans* pTrans = pSM->pActiveTrans; + // we only keep the latest pause state + if (pSM->prev.state.state == TASK_STATUS__PAUSE && pSM->current.state == TASK_STATUS__PAUSE) { + return; + } + pSM->prev.state = pSM->current; pSM->prev.evt = pTrans->event; } @@ -501,9 +506,10 @@ int32_t streamTaskOnHandleEventSuccess(SStreamTaskSM* pSM, EStreamTaskEvent even if (pTrans == NULL) { ETaskStatus s = pSM->current.state; - if (s != TASK_STATUS__DROPPING && s != TASK_STATUS__PAUSE && s != TASK_STATUS__STOP && - s != TASK_STATUS__UNINIT && s != TASK_STATUS__READY) { - stError("s-task:%s invalid task status:%s on handling event:%s success", id, pSM->current.name, GET_EVT_NAME(pSM->prev.evt)); + if (s != TASK_STATUS__DROPPING && s != TASK_STATUS__PAUSE && s != TASK_STATUS__STOP && s != TASK_STATUS__UNINIT && + s != TASK_STATUS__READY) { + stError("s-task:%s invalid task status:%s on handling event:%s success", id, pSM->current.name, + GET_EVT_NAME(pSM->prev.evt)); } // the pSM->prev.evt may be 0, so print string is not appropriate. 
From 7e1c6b07392f21b29c221af1399b73754ba61617 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 10 Oct 2024 18:34:52 +0800 Subject: [PATCH 07/37] fix(stream): avoid the later pause overwrite the previous pause state. --- source/libs/stream/src/streamTaskSm.c | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/source/libs/stream/src/streamTaskSm.c b/source/libs/stream/src/streamTaskSm.c index a10c4c30d5..17d5d884a7 100644 --- a/source/libs/stream/src/streamTaskSm.c +++ b/source/libs/stream/src/streamTaskSm.c @@ -485,11 +485,6 @@ int32_t streamTaskHandleEventAsync(SStreamTaskSM* pSM, EStreamTaskEvent event, _ static void keepPrevInfo(SStreamTaskSM* pSM) { STaskStateTrans* pTrans = pSM->pActiveTrans; - // we only keep the latest pause state - if (pSM->prev.state.state == TASK_STATUS__PAUSE && pSM->current.state == TASK_STATUS__PAUSE) { - return; - } - pSM->prev.state = pSM->current; pSM->prev.evt = pTrans->event; } @@ -527,10 +522,13 @@ int32_t streamTaskOnHandleEventSuccess(SStreamTaskSM* pSM, EStreamTaskEvent even return TSDB_CODE_STREAM_INVALID_STATETRANS; } - keepPrevInfo(pSM); + // repeat pause will not overwrite the previous pause state + if (pSM->current.state != TASK_STATUS__PAUSE || pTrans->next.state != TASK_STATUS__PAUSE) { + keepPrevInfo(pSM); - pSM->current = pTrans->next; - pSM->pActiveTrans = NULL; + pSM->current = pTrans->next; + pSM->pActiveTrans = NULL; + } // todo remove it // todo: handle the error code From a197b20466f111589d3822aa51f3e47eaf7fbf12 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 10 Oct 2024 19:28:08 +0800 Subject: [PATCH 08/37] other: update logs. --- source/libs/stream/src/streamTimer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/stream/src/streamTimer.c b/source/libs/stream/src/streamTimer.c index 8b77fe7cb1..0da9acfd1d 100644 --- a/source/libs/stream/src/streamTimer.c +++ b/source/libs/stream/src/streamTimer.c @@ -56,7 +56,7 @@ void streamTmrStart(TAOS_TMR_CALLBACK fp, int32_t mseconds, void* pParam, void* } } - stDebug("vgId:%d start %s tmr succ", vgId, pMsg); + stTrace("vgId:%d start %s tmr succ", vgId, pMsg); } void streamTmrStop(tmr_h tmrId) { From 50ceb19cbff69a3f96d7230e9f7b03667614f1bc Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 11 Oct 2024 19:46:54 +0800 Subject: [PATCH 09/37] fix(stream): reset the activeTrans if pause recv repeatly. --- source/libs/stream/src/streamTaskSm.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/source/libs/stream/src/streamTaskSm.c b/source/libs/stream/src/streamTaskSm.c index 17d5d884a7..c3a2742aa2 100644 --- a/source/libs/stream/src/streamTaskSm.c +++ b/source/libs/stream/src/streamTaskSm.c @@ -525,11 +525,12 @@ int32_t streamTaskOnHandleEventSuccess(SStreamTaskSM* pSM, EStreamTaskEvent even // repeat pause will not overwrite the previous pause state if (pSM->current.state != TASK_STATUS__PAUSE || pTrans->next.state != TASK_STATUS__PAUSE) { keepPrevInfo(pSM); - pSM->current = pTrans->next; - pSM->pActiveTrans = NULL; + } else { + stDebug("s-task:%s repeat pause evt recv, not update prev status", id); } + pSM->pActiveTrans = NULL; // todo remove it // todo: handle the error code // on success callback, add into lock if necessary, or maybe we should add an option for this? 
From 618ec35190d5fa9f86f227767f87db30002aa621 Mon Sep 17 00:00:00 2001 From: charles Date: Sat, 12 Oct 2024 15:15:15 +0800 Subject: [PATCH 10/37] update encrypt test case for ts-5507 by charles --- tests/army/db-encrypt/basic.py | 30 ++++++++++++++++++++++++++++++ tests/parallel_test/cases.task | 2 +- 2 files changed, 31 insertions(+), 1 deletion(-) diff --git a/tests/army/db-encrypt/basic.py b/tests/army/db-encrypt/basic.py index 8d30bbcfe2..ea648f1b8f 100644 --- a/tests/army/db-encrypt/basic.py +++ b/tests/army/db-encrypt/basic.py @@ -13,6 +13,7 @@ from frame.srvCtl import * from frame.caseBase import * from frame import * from frame.autogen import * +from frame import epath # from frame.server.dnodes import * # from frame.server.cluster import * @@ -20,7 +21,9 @@ from frame.autogen import * class TDTestCase(TBase): def init(self, conn, logSql, replicaVar=1): + updatecfgDict = {'dDebugFlag':131} super(TDTestCase, self).init(conn, logSql, replicaVar=1, checkColName="c1") + self.valgrind = 0 self.db = "test" self.stb = "meters" @@ -50,9 +53,36 @@ class TDTestCase(TBase): tdSql.error("create encrypt_key '12345678abcdefghi'") tdSql.error("create database test ENCRYPT_ALGORITHM 'sm4'") + def recreate_dndoe_encrypt_key(self): + """ + Description: From the jira TS-5507, the encrypt key can be recreated. + create: + 2024-09-23 created by Charles + update: + None + """ + # taosd path + taosd_path = epath.binPath() + tdLog.info(f"taosd_path: {taosd_path}") + # dnode2 path + dndoe2_path = tdDnodes.getDnodeDir(2) + dnode2_data_path = os.sep.join([dndoe2_path, "data"]) + dnode2_cfg_path = os.sep.join([dndoe2_path, "cfg"]) + tdLog.info(f"dnode2_path: {dnode2_data_path}") + # stop dnode2 + tdDnodes.stoptaosd(2) + tdLog.info("stop dndoe2") + # delete dndoe2 data + cmd = f"rm -rf {dnode2_data_path}" + os.system(cmd) + # recreate the encrypt key for dnode2 + os.system(f"{os.sep.join([taosd_path, "taosd"])} -y '1234567890' -c {dnode2_cfg_path}") + tdLog.info("test case: recreate the encrypt key for dnode2 passed") + def run(self): self.create_encrypt_db_error() self.create_encrypt_db() + self.recreate_dndoe_encrypt_key() def stop(self): tdSql.close() diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 0d3ed1f8e6..5d94c2a6b1 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -10,7 +10,7 @@ # army-test # ,,y,army,./pytest.sh python3 ./test.py -f multi-level/mlevel_basic.py -N 3 -L 3 -D 2 -,,y,army,./pytest.sh python3 ./test.py -f db-encrypt/basic.py +,,y,army,./pytest.sh python3 ./test.py -f db-encrypt/basic.py -N 3 -M 3 ,,n,army,python3 ./test.py -f storage/s3/s3Basic.py -N 3 ,,y,army,./pytest.sh python3 ./test.py -f cluster/snapshot.py -N 3 -L 3 -D 2 ,,y,army,./pytest.sh python3 ./test.py -f query/function/test_func_elapsed.py From e7b5c72ff23d3f75f4332c758cf06240efaac7a4 Mon Sep 17 00:00:00 2001 From: Feng Chao Date: Sat, 12 Oct 2024 18:13:11 +0800 Subject: [PATCH 11/37] Update basic.py --- tests/army/db-encrypt/basic.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/army/db-encrypt/basic.py b/tests/army/db-encrypt/basic.py index ea648f1b8f..a93b85f0da 100644 --- a/tests/army/db-encrypt/basic.py +++ b/tests/army/db-encrypt/basic.py @@ -76,7 +76,7 @@ class TDTestCase(TBase): cmd = f"rm -rf {dnode2_data_path}" os.system(cmd) # recreate the encrypt key for dnode2 - os.system(f"{os.sep.join([taosd_path, "taosd"])} -y '1234567890' -c {dnode2_cfg_path}") + os.system(f"{os.sep.join([taosd_path, 'taosd'])} -y '1234567890' -c 
{dnode2_cfg_path}") tdLog.info("test case: recreate the encrypt key for dnode2 passed") def run(self): From 8663779def87d90d1702924f5d1c81a34633e73c Mon Sep 17 00:00:00 2001 From: dmchen Date: Tue, 15 Oct 2024 03:56:47 +0000 Subject: [PATCH 12/37] fix/wal-load-file-set --- source/libs/wal/src/walMeta.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/source/libs/wal/src/walMeta.c b/source/libs/wal/src/walMeta.c index 9ade5e5638..cb9f6e2dfe 100644 --- a/source/libs/wal/src/walMeta.c +++ b/source/libs/wal/src/walMeta.c @@ -253,6 +253,7 @@ static int32_t walRebuildFileInfoSet(SArray* metaLogList, SArray* actualLogList) int j = 0; // both of the lists in asc order + /* for (int i = 0; i < actualFileNum; i++) { SWalFileInfo* pLogInfo = taosArrayGet(actualLogList, i); while (j < metaFileNum) { @@ -268,6 +269,7 @@ static int32_t walRebuildFileInfoSet(SArray* metaLogList, SArray* actualLogList) } } } + */ taosArrayClear(metaLogList); @@ -400,6 +402,17 @@ static int32_t walTrimIdxFile(SWal* pWal, int32_t fileIdx) { TAOS_RETURN(TSDB_CODE_SUCCESS); } +void printFileSet(SArray* fileSet) { + int32_t sz = taosArrayGetSize(fileSet); + for (int32_t i = 0; i < sz; i++) { + SWalFileInfo* pFileInfo = taosArrayGet(fileSet, i); + wInfo("firstVer:%" PRId64 ", lastVer:%" PRId64 ", fileSize:%" PRId64 ", syncedOffset:%" PRId64 ", createTs:%" PRId64 + ", closeTs:%" PRId64, + pFileInfo->firstVer, pFileInfo->lastVer, pFileInfo->fileSize, pFileInfo->syncedOffset, pFileInfo->createTs, + pFileInfo->closeTs); + } +} + int32_t walCheckAndRepairMeta(SWal* pWal) { // load log files, get first/snapshot/last version info int32_t code = 0; @@ -460,6 +473,9 @@ int32_t walCheckAndRepairMeta(SWal* pWal) { taosArraySort(actualLog, compareWalFileInfo); + wInfo("vgId:%d, wal path:%s, actual log file num:%" PRId64, pWal->cfg.vgId, pWal->path, taosArrayGetSize(actualLog)); + printFileSet(actualLog); + int metaFileNum = taosArrayGetSize(pWal->fileInfoSet); int actualFileNum = taosArrayGetSize(actualLog); int64_t firstVerPrev = pWal->vers.firstVer; @@ -474,6 +490,10 @@ int32_t walCheckAndRepairMeta(SWal* pWal) { TAOS_RETURN(code); } + wInfo("vgId:%d, wal path:%s, meta log file num:%" PRId64, pWal->cfg.vgId, pWal->path, + taosArrayGetSize(pWal->fileInfoSet)); + printFileSet(pWal->fileInfoSet); + int32_t sz = taosArrayGetSize(pWal->fileInfoSet); // scan and determine the lastVer @@ -1124,6 +1144,10 @@ int32_t walLoadMeta(SWal* pWal) { (void)taosCloseFile(&pFile); taosMemoryFree(buf); + wInfo("vgId:%d, load meta file: %s, fileInfoSet size:%" PRId64, pWal->cfg.vgId, fnameStr, + taosArrayGetSize(pWal->fileInfoSet)); + printFileSet(pWal->fileInfoSet); + TAOS_RETURN(code); } From ca7f490e6d6fbf07aa400c1c37d292180985924c Mon Sep 17 00:00:00 2001 From: xiao-77 Date: Tue, 15 Oct 2024 16:53:15 +0800 Subject: [PATCH 13/37] fix invaild snapshotVer while repair wal meta file --- include/libs/wal/wal.h | 1 + source/dnode/mnode/impl/src/mndMain.c | 1 + source/dnode/vnode/src/vnd/vnodeCfg.c | 1 + source/dnode/vnode/src/vnd/vnodeCommit.c | 1 + source/libs/wal/src/walMeta.c | 6 ++++++ 5 files changed, 10 insertions(+) diff --git a/include/libs/wal/wal.h b/include/libs/wal/wal.h index 74ab0bf484..f95b3f20ca 100644 --- a/include/libs/wal/wal.h +++ b/include/libs/wal/wal.h @@ -50,6 +50,7 @@ typedef struct { int32_t rollPeriod; // secs int64_t retentionSize; int64_t segSize; + int64_t committed; EWalType level; // wal level int32_t encryptAlgorithm; char encryptKey[ENCRYPT_KEY_LEN + 1]; diff --git 
a/source/dnode/mnode/impl/src/mndMain.c b/source/dnode/mnode/impl/src/mndMain.c index bee971b966..685ad2b7a5 100644 --- a/source/dnode/mnode/impl/src/mndMain.c +++ b/source/dnode/mnode/impl/src/mndMain.c @@ -515,6 +515,7 @@ static int32_t mndInitWal(SMnode *pMnode) { .fsyncPeriod = 0, .rollPeriod = -1, .segSize = -1, + .committed = -1, .retentionPeriod = 0, .retentionSize = 0, .level = TAOS_WAL_FSYNC, diff --git a/source/dnode/vnode/src/vnd/vnodeCfg.c b/source/dnode/vnode/src/vnd/vnodeCfg.c index d3acea4766..7c789e84ae 100644 --- a/source/dnode/vnode/src/vnd/vnodeCfg.c +++ b/source/dnode/vnode/src/vnd/vnodeCfg.c @@ -45,6 +45,7 @@ const SVnodeCfg vnodeCfgDefault = {.vgId = -1, .retentionPeriod = -1, .rollPeriod = 0, .segSize = 0, + .committed = 0, .retentionSize = -1, .level = TAOS_WAL_WRITE, .clearFiles = 0, diff --git a/source/dnode/vnode/src/vnd/vnodeCommit.c b/source/dnode/vnode/src/vnd/vnodeCommit.c index 4a4d305f25..dae2b3a5ec 100644 --- a/source/dnode/vnode/src/vnd/vnodeCommit.c +++ b/source/dnode/vnode/src/vnd/vnodeCommit.c @@ -257,6 +257,7 @@ int vnodeLoadInfo(const char *dir, SVnodeInfo *pInfo) { code = vnodeDecodeInfo(pData, pInfo); TSDB_CHECK_CODE(code, lino, _exit); + pInfo->config.walCfg.committed = pInfo->state.committed; _exit: if (code) { if (pFile) { diff --git a/source/libs/wal/src/walMeta.c b/source/libs/wal/src/walMeta.c index 9ade5e5638..8649581d5d 100644 --- a/source/libs/wal/src/walMeta.c +++ b/source/libs/wal/src/walMeta.c @@ -282,6 +282,12 @@ static int32_t walRebuildFileInfoSet(SArray* metaLogList, SArray* actualLogList) } static void walAlignVersions(SWal* pWal) { + if (pWal->cfg.committed > 0 && pWal->cfg.committed != pWal->vers.snapshotVer) { + wWarn("vgId:%d, snapshotVer:%" PRId64 " in wal is different from commited:%" PRId64 + ". in vnode/mnode. align with it.", + pWal->cfg.vgId, pWal->vers.snapshotVer, pWal->cfg.committed); + pWal->vers.snapshotVer = pWal->cfg.committed; + } if (pWal->vers.firstVer > pWal->vers.snapshotVer + 1) { wWarn("vgId:%d, firstVer:%" PRId64 " is larger than snapshotVer:%" PRId64 " + 1. 
align with it.", pWal->cfg.vgId, pWal->vers.firstVer, pWal->vers.snapshotVer); From e4373116b2e566ff0834b3b65f76abc1ab61abf3 Mon Sep 17 00:00:00 2001 From: dmchen Date: Tue, 15 Oct 2024 09:04:20 +0000 Subject: [PATCH 14/37] fix/wal-load-file-set --- source/libs/wal/src/walMeta.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/source/libs/wal/src/walMeta.c b/source/libs/wal/src/walMeta.c index cb9f6e2dfe..f84496eee2 100644 --- a/source/libs/wal/src/walMeta.c +++ b/source/libs/wal/src/walMeta.c @@ -253,7 +253,6 @@ static int32_t walRebuildFileInfoSet(SArray* metaLogList, SArray* actualLogList) int j = 0; // both of the lists in asc order - /* for (int i = 0; i < actualFileNum; i++) { SWalFileInfo* pLogInfo = taosArrayGet(actualLogList, i); while (j < metaFileNum) { @@ -269,7 +268,6 @@ static int32_t walRebuildFileInfoSet(SArray* metaLogList, SArray* actualLogList) } } } - */ taosArrayClear(metaLogList); @@ -553,6 +551,7 @@ int32_t walCheckAndRepairMeta(SWal* pWal) { // repair ts of files TAOS_CHECK_RETURN(walRepairLogFileTs(pWal, &updateMeta)); + printFileSet(pWal->fileInfoSet); // update meta file if (updateMeta) { TAOS_CHECK_RETURN(walSaveMeta(pWal)); From 47d39c4ab8d5295750992b7373f57bb4943ec4fd Mon Sep 17 00:00:00 2001 From: xiao-77 Date: Tue, 15 Oct 2024 17:23:28 +0800 Subject: [PATCH 15/37] fix wal test in ci --- source/libs/wal/test/walMetaTest.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/source/libs/wal/test/walMetaTest.cpp b/source/libs/wal/test/walMetaTest.cpp index 8bd4de0a89..a0285f1363 100644 --- a/source/libs/wal/test/walMetaTest.cpp +++ b/source/libs/wal/test/walMetaTest.cpp @@ -127,6 +127,7 @@ class WalRetentionEnv : public ::testing::Test { SWalCfg cfg; cfg.rollPeriod = -1; cfg.segSize = -1; + cfg.committed =-1; cfg.retentionPeriod = -1; cfg.retentionSize = 0; cfg.rollPeriod = 0; From 39c429182e04a034fbd1f5dd8a95ff88056a79b2 Mon Sep 17 00:00:00 2001 From: dmchen Date: Tue, 15 Oct 2024 10:46:25 +0000 Subject: [PATCH 16/37] fix/wal-load-file-set-fix-case --- source/libs/wal/src/walMeta.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/source/libs/wal/src/walMeta.c b/source/libs/wal/src/walMeta.c index f84496eee2..17830ff200 100644 --- a/source/libs/wal/src/walMeta.c +++ b/source/libs/wal/src/walMeta.c @@ -471,7 +471,8 @@ int32_t walCheckAndRepairMeta(SWal* pWal) { taosArraySort(actualLog, compareWalFileInfo); - wInfo("vgId:%d, wal path:%s, actual log file num:%" PRId64, pWal->cfg.vgId, pWal->path, taosArrayGetSize(actualLog)); + wInfo("vgId:%d, wal path:%s, actual log file num:%d", pWal->cfg.vgId, pWal->path, + (int32_t)taosArrayGetSize(actualLog)); printFileSet(actualLog); int metaFileNum = taosArrayGetSize(pWal->fileInfoSet); @@ -488,8 +489,8 @@ int32_t walCheckAndRepairMeta(SWal* pWal) { TAOS_RETURN(code); } - wInfo("vgId:%d, wal path:%s, meta log file num:%" PRId64, pWal->cfg.vgId, pWal->path, - taosArrayGetSize(pWal->fileInfoSet)); + wInfo("vgId:%d, wal path:%s, meta log file num:%d", pWal->cfg.vgId, pWal->path, + (int32_t)taosArrayGetSize(pWal->fileInfoSet)); printFileSet(pWal->fileInfoSet); int32_t sz = taosArrayGetSize(pWal->fileInfoSet); @@ -1143,8 +1144,8 @@ int32_t walLoadMeta(SWal* pWal) { (void)taosCloseFile(&pFile); taosMemoryFree(buf); - wInfo("vgId:%d, load meta file: %s, fileInfoSet size:%" PRId64, pWal->cfg.vgId, fnameStr, - taosArrayGetSize(pWal->fileInfoSet)); + wInfo("vgId:%d, load meta file: %s, fileInfoSet size:%d", pWal->cfg.vgId, fnameStr, + (int32_t)taosArrayGetSize(pWal->fileInfoSet)); 
printFileSet(pWal->fileInfoSet); TAOS_RETURN(code); From 2b83a20c69008dcbc532e88215dc96a4390e9caa Mon Sep 17 00:00:00 2001 From: Jing Sima Date: Mon, 14 Oct 2024 17:19:29 +0800 Subject: [PATCH 17/37] Revert "fix:[TD-32334] Generate correct time window when using interp with fill next and linear." This reverts commit 77e63d0922f6d230a314d28863744185faab8aa5. --- source/libs/executor/src/timesliceoperator.c | 20 ++-- tests/system-test/2-query/interp.py | 120 +++++++------------ 2 files changed, 50 insertions(+), 90 deletions(-) diff --git a/source/libs/executor/src/timesliceoperator.c b/source/libs/executor/src/timesliceoperator.c index 70bf26405e..2ea300ace8 100644 --- a/source/libs/executor/src/timesliceoperator.c +++ b/source/libs/executor/src/timesliceoperator.c @@ -278,7 +278,7 @@ static bool checkNullRow(SExprSupp* pExprSup, SSDataBlock* pSrcBlock, int32_t in } static bool genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp* pExprSup, SSDataBlock* pResBlock, - SSDataBlock* pSrcBlock, int32_t index, bool beforeTs, SExecTaskInfo* pTaskInfo, bool genAfterBlock) { + SSDataBlock* pSrcBlock, int32_t index, bool beforeTs, SExecTaskInfo* pTaskInfo) { int32_t code = TSDB_CODE_SUCCESS; int32_t lino = 0; int32_t rows = pResBlock->info.rows; @@ -427,7 +427,7 @@ static bool genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp break; } - if (start.key == INT64_MIN || end.key == INT64_MIN || genAfterBlock) { + if (start.key == INT64_MIN || end.key == INT64_MIN) { colDataSetNULL(pDst, rows); break; } @@ -463,13 +463,8 @@ static bool genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp break; } - if (genAfterBlock && rows == 0) { - hasInterp = false; - break; - } - SGroupKeys* pkey = taosArrayGet(pSliceInfo->pNextRow, srcSlot); - if (pkey->isNull == false && !genAfterBlock) { + if (pkey->isNull == false) { code = colDataSetVal(pDst, rows, pkey->pData, false); QUERY_CHECK_CODE(code, lino, _end); } else { @@ -841,7 +836,7 @@ static void doTimesliceImpl(SOperatorInfo* pOperator, STimeSliceOperatorInfo* pS int64_t nextTs = *(int64_t*)colDataGetData(pTsCol, i + 1); if (nextTs > pSliceInfo->current) { while (pSliceInfo->current < nextTs && pSliceInfo->current <= pSliceInfo->win.ekey) { - if (!genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock, pBlock, i, false, pTaskInfo, false) && + if (!genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock, pBlock, i, false, pTaskInfo) && pSliceInfo->fillType == TSDB_FILL_LINEAR) { break; } else { @@ -869,7 +864,7 @@ static void doTimesliceImpl(SOperatorInfo* pOperator, STimeSliceOperatorInfo* pS doKeepLinearInfo(pSliceInfo, pBlock, i); while (pSliceInfo->current < ts && pSliceInfo->current <= pSliceInfo->win.ekey) { - if (!genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock, pBlock, i, true, pTaskInfo, false) && + if (!genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock, pBlock, i, true, pTaskInfo) && pSliceInfo->fillType == TSDB_FILL_LINEAR) { break; } else { @@ -914,12 +909,13 @@ static void genInterpAfterDataBlock(STimeSliceOperatorInfo* pSliceInfo, SOperato SSDataBlock* pResBlock = pSliceInfo->pRes; SInterval* pInterval = &pSliceInfo->interval; - if (pSliceInfo->pPrevGroupKey == NULL) { + if (pSliceInfo->fillType == TSDB_FILL_NEXT || pSliceInfo->fillType == TSDB_FILL_LINEAR || + pSliceInfo->pPrevGroupKey == NULL) { return; } while (pSliceInfo->current <= pSliceInfo->win.ekey) { - (void)genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock, NULL, 
index, false, pOperator->pTaskInfo, true); + (void)genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock, NULL, index, false, pOperator->pTaskInfo); pSliceInfo->current = taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision); } diff --git a/tests/system-test/2-query/interp.py b/tests/system-test/2-query/interp.py index 3cdf52725a..bcfc389d7b 100644 --- a/tests/system-test/2-query/interp.py +++ b/tests/system-test/2-query/interp.py @@ -907,7 +907,7 @@ class TDTestCase: ## {. . .} tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(next)") - tdSql.checkRows(13) + tdSql.checkRows(12) tdSql.checkData(0, 0, 5) tdSql.checkData(1, 0, 5) tdSql.checkData(2, 0, 10) @@ -920,7 +920,6 @@ class TDTestCase: tdSql.checkData(9, 0, 15) tdSql.checkData(10, 0, 15) tdSql.checkData(11, 0, 15) - tdSql.checkData(12, 0, None) ## {} ... tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:01', '2020-02-01 00:00:04') every(1s) fill(next)") @@ -958,12 +957,10 @@ class TDTestCase: ## ..{.} tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:13', '2020-02-01 00:00:17') every(1s) fill(next)") - tdSql.checkRows(5) + tdSql.checkRows(3) tdSql.checkData(0, 0, 15) tdSql.checkData(1, 0, 15) tdSql.checkData(2, 0, 15) - tdSql.checkData(3, 0, None) - tdSql.checkData(4, 0, None) ## ... {} tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(next)") @@ -1275,7 +1272,7 @@ class TDTestCase: tdSql.checkData(8, 1, True) tdSql.query(f"select _irowts,_isfilled,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(next)") - tdSql.checkRows(13) + tdSql.checkRows(12) tdSql.checkCols(3) tdSql.checkData(0, 0, '2020-02-01 00:00:04.000') @@ -1290,7 +1287,6 @@ class TDTestCase: tdSql.checkData(9, 0, '2020-02-01 00:00:13.000') tdSql.checkData(10, 0, '2020-02-01 00:00:14.000') tdSql.checkData(11, 0, '2020-02-01 00:00:15.000') - tdSql.checkData(12, 0, '2020-02-01 00:00:16.000') tdSql.checkData(0, 1, True) tdSql.checkData(1, 1, False) @@ -1304,7 +1300,6 @@ class TDTestCase: tdSql.checkData(9, 1, True) tdSql.checkData(10, 1, True) tdSql.checkData(11, 1, False) - tdSql.checkData(12, 1, True) tdSql.query(f"select _irowts,_isfilled,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:05', '2020-02-01 00:00:15') every(2s) fill(next)") tdSql.checkRows(6) @@ -1682,13 +1677,9 @@ class TDTestCase: ## | . 
| { | .} | tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(next)") - tdSql.checkRows(6) + tdSql.checkRows(2) tdSql.checkData(0, 0, 15) tdSql.checkData(1, 0, 15) - tdSql.checkData(2, 0, None) - tdSql.checkData(3, 0, None) - tdSql.checkData(4, 0, None) - tdSql.checkData(5, 0, None) # test fill linear @@ -2741,7 +2732,7 @@ class TDTestCase: tdSql.checkData(4, i, 15) tdSql.query(f"select interp(c0),interp(c1),interp(c2),interp(c3) from {dbname}.{tbname} range('2020-02-09 00:00:05', '2020-02-13 00:00:05') every(1d) fill(next)") - tdSql.checkRows(5) + tdSql.checkRows(3) tdSql.checkCols(4) for i in range (tdSql.queryCols): @@ -2837,7 +2828,7 @@ class TDTestCase: # test fill next tdSql.query(f"select _irowts,_isfilled,interp(c0) from {dbname}.{tbname2} range('2020-02-02 00:00:00', '2020-02-02 00:00:18') every(1s) fill(next)") - tdSql.checkRows(19) + tdSql.checkRows(18) tdSql.checkCols(3) tdSql.checkData(0, 0, '2020-02-02 00:00:00.000') @@ -2860,7 +2851,6 @@ class TDTestCase: tdSql.checkData(15, 2, None) tdSql.checkData(16, 2, None) tdSql.checkData(17, 2, None) - tdSql.checkData(18, 2, None) tdSql.checkData(17, 0, '2020-02-02 00:00:17.000') @@ -3091,7 +3081,7 @@ class TDTestCase: # test fill linear tdSql.query(f"select _irowts,_isfilled,interp(c0) from {dbname}.{tbname2} range('2020-02-02 00:00:00', '2020-02-02 00:00:18') every(1s) fill(linear)") - tdSql.checkRows(18) + tdSql.checkRows(17) tdSql.checkCols(3) tdSql.checkData(0, 0, '2020-02-02 00:00:01.000') @@ -3113,9 +3103,8 @@ class TDTestCase: tdSql.checkData(14, 2, None) tdSql.checkData(15, 2, None) tdSql.checkData(16, 2, None) - tdSql.checkData(17, 2, None) - tdSql.checkData(17, 0, '2020-02-02 00:00:18.000') + tdSql.checkData(16, 0, '2020-02-02 00:00:17.000') tdLog.printNoPrefix("==========step13:test error cases") @@ -3231,7 +3220,7 @@ class TDTestCase: tdSql.checkData(17, 1, True) tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname} range('2020-02-01 00:00:00', '2020-02-01 00:00:18') every(1s) fill(next)") - tdSql.checkRows(19) + tdSql.checkRows(18) tdSql.checkData(0, 0, '2020-02-01 00:00:00.000') tdSql.checkData(0, 1, True) @@ -3254,12 +3243,9 @@ class TDTestCase: tdSql.checkData(15, 2, 15) tdSql.checkData(16, 2, 17) tdSql.checkData(17, 2, 17) - tdSql.checkData(18, 2, None) tdSql.checkData(17, 0, '2020-02-01 00:00:17.000') tdSql.checkData(17, 1, False) - tdSql.checkData(18, 0, '2020-02-01 00:00:18.000') - tdSql.checkData(18, 1, True) tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname} range('2020-02-01 00:00:00', '2020-02-01 00:00:18') every(1s) fill(linear)") tdSql.checkRows(17) @@ -3376,24 +3362,24 @@ class TDTestCase: tdSql.query(f"select tbname, _irowts, _isfilled, interp(c0) from {dbname}.{stbname} partition by tbname range('2020-02-01 00:00:00', '2020-02-01 00:00:18') every(1s) fill(next)") - tdSql.checkRows(57) - for i in range(0, 19): + tdSql.checkRows(48) + for i in range(0, 14): tdSql.checkData(i, 0, 'ctb1') - for i in range(19, 38): + for i in range(14, 30): tdSql.checkData(i, 0, 'ctb2') - for i in range(38, 57): + for i in range(30, 48): tdSql.checkData(i, 0, 'ctb3') tdSql.checkData(0, 1, '2020-02-01 00:00:00.000') - tdSql.checkData(18, 1, '2020-02-01 00:00:18.000') + tdSql.checkData(13, 1, '2020-02-01 00:00:13.000') - tdSql.checkData(19, 1, '2020-02-01 00:00:00.000') - tdSql.checkData(37, 1, '2020-02-01 00:00:18.000') + tdSql.checkData(14, 1, '2020-02-01 00:00:00.000') + tdSql.checkData(29, 1, '2020-02-01 
00:00:15.000') - tdSql.checkData(38, 1, '2020-02-01 00:00:00.000') - tdSql.checkData(56, 1, '2020-02-01 00:00:18.000') + tdSql.checkData(30, 1, '2020-02-01 00:00:00.000') + tdSql.checkData(47, 1, '2020-02-01 00:00:17.000') for i in range(0, 2): tdSql.checkData(i, 3, 1) @@ -3404,33 +3390,24 @@ class TDTestCase: for i in range(8, 14): tdSql.checkData(i, 3, 13) - for i in range(14, 19): - tdSql.checkData(i, 3, None) - - for i in range(19, 23): + for i in range(14, 18): tdSql.checkData(i, 3, 3) - for i in range(23, 29): + for i in range(18, 24): tdSql.checkData(i, 3, 9) - for i in range(29, 35): + for i in range(24, 30): tdSql.checkData(i, 3, 15) - for i in range(35, 38): - tdSql.checkData(i, 3, None) - - for i in range(38, 44): + for i in range(30, 36): tdSql.checkData(i, 3, 5) - for i in range(44, 50): + for i in range(36, 42): tdSql.checkData(i, 3, 11) - for i in range(50, 56): + for i in range(42, 48): tdSql.checkData(i, 3, 17) - for i in range(56, 57): - tdSql.checkData(i, 3, None) - tdSql.query(f"select tbname, _irowts, _isfilled, interp(c0) from {dbname}.{stbname} partition by tbname range('2020-02-01 00:00:00', '2020-02-01 00:00:18') every(1s) fill(linear)") tdSql.checkRows(39) @@ -3473,7 +3450,7 @@ class TDTestCase: tdSql.checkRows(90) tdSql.query(f"select c0, _irowts, _isfilled, interp(c0) from {dbname}.{stbname} partition by c0 range('2020-02-01 00:00:00', '2020-02-01 00:00:18') every(1s) fill(next)") - tdSql.checkRows(171) + tdSql.checkRows(90) tdSql.query(f"select c0, _irowts, _isfilled, interp(c0) from {dbname}.{stbname} partition by c0 range('2020-02-01 00:00:00', '2020-02-01 00:00:18') every(1s) fill(linear)") tdSql.checkRows(9) @@ -3490,7 +3467,7 @@ class TDTestCase: tdSql.checkRows(48) tdSql.query(f"select t1, _irowts, _isfilled, interp(c0) from {dbname}.{stbname} partition by t1 range('2020-02-01 00:00:00', '2020-02-01 00:00:18') every(1s) fill(next)") - tdSql.checkRows(57) + tdSql.checkRows(48) tdSql.query(f"select t1, _irowts, _isfilled, interp(c0) from {dbname}.{stbname} partition by t1 range('2020-02-01 00:00:00', '2020-02-01 00:00:18') every(1s) fill(linear)") tdSql.checkRows(39) @@ -4386,7 +4363,7 @@ class TDTestCase: tdSql.query(f"select _irowts, _isfilled, interp(c0, 1) from {dbname}.{tbname_null} range('2020-02-02 00:00:01', '2020-02-02 00:00:11') every(1s) fill(next)") - tdSql.checkRows(11) + tdSql.checkRows(9) tdSql.checkData(0, 1, False) tdSql.checkData(1, 1, True) tdSql.checkData(2, 1, False) @@ -4396,8 +4373,6 @@ class TDTestCase: tdSql.checkData(6, 1, True) tdSql.checkData(7, 1, False) tdSql.checkData(8, 1, False) - tdSql.checkData(9, 1, True) - tdSql.checkData(10, 1, True) tdSql.checkData(0, 2, 1) tdSql.checkData(1, 2, 3) @@ -4408,13 +4383,11 @@ class TDTestCase: tdSql.checkData(6, 2, 8) tdSql.checkData(7, 2, 8) tdSql.checkData(8, 2, 9) - tdSql.checkData(9, 2, None) - tdSql.checkData(10, 2, None) tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_null} where c0 is not null range('2020-02-02 00:00:01', '2020-02-02 00:00:11') every(1s) fill(next)") - tdSql.checkRows(11) + tdSql.checkRows(9) tdSql.checkData(0, 1, False) tdSql.checkData(1, 1, True) tdSql.checkData(2, 1, False) @@ -4424,9 +4397,6 @@ class TDTestCase: tdSql.checkData(6, 1, True) tdSql.checkData(7, 1, False) tdSql.checkData(8, 1, False) - tdSql.checkData(9, 1, True) - tdSql.checkData(10, 1, True) - tdSql.checkData(0, 2, 1) tdSql.checkData(1, 2, 3) @@ -4437,8 +4407,6 @@ class TDTestCase: tdSql.checkData(6, 2, 8) tdSql.checkData(7, 2, 8) tdSql.checkData(8, 2, 9) - 
tdSql.checkData(9, 2, None) - tdSql.checkData(10, 2, None) # super table tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_null} range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(next)") @@ -4475,7 +4443,7 @@ class TDTestCase: tdSql.query(f"select _irowts, _isfilled, interp(c0, 1) from {dbname}.{stbname_null} range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(next)") - tdSql.checkRows(9) + tdSql.checkRows(8) tdSql.checkData(0, 1, False) tdSql.checkData(1, 1, True) tdSql.checkData(2, 1, True) @@ -4484,7 +4452,6 @@ class TDTestCase: tdSql.checkData(5, 1, True) tdSql.checkData(6, 1, False) tdSql.checkData(7, 1, False) - tdSql.checkData(8, 1, True) tdSql.checkData(0, 2, 1) tdSql.checkData(1, 2, 9) @@ -4494,12 +4461,11 @@ class TDTestCase: tdSql.checkData(5, 2, 13) tdSql.checkData(6, 2, 13) tdSql.checkData(7, 2, 15) - tdSql.checkData(8, 2, None) tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_null} where c0 is not null range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(next)") - tdSql.checkRows(9) + tdSql.checkRows(8) tdSql.checkData(0, 1, False) tdSql.checkData(1, 1, True) tdSql.checkData(2, 1, True) @@ -4508,7 +4474,6 @@ class TDTestCase: tdSql.checkData(5, 1, True) tdSql.checkData(6, 1, False) tdSql.checkData(7, 1, False) - tdSql.checkData(8, 1, True) tdSql.checkData(0, 2, 1) tdSql.checkData(1, 2, 9) @@ -4518,37 +4483,36 @@ class TDTestCase: tdSql.checkData(5, 2, 13) tdSql.checkData(6, 2, 13) tdSql.checkData(7, 2, 15) - tdSql.checkData(8, 2, None) tdSql.query(f"select tbname, _irowts, _isfilled, interp(c0, 1) from {dbname}.{stbname_null} partition by tbname range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(next)") - tdSql.checkRows(18) - for i in range(0, 9): + tdSql.checkRows(15) + for i in range(0, 7): tdSql.checkData(i, 0, 'ctb1_null') - for i in range(9, 18): + for i in range(7, 15): tdSql.checkData(i, 0, 'ctb2_null') tdSql.checkData(0, 1, '2020-02-01 00:00:01.000') - tdSql.checkData(8, 1, '2020-02-01 00:00:17.000') + tdSql.checkData(6, 1, '2020-02-01 00:00:13.000') - tdSql.checkData(9, 1, '2020-02-01 00:00:01.000') - tdSql.checkData(17, 1, '2020-02-01 00:00:17.000') + tdSql.checkData(7, 1, '2020-02-01 00:00:01.000') + tdSql.checkData(14, 1, '2020-02-01 00:00:15.000') tdSql.query(f"select tbname, _irowts, _isfilled, interp(c0) from {dbname}.{stbname_null} where c0 is not null partition by tbname range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(next)") - tdSql.checkRows(18) - for i in range(0, 9): + tdSql.checkRows(15) + for i in range(0, 7): tdSql.checkData(i, 0, 'ctb1_null') - for i in range(9, 18): + for i in range(7, 15): tdSql.checkData(i, 0, 'ctb2_null') tdSql.checkData(0, 1, '2020-02-01 00:00:01.000') - tdSql.checkData(8, 1, '2020-02-01 00:00:17.000') + tdSql.checkData(6, 1, '2020-02-01 00:00:13.000') - tdSql.checkData(9, 1, '2020-02-01 00:00:01.000') - tdSql.checkData(17, 1, '2020-02-01 00:00:17.000') + tdSql.checkData(7, 1, '2020-02-01 00:00:01.000') + tdSql.checkData(14, 1, '2020-02-01 00:00:15.000') # fill linear # normal table From 3310e8145620ff06332d262ad2411ada14bb36ab Mon Sep 17 00:00:00 2001 From: xiao-77 Date: Wed, 16 Oct 2024 10:15:12 +0800 Subject: [PATCH 18/37] make sure mnode can be started --- source/libs/wal/src/walMeta.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/source/libs/wal/src/walMeta.c b/source/libs/wal/src/walMeta.c index 8649581d5d..042024284c 100644 --- a/source/libs/wal/src/walMeta.c +++ 
b/source/libs/wal/src/walMeta.c @@ -288,6 +288,11 @@ static void walAlignVersions(SWal* pWal) { pWal->cfg.vgId, pWal->vers.snapshotVer, pWal->cfg.committed); pWal->vers.snapshotVer = pWal->cfg.committed; } + if (pWal->vers.snapshotVer < 0) { + wWarn("vgId:%d, snapshotVer:%" PRId64 " in wal is an invalid value. align it with firstVer:%" PRId64 ".", + pWal->cfg.vgId, pWal->vers.snapshotVer, pWal->vers.firstVer); + pWal->vers.snapshotVer = pWal->vers.firstVer; + } if (pWal->vers.firstVer > pWal->vers.snapshotVer + 1) { wWarn("vgId:%d, firstVer:%" PRId64 " is larger than snapshotVer:%" PRId64 " + 1. align with it.", pWal->cfg.vgId, pWal->vers.firstVer, pWal->vers.snapshotVer); From d0a0d578bd149e82f0623f35a52378393e229391 Mon Sep 17 00:00:00 2001 From: dmchen Date: Wed, 16 Oct 2024 02:16:31 +0000 Subject: [PATCH 19/37] fix/TS-5533-update-os-info-when-monitor --- source/dnode/mgmt/node_util/src/dmUtil.c | 1 + 1 file changed, 1 insertion(+) diff --git a/source/dnode/mgmt/node_util/src/dmUtil.c b/source/dnode/mgmt/node_util/src/dmUtil.c index b50c746c92..f8c0955745 100644 --- a/source/dnode/mgmt/node_util/src/dmUtil.c +++ b/source/dnode/mgmt/node_util/src/dmUtil.c @@ -74,6 +74,7 @@ void dmGetMonitorSystemInfo(SMonSysInfo *pInfo) { } pInfo->mem_total = tsTotalMemoryKB; pInfo->disk_engine = 0; + osUpdate(); pInfo->disk_used = tsDataSpace.size.used; pInfo->disk_total = tsDataSpace.size.total; code = taosGetCardInfoDelta(&pInfo->net_in, &pInfo->net_out); From f206837d48f86fb3102c2e40626a7cbe5cdc6c5f Mon Sep 17 00:00:00 2001 From: xiao-77 Date: Wed, 16 Oct 2024 10:30:27 +0800 Subject: [PATCH 20/37] modify log level while walLoadMeta failed --- source/libs/wal/src/walMgmt.c | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/source/libs/wal/src/walMgmt.c b/source/libs/wal/src/walMgmt.c index 3b23a2db80..d8a58efe4e 100644 --- a/source/libs/wal/src/walMgmt.c +++ b/source/libs/wal/src/walMgmt.c @@ -91,7 +91,8 @@ static int32_t walInitLock(SWal *pWal) { } SWal *walOpen(const char *path, SWalCfg *pCfg) { - SWal *pWal = taosMemoryCalloc(1, sizeof(SWal)); + int32_t code = 0; + SWal *pWal = taosMemoryCalloc(1, sizeof(SWal)); if (pWal == NULL) { terrno = TAOS_SYSTEM_ERROR(errno); return NULL; @@ -160,17 +161,20 @@ SWal *walOpen(const char *path, SWalCfg *pCfg) { pWal->writeHead.magic = WAL_MAGIC; // load meta - if (walLoadMeta(pWal) < 0) { - wInfo("vgId:%d, failed to load meta since %s", pWal->cfg.vgId, tstrerror(terrno)); + code = walLoadMeta(pWal); + if (code < 0) { + wWarn("vgId:%d, failed to load meta since %s", pWal->cfg.vgId, tstrerror(code)); } - if (walCheckAndRepairMeta(pWal) < 0) { - wError("vgId:%d, cannot open wal since repair meta file failed", pWal->cfg.vgId); + code = walCheckAndRepairMeta(pWal); + if (code < 0) { + wError("vgId:%d, cannot open wal since repair meta file failed since %s", pWal->cfg.vgId, tstrerror(code)); goto _err; } - if (walCheckAndRepairIdx(pWal) < 0) { - wError("vgId:%d, cannot open wal since repair idx file failed", pWal->cfg.vgId); + code = walCheckAndRepairIdx(pWal); + if (code < 0) { + wError("vgId:%d, cannot open wal since repair idx file failed since %s", pWal->cfg.vgId, tstrerror(code)); goto _err; } From dca0822b5d424ebadbbe52c0570dee7349812e74 Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Wed, 16 Oct 2024 10:47:02 +0800 Subject: [PATCH 21/37] feat: support query-QPS new feature --- tools/auto/testCompression/testCompression.py | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git 
a/tools/auto/testCompression/testCompression.py b/tools/auto/testCompression/testCompression.py index 281a097f8a..ee922a1a23 100644 --- a/tools/auto/testCompression/testCompression.py +++ b/tools/auto/testCompression/testCompression.py @@ -134,8 +134,6 @@ def getMatch(datatype, algo): def generateJsonFile(algo): - print(f"doTest algo: {algo} \n") - # replace datatype context = readFileContext(templateFile) # replace compress @@ -192,8 +190,6 @@ def findContextValue(context, label): ends = [',','}',']', 0] while context[end] not in ends: end += 1 - - print(f"start = {start} end={end}\n") return context[start:end] @@ -281,10 +277,10 @@ def testQuery(): # INFO: Spend 6.7350 second completed total queries: 10, the QPS of all threads: 1.485 speed = None - for i in range(20, len(lines)): + for i in range(0, len(lines)): # find second real + context = lines[i] pos = context.find("the QPS of all threads:") - context = lines[26] if pos == -1 : continue pos += 24 @@ -302,7 +298,6 @@ def doTest(algo, resultFile): print(f"doTest algo: {algo} \n") #cleanAndStartTaosd() - # json jsonFile = generateJsonFile(algo) From de7006743627a2e561f833c2117f17b8afce0791 Mon Sep 17 00:00:00 2001 From: Jinqing Kuang Date: Wed, 16 Oct 2024 10:48:41 +0800 Subject: [PATCH 22/37] fix(query)[TD-32564]. Fix memory leak in exceptional cases In function tsdbTFileSetInitRef, clear all FileObj stored in the variable lvl when an error occurs, and release the memory allocated for lvl itself. --- source/dnode/vnode/src/tsdb/tsdbFSet2.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbFSet2.c b/source/dnode/vnode/src/tsdb/tsdbFSet2.c index fc681f9753..a0ae58ac96 100644 --- a/source/dnode/vnode/src/tsdb/tsdbFSet2.c +++ b/source/dnode/vnode/src/tsdb/tsdbFSet2.c @@ -602,14 +602,14 @@ int32_t tsdbTFileSetInitRef(STsdb *pTsdb, const STFileSet *fset1, STFileSet **fs SSttLvl *lvl; code = tsdbSttLvlInitRef(pTsdb, lvl1, &lvl); if (code) { - taosMemoryFree(lvl); + tsdbSttLvlClear(&lvl); tsdbTFileSetClear(fset); return code; } code = TARRAY2_APPEND(fset[0]->lvlArr, lvl); if (code) { - taosMemoryFree(lvl); + tsdbSttLvlClear(&lvl); tsdbTFileSetClear(fset); return code; } From 2520eead4842472e3f03690041d02bcaca273269 Mon Sep 17 00:00:00 2001 From: dmchen Date: Wed, 16 Oct 2024 04:59:02 +0000 Subject: [PATCH 23/37] fix/TS-5533-update-os-info-when-monitor-fix-check --- source/dnode/mgmt/node_util/src/dmUtil.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/source/dnode/mgmt/node_util/src/dmUtil.c b/source/dnode/mgmt/node_util/src/dmUtil.c index f8c0955745..3a6c73a1bc 100644 --- a/source/dnode/mgmt/node_util/src/dmUtil.c +++ b/source/dnode/mgmt/node_util/src/dmUtil.c @@ -74,7 +74,10 @@ void dmGetMonitorSystemInfo(SMonSysInfo *pInfo) { } pInfo->mem_total = tsTotalMemoryKB; pInfo->disk_engine = 0; - osUpdate(); + code = osUpdate(); + if (code != 0) { + dError("failed to update os info since %s", tstrerror(code)); + } pInfo->disk_used = tsDataSpace.size.used; pInfo->disk_total = tsDataSpace.size.total; code = taosGetCardInfoDelta(&pInfo->net_in, &pInfo->net_out); From 48d9f2da65e8aea47a03e44f30084bbac478a06a Mon Sep 17 00:00:00 2001 From: xiao-77 Date: Wed, 16 Oct 2024 13:38:45 +0800 Subject: [PATCH 24/37] fix ci walTest --- source/libs/wal/src/walMeta.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/wal/src/walMeta.c b/source/libs/wal/src/walMeta.c index 042024284c..92ad760a20 100644 --- a/source/libs/wal/src/walMeta.c +++ 
b/source/libs/wal/src/walMeta.c @@ -288,7 +288,7 @@ static void walAlignVersions(SWal* pWal) { pWal->cfg.vgId, pWal->vers.snapshotVer, pWal->cfg.committed); pWal->vers.snapshotVer = pWal->cfg.committed; } - if (pWal->vers.snapshotVer < 0) { + if (pWal->vers.snapshotVer < 0 && pWal->vers.firstVer > 0) { wWarn("vgId:%d, snapshotVer:%" PRId64 " in wal is an invalid value. align it with firstVer:%" PRId64 ".", pWal->cfg.vgId, pWal->vers.snapshotVer, pWal->vers.firstVer); pWal->vers.snapshotVer = pWal->vers.firstVer; From 544a1828e6c094d4ed874c36893e44afc2986f29 Mon Sep 17 00:00:00 2001 From: dmchen Date: Wed, 16 Oct 2024 07:00:28 +0000 Subject: [PATCH 25/37] fix/TS-5532-add-more-log-status-msg --- source/dnode/mgmt/mgmt_dnode/src/dmHandle.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c b/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c index f1f3a3bee7..87b1ae0efa 100644 --- a/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c +++ b/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c @@ -123,6 +123,7 @@ void dmSendStatusReq(SDnodeMgmt *pMgmt) { int32_t code = 0; SStatusReq req = {0}; + dDebug("send status req to mnode, statusSeq:%d, begin to mgnt lock", pMgmt->statusSeq); (void)taosThreadRwlockRdlock(&pMgmt->pData->lock); req.sver = tsVersion; req.dnodeVer = pMgmt->pData->dnodeVer; @@ -161,14 +162,17 @@ void dmSendStatusReq(SDnodeMgmt *pMgmt) { memcpy(req.clusterCfg.charset, tsCharset, TD_LOCALE_LEN); (void)taosThreadRwlockUnlock(&pMgmt->pData->lock); + dDebug("send status req to mnode, statusSeq:%d, begin to get vnode loads", pMgmt->statusSeq); SMonVloadInfo vinfo = {0}; (*pMgmt->getVnodeLoadsFp)(&vinfo); req.pVloads = vinfo.pVloads; + dDebug("send status req to mnode, statusSeq:%d, begin to get mnode loads", pMgmt->statusSeq); SMonMloadInfo minfo = {0}; (*pMgmt->getMnodeLoadsFp)(&minfo); req.mload = minfo.load; + dDebug("send status req to mnode, statusSeq:%d, begin to get qnode loads", pMgmt->statusSeq); (*pMgmt->getQnodeLoadsFp)(&req.qload); pMgmt->statusSeq++; @@ -206,6 +210,7 @@ void dmSendStatusReq(SDnodeMgmt *pMgmt) { int8_t epUpdated = 0; (void)dmGetMnodeEpSet(pMgmt->pData, &epSet); + dDebug("send status req to mnode, statusSeq:%d, begin to send rpc msg", pMgmt->statusSeq); code = rpcSendRecvWithTimeout(pMgmt->msgCb.statusRpc, &epSet, &rpcMsg, &rpcRsp, &epUpdated, tsStatusInterval * 5 * 1000); if (code != 0) { From b56701e05c9e940346dde1463ebf42bad7b7d38a Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Wed, 16 Oct 2024 16:31:30 +0800 Subject: [PATCH 26/37] fix: remove invalid error code check and add meta data recover and compact function --- source/dnode/mgmt/exe/dmMain.c | 3 + source/dnode/vnode/src/inc/vnodeInt.h | 3 + source/dnode/vnode/src/meta/metaOpen.c | 171 +++++++++++++++++++++++- source/dnode/vnode/src/meta/metaTable.c | 3 - 4 files changed, 175 insertions(+), 5 deletions(-) diff --git a/source/dnode/mgmt/exe/dmMain.c b/source/dnode/mgmt/exe/dmMain.c index ba162bd84f..1089b0eced 100644 --- a/source/dnode/mgmt/exe/dmMain.c +++ b/source/dnode/mgmt/exe/dmMain.c @@ -182,6 +182,7 @@ static void dmSetSignalHandle() { } #endif } +extern bool generateNewMeta; static int32_t dmParseArgs(int32_t argc, char const *argv[]) { global.startTime = taosGetTimestampMs(); @@ -221,6 +222,8 @@ static int32_t dmParseArgs(int32_t argc, char const *argv[]) { global.dumpSdb = true; } else if (strcmp(argv[i], "-dTxn") == 0) { global.deleteTrans = true; + } else if (strcmp(argv[i], "-r") == 0) { + generateNewMeta = true; } else if (strcmp(argv[i], "-E") == 0) { if 
(i < argc - 1) { if (strlen(argv[++i]) >= PATH_MAX) { diff --git a/source/dnode/vnode/src/inc/vnodeInt.h b/source/dnode/vnode/src/inc/vnodeInt.h index 1bd4317234..fc98d6578b 100644 --- a/source/dnode/vnode/src/inc/vnodeInt.h +++ b/source/dnode/vnode/src/inc/vnodeInt.h @@ -81,6 +81,9 @@ typedef struct SCommitInfo SCommitInfo; typedef struct SCompactInfo SCompactInfo; typedef struct SQueryNode SQueryNode; +#define VNODE_META_TMP_DIR "meta.tmp" +#define VNODE_META_BACKUP_DIR "meta.backup" + #define VNODE_META_DIR "meta" #define VNODE_TSDB_DIR "tsdb" #define VNODE_TQ_DIR "tq" diff --git a/source/dnode/vnode/src/meta/metaOpen.c b/source/dnode/vnode/src/meta/metaOpen.c index f062505ac7..ef36521879 100644 --- a/source/dnode/vnode/src/meta/metaOpen.c +++ b/source/dnode/vnode/src/meta/metaOpen.c @@ -133,7 +133,7 @@ static void doScan(SMeta *pMeta) { } } -int32_t metaOpen(SVnode *pVnode, SMeta **ppMeta, int8_t rollback) { +static int32_t metaOpenImpl(SVnode *pVnode, SMeta **ppMeta, const char *metaDir, int8_t rollback) { SMeta *pMeta = NULL; int32_t code = 0; int32_t lino; @@ -144,7 +144,11 @@ int32_t metaOpen(SVnode *pVnode, SMeta **ppMeta, int8_t rollback) { // create handle vnodeGetPrimaryDir(pVnode->path, pVnode->diskPrimary, pVnode->pTfs, path, TSDB_FILENAME_LEN); offset = strlen(path); - snprintf(path + offset, TSDB_FILENAME_LEN - offset - 1, "%s%s", TD_DIRSEP, VNODE_META_DIR); + snprintf(path + offset, TSDB_FILENAME_LEN - offset - 1, "%s%s", TD_DIRSEP, metaDir); + + if (strncmp(metaDir, VNODE_META_TMP_DIR, strlen(VNODE_META_TMP_DIR)) == 0) { + taosRemoveDir(path); + } if ((pMeta = taosMemoryCalloc(1, sizeof(*pMeta) + strlen(path) + 1)) == NULL) { TSDB_CHECK_CODE(code = terrno, lino, _exit); @@ -245,6 +249,169 @@ _exit: return code; } +bool generateNewMeta = false; + +static int32_t metaGenerateNewMeta(SMeta **ppMeta) { + SMeta *pNewMeta = NULL; + SMeta *pMeta = *ppMeta; + SVnode *pVnode = pMeta->pVnode; + + metaInfo("vgId:%d start to generate new meta", TD_VID(pMeta->pVnode)); + + // Open a new meta for orgainzation + int32_t code = metaOpenImpl(pMeta->pVnode, &pNewMeta, VNODE_META_TMP_DIR, false); + if (code) { + return code; + } + + code = metaBegin(pNewMeta, META_BEGIN_HEAP_NIL); + if (code) { + return code; + } + + // i == 0, scan super table + // i == 1, scan normal table and child table + for (int i = 0; i < 2; i++) { + TBC *uidCursor = NULL; + int32_t counter = 0; + + code = tdbTbcOpen(pMeta->pUidIdx, &uidCursor, NULL); + if (code) { + metaError("vgId:%d failed to open uid index cursor, reason:%s", TD_VID(pVnode), tstrerror(code)); + return code; + } + + code = tdbTbcMoveToFirst(uidCursor); + if (code) { + metaError("vgId:%d failed to move to first, reason:%s", TD_VID(pVnode), tstrerror(code)); + tdbTbcClose(uidCursor); + return code; + } + + for (;;) { + const void *pKey; + int kLen; + const void *pVal; + int vLen; + + if (tdbTbcGet(uidCursor, &pKey, &kLen, &pVal, &vLen) < 0) { + break; + } + + tb_uid_t uid = *(tb_uid_t *)pKey; + SUidIdxVal *pUidIdxVal = (SUidIdxVal *)pVal; + if ((i == 0 && (pUidIdxVal->suid && pUidIdxVal->suid == uid)) // super table + || (i == 1 && (pUidIdxVal->suid == 0 || pUidIdxVal->suid != uid)) // normal table and child table + ) { + counter++; + if (i == 0) { + metaInfo("vgId:%d counter:%d new meta handle %s table uid:%" PRId64, TD_VID(pVnode), counter, "super", uid); + } else { + metaInfo("vgId:%d counter:%d new meta handle %s table uid:%" PRId64, TD_VID(pVnode), counter, + pUidIdxVal->suid == 0 ? 
"normal" : "child", uid); + } + + // fetch table entry + void *value = NULL; + int valueSize = 0; + if (tdbTbGet(pMeta->pTbDb, + &(STbDbKey){ + .version = pUidIdxVal->version, + .uid = uid, + }, + sizeof(uid), &value, &valueSize) == 0) { + SDecoder dc = {0}; + SMetaEntry me = {0}; + tDecoderInit(&dc, value, valueSize); + if (metaDecodeEntry(&dc, &me) == 0) { + if (metaHandleEntry(pNewMeta, &me) != 0) { + metaError("vgId:%d failed to handle entry, uid:%" PRId64, TD_VID(pVnode), uid); + } + } + tDecoderClear(&dc); + } + tdbFree(value); + } + + code = tdbTbcMoveToNext(uidCursor); + if (code) { + metaError("vgId:%d failed to move to next, reason:%s", TD_VID(pVnode), tstrerror(code)); + return code; + } + } + + tdbTbcClose(uidCursor); + } + + code = metaCommit(pNewMeta, pNewMeta->txn); + if (code) { + metaError("vgId:%d failed to commit, reason:%s", TD_VID(pVnode), tstrerror(code)); + return code; + } + + code = metaFinishCommit(pNewMeta, pNewMeta->txn); + if (code) { + metaError("vgId:%d failed to finish commit, reason:%s", TD_VID(pVnode), tstrerror(code)); + return code; + } + + if ((code = metaBegin(pNewMeta, META_BEGIN_HEAP_NIL)) != 0) { + metaError("vgId:%d failed to begin new meta, reason:%s", TD_VID(pVnode), tstrerror(code)); + } + metaClose(&pNewMeta); + metaInfo("vgId:%d finish to generate new meta", TD_VID(pVnode)); + return 0; +} + +int32_t metaOpen(SVnode *pVnode, SMeta **ppMeta, int8_t rollback) { + int32_t code = metaOpenImpl(pVnode, ppMeta, VNODE_META_DIR, rollback); + if (code) { + return code; + } + + if (generateNewMeta) { + // backup the old meta + char path[TSDB_FILENAME_LEN] = {0}; + char oldMetaPath[TSDB_FILENAME_LEN] = {0}; + char newMetaPath[TSDB_FILENAME_LEN] = {0}; + char backupMetaPath[TSDB_FILENAME_LEN] = {0}; + + vnodeGetPrimaryDir(pVnode->path, pVnode->diskPrimary, pVnode->pTfs, path, TSDB_FILENAME_LEN); + snprintf(oldMetaPath, sizeof(oldMetaPath) - 1, "%s%s%s", path, TD_DIRSEP, VNODE_META_DIR); + snprintf(newMetaPath, sizeof(newMetaPath) - 1, "%s%s%s", path, TD_DIRSEP, VNODE_META_TMP_DIR); + snprintf(backupMetaPath, sizeof(backupMetaPath) - 1, "%s%s%s", path, TD_DIRSEP, VNODE_META_BACKUP_DIR); + + if (taosCheckExistFile(backupMetaPath)) { + metaError("vgId:%d backup meta already exists, please check", TD_VID(pVnode)); + return TSDB_CODE_FAILED; + } + + code = metaGenerateNewMeta(ppMeta); + if (code) { + metaError("vgId:%d failed to generate new meta, reason:%s", TD_VID(pVnode), tstrerror(code)); + } + + metaClose(ppMeta); + if (taosRenameFile(oldMetaPath, backupMetaPath) != 0) { + metaError("vgId:%d failed to rename old meta to backup, reason:%s", TD_VID(pVnode), tstrerror(terrno)); + return terrno; + } + + // rename the new meta to old meta + if (taosRenameFile(newMetaPath, oldMetaPath) != 0) { + metaError("vgId:%d failed to rename new meta to old meta, reason:%s", TD_VID(pVnode), tstrerror(terrno)); + return terrno; + } + code = metaOpenImpl(pVnode, ppMeta, VNODE_META_DIR, false); + if (code) { + metaError("vgId:%d failed to open new meta, reason:%s", TD_VID(pVnode), tstrerror(code)); + return code; + } + } + + return TSDB_CODE_SUCCESS; +} + int32_t metaUpgrade(SVnode *pVnode, SMeta **ppMeta) { int32_t code = TSDB_CODE_SUCCESS; int32_t lino; diff --git a/source/dnode/vnode/src/meta/metaTable.c b/source/dnode/vnode/src/meta/metaTable.c index 08ee422126..21d12ef77d 100644 --- a/source/dnode/vnode/src/meta/metaTable.c +++ b/source/dnode/vnode/src/meta/metaTable.c @@ -2985,9 +2985,6 @@ static int metaUpdateTagIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry) { } } end: 
- if (terrno != 0) { - ret = terrno; - } tDecoderClear(&dc); tdbFree(pData); return ret; From 75650908ba622638ce98415b894d322769349934 Mon Sep 17 00:00:00 2001 From: dmchen Date: Wed, 16 Oct 2024 09:17:21 +0000 Subject: [PATCH 27/37] fix/TS-5533-revert-add-osupdate-when-monitor --- source/dnode/mgmt/node_util/src/dmUtil.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/source/dnode/mgmt/node_util/src/dmUtil.c b/source/dnode/mgmt/node_util/src/dmUtil.c index 3a6c73a1bc..b50c746c92 100644 --- a/source/dnode/mgmt/node_util/src/dmUtil.c +++ b/source/dnode/mgmt/node_util/src/dmUtil.c @@ -74,10 +74,6 @@ void dmGetMonitorSystemInfo(SMonSysInfo *pInfo) { } pInfo->mem_total = tsTotalMemoryKB; pInfo->disk_engine = 0; - code = osUpdate(); - if (code != 0) { - dError("failed to update os info since %s", tstrerror(code)); - } pInfo->disk_used = tsDataSpace.size.used; pInfo->disk_total = tsDataSpace.size.total; code = taosGetCardInfoDelta(&pInfo->net_in, &pInfo->net_out); From 1d018d0d287cc428cdf3656e964e6c492784fd1e Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Wed, 16 Oct 2024 18:05:35 +0800 Subject: [PATCH 28/37] enh: stmt2Perf add p90 p95 min max --- .../auto/stmt2Performance/json/template.json | 6 +- tools/auto/stmt2Performance/stmt2Perf.py | 110 +++++++++++++----- 2 files changed, 86 insertions(+), 30 deletions(-) diff --git a/tools/auto/stmt2Performance/json/template.json b/tools/auto/stmt2Performance/json/template.json index 659c5966a4..8c54c5be22 100644 --- a/tools/auto/stmt2Performance/json/template.json +++ b/tools/auto/stmt2Performance/json/template.json @@ -16,14 +16,14 @@ { "dbinfo": { "name": "dbrate", - "drop": "yes", - "vgroups": 2 + "vgroups": 1, + "drop": "yes" }, "super_tables": [ { "name": "meters", "child_table_exists": "no", - "childtable_count": 10, + "childtable_count": 1, "childtable_prefix": "d", "insert_mode": "@STMT_MODE", "interlace_rows": @INTERLACE_MODE, diff --git a/tools/auto/stmt2Performance/stmt2Perf.py b/tools/auto/stmt2Performance/stmt2Perf.py index e7a4d5ecbe..4d99f2483d 100644 --- a/tools/auto/stmt2Performance/stmt2Perf.py +++ b/tools/auto/stmt2Performance/stmt2Perf.py @@ -198,16 +198,20 @@ def findContextValue(context, label): def writeTemplateInfo(resultFile): # create info - context = readFileContext(templateFile) + context = readFileContext(templateFile) vgroups = findContextValue(context, "vgroups") childCount = findContextValue(context, "childtable_count") insertRows = findContextValue(context, "insert_rows") - line = f"vgroups = {vgroups}\nchildtable_count = {childCount}\ninsert_rows = {insertRows}\n\n" + bindVGroup = findContextValue(context, "thread_bind_vgroup") + nThread = findContextValue(context, "thread_count") + if bindVGroup.lower().find("yes") != -1: + nThread = vgroups + line = f"thread_bind_vgroup = {bindVGroup}\nvgroups = {vgroups}\nchildtable_count = {childCount}\ninsert_rows = {insertRows}\ninsertThreads = {nThread} \n\n" print(line) appendFileContext(resultFile, line) -def totalCompressRate(stmt, interlace, resultFile, writeSpeed, querySpeed): +def totalCompressRate(stmt, interlace, resultFile, spent, spentReal, writeSpeed, writeReal, min, avg, p90, p99, max, querySpeed): global Number # flush command = 'taos -s "flush database dbrate;"' @@ -220,7 +224,7 @@ def totalCompressRate(stmt, interlace, resultFile, writeSpeed, querySpeed): # read compress rate command = 'taos -s "show table distributed dbrate.meters\G;"' rets = runRetList(command) - print(rets) + #print(rets) str1 = rets[5] arr = str1.split(" ") @@ -234,46 
+238,88 @@ def totalCompressRate(stmt, interlace, resultFile, writeSpeed, querySpeed): str2 = arr[6] pos = str2.find("=[") rate = str2[pos+2:] - print("rate =" + rate) # total data file size #dataSize = getFolderSize(f"{dataDir}/vnode/") #dataSizeMB = int(dataSize/1024/1024) # appand to file - + + # %("No", "stmtMode", "interlaceRows", "spent", "spent-real", "writeSpeed", "write-real", "query-QPS", "dataSize", "rate") Number += 1 - context = "%10s %10s %15s %10s %10s %30s %15s\n"%( Number, stmt, interlace, str(totalSize)+" MB", rate+"%", writeSpeed + " Records/second", querySpeed) + ''' + context = "%2s %6s %10s %10s %10s %15s %15s %16s %16s %16s %16s %16s %8s %8s %8s\n"%( + Number, stmt, interlace, spent + "s", spentReal + "s", writeSpeed + " rows/s", writeReal + " rows/s", + min, avg, p90, p99, max, + querySpeed, str(totalSize) + " MB", rate + "%") + ''' + context = "%2s %8s %10s %10s %16s %16s %12s %12s %12s %12s %12s %12s %10s %10s %10s\n"%( + Number, stmt, interlace, spent + "s", spentReal + "s", writeSpeed + "r/s", writeReal + "r/s", + min, avg, p90, p99, max + "ms", + querySpeed, str(totalSize) + " MB", rate + "%") + showLog(context) appendFileContext(resultFile, context) +def cutEnd(line, start, endChar): + pos = line.find(endChar, start) + if pos == -1: + return line[start:] + return line[start : pos] + +def findValue(context, pos, key, endChar,command): + pos = context.find(key, pos) + if pos == -1: + print(f"error, run command={command} output not found \"{key}\" keyword. context={context}") + exit(1) + pos += len(key) + value = cutEnd(context, pos, endChar) + return (value, pos) + def testWrite(jsonFile): command = f"taosBenchmark -f {jsonFile}" output, context = run(command, 60000) + print(context) + # SUCC: Spent 0.960248 (real 0.947154) seconds to insert rows: 100000 with 1 thread(s) into dbrate 104139.76 (real 105579.45) records/second - # find second real - pos = context.find("(real ") + # spent + key = "Spent " + pos = -1 + pos1 = 0 + while pos1 != -1: # find last "Spent " + pos1 = context.find(key, pos1) + if pos1 != -1: + pos = pos1 # update last found + pos1 += len(key) if pos == -1: - print(f"error, run command={command} output not found first \"(real\" keyword. error={context}") + print(f"error, run command={command} output not found \"{key}\" keyword. context={context}") exit(1) - pos = context.find("(real ", pos + 5) + pos += len(key) + spent = cutEnd(context, pos, ".") + + # spent-real + spentReal, pos = findValue(context, pos, "(real ", ".", command) + + # writeSpeed + key = "into " + pos = context.find(key, pos) if pos == -1: - print(f"error, run command={command} output not found second \"(real\" keyword. error={context}") - exit(1) - - pos += 5 - length = len(context) - while pos < length and context[pos] == ' ': - pos += 1 - end = context.find(".", pos) - if end == -1: - print(f"error, run command={command} output not found second \".\" keyword. error={context}") + print(f"error, run command={command} output not found \"{key}\" keyword. 
context={context}") exit(1) + pos += len(key) + writeSpeed, pos = findValue(context, pos, " ", ".", command) + # writeReal + writeReal, pos = findValue(context, pos, "(real ", ".", command) - speed = context[pos: end] - #print(f"write pos ={pos} end={end} speed={speed}\n output={context} \n") - return speed + # delay + min, pos = findValue(context, pos, "min: ", ",", command) + avg, pos = findValue(context, pos, "avg: ", ",", command) + p90, pos = findValue(context, pos, "p90: ", ",", command) + p99, pos = findValue(context, pos, "p99: ", ",", command) + max, pos = findValue(context, pos, "max: ", "ms", command) + + return (spent, spentReal, writeSpeed, writeReal, min, avg, p90, p99, max) def testQuery(): command = f"taosBenchmark -f json/query.json" @@ -308,13 +354,13 @@ def doTest(stmt, interlace, resultFile): # run taosBenchmark t1 = time.time() - writeSpeed = testWrite(jsonFile) + spent, spentReal, writeSpeed, writeReal, min, avg, p90, p99, max = testWrite(jsonFile) t2 = time.time() # total write speed querySpeed = testQuery() # total compress rate - totalCompressRate(stmt, interlace, resultFile, writeSpeed, querySpeed) + totalCompressRate(stmt, interlace, resultFile, spent, spentReal, writeSpeed, writeReal, min, avg, p90, p99, max, querySpeed) def main(): @@ -333,7 +379,17 @@ def main(): # json info writeTemplateInfo(resultFile) # head - context = "\n%10s %10s %15s %10s %10s %30s %15s\n"%("No", "stmtMode", "interlaceRows", "dataSize", "rate", "writeSpeed", "query-QPS") + ''' + context = "%3s %8s %10s %10s %10s %15s %15s %10s %10s %10s %10s %10s %8s %8s %8s\n"%( + "No", "stmtMode", "interlace", "spent", "spent-real", "writeSpeed", "write-real", + "min", "avg", "p90", "p99", "max", + "query-QPS", "dataSize", "rate") + ''' + context = "%2s %8s %10s %10s %16s %16s %12s %12s %12s %12s %12s %12s %10s %10s %10s\n"%( + "No", "stmtMode", "interlace", "spent", "spent-real", "writeSpeed", "write-real", + "min", "avg", "p90", "p99", "max", + "query-QPS", "dataSize", "rate") + appendFileContext(resultFile, context) From 27c087e9aec5bd1a499a4c2e545c283d7536b727 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 16 Oct 2024 18:36:49 +0800 Subject: [PATCH 29/37] refactor: do some internal refactor. 
--- source/dnode/mnode/impl/src/mndStream.c | 3 ++- source/dnode/mnode/impl/src/mndStreamTransAct.c | 9 +++------ source/dnode/vnode/src/tsdb/tsdbRead2.c | 2 +- 3 files changed, 6 insertions(+), 8 deletions(-) diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index 69d3de25fc..a4327b777f 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -1783,7 +1783,7 @@ static int32_t mndProcessPauseStreamReq(SRpcMsg *pReq) { static int32_t mndProcessResumeStreamReq(SRpcMsg *pReq) { SMnode *pMnode = pReq->info.node; SStreamObj *pStream = NULL; - int32_t code = 0; + int32_t code = 0; if ((code = grantCheckExpire(TSDB_GRANT_STREAMS)) < 0) { return code; @@ -1811,6 +1811,7 @@ static int32_t mndProcessResumeStreamReq(SRpcMsg *pReq) { return 0; } + mInfo("stream:%s,%" PRId64 " start to resume stream from pause", resumeReq.name, pStream->uid); if (mndCheckDbPrivilegeByName(pMnode, pReq->info.conn.user, MND_OPER_WRITE_DB, pStream->targetDb) != 0) { sdbRelease(pMnode->pSdb, pStream); return -1; diff --git a/source/dnode/mnode/impl/src/mndStreamTransAct.c b/source/dnode/mnode/impl/src/mndStreamTransAct.c index 4e0bf97587..139ea4f147 100644 --- a/source/dnode/mnode/impl/src/mndStreamTransAct.c +++ b/source/dnode/mnode/impl/src/mndStreamTransAct.c @@ -61,7 +61,6 @@ static int32_t doSetPauseAction(SMnode *pMnode, STrans *pTrans, SStreamTask *pTa static int32_t doSetDropAction(SMnode *pMnode, STrans *pTrans, SStreamTask *pTask) { SVDropStreamTaskReq *pReq = taosMemoryCalloc(1, sizeof(SVDropStreamTaskReq)); if (pReq == NULL) { - // terrno = TSDB_CODE_OUT_OF_MEMORY; return terrno; } @@ -93,7 +92,6 @@ static int32_t doSetResumeAction(STrans *pTrans, SMnode *pMnode, SStreamTask *pT if (pReq == NULL) { mError("failed to malloc in resume stream, size:%" PRIzu ", code:%s", sizeof(SVResumeStreamTaskReq), tstrerror(TSDB_CODE_OUT_OF_MEMORY)); - // terrno = TSDB_CODE_OUT_OF_MEMORY; return terrno; } @@ -106,19 +104,18 @@ static int32_t doSetResumeAction(STrans *pTrans, SMnode *pMnode, SStreamTask *pT bool hasEpset = false; int32_t code = extractNodeEpset(pMnode, &epset, &hasEpset, pTask->id.taskId, pTask->info.nodeId); if (code != TSDB_CODE_SUCCESS || (!hasEpset)) { - terrno = code; taosMemoryFree(pReq); - return terrno; + return code; } code = setTransAction(pTrans, pReq, sizeof(SVResumeStreamTaskReq), TDMT_STREAM_TASK_RESUME, &epset, 0, TSDB_CODE_VND_INVALID_VGROUP_ID); if (code != 0) { taosMemoryFree(pReq); - return terrno; + return code; } mDebug("set the resume action for trans:%d", pTrans->id); - return 0; + return code; } static int32_t doSetDropActionFromId(SMnode *pMnode, STrans *pTrans, SOrphanTask* pTask) { diff --git a/source/dnode/vnode/src/tsdb/tsdbRead2.c b/source/dnode/vnode/src/tsdb/tsdbRead2.c index 44a39f4328..c4971e27cf 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead2.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead2.c @@ -5910,7 +5910,7 @@ int32_t tsdbGetTableSchema(SMeta* pMeta, int64_t uid, STSchema** pSchema, int64_ } else if (mr.me.type == TSDB_NORMAL_TABLE) { // do nothing } else { code = TSDB_CODE_INVALID_PARA; - tsdbError("invalid mr.me.type:%d %s, code:%s", mr.me.type, tstrerror(code)); + tsdbError("invalid mr.me.type:%d, code:%s", mr.me.type, tstrerror(code)); metaReaderClear(&mr); return code; } From e893547c1471325b9d93a273d23f36d55c346a3f Mon Sep 17 00:00:00 2001 From: dmchen Date: Wed, 16 Oct 2024 10:58:26 +0000 Subject: [PATCH 30/37] fix/TD-32583-remove-useless-timer-execution --- 
source/libs/sync/inc/syncEnv.h | 1 - source/libs/sync/src/syncMain.c | 15 ++++++++------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/source/libs/sync/inc/syncEnv.h b/source/libs/sync/inc/syncEnv.h index 0376920e8a..caf0e88457 100644 --- a/source/libs/sync/inc/syncEnv.h +++ b/source/libs/sync/inc/syncEnv.h @@ -24,7 +24,6 @@ extern "C" { #define TIMER_MAX_MS 0x7FFFFFFF #define PING_TIMER_MS 5000 -#define HEARTBEAT_TICK_NUM 20 typedef struct SSyncEnv { uint8_t isStart; diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c index 451e82c7d4..3d37cdb560 100644 --- a/source/libs/sync/src/syncMain.c +++ b/source/libs/sync/src/syncMain.c @@ -977,9 +977,10 @@ static int32_t syncHbTimerStart(SSyncNode* pSyncNode, SSyncTimer* pSyncTimer) { pData->logicClock = pSyncTimer->logicClock; pData->execTime = tsNow + pSyncTimer->timerMS; - sTrace("vgId:%d, start hb timer, rid:%" PRId64 " addr:%" PRId64, pSyncNode->vgId, pData->rid, pData->destId.addr); + sTrace("vgId:%d, start hb timer, rid:%" PRId64 " addr:%" PRId64 " at %d", pSyncNode->vgId, pData->rid, + pData->destId.addr, pSyncTimer->timerMS); - TAOS_CHECK_RETURN(taosTmrReset(pSyncTimer->timerCb, pSyncTimer->timerMS / HEARTBEAT_TICK_NUM, (void*)(pData->rid), + TAOS_CHECK_RETURN(taosTmrReset(pSyncTimer->timerCb, pSyncTimer->timerMS, (void*)(pData->rid), syncEnv()->pTimerManager, &pSyncTimer->pTimer)); } else { code = TSDB_CODE_SYN_INTERNAL_ERROR; @@ -2711,7 +2712,8 @@ static void syncNodeEqPeerHeartbeatTimer(void* param, void* tmrId) { return; } - sTrace("vgId:%d, eq peer hb timer, rid:%" PRId64 " addr:%" PRId64, pSyncNode->vgId, hbDataRid, pData->destId.addr); + sTrace("vgId:%d, peer hb timer execution, rid:%" PRId64 " addr:%" PRId64, pSyncNode->vgId, hbDataRid, + pData->destId.addr); if (pSyncNode->totalReplicaNum > 1) { int64_t timerLogicClock = atomic_load_64(&pSyncTimer->logicClock); @@ -2753,13 +2755,12 @@ static void syncNodeEqPeerHeartbeatTimer(void* param, void* tmrId) { if (ret != 0) { sError("vgId:%d, failed to send heartbeat since %s", pSyncNode->vgId, tstrerror(ret)); } - } else { } if (syncIsInit()) { - // sTrace("vgId:%d, reset peer hb timer", pSyncNode->vgId); - if ((code = taosTmrReset(syncNodeEqPeerHeartbeatTimer, pSyncTimer->timerMS / HEARTBEAT_TICK_NUM, - (void*)hbDataRid, syncEnv()->pTimerManager, &pSyncTimer->pTimer)) != 0) { + sTrace("vgId:%d, reset peer hb timer at %d", pSyncNode->vgId, pSyncTimer->timerMS); + if ((code = taosTmrReset(syncNodeEqPeerHeartbeatTimer, pSyncTimer->timerMS, (void*)hbDataRid, + syncEnv()->pTimerManager, &pSyncTimer->pTimer)) != 0) { sError("vgId:%d, reset peer hb timer error, %s", pSyncNode->vgId, tstrerror(code)); syncNodeRelease(pSyncNode); syncHbTimerDataRelease(pData); From 71a762db7526de763b615044ff7ffb893bacb4a3 Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Wed, 16 Oct 2024 19:32:41 +0800 Subject: [PATCH 31/37] add more error handle --- source/dnode/vnode/src/meta/metaOpen.c | 77 ++++++++++++++++---------- source/libs/tdb/src/db/tdbBtree.c | 3 + 2 files changed, 51 insertions(+), 29 deletions(-) diff --git a/source/dnode/vnode/src/meta/metaOpen.c b/source/dnode/vnode/src/meta/metaOpen.c index ef36521879..8f2c0b5a5e 100644 --- a/source/dnode/vnode/src/meta/metaOpen.c +++ b/source/dnode/vnode/src/meta/metaOpen.c @@ -364,13 +364,7 @@ static int32_t metaGenerateNewMeta(SMeta **ppMeta) { } int32_t metaOpen(SVnode *pVnode, SMeta **ppMeta, int8_t rollback) { - int32_t code = metaOpenImpl(pVnode, ppMeta, VNODE_META_DIR, rollback); - if (code) { - return code; - 
} - if (generateNewMeta) { - // backup the old meta char path[TSDB_FILENAME_LEN] = {0}; char oldMetaPath[TSDB_FILENAME_LEN] = {0}; char newMetaPath[TSDB_FILENAME_LEN] = {0}; @@ -381,32 +375,57 @@ int32_t metaOpen(SVnode *pVnode, SMeta **ppMeta, int8_t rollback) { snprintf(newMetaPath, sizeof(newMetaPath) - 1, "%s%s%s", path, TD_DIRSEP, VNODE_META_TMP_DIR); snprintf(backupMetaPath, sizeof(backupMetaPath) - 1, "%s%s%s", path, TD_DIRSEP, VNODE_META_BACKUP_DIR); - if (taosCheckExistFile(backupMetaPath)) { - metaError("vgId:%d backup meta already exists, please check", TD_VID(pVnode)); + bool oldMetaExist = taosCheckExistFile(oldMetaPath); + bool newMetaExist = taosCheckExistFile(newMetaPath); + bool backupMetaExist = taosCheckExistFile(backupMetaPath); + + if ((!backupMetaExist && !oldMetaExist && newMetaExist) // case 2 + || (backupMetaExist && !oldMetaExist && !newMetaExist) // case 4 + || (backupMetaExist && oldMetaExist && newMetaExist) // case 8 + ) { + metaError("vgId:%d invalid meta state, please check", TD_VID(pVnode)); return TSDB_CODE_FAILED; + } else if ((backupMetaExist && oldMetaExist && !newMetaExist) // case 7 + || (!backupMetaExist && !oldMetaExist && !newMetaExist) // case 1 + ) { + return metaOpenImpl(pVnode, ppMeta, VNODE_META_DIR, rollback); + } else if (backupMetaExist && !oldMetaExist && newMetaExist) { + if (taosRenameFile(newMetaPath, oldMetaPath) != 0) { + metaError("vgId:%d failed to rename new meta to old meta, reason:%s", TD_VID(pVnode), tstrerror(terrno)); + return terrno; + } + return metaOpenImpl(pVnode, ppMeta, VNODE_META_DIR, rollback); + } else { + int32_t code = metaOpenImpl(pVnode, ppMeta, VNODE_META_DIR, rollback); + if (code) { + return code; + } + + code = metaGenerateNewMeta(ppMeta); + if (code) { + metaError("vgId:%d failed to generate new meta, reason:%s", TD_VID(pVnode), tstrerror(code)); + } + + metaClose(ppMeta); + if (taosRenameFile(oldMetaPath, backupMetaPath) != 0) { + metaError("vgId:%d failed to rename old meta to backup, reason:%s", TD_VID(pVnode), tstrerror(terrno)); + return terrno; + } + + // rename the new meta to old meta + if (taosRenameFile(newMetaPath, oldMetaPath) != 0) { + metaError("vgId:%d failed to rename new meta to old meta, reason:%s", TD_VID(pVnode), tstrerror(terrno)); + return terrno; + } + code = metaOpenImpl(pVnode, ppMeta, VNODE_META_DIR, false); + if (code) { + metaError("vgId:%d failed to open new meta, reason:%s", TD_VID(pVnode), tstrerror(code)); + return code; + } } - code = metaGenerateNewMeta(ppMeta); - if (code) { - metaError("vgId:%d failed to generate new meta, reason:%s", TD_VID(pVnode), tstrerror(code)); - } - - metaClose(ppMeta); - if (taosRenameFile(oldMetaPath, backupMetaPath) != 0) { - metaError("vgId:%d failed to rename old meta to backup, reason:%s", TD_VID(pVnode), tstrerror(terrno)); - return terrno; - } - - // rename the new meta to old meta - if (taosRenameFile(newMetaPath, oldMetaPath) != 0) { - metaError("vgId:%d failed to rename new meta to old meta, reason:%s", TD_VID(pVnode), tstrerror(terrno)); - return terrno; - } - code = metaOpenImpl(pVnode, ppMeta, VNODE_META_DIR, false); - if (code) { - metaError("vgId:%d failed to open new meta, reason:%s", TD_VID(pVnode), tstrerror(code)); - return code; - } + } else { + return metaOpenImpl(pVnode, ppMeta, VNODE_META_DIR, rollback); } return TSDB_CODE_SUCCESS; diff --git a/source/libs/tdb/src/db/tdbBtree.c b/source/libs/tdb/src/db/tdbBtree.c index c688a6cc6a..2333a4a6a2 100644 --- a/source/libs/tdb/src/db/tdbBtree.c +++ 
b/source/libs/tdb/src/db/tdbBtree.c @@ -1446,6 +1446,9 @@ static int tdbBtreeDecodePayload(SPage *pPage, const SCell *pCell, int nHeader, return ret; } ofpCell = tdbPageGetCell(ofp, 0); + if (ofpCell == NULL) { + return TSDB_CODE_INVALID_DATA_FMT; + } if (nLeft <= ofp->maxLocal - sizeof(SPgno)) { bytes = nLeft; From d63040f2f77e48cd67593e29059009f36b731c34 Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Wed, 16 Oct 2024 19:41:29 +0800 Subject: [PATCH 32/37] TS-5459: add test case --- tests/system-test/2-query/slow_query_basic.py | 66 +++++++++++++++++++ tests/system-test/win-test-file | 1 + 2 files changed, 67 insertions(+) create mode 100644 tests/system-test/2-query/slow_query_basic.py diff --git a/tests/system-test/2-query/slow_query_basic.py b/tests/system-test/2-query/slow_query_basic.py new file mode 100644 index 0000000000..10595028a7 --- /dev/null +++ b/tests/system-test/2-query/slow_query_basic.py @@ -0,0 +1,66 @@ +import random +import string +from util.log import * +from util.cases import * +from util.sql import * +from util.common import * +from util.sqlset import * +import numpy as np + + +class TDTestCase: + updatecfgDict = {'slowLogThresholdTest': ''} + updatecfgDict["slowLogThresholdTest"] = 0 + + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + def getPath(self, tool="taosBenchmark"): + if (platform.system().lower() == 'windows'): + tool = tool + ".exe" + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + paths = [] + for root, dirs, files in os.walk(projPath): + if ((tool) in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + paths.append(os.path.join(root, tool)) + break + if (len(paths) == 0): + tdLog.exit("taosBenchmark not found!") + return + else: + tdLog.info("taosBenchmark found in %s" % paths[0]) + return paths[0] + + def taosBenchmark(self, param): + binPath = self.getPath() + cmd = f"{binPath} {param}" + tdLog.info(cmd) + os.system(cmd) + + def testSlowQuery(self): + self.taosBenchmark(" -d db -t 2 -v 2 -n 1000000 -y") + sql = "select count(*) from db.meters" + for i in range(10): + tdSql.query(sql) + tdSql.checkData(0, 0, 2 * 1000000) + + def run(self): + self.testSlowQuery() + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/system-test/win-test-file b/tests/system-test/win-test-file index e86047bca8..c3047efdd7 100644 --- a/tests/system-test/win-test-file +++ b/tests/system-test/win-test-file @@ -925,3 +925,4 @@ python3 ./test.py -f 99-TDcase/TD-20582.py python3 ./test.py -f 5-taos-tools/taosbenchmark/insertMix.py -N 3 python3 ./test.py -f 5-taos-tools/taosbenchmark/stt.py -N 3 python3 ./test.py -f eco-system/meta/database/keep_time_offset.py +python3 ./test.py -f 2-query/slow_query_basic.py From dd05353b74e2b7b84fdaf42adaacb164b53a0fd2 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 16 Oct 2024 22:07:37 +0800 Subject: [PATCH 33/37] refactor: do some internal refactor. 
--- include/libs/stream/tstream.h | 2 +- source/dnode/vnode/src/tqCommon/tqCommon.c | 2 +- source/libs/stream/src/streamCheckStatus.c | 2 +- source/libs/stream/src/streamCheckpoint.c | 3 ++- source/libs/stream/src/streamDispatch.c | 2 +- source/libs/stream/src/streamMeta.c | 5 +++-- source/libs/stream/src/streamSched.c | 4 ++-- source/libs/stream/src/streamTask.c | 3 +++ 8 files changed, 14 insertions(+), 9 deletions(-) diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index e6d750468e..58c1707e1f 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -754,7 +754,7 @@ int32_t streamMetaGetNumOfTasks(SStreamMeta* pMeta); int32_t streamMetaAcquireTaskNoLock(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, SStreamTask** pTask); int32_t streamMetaAcquireTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, SStreamTask** pTask); void streamMetaReleaseTask(SStreamMeta* pMeta, SStreamTask* pTask); -void streamMetaAcquireOneTask(SStreamTask* pTask); +int32_t streamMetaAcquireOneTask(SStreamTask* pTask); void streamMetaClear(SStreamMeta* pMeta); void streamMetaInitBackend(SStreamMeta* pMeta); int32_t streamMetaCommit(SStreamMeta* pMeta); diff --git a/source/dnode/vnode/src/tqCommon/tqCommon.c b/source/dnode/vnode/src/tqCommon/tqCommon.c index 3871011407..a00e92997c 100644 --- a/source/dnode/vnode/src/tqCommon/tqCommon.c +++ b/source/dnode/vnode/src/tqCommon/tqCommon.c @@ -692,7 +692,7 @@ int32_t tqStreamTaskProcessDropReq(SStreamMeta* pMeta, char* msg, int32_t msgLen STaskId id = {.streamId = pReq->streamId, .taskId = pReq->taskId}; SStreamTask** ppTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &id, sizeof(id)); if ((ppTask != NULL) && ((*ppTask) != NULL)) { - streamMetaAcquireOneTask(*ppTask); + int32_t unusedRetRef = streamMetaAcquireOneTask(*ppTask); SStreamTask* pTask = *ppTask; if (HAS_RELATED_FILLHISTORY_TASK(pTask)) { diff --git a/source/libs/stream/src/streamCheckStatus.c b/source/libs/stream/src/streamCheckStatus.c index 75bcc326b3..c1c54b3c0b 100644 --- a/source/libs/stream/src/streamCheckStatus.c +++ b/source/libs/stream/src/streamCheckStatus.c @@ -299,7 +299,7 @@ void streamTaskStartMonitorCheckRsp(SStreamTask* pTask) { return; } - /*SStreamTask* p = */ streamMetaAcquireOneTask(pTask); // add task ref here + int32_t unusedRetRef = streamMetaAcquireOneTask(pTask); // add task ref here streamTaskInitTaskCheckInfo(pInfo, &pTask->outputInfo, taosGetTimestampMs()); int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1); diff --git a/source/libs/stream/src/streamCheckpoint.c b/source/libs/stream/src/streamCheckpoint.c index e44bca123b..be914d9746 100644 --- a/source/libs/stream/src/streamCheckpoint.c +++ b/source/libs/stream/src/streamCheckpoint.c @@ -347,7 +347,8 @@ int32_t streamProcessCheckpointTriggerBlock(SStreamTask* pTask, SStreamDataBlock if (old == 0) { int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1); stDebug("s-task:%s start checkpoint-trigger monitor in 10s, ref:%d ", pTask->id.idStr, ref); - streamMetaAcquireOneTask(pTask); + + int32_t unusedRetRef = streamMetaAcquireOneTask(pTask); streamTmrStart(checkpointTriggerMonitorFn, 200, pTask, streamTimer, &pTmrInfo->tmrHandle, vgId, "trigger-recv-monitor"); pTmrInfo->launchChkptId = pActiveInfo->activeId; diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index 133663ac28..62d60ff664 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -1162,7 +1162,7 
@@ int32_t streamTaskSendCheckpointReadyMsg(SStreamTask* pTask) { if (old == 0) { int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1); stDebug("s-task:%s start checkpoint-ready monitor in 10s, ref:%d ", pTask->id.idStr, ref); - streamMetaAcquireOneTask(pTask); + int32_t unusedRetRef = streamMetaAcquireOneTask(pTask); streamTmrStart(chkptReadyMsgSendMonitorFn, 200, pTask, streamTimer, &pTmrInfo->tmrHandle, vgId, "chkpt-ready-monitor"); diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index 29152c6205..7e9b60b61a 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -753,9 +753,10 @@ int32_t streamMetaAcquireTask(SStreamMeta* pMeta, int64_t streamId, int32_t task return code; } -void streamMetaAcquireOneTask(SStreamTask* pTask) { +int32_t streamMetaAcquireOneTask(SStreamTask* pTask) { int32_t ref = atomic_add_fetch_32(&pTask->refCnt, 1); stTrace("s-task:%s acquire task, ref:%d", pTask->id.idStr, ref); + return ref; } void streamMetaReleaseTask(SStreamMeta* UNUSED_PARAM(pMeta), SStreamTask* pTask) { @@ -866,7 +867,7 @@ int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t t ppTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &id, sizeof(id)); if (ppTask) { pTask = *ppTask; - // it is an fill-history task, remove the related stream task's id that points to it + // it is a fill-history task, remove the related stream task's id that points to it if (pTask->info.fillHistory == 0) { int32_t ret = atomic_sub_fetch_32(&pMeta->numOfStreamTasks, 1); } diff --git a/source/libs/stream/src/streamSched.c b/source/libs/stream/src/streamSched.c index 095a5af6d4..cdaa603e38 100644 --- a/source/libs/stream/src/streamSched.c +++ b/source/libs/stream/src/streamSched.c @@ -22,7 +22,7 @@ static void streamTaskSchedHelper(void* param, void* tmrId); void streamSetupScheduleTrigger(SStreamTask* pTask) { int64_t delaySchema = pTask->info.delaySchedParam; if (delaySchema != 0 && pTask->info.fillHistory == 0) { - int32_t ref = atomic_add_fetch_32(&pTask->refCnt, 1); + int32_t ref = streamMetaAcquireOneTask(pTask); stDebug("s-task:%s setup scheduler trigger, ref:%d delay:%" PRId64 " ms", pTask->id.idStr, ref, pTask->info.delaySchedParam); @@ -80,7 +80,7 @@ void streamTaskResumeInFuture(SStreamTask* pTask) { pTask->status.schedIdleTime, ref); // add one ref count for task - streamMetaAcquireOneTask(pTask); + int32_t unusedRetRef = streamMetaAcquireOneTask(pTask); streamTmrStart(streamTaskResumeHelper, pTask->status.schedIdleTime, pTask, streamTimer, &pTask->schedInfo.pIdleTimer, pTask->pMeta->vgId, "resume-task-tmr"); } diff --git a/source/libs/stream/src/streamTask.c b/source/libs/stream/src/streamTask.c index 71a2ed3e4a..727701e03e 100644 --- a/source/libs/stream/src/streamTask.c +++ b/source/libs/stream/src/streamTask.c @@ -258,10 +258,12 @@ void tFreeStreamTask(SStreamTask* pTask) { if (pTask->inputq.queue) { streamQueueClose(pTask->inputq.queue, pTask->id.taskId); + pTask->inputq.queue = NULL; } if (pTask->outputq.queue) { streamQueueClose(pTask->outputq.queue, pTask->id.taskId); + pTask->outputq.queue = NULL; } if (pTask->exec.qmsg) { @@ -275,6 +277,7 @@ void tFreeStreamTask(SStreamTask* pTask) { if (pTask->exec.pWalReader != NULL) { walCloseReader(pTask->exec.pWalReader); + pTask->exec.pWalReader = NULL; } streamClearChkptReadyMsg(pTask->chkInfo.pActiveInfo); From 9c7b925613367bc795a30169e48ff6aa2c84014f Mon Sep 17 00:00:00 2001 From: Jinqing Kuang Date: Wed, 16 Oct 2024 18:20:19 +0800 Subject: 
[PATCH 34/37] fix(query)[TD-30667]. Check hardware support for AVX instructions Modify the CMake script to check for hardware support of AVX instructions directly, instead of relying on compiler flags for the verification. --- cmake/cmake.define | 41 +++++++++++++++++++++++++++++++++++++++-- 1 file changed, 39 insertions(+), 2 deletions(-) diff --git a/cmake/cmake.define b/cmake/cmake.define index 9fae397363..7bcd400fa6 100644 --- a/cmake/cmake.define +++ b/cmake/cmake.define @@ -169,11 +169,48 @@ ELSE () SET(COMPILER_SUPPORT_AVX512VL false) ELSE() CHECK_C_COMPILER_FLAG("-mfma" COMPILER_SUPPORT_FMA) - CHECK_C_COMPILER_FLAG("-mavx" COMPILER_SUPPORT_AVX) - CHECK_C_COMPILER_FLAG("-mavx2" COMPILER_SUPPORT_AVX2) CHECK_C_COMPILER_FLAG("-mavx512f" COMPILER_SUPPORT_AVX512F) CHECK_C_COMPILER_FLAG("-mavx512vbmi" COMPILER_SUPPORT_AVX512BMI) CHECK_C_COMPILER_FLAG("-mavx512vl" COMPILER_SUPPORT_AVX512VL) + + INCLUDE(CheckCSourceRuns) + SET(CMAKE_REQUIRED_FLAGS "-mavx") + check_c_source_runs(" + #include <immintrin.h> + int main() { + __m256d a, b, c; + double buf[4] = {0}; + a = _mm256_loadu_pd(buf); + b = _mm256_loadu_pd(buf); + c = _mm256_add_pd(a, b); + _mm256_storeu_pd(buf, c); + for (int i = 0; i < sizeof(buf) / sizeof(buf[0]); ++i) { + if (buf[i] != 0) { + return 1; + } + } + return 0; + } + " COMPILER_SUPPORT_AVX) + + SET(CMAKE_REQUIRED_FLAGS "-mavx2") + check_c_source_runs(" + #include <immintrin.h> + int main() { + __m256i a, b, c; + int buf[8] = {0}; + a = _mm256_loadu_si256((__m256i *)buf); + b = _mm256_loadu_si256((__m256i *)buf); + c = _mm256_and_si256(a, b); + _mm256_storeu_si256((__m256i *)buf, c); + for (int i = 0; i < sizeof(buf) / sizeof(buf[0]); ++i) { + if (buf[i] != 0) { + return 1; + } + } + return 0; + } + " COMPILER_SUPPORT_AVX2) ENDIF() IF (COMPILER_SUPPORT_SSE42) From 9bc38af7edf5dab39006537f034dd4338c91ab0d Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Thu, 17 Oct 2024 09:42:36 +0800 Subject: [PATCH 35/37] fix:[TD-32585] remove cleanup of app info in taos_cleanup --- source/client/src/clientMain.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c index a35c7c7a4c..1a66266000 100644 --- a/source/client/src/clientMain.c +++ b/source/client/src/clientMain.c @@ -84,7 +84,7 @@ void taos_cleanup(void) { taosCloseRef(id); nodesDestroyAllocatorSet(); - cleanupAppInfo(); +// cleanupAppInfo(); rpcCleanup(); tscDebug("rpc cleanup"); From 8930252f97cab2342d1342c0f18af5a914c020fb Mon Sep 17 00:00:00 2001 From: xsren <285808407@qq.com> Date: Thu, 17 Oct 2024 11:22:24 +0800 Subject: [PATCH 36/37] fix: tag field snprintf length error --- source/libs/command/src/command.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/command/src/command.c b/source/libs/command/src/command.c index 27a43f7523..95c73763bf 100644 --- a/source/libs/command/src/command.c +++ b/source/libs/command/src/command.c @@ -551,7 +551,7 @@ void appendTagFields(char* buf, int32_t* len, STableCfg* pCfg) { (int32_t)((pSchema->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE)); } - *len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, sizeof(type) - (VARSTR_HEADER_SIZE + *len), "%s`%s` %s", + *len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), "%s`%s` %s", ((i > 0) ? 
", " : ""), pSchema->name, type); } } From 9d40a6d68bc37ac5c30e50d228ae54245d9de322 Mon Sep 17 00:00:00 2001 From: wade zhang <95411902+gccgdb1234@users.noreply.github.com> Date: Thu, 17 Oct 2024 12:04:05 +0800 Subject: [PATCH 37/37] Update 3.3.3.0.md --- docs/zh/28-releases/03-notes/3.3.3.0.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/28-releases/03-notes/3.3.3.0.md b/docs/zh/28-releases/03-notes/3.3.3.0.md index 405ca83d71..bb7bc0f831 100644 --- a/docs/zh/28-releases/03-notes/3.3.3.0.md +++ b/docs/zh/28-releases/03-notes/3.3.3.0.md @@ -10,7 +10,7 @@ description: 3.3.3.0 版本说明 4. TDengine支持macOS企业版客户端 [企业版] 5. taosX日志默认不写入syslog [企业版] 6. 服务端记录所有慢查询信息到log库 -7. show cluster machines 查询结果中添加服务端版本号 +7. show cluster machines 查询结果中添加服务端版本号 [企业版] 8. 删除保留关键字LEVEL/ENCODE/COMPRESS, 可以作为列名/表名/数据库名等使用 9. 禁止动态修改临时目录 10. round 函数:支持四舍五入的精度