From f70321ee53f2403e770acd2bda3a41038b437549 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 10 Oct 2024 10:52:48 +0800 Subject: [PATCH 01/72] fix(vnd): check return value. --- source/dnode/vnode/src/vnd/vnodeSvr.c | 27 +++++++++++++-------------- 1 file changed, 13 insertions(+), 14 deletions(-) diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index 371eaa0774..1b1bb9257d 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -640,40 +640,39 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t ver, SRpcMsg } } break; case TDMT_STREAM_TASK_DROP: { - if (tqProcessTaskDropReq(pVnode->pTq, pMsg->pCont, pMsg->contLen) < 0) { + if ((code = tqProcessTaskDropReq(pVnode->pTq, pMsg->pCont, pMsg->contLen)) < 0) { goto _err; } } break; case TDMT_STREAM_TASK_UPDATE_CHKPT: { - if (tqProcessTaskUpdateCheckpointReq(pVnode->pTq, pMsg->pCont, pMsg->contLen) < 0) { + if ((code = tqProcessTaskUpdateCheckpointReq(pVnode->pTq, pMsg->pCont, pMsg->contLen)) < 0) { goto _err; } } break; case TDMT_STREAM_CONSEN_CHKPT: { - if (pVnode->restored) { - if (tqProcessTaskConsenChkptIdReq(pVnode->pTq, pMsg) < 0) { - goto _err; - } + if (pVnode->restored && (code = tqProcessTaskConsenChkptIdReq(pVnode->pTq, pMsg)) < 0) { + goto _err; } + } break; case TDMT_STREAM_TASK_PAUSE: { if (pVnode->restored && vnodeIsLeader(pVnode) && - tqProcessTaskPauseReq(pVnode->pTq, ver, pMsg->pCont, pMsg->contLen) < 0) { + (code = tqProcessTaskPauseReq(pVnode->pTq, ver, pMsg->pCont, pMsg->contLen)) < 0) { goto _err; } } break; case TDMT_STREAM_TASK_RESUME: { if (pVnode->restored && vnodeIsLeader(pVnode) && - tqProcessTaskResumeReq(pVnode->pTq, ver, pMsg->pCont, pMsg->contLen) < 0) { + (code = tqProcessTaskResumeReq(pVnode->pTq, ver, pMsg->pCont, pMsg->contLen)) < 0) { goto _err; } } break; case TDMT_VND_STREAM_TASK_RESET: { - if (pVnode->restored && vnodeIsLeader(pVnode)) { - if (tqProcessTaskResetReq(pVnode->pTq, pMsg) < 0) { + if (pVnode->restored && vnodeIsLeader(pVnode) && + (code = tqProcessTaskResetReq(pVnode->pTq, pMsg)) < 0) { goto _err; } - } + } break; case TDMT_VND_ALTER_CONFIRM: needCommit = pVnode->config.hashChange; @@ -693,10 +692,10 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t ver, SRpcMsg case TDMT_VND_DROP_INDEX: vnodeProcessDropIndexReq(pVnode, ver, pReq, len, pRsp); break; - case TDMT_VND_STREAM_CHECK_POINT_SOURCE: + case TDMT_VND_STREAM_CHECK_POINT_SOURCE: // always return true tqProcessTaskCheckPointSourceReq(pVnode->pTq, pMsg, pRsp); break; - case TDMT_VND_STREAM_TASK_UPDATE: + case TDMT_VND_STREAM_TASK_UPDATE: // always return true tqProcessTaskUpdateReq(pVnode->pTq, pMsg); break; case TDMT_VND_COMPACT: @@ -752,7 +751,7 @@ _exit: _err: vError("vgId:%d, process %s request failed since %s, ver:%" PRId64, TD_VID(pVnode), TMSG_INFO(pMsg->msgType), - tstrerror(terrno), ver); + tstrerror(code), ver); return code; } From 3367f129daf04dbd3731c4481dd81a1a0dacff10 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 10 Oct 2024 11:27:37 +0800 Subject: [PATCH 02/72] fix(vnd): check return value. 
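Same cleanup as PATCH 01, applied to the TDMT_STREAM_TASK_DEPLOY branch: the
callee's return value is captured inline in the condition, so the intermediate
`terrno = code` assignment is no longer needed and the shared _err label logs
tstrerror(code) directly. A minimal sketch of the pattern for reference only;
handle_deploy/process_msg below are hypothetical stand-ins, not the vnode code:

    /* Illustrative sketch only; names are hypothetical, not TDengine code. */
    #include <stdint.h>
    #include <stdio.h>

    #define TSDB_CODE_SUCCESS 0

    static int32_t handle_deploy(int fail) { return fail ? -1 : TSDB_CODE_SUCCESS; }

    static int32_t process_msg(int fail) {
      int32_t code = TSDB_CODE_SUCCESS;

      /* capture the return code inline so the shared error path sees it */
      if ((code = handle_deploy(fail)) != TSDB_CODE_SUCCESS) {
        goto _err;
      }
      return code;

    _err:
      /* logs the captured code, not a global errno */
      fprintf(stderr, "process request failed, code:%d\n", code);
      return code;
    }

    int main(void) {
      printf("ok path:  %d\n", process_msg(0));
      printf("err path: %d\n", process_msg(1));
      return 0;
    }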
--- source/dnode/vnode/src/vnd/vnodeSvr.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index 1b1bb9257d..dd13c975cf 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -633,9 +633,7 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t ver, SRpcMsg } break; case TDMT_STREAM_TASK_DEPLOY: { - int32_t code = tqProcessTaskDeployReq(pVnode->pTq, ver, pReq, len); - if (code != TSDB_CODE_SUCCESS) { - terrno = code; + if ((code = tqProcessTaskDeployReq(pVnode->pTq, ver, pReq, len)) != TSDB_CODE_SUCCESS) { goto _err; } } break; From 5dc933f5f1b80048a114dcd2d3ae652d05779c3c Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 10 Oct 2024 14:52:51 +0800 Subject: [PATCH 03/72] refactor: add some logs. --- source/common/src/tdatablock.c | 1 + source/dnode/vnode/src/tsdb/tsdbRead2.c | 4 +++- source/util/src/tarray.c | 1 + 3 files changed, 5 insertions(+), 1 deletion(-) diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index d8a66f82bf..0d00a6a4c7 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -2529,6 +2529,7 @@ int32_t dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf SColumnInfoData* pColInfoData = taosArrayGet(pDataBlock->pDataBlock, k); if (pColInfoData == NULL) { code = terrno; + uError("invalid param, size of list:%d index k:%d", (int32_t) taosArrayGetSize(pDataBlock->pDataBlock), k) goto _exit; } diff --git a/source/dnode/vnode/src/tsdb/tsdbRead2.c b/source/dnode/vnode/src/tsdb/tsdbRead2.c index 5b6511a38e..4e253d7c2e 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead2.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead2.c @@ -855,6 +855,7 @@ static int32_t loadFileBlockBrinInfo(STsdbReader* pReader, SArray* pIndexList, S STableBlockScanInfo** p = taosArrayGetLast(pTableScanInfoList); if (p == NULL) { clearBrinBlockIter(&iter); + tsdbError("invalid param, empty in tablescanInfoList, %s", pReader->idStr); return TSDB_CODE_INVALID_PARA; } @@ -5256,7 +5257,7 @@ int32_t tsdbNextDataBlock2(STsdbReader* pReader, bool* hasNext) { // NOTE: the following codes is used to perform test for suspend/resume for tsdbReader when it blocks the commit // the data should be ingested in round-robin and all the child tables should be createted before ingesting data // the version range of query will be used to identify the correctness of suspend/resume functions. 
- // this function will blocked before loading the SECOND block from vnode-buffer, and restart itself from sst-files + // this function will be blocked before loading the SECOND block from vnode-buffer, and restart itself from sst-files #if SUSPEND_RESUME_TEST if (!pReader->status.suspendInvoked && !pReader->status.loadFromFile) { tsem_wait(&pReader->resumeAfterSuspend); @@ -5909,6 +5910,7 @@ int32_t tsdbGetTableSchema(SMeta* pMeta, int64_t uid, STSchema** pSchema, int64_ } else if (mr.me.type == TSDB_NORMAL_TABLE) { // do nothing } else { code = TSDB_CODE_INVALID_PARA; + tsdbError("invalid mr.me.type:%d %s, code:%s", mr.me.type, tstrerror(code)); metaReaderClear(&mr); return code; } diff --git a/source/util/src/tarray.c b/source/util/src/tarray.c index 7989a2468b..b94bb512e2 100644 --- a/source/util/src/tarray.c +++ b/source/util/src/tarray.c @@ -200,6 +200,7 @@ void* taosArrayPop(SArray* pArray) { void* taosArrayGet(const SArray* pArray, size_t index) { if (NULL == pArray) { terrno = TSDB_CODE_INVALID_PARA; + uError("failed to return value from array of null ptr"); return NULL; } From 5c1cffed692e27fcbc042bd2f2c571c3b5357a8d Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 10 Oct 2024 15:54:15 +0800 Subject: [PATCH 04/72] fix(stream): add some logs. --- source/dnode/vnode/src/tqCommon/tqCommon.c | 7 ++----- source/libs/stream/src/streamMeta.c | 4 ++++ source/libs/stream/src/streamSched.c | 6 +++++- 3 files changed, 11 insertions(+), 6 deletions(-) diff --git a/source/dnode/vnode/src/tqCommon/tqCommon.c b/source/dnode/vnode/src/tqCommon/tqCommon.c index 6b7e857120..3871011407 100644 --- a/source/dnode/vnode/src/tqCommon/tqCommon.c +++ b/source/dnode/vnode/src/tqCommon/tqCommon.c @@ -1119,10 +1119,6 @@ static int32_t tqProcessTaskResumeImpl(void* handle, SStreamTask* pTask, int64_t int32_t vgId = pMeta->vgId; int32_t code = 0; - if (pTask == NULL) { - return -1; - } - streamTaskResume(pTask); ETaskStatus status = streamTaskGetStatus(pTask).state; @@ -1150,7 +1146,6 @@ static int32_t tqProcessTaskResumeImpl(void* handle, SStreamTask* pTask, int64_t } } - streamMetaReleaseTask(pMeta, pTask); return code; } @@ -1173,6 +1168,7 @@ int32_t tqStreamTaskProcessTaskResumeReq(void* handle, int64_t sversion, char* m code = tqProcessTaskResumeImpl(handle, pTask, sversion, pReq->igUntreated, fromVnode); if (code != 0) { + streamMetaReleaseTask(pMeta, pTask); return code; } @@ -1186,6 +1182,7 @@ int32_t tqStreamTaskProcessTaskResumeReq(void* handle, int64_t sversion, char* m streamMutexUnlock(&pHTask->lock); code = tqProcessTaskResumeImpl(handle, pHTask, sversion, pReq->igUntreated, fromVnode); + streamMetaReleaseTask(pMeta, pHTask); } return code; diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index 44c9e76906..29152c6205 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -759,6 +759,10 @@ void streamMetaAcquireOneTask(SStreamTask* pTask) { } void streamMetaReleaseTask(SStreamMeta* UNUSED_PARAM(pMeta), SStreamTask* pTask) { + if (pTask == NULL) { + return; + } + int32_t taskId = pTask->id.taskId; int32_t ref = atomic_sub_fetch_32(&pTask->refCnt, 1); diff --git a/source/libs/stream/src/streamSched.c b/source/libs/stream/src/streamSched.c index 98920e6f70..095a5af6d4 100644 --- a/source/libs/stream/src/streamSched.c +++ b/source/libs/stream/src/streamSched.c @@ -63,7 +63,11 @@ int32_t streamTaskSchedTask(SMsgCb* pMsgCb, int32_t vgId, int64_t streamId, int3 pRunReq->reqType = execType; SRpcMsg msg = {.msgType = 
TDMT_STREAM_TASK_RUN, .pCont = pRunReq, .contLen = sizeof(SStreamTaskRunReq)}; - return tmsgPutToQueue(pMsgCb, STREAM_QUEUE, &msg); + int32_t code = tmsgPutToQueue(pMsgCb, STREAM_QUEUE, &msg); + if (code) { + stError("vgId:%d failed to put msg into stream queue, code:%s, %x", vgId, tstrerror(code), taskId); + } + return code; } void streamTaskClearSchedIdleInfo(SStreamTask* pTask) { pTask->status.schedIdleTime = 0; } From d2f2a931fb0cf58c27434a04b6e8ad9a53fb9916 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 10 Oct 2024 16:21:11 +0800 Subject: [PATCH 05/72] fix(util): reset the returned length value. --- source/common/src/tdatablock.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 2047573b74..98e58c8bd7 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -2530,7 +2530,6 @@ int32_t dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf SColumnInfoData* pColInfoData = taosArrayGet(pDataBlock->pDataBlock, k); if (pColInfoData == NULL) { code = terrno; - uError("invalid param, size of list:%d index k:%d", (int32_t) taosArrayGetSize(pDataBlock->pDataBlock), k) goto _exit; } @@ -2611,7 +2610,10 @@ int32_t dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf if (code < 0) { uError("func %s failed to convert to ucs charset since %s", __func__, tstrerror(code)); goto _exit; + } else { // reset the length value + code = TSDB_CODE_SUCCESS; } + len += snprintf(dumpBuf + len, size - len, " %15s |", pBuf); if (len >= size - 1) goto _exit; } break; From 6e43521ba9fb2e6d4b580ef09639a16bc3a5e9d8 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 10 Oct 2024 18:28:14 +0800 Subject: [PATCH 06/72] fix(stream): only keep the latest pause operation status. --- source/libs/stream/src/streamTaskSm.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/source/libs/stream/src/streamTaskSm.c b/source/libs/stream/src/streamTaskSm.c index 3501d30be4..a10c4c30d5 100644 --- a/source/libs/stream/src/streamTaskSm.c +++ b/source/libs/stream/src/streamTaskSm.c @@ -485,6 +485,11 @@ int32_t streamTaskHandleEventAsync(SStreamTaskSM* pSM, EStreamTaskEvent event, _ static void keepPrevInfo(SStreamTaskSM* pSM) { STaskStateTrans* pTrans = pSM->pActiveTrans; + // we only keep the latest pause state + if (pSM->prev.state.state == TASK_STATUS__PAUSE && pSM->current.state == TASK_STATUS__PAUSE) { + return; + } + pSM->prev.state = pSM->current; pSM->prev.evt = pTrans->event; } @@ -501,9 +506,10 @@ int32_t streamTaskOnHandleEventSuccess(SStreamTaskSM* pSM, EStreamTaskEvent even if (pTrans == NULL) { ETaskStatus s = pSM->current.state; - if (s != TASK_STATUS__DROPPING && s != TASK_STATUS__PAUSE && s != TASK_STATUS__STOP && - s != TASK_STATUS__UNINIT && s != TASK_STATUS__READY) { - stError("s-task:%s invalid task status:%s on handling event:%s success", id, pSM->current.name, GET_EVT_NAME(pSM->prev.evt)); + if (s != TASK_STATUS__DROPPING && s != TASK_STATUS__PAUSE && s != TASK_STATUS__STOP && s != TASK_STATUS__UNINIT && + s != TASK_STATUS__READY) { + stError("s-task:%s invalid task status:%s on handling event:%s success", id, pSM->current.name, + GET_EVT_NAME(pSM->prev.evt)); } // the pSM->prev.evt may be 0, so print string is not appropriate. 
From 7e1c6b07392f21b29c221af1399b73754ba61617 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 10 Oct 2024 18:34:52 +0800 Subject: [PATCH 07/72] fix(stream): avoid the later pause overwrite the previous pause state. --- source/libs/stream/src/streamTaskSm.c | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/source/libs/stream/src/streamTaskSm.c b/source/libs/stream/src/streamTaskSm.c index a10c4c30d5..17d5d884a7 100644 --- a/source/libs/stream/src/streamTaskSm.c +++ b/source/libs/stream/src/streamTaskSm.c @@ -485,11 +485,6 @@ int32_t streamTaskHandleEventAsync(SStreamTaskSM* pSM, EStreamTaskEvent event, _ static void keepPrevInfo(SStreamTaskSM* pSM) { STaskStateTrans* pTrans = pSM->pActiveTrans; - // we only keep the latest pause state - if (pSM->prev.state.state == TASK_STATUS__PAUSE && pSM->current.state == TASK_STATUS__PAUSE) { - return; - } - pSM->prev.state = pSM->current; pSM->prev.evt = pTrans->event; } @@ -527,10 +522,13 @@ int32_t streamTaskOnHandleEventSuccess(SStreamTaskSM* pSM, EStreamTaskEvent even return TSDB_CODE_STREAM_INVALID_STATETRANS; } - keepPrevInfo(pSM); + // repeat pause will not overwrite the previous pause state + if (pSM->current.state != TASK_STATUS__PAUSE || pTrans->next.state != TASK_STATUS__PAUSE) { + keepPrevInfo(pSM); - pSM->current = pTrans->next; - pSM->pActiveTrans = NULL; + pSM->current = pTrans->next; + pSM->pActiveTrans = NULL; + } // todo remove it // todo: handle the error code From a197b20466f111589d3822aa51f3e47eaf7fbf12 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 10 Oct 2024 19:28:08 +0800 Subject: [PATCH 08/72] other: update logs. --- source/libs/stream/src/streamTimer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/stream/src/streamTimer.c b/source/libs/stream/src/streamTimer.c index 8b77fe7cb1..0da9acfd1d 100644 --- a/source/libs/stream/src/streamTimer.c +++ b/source/libs/stream/src/streamTimer.c @@ -56,7 +56,7 @@ void streamTmrStart(TAOS_TMR_CALLBACK fp, int32_t mseconds, void* pParam, void* } } - stDebug("vgId:%d start %s tmr succ", vgId, pMsg); + stTrace("vgId:%d start %s tmr succ", vgId, pMsg); } void streamTmrStop(tmr_h tmrId) { From 50ceb19cbff69a3f96d7230e9f7b03667614f1bc Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 11 Oct 2024 19:46:54 +0800 Subject: [PATCH 09/72] fix(stream): reset the activeTrans if pause recv repeatly. --- source/libs/stream/src/streamTaskSm.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/source/libs/stream/src/streamTaskSm.c b/source/libs/stream/src/streamTaskSm.c index 17d5d884a7..c3a2742aa2 100644 --- a/source/libs/stream/src/streamTaskSm.c +++ b/source/libs/stream/src/streamTaskSm.c @@ -525,11 +525,12 @@ int32_t streamTaskOnHandleEventSuccess(SStreamTaskSM* pSM, EStreamTaskEvent even // repeat pause will not overwrite the previous pause state if (pSM->current.state != TASK_STATUS__PAUSE || pTrans->next.state != TASK_STATUS__PAUSE) { keepPrevInfo(pSM); - pSM->current = pTrans->next; - pSM->pActiveTrans = NULL; + } else { + stDebug("s-task:%s repeat pause evt recv, not update prev status", id); } + pSM->pActiveTrans = NULL; // todo remove it // todo: handle the error code // on success callback, add into lock if necessary, or maybe we should add an option for this? 
From 618ec35190d5fa9f86f227767f87db30002aa621 Mon Sep 17 00:00:00 2001 From: charles Date: Sat, 12 Oct 2024 15:15:15 +0800 Subject: [PATCH 10/72] update encrypt test case for ts-5507 by charles --- tests/army/db-encrypt/basic.py | 30 ++++++++++++++++++++++++++++++ tests/parallel_test/cases.task | 2 +- 2 files changed, 31 insertions(+), 1 deletion(-) diff --git a/tests/army/db-encrypt/basic.py b/tests/army/db-encrypt/basic.py index 8d30bbcfe2..ea648f1b8f 100644 --- a/tests/army/db-encrypt/basic.py +++ b/tests/army/db-encrypt/basic.py @@ -13,6 +13,7 @@ from frame.srvCtl import * from frame.caseBase import * from frame import * from frame.autogen import * +from frame import epath # from frame.server.dnodes import * # from frame.server.cluster import * @@ -20,7 +21,9 @@ from frame.autogen import * class TDTestCase(TBase): def init(self, conn, logSql, replicaVar=1): + updatecfgDict = {'dDebugFlag':131} super(TDTestCase, self).init(conn, logSql, replicaVar=1, checkColName="c1") + self.valgrind = 0 self.db = "test" self.stb = "meters" @@ -50,9 +53,36 @@ class TDTestCase(TBase): tdSql.error("create encrypt_key '12345678abcdefghi'") tdSql.error("create database test ENCRYPT_ALGORITHM 'sm4'") + def recreate_dndoe_encrypt_key(self): + """ + Description: From the jira TS-5507, the encrypt key can be recreated. + create: + 2024-09-23 created by Charles + update: + None + """ + # taosd path + taosd_path = epath.binPath() + tdLog.info(f"taosd_path: {taosd_path}") + # dnode2 path + dndoe2_path = tdDnodes.getDnodeDir(2) + dnode2_data_path = os.sep.join([dndoe2_path, "data"]) + dnode2_cfg_path = os.sep.join([dndoe2_path, "cfg"]) + tdLog.info(f"dnode2_path: {dnode2_data_path}") + # stop dnode2 + tdDnodes.stoptaosd(2) + tdLog.info("stop dndoe2") + # delete dndoe2 data + cmd = f"rm -rf {dnode2_data_path}" + os.system(cmd) + # recreate the encrypt key for dnode2 + os.system(f"{os.sep.join([taosd_path, "taosd"])} -y '1234567890' -c {dnode2_cfg_path}") + tdLog.info("test case: recreate the encrypt key for dnode2 passed") + def run(self): self.create_encrypt_db_error() self.create_encrypt_db() + self.recreate_dndoe_encrypt_key() def stop(self): tdSql.close() diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 0d3ed1f8e6..5d94c2a6b1 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -10,7 +10,7 @@ # army-test # ,,y,army,./pytest.sh python3 ./test.py -f multi-level/mlevel_basic.py -N 3 -L 3 -D 2 -,,y,army,./pytest.sh python3 ./test.py -f db-encrypt/basic.py +,,y,army,./pytest.sh python3 ./test.py -f db-encrypt/basic.py -N 3 -M 3 ,,n,army,python3 ./test.py -f storage/s3/s3Basic.py -N 3 ,,y,army,./pytest.sh python3 ./test.py -f cluster/snapshot.py -N 3 -L 3 -D 2 ,,y,army,./pytest.sh python3 ./test.py -f query/function/test_func_elapsed.py From 1980866fd94c7b2e8676a45b6666a3aae50258ab Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Sat, 12 Oct 2024 17:03:40 +0800 Subject: [PATCH 11/72] test:Add a function to verify the consistency between the expected result file and the actual output file --- tests/army/frame/common.py | 48 +++++++++ tests/army/query/function/ans/pi_1.csv | 114 +++++++++++++++++++++ tests/army/query/function/in/pi.in | 41 ++++---- tests/army/query/function/test_function.py | 15 ++- tests/system-test/7-tmq/ts-4674.py | 50 ++------- 5 files changed, 201 insertions(+), 67 deletions(-) create mode 100644 tests/army/query/function/ans/pi_1.csv diff --git a/tests/army/frame/common.py b/tests/army/frame/common.py index 913e88a7ad..a91866c0e1 
100644 --- a/tests/army/frame/common.py +++ b/tests/army/frame/common.py @@ -18,6 +18,7 @@ import time import socket import json import toml +import subprocess from frame.boundary import DataBoundary import taos from frame.log import * @@ -1830,6 +1831,51 @@ class TDCom: if i == 1: self.record_history_ts = ts_value + def generate_query_result(self, inputfile, test_case): + if not os.path.exists(inputfile): + tdLog.exit(f"Input file '{inputfile}' does not exist.") + else: + self.query_result_file = f"./temp_{test_case}.result" + os.system(f"taos -f {inputfile} | grep -v 'Query OK'|grep -v 'Copyright'| grep -v 'Welcome to the TDengine Command' > {self.query_result_file} ") + return self.query_result_file + + def compare_result_files(self, file1, file2): + + try: + # 使用 subprocess.run 来执行 diff/fc 命令 + # print(file1, file2) + if platform.system().lower() != 'windows': + cmd='diff' + result = subprocess.run([cmd, "-u", "--color", file1, file2], text=True, capture_output=True) + else: + cmd='fc' + result = subprocess.run([cmd, file1, file2], text=True, capture_output=True) + # 如果输出不为空,则打印差异 + if result.stdout: + tdLog.debug(f"Differences between {file1} and {file2}") + tdLog.notice(f"\r\n{result.stdout}") + return False + else: + return True + except FileNotFoundError: + tdLog.debug("The 'diff' command is not found. Please make sure it's installed and available in your PATH.") + except Exception as e: + tdLog.debug(f"An error occurred: {e}") + + + def compare_testcase_result(self, inputfile,expected_file,test_case): + test_reulst_file = self.generate_query_result(inputfile,test_case) + + if self.compare_result_files(expected_file, test_reulst_file ): + tdLog.info("Test passed: Result files are identical.") + os.system(f"rm -f {test_reulst_file}") + else: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + tdLog.exit(f"{caller.lineno}(line:{caller.lineno}) failed: sqlfile is {inputfile}, expect_file:{expected_file} != reult_file:{test_reulst_file} ") + + tdLog.exit("Test failed: Result files are different.") + + def is_json(msg): if isinstance(msg, str): try: @@ -1864,4 +1910,6 @@ def dict2toml(in_dict: dict, file:str): with open(file, 'w') as f: toml.dump(in_dict, f) + + tdCom = TDCom() diff --git a/tests/army/query/function/ans/pi_1.csv b/tests/army/query/function/ans/pi_1.csv new file mode 100644 index 0000000000..6f9baf1a71 --- /dev/null +++ b/tests/army/query/function/ans/pi_1.csv @@ -0,0 +1,114 @@ + +taos> select pi() + pi() | +============================ + 3.141592653589793 | + +taos> select pi() + 1 + pi() + 1 | +============================ + 4.141592653589793 | + +taos> select pi() - 1 + pi() - 1 | +============================ + 2.141592653589793 | + +taos> select pi() * 2 + pi() * 2 | +============================ + 6.283185307179586 | + +taos> select pi() / 2 + pi() / 2 | +============================ + 1.570796326794897 | + +taos> select pi() from ts_4893.meters limit 5 + pi() | +============================ + 3.141592653589793 | + 3.141592653589793 | + 3.141592653589793 | + 3.141592653589793 | + 3.141592653589793 | + +taos> select pi() + 1 from ts_4893.meters limit 1 + pi() + 1 | +============================ + 4.141592653589793 | + +taos> select pi() - 1 from ts_4893.meters limit 1 + pi() - 1 | +============================ + 2.141592653589793 | + +taos> select pi() * 2 from ts_4893.meters limit 1 + pi() * 2 | +============================ + 6.283185307179586 | + +taos> select pi() / 2 from ts_4893.meters limit 1 + pi() / 2 | +============================ + 1.570796326794897 
| + +taos> select pi() + pi() from ts_4893.meters limit 1 + pi() + pi() | +============================ + 6.283185307179586 | + +taos> select pi() - pi() from ts_4893.meters limit 1 + pi() - pi() | +============================ + 0.000000000000000 | + +taos> select pi() * pi() from ts_4893.meters limit 1 + pi() * pi() | +============================ + 9.869604401089358 | + +taos> select pi() / pi() from ts_4893.meters limit 1 + pi() / pi() | +============================ + 1.000000000000000 | + +taos> select pi() + id from ts_4893.meters order by ts limit 5 + pi() + id | +============================ + 3.141592653589793 | + 4.141592653589793 | + 5.141592653589793 | + 6.141592653589793 | + 7.141592653589793 | + +taos> select abs(pi()) + abs(pi()) | +============================ + 3.141592653589793 | + +taos> select pow(pi(), 2) + pow(pi(), 2) | +============================ + 9.869604401089358 | + +taos> select sqrt(pi()) + sqrt(pi()) | +============================ + 1.772453850905516 | + +taos> select cast(pi() as int) + cast(pi() as int) | +==================== + 3 | + +taos> select pi() + pi() | +============================ + 3.141592653589793 | + +taos> select substring_index(null, '.', 2) + substring_index(null, '.', 2) | +================================ + NULL | + diff --git a/tests/army/query/function/in/pi.in b/tests/army/query/function/in/pi.in index c0ccc0b079..dc5d24b655 100644 --- a/tests/army/query/function/in/pi.in +++ b/tests/army/query/function/in/pi.in @@ -1,20 +1,21 @@ -select pi(); -select pi() + 1; -select pi() - 1; -select pi() * 2; -select pi() / 2; -select pi() from ts_4893.meters limit 5; -select pi() + 1 from ts_4893.meters limit 1; -select pi() - 1 from ts_4893.meters limit 1; -select pi() * 2 from ts_4893.meters limit 1; -select pi() / 2 from ts_4893.meters limit 1; -select pi() + pi() from ts_4893.meters limit 1; -select pi() - pi() from ts_4893.meters limit 1; -select pi() * pi() from ts_4893.meters limit 1; -select pi() / pi() from ts_4893.meters limit 1; -select pi() + id from ts_4893.meters order by ts limit 5; -select abs(pi()); -select pow(pi(), 2); -select sqrt(pi()); -select cast(pi() as int); -select pi(); +select pi() +select pi() + 1 +select pi() - 1 +select pi() * 2 +select pi() / 2 +select pi() from ts_4893.meters limit 5 +select pi() + 1 from ts_4893.meters limit 1 +select pi() - 1 from ts_4893.meters limit 1 +select pi() * 2 from ts_4893.meters limit 1 +select pi() / 2 from ts_4893.meters limit 1 +select pi() + pi() from ts_4893.meters limit 1 +select pi() - pi() from ts_4893.meters limit 1 +select pi() * pi() from ts_4893.meters limit 1 +select pi() / pi() from ts_4893.meters limit 1 +select pi() + id from ts_4893.meters order by ts limit 5 +select abs(pi()) +select pow(pi(), 2) +select sqrt(pi()) +select cast(pi() as int) +select pi() +select substring_index(null, '.', 2) diff --git a/tests/army/query/function/test_function.py b/tests/army/query/function/test_function.py index 18a0d46711..aae0cf6eee 100644 --- a/tests/army/query/function/test_function.py +++ b/tests/army/query/function/test_function.py @@ -17,14 +17,15 @@ import random import taos import frame -import frame.etool +from frame.etool import * from frame.log import * from frame.cases import * from frame.sql import * from frame.caseBase import * -from frame import * +from frame import etool +from frame.common import * class TDTestCase(TBase): updatecfgDict = { @@ -84,8 +85,16 @@ class TDTestCase(TBase): tdSql.error(err_statement) err_statement = '' + def test_normal_query_new(self, 
testCase): + # read sql from .sql file and execute + tdLog.info(f"test normal query.") + self.sqlFile = etool.curFile(__file__, f"in/{testCase}.in") + self.ansFile = etool.curFile(__file__, f"ans/{testCase}_1.csv") + + tdCom.compare_testcase_result(self.sqlFile, self.ansFile, testCase) + def test_pi(self): - self.test_normal_query("pi") + self.test_normal_query_new("pi") def test_round(self): self.test_normal_query("round") diff --git a/tests/system-test/7-tmq/ts-4674.py b/tests/system-test/7-tmq/ts-4674.py index 709debaef1..0b3dc1b077 100644 --- a/tests/system-test/7-tmq/ts-4674.py +++ b/tests/system-test/7-tmq/ts-4674.py @@ -24,45 +24,6 @@ class TDTestCase: tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor()) - #tdSql.init(conn.cursor(), logSql) # output sql.txt file - - # def consume_TS_4674_Test(self): - # - # os.system("nohup taosBenchmark -y -B 1 -t 4 -S 1000 -n 1000000 -i 1000 -v 1 -a 3 > /dev/null 2>&1 &") - # time.sleep() - # tdSql.execute(f'create topic topic_all with meta as database test') - # consumer_dict = { - # "group.id": "g1", - # "td.connect.user": "root", - # "td.connect.pass": "taosdata", - # "auto.offset.reset": "earliest", - # } - # consumer = Consumer(consumer_dict) - # - # try: - # consumer.subscribe(["topic_all"]) - # except TmqError: - # tdLog.exit(f"subscribe error") - # - # try: - # while True: - # res = consumer.poll(5) - # if not res: - # print(f"null") - # continue - # val = res.value() - # if val is None: - # print(f"null") - # continue - # cnt = 0; - # for block in val: - # cnt += len(block.fetchall()) - # - # print(f"block {cnt} rows") - # - # finally: - # consumer.close() - def get_leader(self): tdLog.debug("get leader") tdSql.query("show vnodes") @@ -74,19 +35,20 @@ class TDTestCase: def balance_vnode(self): leader_before = self.get_leader() - + tdSql.query("balance vgroup leader") while True: leader_after = -1 - tdSql.query("balance vgroup leader") + tdLog.debug("balancing vgroup leader") while True: + tdLog.debug("get new vgroup leader") leader_after = self.get_leader() if leader_after != -1 : - break; + break else: time.sleep(1) if leader_after != leader_before: tdLog.debug("leader changed") - break; + break else : time.sleep(1) @@ -115,7 +77,7 @@ class TDTestCase: except TmqError: tdLog.exit(f"subscribe error") - cnt = 0; + cnt = 0 balance = False try: while True: From e7b5c72ff23d3f75f4332c758cf06240efaac7a4 Mon Sep 17 00:00:00 2001 From: Feng Chao Date: Sat, 12 Oct 2024 18:13:11 +0800 Subject: [PATCH 12/72] Update basic.py --- tests/army/db-encrypt/basic.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/army/db-encrypt/basic.py b/tests/army/db-encrypt/basic.py index ea648f1b8f..a93b85f0da 100644 --- a/tests/army/db-encrypt/basic.py +++ b/tests/army/db-encrypt/basic.py @@ -76,7 +76,7 @@ class TDTestCase(TBase): cmd = f"rm -rf {dnode2_data_path}" os.system(cmd) # recreate the encrypt key for dnode2 - os.system(f"{os.sep.join([taosd_path, "taosd"])} -y '1234567890' -c {dnode2_cfg_path}") + os.system(f"{os.sep.join([taosd_path, 'taosd'])} -y '1234567890' -c {dnode2_cfg_path}") tdLog.info("test case: recreate the encrypt key for dnode2 passed") def run(self): From 11ed8a1540fe444531d50ad920a1b49e919e894b Mon Sep 17 00:00:00 2001 From: Jing Sima Date: Wed, 25 Sep 2024 16:14:47 +0800 Subject: [PATCH 13/72] fix:[TD-32184] Support backwards compatibility for function's result info. 
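This change stamps every persisted result row with a format version
(FUNCTION_RESULT_INFO_VERSION), writes each function's SResultRowEntryInfo
with an int32 length prefix, and lets a function register a decode hook
(FExecDecode) that receives that version, so a newer binary can still read
rows flushed to RocksDB by an older one. A minimal, self-contained sketch of
the framing idea; the struct names and layout below are illustrative only,
not the actual SResultRow encoding:

    /* Illustrative sketch: version-tagged, length-prefixed row entries. */
    /* RowHead/encode_row/decode_row are hypothetical, not TDengine code. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define ROW_FORMAT_VERSION 1

    typedef struct {
      int32_t version;    /* stamped on every row that is persisted */
      int32_t numEntries; /* number of per-function entries that follow */
    } RowHead;

    /* layout: [RowHead][len0][entry0][len1][entry1]... */
    static size_t encode_row(const RowHead *head, const char **entries,
                             const int32_t *lens, char *out) {
      char *p = out;
      memcpy(p, head, sizeof(*head));
      p += sizeof(*head);
      for (int32_t i = 0; i < head->numEntries; ++i) {
        memcpy(p, &lens[i], sizeof(int32_t));
        p += sizeof(int32_t);
        memcpy(p, entries[i], (size_t)lens[i]);
        p += lens[i];
      }
      return (size_t)(p - out);
    }

    /* the length prefix lets the reader skip entries whose in-memory size */
    /* changed; a per-entry hook could translate old layouts based on version */
    static void decode_row(const char *in) {
      RowHead head;
      memcpy(&head, in, sizeof(head));
      const char *p = in + sizeof(head);
      for (int32_t i = 0; i < head.numEntries; ++i) {
        int32_t len;
        memcpy(&len, p, sizeof(len));
        p += sizeof(len);
        printf("v%d entry %d: %.*s\n", head.version, i, (int)len, p);
        p += len;
      }
    }

    int main(void) {
      const char *entries[] = {"sum-state", "top-state"};
      int32_t     lens[]    = {9, 9};
      RowHead     head      = {.version = ROW_FORMAT_VERSION, .numEntries = 2};
      char        buf[128];

      size_t n = encode_row(&head, entries, lens, buf);
      printf("encoded %zu bytes\n", n);
      decode_row(buf);
      return 0;
    }

The per-entry length prefix is what tolerates entries whose serialized size
changes between releases, while the version field tells the function's decode
hook which layout it is looking at.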
--- include/libs/function/function.h | 12 ++ source/libs/executor/inc/executil.h | 4 + source/libs/executor/src/executil.c | 116 ++++++++++++++++++ .../executor/src/streamtimewindowoperator.c | 5 + 4 files changed, 137 insertions(+) diff --git a/include/libs/function/function.h b/include/libs/function/function.h index 7ca046762a..c66d74a905 100644 --- a/include/libs/function/function.h +++ b/include/libs/function/function.h @@ -29,6 +29,7 @@ struct SqlFunctionCtx; struct SResultRowEntryInfo; struct SFunctionNode; +struct SExprSupp; typedef struct SScalarParam SScalarParam; typedef struct SStreamState SStreamState; @@ -43,6 +44,7 @@ typedef int32_t (*FExecProcess)(struct SqlFunctionCtx *pCtx); typedef int32_t (*FExecFinalize)(struct SqlFunctionCtx *pCtx, SSDataBlock *pBlock); typedef int32_t (*FScalarExecProcess)(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput); typedef int32_t (*FExecCombine)(struct SqlFunctionCtx *pDestCtx, struct SqlFunctionCtx *pSourceCtx); +typedef int32_t (*FExecDecode)(struct SqlFunctionCtx *pCtx, const char *buf, struct SResultRowEntryInfo *pResultCellInfo, int32_t version); typedef int32_t (*processFuncByRow)(SArray* pCtx); // array of SqlFunctionCtx typedef struct SScalarFuncExecFuncs { @@ -57,6 +59,7 @@ typedef struct SFuncExecFuncs { FExecFinalize finalize; FExecCombine combine; FExecCleanUp cleanup; + FExecDecode decode; processFuncByRow processFuncByRow; } SFuncExecFuncs; @@ -65,6 +68,8 @@ typedef struct SFuncExecFuncs { #define TOP_BOTTOM_QUERY_LIMIT 100 #define FUNCTIONS_NAME_MAX_LENGTH 32 +#define FUNCTION_RESULT_INFO_VERSION 1 + typedef struct SResultRowEntryInfo { bool initialized : 1; // output buffer has been initialized bool complete : 1; // query has completed @@ -165,6 +170,11 @@ typedef struct STdbState { void *txn; } STdbState; +typedef struct SResultRowStore { + int32_t (*resultRowPut)(struct SExprSupp *pSup, const char* inBuf, size_t inBufSize, char **outBuf, size_t *outBufSize); + int32_t (*resultRowGet)(struct SExprSupp *pSup, const char* inBuf, size_t inBufSize, char **outBuf, size_t *outBufSize); +} SResultRowStore; + struct SStreamState { STdbState *pTdbState; struct SStreamFileState *pFileState; @@ -175,6 +185,8 @@ struct SStreamState { int64_t streamBackendRid; int8_t dump; int32_t tsIndex; + SResultRowStore *pResultRowStore; + struct SExprSupp *pExprSupp; }; typedef struct SFunctionStateStore { diff --git a/source/libs/executor/inc/executil.h b/source/libs/executor/inc/executil.h index 95035dd96f..9e36a29476 100644 --- a/source/libs/executor/inc/executil.h +++ b/source/libs/executor/inc/executil.h @@ -48,6 +48,7 @@ typedef struct SGroupResInfo { } SGroupResInfo; typedef struct SResultRow { + int32_t version; int32_t pageId; // pageId & rowId is the position of current result in disk-based output buffer int32_t offset : 29; // row index in buffer page bool startInterp; // the time window start timestamp has done the interpolation already. 
@@ -152,6 +153,9 @@ static FORCE_INLINE SResultRow* getResultRowByPos(SDiskbasedBuf* pBuf, SResultRo return pRow; } +int32_t getResultRowFromBuf(struct SExprSupp *pSup, const char* inBuf, size_t inBufSize, char **outBuf, size_t *outBufSize); +int32_t putResultRowToBuf(struct SExprSupp *pSup, const char* inBuf, size_t inBufSize, char **outBuf, size_t *outBufSize); + int32_t initGroupedResultInfo(SGroupResInfo* pGroupResInfo, SSHashObj* pHashmap, int32_t order); void cleanupGroupResInfo(SGroupResInfo* pGroupResInfo); diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index 4fe45ff72e..141d64bfd1 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -93,6 +93,122 @@ size_t getResultRowSize(SqlFunctionCtx* pCtx, int32_t numOfOutput) { return rowSize; } +// Convert buf read from rocksdb to result row +int32_t getResultRowFromBuf(SExprSupp *pSup, const char* inBuf, size_t inBufSize, char **outBuf, size_t *outBufSize) { + SqlFunctionCtx *pCtx = pSup->pCtx; + int32_t *offset = pSup->rowEntryInfoOffset; + SResultRow *pResultRow = (SResultRow*)outBuf; + size_t processedSize = 0; + int32_t code = TSDB_CODE_SUCCESS; + if (inBuf == NULL) { + qError("invalid input buffer, inBuf:%p", inBuf); + return TSDB_CODE_INVALID_PARA; + } + + // calculate the size of output buffer + *outBufSize = getResultRowSize(pCtx, pSup->numOfExprs); + *outBuf = taosMemoryMalloc(*outBufSize); + if (*outBuf == NULL) { + qError("failed to allocate memory for output buffer, size:%zu", *outBufSize); + return terrno; + } + (void)memcpy(pResultRow, inBuf, sizeof(SResultRow)); + inBuf += sizeof(SResultRow); + processedSize += sizeof(SResultRow); + for (int32_t i = 0; i < pSup->numOfExprs; ++i) { + int32_t len = *(int32_t*)inBuf; + inBuf += sizeof(int32_t); + processedSize += sizeof(int32_t); + if (pCtx->fpSet.decode) { + code = pCtx->fpSet.decode(&pCtx[i], inBuf, getResultEntryInfo(pResultRow, i, offset), pResultRow->version); + if (code != TSDB_CODE_SUCCESS) { + qError("failed to decode result row, code:%d", code); + return code; + } + } else { + (void)memcpy(getResultEntryInfo(pResultRow, i, offset), inBuf, len); + } + inBuf += len; + processedSize += len; + } + void *pos = getResultEntryInfo(pResultRow, pSup->numOfExprs - 1, offset) + + sizeof(SResultRowEntryInfo) + + pCtx[pSup->numOfExprs - 1].resDataInfo.interBufSize; + (void)memcpy(pos, inBuf, pSup->numOfExprs * sizeof(bool)); + inBuf += pSup->numOfExprs * sizeof(bool); + processedSize += pSup->numOfExprs * sizeof(bool); + + if (processedSize < inBufSize) { + // stream stores extra data after result row + size_t leftLen = inBufSize - processedSize; + TAOS_MEMORY_REALLOC(*outBuf, *outBufSize + leftLen); + if (*outBuf == NULL) { + qError("failed to reallocate memory for output buffer, size:%zu", *outBufSize + leftLen); + return terrno; + } + (void)memcpy(outBuf + processedSize, inBuf, leftLen); + inBuf += leftLen; + processedSize += leftLen; + *outBufSize += leftLen; + } + return TSDB_CODE_SUCCESS; +} + +// Convert result row to buf for rocksdb +int32_t putResultRowToBuf(SExprSupp *pSup, const char* inBuf, size_t inBufSize, char **outBuf, size_t *outBufSize) { + SqlFunctionCtx *pCtx = pSup->pCtx; + int32_t *offset = pSup->rowEntryInfoOffset; + SResultRow *pResultRow = (SResultRow*)inBuf; + size_t rowSize = getResultRowSize(pCtx, pSup->numOfExprs); + + if (inBuf == NULL) { + qError("invalid input buffer, inBuf:%p", inBuf); + return TSDB_CODE_INVALID_PARA; + } + if (rowSize > inBufSize) { + qError("invalid 
input buffer size, rowSize:%zu, inBufSize:%zu", rowSize, inBufSize); + return TSDB_CODE_INVALID_PARA; + } + + // calculate the size of output buffer + *outBufSize = rowSize + sizeof(int32_t) * pSup->numOfExprs; + if (rowSize < inBufSize) { + *outBufSize += inBufSize - rowSize; + } + + *outBuf = taosMemoryMalloc(*outBufSize); + if (*outBuf == NULL) { + qError("failed to allocate memory for output buffer, size:%zu", *outBufSize); + return terrno; + } + + pResultRow->version = FUNCTION_RESULT_INFO_VERSION; + (void)memcpy(outBuf, pResultRow, sizeof(SResultRow)); + outBuf += sizeof(SResultRow); + for (int32_t i = 0; i < pSup->numOfExprs; ++i) { + *(int32_t *) outBuf = offset[i]; + outBuf += sizeof(int32_t); + size_t len = sizeof(SResultRowEntryInfo) + pCtx[i].resDataInfo.interBufSize; + (void)memcpy(outBuf, getResultEntryInfo(pResultRow, i, offset), len); + outBuf += len; + } + + // mark if col is null for top/bottom result(saveTupleData) + void *pos = getResultEntryInfo(pResultRow, pSup->numOfExprs - 1, offset) + + sizeof(SResultRowEntryInfo) + + pCtx[pSup->numOfExprs - 1].resDataInfo.interBufSize; + + (void)memcpy(outBuf, pos, pSup->numOfExprs * sizeof(bool)); + + if (rowSize < inBufSize) { + // stream stores extra data after result row + size_t leftLen = inBufSize - rowSize; + (void)memcpy(outBuf, inBuf + rowSize, leftLen); + outBuf += leftLen; + } + return TSDB_CODE_SUCCESS; +} + static void freeEx(void* p) { taosMemoryFree(*(void**)p); } void cleanupGroupResInfo(SGroupResInfo* pGroupResInfo) { diff --git a/source/libs/executor/src/streamtimewindowoperator.c b/source/libs/executor/src/streamtimewindowoperator.c index be27f277c0..cf539cb4cb 100644 --- a/source/libs/executor/src/streamtimewindowoperator.c +++ b/source/libs/executor/src/streamtimewindowoperator.c @@ -2232,6 +2232,11 @@ int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, SExprSupp* pExpSup, in pSup->pResultRows = tSimpleHashInit(32, hashFn); QUERY_CHECK_NULL(pSup->pResultRows, code, lino, _end, terrno); + // used for backward compatibility of function's result info + pSup->pState->pResultRowStore->resultRowGet = getResultRowFromBuf; + pSup->pState->pResultRowStore->resultRowPut = putResultRowToBuf; + pSup->pState->pExprSupp = pExpSup; + for (int32_t i = 0; i < numOfOutput; ++i) { pExpSup->pCtx[i].saveHandle.pState = pSup->pState; } From ecfa67510c12983056116ef902b60da56ff80641 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Tue, 8 Oct 2024 19:44:56 +0800 Subject: [PATCH 14/72] add serial --- source/libs/stream/src/streamBackendRocksdb.c | 152 +++++++++++++++--- source/libs/stream/src/tstreamFileState.c | 2 +- 2 files changed, 132 insertions(+), 22 deletions(-) diff --git a/source/libs/stream/src/streamBackendRocksdb.c b/source/libs/stream/src/streamBackendRocksdb.c index c88971ab75..4326ac250a 100644 --- a/source/libs/stream/src/streamBackendRocksdb.c +++ b/source/libs/stream/src/streamBackendRocksdb.c @@ -3289,13 +3289,31 @@ int32_t streamStatePut_rocksdb(SStreamState* pState, const SWinKey* key, const v int code = 0; SStateKey sKey = {.key = *key, .opNum = pState->number}; - STREAM_STATE_PUT_ROCKSDB(pState, "state", &sKey, (void*)value, vLen); + char* dst = NULL; + size_t size = 0; + code = (pState->pResultRowStore->resultRowPut)(pState->pExprSupp, value, vLen, &dst, &size); + if (code != 0) { + return code; + } + STREAM_STATE_PUT_ROCKSDB(pState, "state", &sKey, (void*)dst, size); + + taosMemoryFree(dst); return code; } int32_t streamStateGet_rocksdb(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen) 
{ int code = 0; SStateKey sKey = {.key = *key, .opNum = pState->number}; - STREAM_STATE_GET_ROCKSDB(pState, "state", &sKey, pVal, pVLen); + + char* tVal; + size_t tValLen = 0; + STREAM_STATE_GET_ROCKSDB(pState, "state", &sKey, &tVal, &tValLen); + if (code != 0) { + taosMemoryFree(tVal); + return code; + } + + code = (pState->pResultRowStore->resultRowGet)(pState->pExprSupp, tVal, tValLen, (char**)pVal, (size_t*)pVLen); + taosMemoryFree(tVal); return code; } int32_t streamStateDel_rocksdb(SStreamState* pState, const SWinKey* key) { @@ -3541,14 +3559,31 @@ SStreamStateCur* streamStateGetCur_rocksdb(SStreamState* pState, const SWinKey* // func cf int32_t streamStateFuncPut_rocksdb(SStreamState* pState, const STupleKey* key, const void* value, int32_t vLen) { - int code = 0; - STREAM_STATE_PUT_ROCKSDB(pState, "func", key, (void*)value, vLen); + int code = 0; + char* dst = NULL; + size_t size = 0; + code = (pState->pResultRowStore->resultRowPut)(pState->pExprSupp, value, vLen, &dst, &size); + if (code != 0) { + return code; + } + STREAM_STATE_PUT_ROCKSDB(pState, "func", key, (void*)dst, size); + taosMemoryFree(dst); + return code; } int32_t streamStateFuncGet_rocksdb(SStreamState* pState, const STupleKey* key, void** pVal, int32_t* pVLen) { - int code = 0; - STREAM_STATE_GET_ROCKSDB(pState, "func", key, pVal, pVLen); - return 0; + int code = 0; + char* tVal = NULL; + size_t tValLen = 0; + STREAM_STATE_GET_ROCKSDB(pState, "func", key, tVal, &tValLen); + if (code != 0) { + taosMemoryFree(tVal); + return code; + } + code = (pState->pResultRowStore->resultRowGet)(pState->pExprSupp, tVal, tValLen, (char**)pVal, (size_t*)pVLen); + + taosMemoryFree(tVal); + return code; } int32_t streamStateFuncDel_rocksdb(SStreamState* pState, const STupleKey* key) { int code = 0; @@ -3563,7 +3598,15 @@ int32_t streamStateSessionPut_rocksdb(SStreamState* pState, const SSessionKey* k if (value == NULL || vLen == 0) { stError("streamStateSessionPut_rocksdb val: %p, len: %d", value, vLen); } - STREAM_STATE_PUT_ROCKSDB(pState, "sess", &sKey, value, vLen); + char* dst = NULL; + size_t size = 0; + code = (pState->pResultRowStore->resultRowPut)(pState->pExprSupp, value, vLen, &dst, &size); + if (code != 0) { + return code; + } + STREAM_STATE_PUT_ROCKSDB(pState, "sess", &sKey, dst, size); + taosMemoryFree(dst); + return code; } int32_t streamStateSessionGet_rocksdb(SStreamState* pState, SSessionKey* key, void** pVal, int32_t* pVLen) { @@ -3861,13 +3904,30 @@ int32_t streamStateSessionGetKVByCur_rocksdb(SStreamStateCur* pCur, SSessionKey* int32_t streamStateFillPut_rocksdb(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen) { int code = 0; - STREAM_STATE_PUT_ROCKSDB(pState, "fill", key, value, vLen); + char* dst = NULL; + size_t size = 0; + code = (pState->pResultRowStore->resultRowPut)(pState->pExprSupp, value, vLen, &dst, &size); + if (code != 0) { + return code; + } + STREAM_STATE_PUT_ROCKSDB(pState, "fill", key, dst, size); + + taosMemoryFree(dst); return code; } int32_t streamStateFillGet_rocksdb(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen) { int code = 0; - STREAM_STATE_GET_ROCKSDB(pState, "fill", key, pVal, pVLen); + + char* tVal; + size_t tValLen = 0; + STREAM_STATE_GET_ROCKSDB(pState, "fill", key, &tVal, &tValLen); + if (code != 0) { + taosMemoryFree(tVal); + return code; + } + code = (pState->pResultRowStore->resultRowGet)(pState->pExprSupp, tVal, tValLen, (char**)pVal, (size_t*)pVLen); + taosMemoryFree(tVal); return code; } int32_t 
streamStateFillDel_rocksdb(SStreamState* pState, const SWinKey* key) { @@ -4204,21 +4264,44 @@ _end: #ifdef BUILD_NO_CALL // partag cf int32_t streamStatePutParTag_rocksdb(SStreamState* pState, int64_t groupId, const void* tag, int32_t tagLen) { - int code = 0; - STREAM_STATE_PUT_ROCKSDB(pState, "partag", &groupId, tag, tagLen); + int code = 0; + char* dst = NULL; + size_t size = 0; + code = (pState->pResultRowStore->resultRowPut)(pState->pExprSupp, value, vLen, &dst, &size); + if (code != 0) { + return code; + } + STREAM_STATE_PUT_ROCKSDB(pState, "partag", &groupId, dst, size); + taosMemoryFree(dst); return code; } int32_t streamStateGetParTag_rocksdb(SStreamState* pState, int64_t groupId, void** tagVal, int32_t* tagLen) { - int code = 0; - STREAM_STATE_GET_ROCKSDB(pState, "partag", &groupId, tagVal, tagLen); + int code = 0; + char* tVal; + size_t tValLen = 0; + STREAM_STATE_GET_ROCKSDB(pState, "partag", &groupId, &tVal, &tValLen); + if (code != 0) { + taosMemoryFree(tVal); + return code; + } + code = (pState->pResultRowStore->resultRowGet)(pState->pExprSupp, tVal, tValLen, (char**)tagVal, (size_t*)tagLen); + taosMemoryFree(tVal); + return code; } #endif // parname cfg int32_t streamStatePutParName_rocksdb(SStreamState* pState, int64_t groupId, const char tbname[TSDB_TABLE_NAME_LEN]) { - int code = 0; - STREAM_STATE_PUT_ROCKSDB(pState, "parname", &groupId, (char*)tbname, TSDB_TABLE_NAME_LEN); + int code = 0; + char* dst = NULL; + size_t size = 0; + code = (pState->pResultRowStore->resultRowPut)(pState->pExprSupp, tbname, TSDB_TABLE_NAME_LEN, &dst, &size); + if (code != 0) { + return code; + } + STREAM_STATE_PUT_ROCKSDB(pState, "parname", &groupId, (char*)dst, size); + taosMemoryFree(dst); return code; } int32_t streamStateGetParName_rocksdb(SStreamState* pState, int64_t groupId, void** pVal) { @@ -4229,13 +4312,30 @@ int32_t streamStateGetParName_rocksdb(SStreamState* pState, int64_t groupId, voi } int32_t streamDefaultPut_rocksdb(SStreamState* pState, const void* key, void* pVal, int32_t pVLen) { - int code = 0; - STREAM_STATE_PUT_ROCKSDB(pState, "default", key, pVal, pVLen); + int code = 0; + char* dst = NULL; + size_t size = 0; + code = (pState->pResultRowStore->resultRowPut)(pState->pExprSupp, pVal, pVLen, &dst, &size); + if (code != 0) { + return code; + } + STREAM_STATE_PUT_ROCKSDB(pState, "default", key, dst, size); + taosMemoryFree(dst); return code; } int32_t streamDefaultGet_rocksdb(SStreamState* pState, const void* key, void** pVal, int32_t* pVLen) { - int code = 0; + int code = 0; + char* tVal; + size_t tValLen = 0; STREAM_STATE_GET_ROCKSDB(pState, "default", key, pVal, pVLen); + if (code != 0) { + taosMemoryFree(tVal); + return code; + } + + code = (pState->pResultRowStore->resultRowGet)(pState->pExprSupp, tVal, tValLen, (char**)pVal, (size_t*)pVLen); + taosMemoryFree(tVal); + return code; } int32_t streamDefaultDel_rocksdb(SStreamState* pState, const void* key) { @@ -4377,10 +4477,18 @@ int32_t streamStatePutBatch(SStreamState* pState, const char* cfKeyName, rocksdb int32_t streamStatePutBatchOptimize(SStreamState* pState, int32_t cfIdx, rocksdb_writebatch_t* pBatch, void* key, void* val, int32_t vlen, int64_t ttl, void* tmpBuf) { - char buf[128] = {0}; + char buf[128] = {0}; + + char* dst = NULL; + size_t size = 0; + int32_t code = (pState->pResultRowStore->resultRowPut)(pState->pExprSupp, val, vlen, &dst, &size); + if (code != 0) { + return code; + } + int32_t klen = ginitDict[cfIdx].enFunc((void*)key, buf); char* ttlV = tmpBuf; - int32_t ttlVLen = 
ginitDict[cfIdx].enValueFunc(val, vlen, ttl, &ttlV); + int32_t ttlVLen = ginitDict[cfIdx].enValueFunc(dst, size, ttl, &ttlV); STaskDbWrapper* wrapper = pState->pTdbState->pOwner->pBackend; @@ -4389,6 +4497,8 @@ int32_t streamStatePutBatchOptimize(SStreamState* pState, int32_t cfIdx, rocksdb rocksdb_column_family_handle_t* pCf = wrapper->pCf[ginitDict[cfIdx].idx]; rocksdb_writebatch_put_cf((rocksdb_writebatch_t*)pBatch, pCf, buf, (size_t)klen, ttlV, (size_t)ttlVLen); + taosMemoryFree(dst); + if (tmpBuf == NULL) { taosMemoryFree(ttlV); } diff --git a/source/libs/stream/src/tstreamFileState.c b/source/libs/stream/src/tstreamFileState.c index cf5f1b2b91..7237f23671 100644 --- a/source/libs/stream/src/tstreamFileState.c +++ b/source/libs/stream/src/tstreamFileState.c @@ -698,7 +698,7 @@ void flushSnapshot(SStreamFileState* pFileState, SStreamSnapshot* pSnapshot, boo int idx = streamStateGetCfIdx(pFileState->pFileStore, pFileState->cfName); - int32_t len = pFileState->rowSize + sizeof(uint64_t) + sizeof(int32_t) + 64; + int32_t len = (pFileState->rowSize + sizeof(uint64_t) + sizeof(int32_t) + 64) * 2; char* buf = taosMemoryCalloc(1, len); if (!buf) { code = terrno; From 06121e6c9d9c13b28fdfb86dc89d71043096c9f8 Mon Sep 17 00:00:00 2001 From: Jing Sima Date: Thu, 10 Oct 2024 09:04:44 +0800 Subject: [PATCH 15/72] fix:[TD-32184] fix compile error. --- include/libs/function/function.h | 2 +- source/libs/executor/src/executil.c | 24 ++++--- .../executor/src/streamtimewindowoperator.c | 9 ++- source/libs/stream/src/streamBackendRocksdb.c | 70 ++++--------------- source/libs/stream/src/streamState.c | 3 + 5 files changed, 39 insertions(+), 69 deletions(-) diff --git a/include/libs/function/function.h b/include/libs/function/function.h index c66d74a905..51d9e752a4 100644 --- a/include/libs/function/function.h +++ b/include/libs/function/function.h @@ -185,7 +185,7 @@ struct SStreamState { int64_t streamBackendRid; int8_t dump; int32_t tsIndex; - SResultRowStore *pResultRowStore; + SResultRowStore pResultRowStore; struct SExprSupp *pExprSupp; }; diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index 141d64bfd1..94b89ab64c 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -97,7 +97,7 @@ size_t getResultRowSize(SqlFunctionCtx* pCtx, int32_t numOfOutput) { int32_t getResultRowFromBuf(SExprSupp *pSup, const char* inBuf, size_t inBufSize, char **outBuf, size_t *outBufSize) { SqlFunctionCtx *pCtx = pSup->pCtx; int32_t *offset = pSup->rowEntryInfoOffset; - SResultRow *pResultRow = (SResultRow*)outBuf; + SResultRow *pResultRow = NULL; size_t processedSize = 0; int32_t code = TSDB_CODE_SUCCESS; if (inBuf == NULL) { @@ -112,6 +112,7 @@ int32_t getResultRowFromBuf(SExprSupp *pSup, const char* inBuf, size_t inBufSize qError("failed to allocate memory for output buffer, size:%zu", *outBufSize); return terrno; } + pResultRow = (SResultRow*)*outBuf; (void)memcpy(pResultRow, inBuf, sizeof(SResultRow)); inBuf += sizeof(SResultRow); processedSize += sizeof(SResultRow); @@ -146,7 +147,7 @@ int32_t getResultRowFromBuf(SExprSupp *pSup, const char* inBuf, size_t inBufSize qError("failed to reallocate memory for output buffer, size:%zu", *outBufSize + leftLen); return terrno; } - (void)memcpy(outBuf + processedSize, inBuf, leftLen); + (void)memcpy(*outBuf + processedSize, inBuf, leftLen); inBuf += leftLen; processedSize += leftLen; *outBufSize += leftLen; @@ -182,15 +183,16 @@ int32_t putResultRowToBuf(SExprSupp *pSup, const char* inBuf, size_t 
inBufSize, return terrno; } + char *pBuf = *outBuf; pResultRow->version = FUNCTION_RESULT_INFO_VERSION; - (void)memcpy(outBuf, pResultRow, sizeof(SResultRow)); - outBuf += sizeof(SResultRow); + (void)memcpy(pBuf, pResultRow, sizeof(SResultRow)); + pBuf += sizeof(SResultRow); for (int32_t i = 0; i < pSup->numOfExprs; ++i) { - *(int32_t *) outBuf = offset[i]; - outBuf += sizeof(int32_t); size_t len = sizeof(SResultRowEntryInfo) + pCtx[i].resDataInfo.interBufSize; - (void)memcpy(outBuf, getResultEntryInfo(pResultRow, i, offset), len); - outBuf += len; + *(int32_t *) pBuf = (int32_t)len; + pBuf += sizeof(int32_t); + (void)memcpy(pBuf, getResultEntryInfo(pResultRow, i, offset), len); + pBuf += len; } // mark if col is null for top/bottom result(saveTupleData) @@ -198,13 +200,13 @@ int32_t putResultRowToBuf(SExprSupp *pSup, const char* inBuf, size_t inBufSize, sizeof(SResultRowEntryInfo) + pCtx[pSup->numOfExprs - 1].resDataInfo.interBufSize; - (void)memcpy(outBuf, pos, pSup->numOfExprs * sizeof(bool)); + (void)memcpy(pBuf, pos, pSup->numOfExprs * sizeof(bool)); if (rowSize < inBufSize) { // stream stores extra data after result row size_t leftLen = inBufSize - rowSize; - (void)memcpy(outBuf, inBuf + rowSize, leftLen); - outBuf += leftLen; + (void)memcpy(pBuf, inBuf + rowSize, leftLen); + pBuf += leftLen; } return TSDB_CODE_SUCCESS; } diff --git a/source/libs/executor/src/streamtimewindowoperator.c b/source/libs/executor/src/streamtimewindowoperator.c index cf539cb4cb..fcd58fbe56 100644 --- a/source/libs/executor/src/streamtimewindowoperator.c +++ b/source/libs/executor/src/streamtimewindowoperator.c @@ -2233,8 +2233,8 @@ int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, SExprSupp* pExpSup, in QUERY_CHECK_NULL(pSup->pResultRows, code, lino, _end, terrno); // used for backward compatibility of function's result info - pSup->pState->pResultRowStore->resultRowGet = getResultRowFromBuf; - pSup->pState->pResultRowStore->resultRowPut = putResultRowToBuf; + pSup->pState->pResultRowStore.resultRowGet = getResultRowFromBuf; + pSup->pState->pResultRowStore.resultRowPut = putResultRowToBuf; pSup->pState->pExprSupp = pExpSup; for (int32_t i = 0; i < numOfOutput; ++i) { @@ -5396,6 +5396,11 @@ int32_t createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiNode* &pInfo->pState->pFileState); QUERY_CHECK_CODE(code, lino, _error); + // used for backward compatibility of function's result info + pInfo->pState->pResultRowStore.resultRowGet = getResultRowFromBuf; + pInfo->pState->pResultRowStore.resultRowPut = putResultRowToBuf; + pInfo->pState->pExprSupp = &pOperator->exprSupp; + pInfo->pOperator = pOperator; setOperatorInfo(pOperator, "StreamIntervalOperator", QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL, true, OP_NOT_OPENED, pInfo, pTaskInfo); diff --git a/source/libs/stream/src/streamBackendRocksdb.c b/source/libs/stream/src/streamBackendRocksdb.c index 4326ac250a..8a487ffeae 100644 --- a/source/libs/stream/src/streamBackendRocksdb.c +++ b/source/libs/stream/src/streamBackendRocksdb.c @@ -3291,11 +3291,11 @@ int32_t streamStatePut_rocksdb(SStreamState* pState, const SWinKey* key, const v SStateKey sKey = {.key = *key, .opNum = pState->number}; char* dst = NULL; size_t size = 0; - code = (pState->pResultRowStore->resultRowPut)(pState->pExprSupp, value, vLen, &dst, &size); + code = (pState->pResultRowStore.resultRowPut)(pState->pExprSupp, value, vLen, &dst, &size); if (code != 0) { return code; } - STREAM_STATE_PUT_ROCKSDB(pState, "state", &sKey, (void*)dst, size); + STREAM_STATE_PUT_ROCKSDB(pState, 
"state", &sKey, (void*)dst, (int32_t)size); taosMemoryFree(dst); return code; @@ -3312,7 +3312,7 @@ int32_t streamStateGet_rocksdb(SStreamState* pState, const SWinKey* key, void** return code; } - code = (pState->pResultRowStore->resultRowGet)(pState->pExprSupp, tVal, tValLen, (char**)pVal, (size_t*)pVLen); + code = (pState->pResultRowStore.resultRowGet)(pState->pExprSupp, tVal, tValLen, (char**)pVal, (size_t*)pVLen); taosMemoryFree(tVal); return code; } @@ -3562,11 +3562,11 @@ int32_t streamStateFuncPut_rocksdb(SStreamState* pState, const STupleKey* key, c int code = 0; char* dst = NULL; size_t size = 0; - code = (pState->pResultRowStore->resultRowPut)(pState->pExprSupp, value, vLen, &dst, &size); + code = (pState->pResultRowStore.resultRowPut)(pState->pExprSupp, value, vLen, &dst, &size); if (code != 0) { return code; } - STREAM_STATE_PUT_ROCKSDB(pState, "func", key, (void*)dst, size); + STREAM_STATE_PUT_ROCKSDB(pState, "func", key, (void*)dst, (int32_t)size); taosMemoryFree(dst); return code; @@ -3580,7 +3580,7 @@ int32_t streamStateFuncGet_rocksdb(SStreamState* pState, const STupleKey* key, v taosMemoryFree(tVal); return code; } - code = (pState->pResultRowStore->resultRowGet)(pState->pExprSupp, tVal, tValLen, (char**)pVal, (size_t*)pVLen); + code = (pState->pResultRowStore.resultRowGet)(pState->pExprSupp, tVal, tValLen, (char**)pVal, (size_t*)pVLen); taosMemoryFree(tVal); return code; @@ -3600,11 +3600,11 @@ int32_t streamStateSessionPut_rocksdb(SStreamState* pState, const SSessionKey* k } char* dst = NULL; size_t size = 0; - code = (pState->pResultRowStore->resultRowPut)(pState->pExprSupp, value, vLen, &dst, &size); + code = (pState->pResultRowStore.resultRowPut)(pState->pExprSupp, value, vLen, &dst, &size); if (code != 0) { return code; } - STREAM_STATE_PUT_ROCKSDB(pState, "sess", &sKey, dst, size); + STREAM_STATE_PUT_ROCKSDB(pState, "sess", &sKey, dst, (int32_t)size); taosMemoryFree(dst); return code; @@ -3904,30 +3904,14 @@ int32_t streamStateSessionGetKVByCur_rocksdb(SStreamStateCur* pCur, SSessionKey* int32_t streamStateFillPut_rocksdb(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen) { int code = 0; - char* dst = NULL; - size_t size = 0; - code = (pState->pResultRowStore->resultRowPut)(pState->pExprSupp, value, vLen, &dst, &size); - if (code != 0) { - return code; - } - STREAM_STATE_PUT_ROCKSDB(pState, "fill", key, dst, size); - - taosMemoryFree(dst); + STREAM_STATE_PUT_ROCKSDB(pState, "fill", key, value, vLen); return code; } int32_t streamStateFillGet_rocksdb(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen) { int code = 0; - char* tVal; - size_t tValLen = 0; - STREAM_STATE_GET_ROCKSDB(pState, "fill", key, &tVal, &tValLen); - if (code != 0) { - taosMemoryFree(tVal); - return code; - } - code = (pState->pResultRowStore->resultRowGet)(pState->pExprSupp, tVal, tValLen, (char**)pVal, (size_t*)pVLen); - taosMemoryFree(tVal); + STREAM_STATE_GET_ROCKSDB(pState, "fill", key, pVal, pVLen); return code; } int32_t streamStateFillDel_rocksdb(SStreamState* pState, const SWinKey* key) { @@ -4267,7 +4251,7 @@ int32_t streamStatePutParTag_rocksdb(SStreamState* pState, int64_t groupId, cons int code = 0; char* dst = NULL; size_t size = 0; - code = (pState->pResultRowStore->resultRowPut)(pState->pExprSupp, value, vLen, &dst, &size); + code = (pState->pResultRowStore.resultRowPut)(pState->pExprSupp, value, vLen, &dst, &size); if (code != 0) { return code; } @@ -4285,7 +4269,7 @@ int32_t streamStateGetParTag_rocksdb(SStreamState* pState, int64_t 
groupId, void taosMemoryFree(tVal); return code; } - code = (pState->pResultRowStore->resultRowGet)(pState->pExprSupp, tVal, tValLen, (char**)tagVal, (size_t*)tagLen); + code = (pState->pResultRowStore.resultRowGet)(pState->pExprSupp, tVal, tValLen, (char**)tagVal, (size_t*)tagLen); taosMemoryFree(tVal); return code; @@ -4294,14 +4278,7 @@ int32_t streamStateGetParTag_rocksdb(SStreamState* pState, int64_t groupId, void // parname cfg int32_t streamStatePutParName_rocksdb(SStreamState* pState, int64_t groupId, const char tbname[TSDB_TABLE_NAME_LEN]) { int code = 0; - char* dst = NULL; - size_t size = 0; - code = (pState->pResultRowStore->resultRowPut)(pState->pExprSupp, tbname, TSDB_TABLE_NAME_LEN, &dst, &size); - if (code != 0) { - return code; - } - STREAM_STATE_PUT_ROCKSDB(pState, "parname", &groupId, (char*)dst, size); - taosMemoryFree(dst); + STREAM_STATE_PUT_ROCKSDB(pState, "parname", &groupId, (char*)tbname, TSDB_TABLE_NAME_LEN); return code; } int32_t streamStateGetParName_rocksdb(SStreamState* pState, int64_t groupId, void** pVal) { @@ -4313,29 +4290,12 @@ int32_t streamStateGetParName_rocksdb(SStreamState* pState, int64_t groupId, voi int32_t streamDefaultPut_rocksdb(SStreamState* pState, const void* key, void* pVal, int32_t pVLen) { int code = 0; - char* dst = NULL; - size_t size = 0; - code = (pState->pResultRowStore->resultRowPut)(pState->pExprSupp, pVal, pVLen, &dst, &size); - if (code != 0) { - return code; - } - STREAM_STATE_PUT_ROCKSDB(pState, "default", key, dst, size); - taosMemoryFree(dst); + STREAM_STATE_PUT_ROCKSDB(pState, "default", key, pVal, pVLen); return code; } int32_t streamDefaultGet_rocksdb(SStreamState* pState, const void* key, void** pVal, int32_t* pVLen) { int code = 0; - char* tVal; - size_t tValLen = 0; STREAM_STATE_GET_ROCKSDB(pState, "default", key, pVal, pVLen); - if (code != 0) { - taosMemoryFree(tVal); - return code; - } - - code = (pState->pResultRowStore->resultRowGet)(pState->pExprSupp, tVal, tValLen, (char**)pVal, (size_t*)pVLen); - taosMemoryFree(tVal); - return code; } int32_t streamDefaultDel_rocksdb(SStreamState* pState, const void* key) { @@ -4481,7 +4441,7 @@ int32_t streamStatePutBatchOptimize(SStreamState* pState, int32_t cfIdx, rocksdb char* dst = NULL; size_t size = 0; - int32_t code = (pState->pResultRowStore->resultRowPut)(pState->pExprSupp, val, vlen, &dst, &size); + int32_t code = (pState->pResultRowStore.resultRowPut)(pState->pExprSupp, val, vlen, &dst, &size); if (code != 0) { return code; } diff --git a/source/libs/stream/src/streamState.c b/source/libs/stream/src/streamState.c index 1994c882aa..4c83f1b109 100644 --- a/source/libs/stream/src/streamState.c +++ b/source/libs/stream/src/streamState.c @@ -528,6 +528,9 @@ void streamStateCopyBackend(SStreamState* src, SStreamState* dst) { } dst->dump = 1; dst->pTdbState->pOwner->pBackend = src->pTdbState->pOwner->pBackend; + dst->pResultRowStore.resultRowPut = src->pResultRowStore.resultRowPut; + dst->pResultRowStore.resultRowGet = src->pResultRowStore.resultRowGet; + dst->pExprSupp = src->pExprSupp; return; } SStreamStateCur* createStreamStateCursor() { From 5aeb1ec2ad75de2e08ff6042b61d78c238464fd1 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Thu, 10 Oct 2024 21:10:15 +0800 Subject: [PATCH 16/72] add interface --- source/libs/stream/inc/streamBackendRocksdb.h | 17 +-- source/libs/stream/src/streamBackendRocksdb.c | 102 +++++++++++++----- source/libs/stream/src/streamSessionState.c | 24 +++-- source/libs/stream/src/tstreamFileState.c | 2 +- 4 files changed, 96 insertions(+), 49 
deletions(-) diff --git a/source/libs/stream/inc/streamBackendRocksdb.h b/source/libs/stream/inc/streamBackendRocksdb.h index 567d9de949..c4cf6a47cd 100644 --- a/source/libs/stream/inc/streamBackendRocksdb.h +++ b/source/libs/stream/inc/streamBackendRocksdb.h @@ -80,7 +80,7 @@ typedef struct { TdThreadRwlock chkpDirLock; int64_t dataWritten; - void* pMeta; + void* pMeta; int8_t removeAllFiles; } STaskDbWrapper; @@ -153,7 +153,7 @@ void taskDbUpdateChkpId(void* pTaskDb, int64_t chkpId); void* taskDbAddRef(void* pTaskDb); void taskDbRemoveRef(void* pTaskDb); -void taskDbSetClearFileFlag(void* pTaskDb); +void taskDbSetClearFileFlag(void* pTaskDb); int streamStateOpenBackend(void* backend, SStreamState* pState); void streamStateCloseBackend(SStreamState* pState, bool remove); @@ -191,7 +191,8 @@ SStreamStateCur* streamStateSessionSeekKeyPrev_rocksdb(SStreamState* pState, con SStreamStateCur* streamStateSessionSeekToLast_rocksdb(SStreamState* pState, int64_t groupId); int32_t streamStateSessionCurPrev_rocksdb(SStreamStateCur* pCur); -int32_t streamStateSessionGetKVByCur_rocksdb(SStreamStateCur* pCur, SSessionKey* pKey, void** pVal, int32_t* pVLen); +int32_t streamStateSessionGetKVByCur_rocksdb(SStreamState* pState, SStreamStateCur* pCur, SSessionKey* pKey, + void** pVal, int32_t* pVLen); int32_t streamStateSessionGetKeyByRange_rocksdb(SStreamState* pState, const SSessionKey* key, SSessionKey* curKey); int32_t streamStateSessionAddIfNotExist_rocksdb(SStreamState* pState, SSessionKey* key, TSKEY gap, void** pVal, int32_t* pVLen); @@ -255,11 +256,11 @@ int32_t taskDbDestroySnap(void* arg, SArray* pSnapInfo); int32_t taskDbDoCheckpoint(void* arg, int64_t chkpId, int64_t processId); -int32_t bkdMgtCreate(char* path, SBkdMgt **bm); -int32_t bkdMgtAddChkp(SBkdMgt* bm, char* task, char* path); -int32_t bkdMgtGetDelta(SBkdMgt* bm, char* taskId, int64_t chkpId, SArray* list, char* name); -int32_t bkdMgtDumpTo(SBkdMgt* bm, char* taskId, char* dname); -void bkdMgtDestroy(SBkdMgt* bm); +int32_t bkdMgtCreate(char* path, SBkdMgt** bm); +int32_t bkdMgtAddChkp(SBkdMgt* bm, char* task, char* path); +int32_t bkdMgtGetDelta(SBkdMgt* bm, char* taskId, int64_t chkpId, SArray* list, char* name); +int32_t bkdMgtDumpTo(SBkdMgt* bm, char* taskId, char* dname); +void bkdMgtDestroy(SBkdMgt* bm); int32_t taskDbGenChkpUploadData(void* arg, void* bkdMgt, int64_t chkpId, int8_t type, char** path, SArray* list, const char* id); diff --git a/source/libs/stream/src/streamBackendRocksdb.c b/source/libs/stream/src/streamBackendRocksdb.c index 8a487ffeae..d469580d04 100644 --- a/source/libs/stream/src/streamBackendRocksdb.c +++ b/source/libs/stream/src/streamBackendRocksdb.c @@ -3291,13 +3291,16 @@ int32_t streamStatePut_rocksdb(SStreamState* pState, const SWinKey* key, const v SStateKey sKey = {.key = *key, .opNum = pState->number}; char* dst = NULL; size_t size = 0; - code = (pState->pResultRowStore.resultRowPut)(pState->pExprSupp, value, vLen, &dst, &size); - if (code != 0) { - return code; + if (pState->pResultRowStore.resultRowPut == NULL || pState->pExprSupp == NULL) { + STREAM_STATE_PUT_ROCKSDB(pState, "state", &sKey, (void*)value, (int32_t)vLen); + } else { + code = (pState->pResultRowStore.resultRowPut)(pState->pExprSupp, value, vLen, &dst, &size); + if (code != 0) { + return code; + } + STREAM_STATE_PUT_ROCKSDB(pState, "state", &sKey, (void*)dst, (int32_t)size); + taosMemoryFree(dst); } - STREAM_STATE_PUT_ROCKSDB(pState, "state", &sKey, (void*)dst, (int32_t)size); - - taosMemoryFree(dst); return code; } int32_t 
streamStateGet_rocksdb(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen) { @@ -3311,7 +3314,11 @@ int32_t streamStateGet_rocksdb(SStreamState* pState, const SWinKey* key, void** taosMemoryFree(tVal); return code; } - + if (pState->pResultRowStore.resultRowGet == NULL || pState->pExprSupp == NULL) { + *pVal = tVal; + *pVLen = tValLen; + return code; + } code = (pState->pResultRowStore.resultRowGet)(pState->pExprSupp, tVal, tValLen, (char**)pVal, (size_t*)pVLen); taosMemoryFree(tVal); return code; @@ -3580,6 +3587,13 @@ int32_t streamStateFuncGet_rocksdb(SStreamState* pState, const STupleKey* key, v taosMemoryFree(tVal); return code; } + + if (pState->pResultRowStore.resultRowGet == NULL || pState->pExprSupp == NULL) { + *pVal = tVal; + *pVLen = tValLen; + return code; + } + code = (pState->pResultRowStore.resultRowGet)(pState->pExprSupp, tVal, tValLen, (char**)pVal, (size_t*)pVLen); taosMemoryFree(tVal); @@ -3600,6 +3614,11 @@ int32_t streamStateSessionPut_rocksdb(SStreamState* pState, const SSessionKey* k } char* dst = NULL; size_t size = 0; + if (pState->pResultRowStore.resultRowPut == NULL || pState->pExprSupp == NULL) { + STREAM_STATE_PUT_ROCKSDB(pState, "sess", &sKey, (void*)value, (int32_t)vLen); + return code; + } + code = (pState->pResultRowStore.resultRowPut)(pState->pExprSupp, value, vLen, &dst, &size); if (code != 0) { return code; @@ -3617,7 +3636,7 @@ int32_t streamStateSessionGet_rocksdb(SStreamState* pState, SSessionKey* key, vo void* tmp = NULL; int32_t vLen = 0; - code = streamStateSessionGetKVByCur_rocksdb(pCur, &resKey, &tmp, &vLen); + code = streamStateSessionGetKVByCur_rocksdb(pState, pCur, &resKey, &tmp, &vLen); if (code == 0 && key->win.skey == resKey.win.skey) { *key = resKey; @@ -3856,7 +3875,8 @@ SStreamStateCur* streamStateSessionSeekKeyPrev_rocksdb(SStreamState* pState, con return pCur; } -int32_t streamStateSessionGetKVByCur_rocksdb(SStreamStateCur* pCur, SSessionKey* pKey, void** pVal, int32_t* pVLen) { +int32_t streamStateSessionGetKVByCur_rocksdb(SStreamState* pState, SStreamStateCur* pCur, SSessionKey* pKey, + void** pVal, int32_t* pVLen) { if (!pCur) { return -1; } @@ -3890,13 +3910,27 @@ int32_t streamStateSessionGetKVByCur_rocksdb(SStreamStateCur* pCur, SSessionKey* return -1; } + char* tVal = val; + size_t tVlen = len; + if (pVal != NULL) { - *pVal = (char*)val; + if (pState != NULL && pState->pResultRowStore.resultRowGet != NULL && pState->pExprSupp != NULL) { + int code = (pState->pResultRowStore.resultRowGet)(pState->pExprSupp, val, len, (char**)&tVal, (size_t*)&tVlen); + if (code != 0) { + taosMemoryFree(val); + return code; + } + taosMemoryFree(val); + *pVal = (char*)tVal; + } else { + *pVal = (char*)tVal; + } } else { taosMemoryFree(val); } - if (pVLen != NULL) *pVLen = len; + if (pVLen != NULL) *pVLen = tVlen; + *pKey = pKTmp->key; return 0; } @@ -4085,7 +4119,7 @@ int32_t streamStateSessionGetKeyByRange_rocksdb(SStreamState* pState, const SSes c = stateSessionKeyCmpr(&sKey, sizeof(sKey), &iKey, sizeof(iKey)); SSessionKey resKey = *key; - int32_t code = streamStateSessionGetKVByCur_rocksdb(pCur, &resKey, NULL, 0); + int32_t code = streamStateSessionGetKVByCur_rocksdb(pState, pCur, &resKey, NULL, NULL); if (code == 0 && sessionRangeKeyCmpr(key, &resKey) == 0) { *curKey = resKey; streamStateFreeCur(pCur); @@ -4094,7 +4128,7 @@ int32_t streamStateSessionGetKeyByRange_rocksdb(SStreamState* pState, const SSes if (c > 0) { streamStateCurNext_rocksdb(pCur); - code = streamStateSessionGetKVByCur_rocksdb(pCur, &resKey, NULL, 0); + code 
= streamStateSessionGetKVByCur_rocksdb(pState, pCur, &resKey, NULL, NULL); if (code == 0 && sessionRangeKeyCmpr(key, &resKey) == 0) { *curKey = resKey; streamStateFreeCur(pCur); @@ -4102,7 +4136,7 @@ int32_t streamStateSessionGetKeyByRange_rocksdb(SStreamState* pState, const SSes } } else if (c < 0) { streamStateCurPrev(pState, pCur); - code = streamStateSessionGetKVByCur_rocksdb(pCur, &resKey, NULL, 0); + code = streamStateSessionGetKVByCur_rocksdb(pState, pCur, &resKey, NULL, NULL); if (code == 0 && sessionRangeKeyCmpr(key, &resKey) == 0) { *curKey = resKey; streamStateFreeCur(pCur); @@ -4132,7 +4166,7 @@ int32_t streamStateSessionAddIfNotExist_rocksdb(SStreamState* pState, SSessionKe } SStreamStateCur* pCur = streamStateSessionSeekKeyCurrentPrev_rocksdb(pState, key); - int32_t code = streamStateSessionGetKVByCur_rocksdb(pCur, key, pVal, pVLen); + int32_t code = streamStateSessionGetKVByCur_rocksdb(pState, pCur, key, pVal, pVLen); if (code == 0) { if (sessionRangeKeyCmpr(&searchKey, key) == 0) { @@ -4149,7 +4183,7 @@ int32_t streamStateSessionAddIfNotExist_rocksdb(SStreamState* pState, SSessionKe pCur = streamStateSessionSeekKeyNext_rocksdb(pState, key); } - code = streamStateSessionGetKVByCur_rocksdb(pCur, key, pVal, pVLen); + code = streamStateSessionGetKVByCur_rocksdb(pState, pCur, key, pVal, pVLen); if (code == 0) { if (sessionRangeKeyCmpr(&searchKey, key) == 0) { memcpy(tmp, *pVal, *pVLen); @@ -4176,7 +4210,7 @@ void streamStateSessionClear_rocksdb(SStreamState* pState) { SSessionKey delKey = {0}; void* buf = NULL; int32_t size = 0; - int32_t code = streamStateSessionGetKVByCur_rocksdb(pCur, &delKey, &buf, &size); + int32_t code = streamStateSessionGetKVByCur_rocksdb(pState, pCur, &delKey, &buf, &size); if (code == 0 && size > 0) { memset(buf, 0, size); // refactor later @@ -4204,7 +4238,7 @@ int32_t streamStateStateAddIfNotExist_rocksdb(SStreamState* pState, SSessionKey* } SStreamStateCur* pCur = streamStateSessionSeekKeyCurrentPrev_rocksdb(pState, key); - int32_t code = streamStateSessionGetKVByCur_rocksdb(pCur, key, pVal, pVLen); + int32_t code = streamStateSessionGetKVByCur_rocksdb(pState, pCur, key, pVal, pVLen); if (code == 0) { if (key->win.skey <= tmpKey.win.skey && tmpKey.win.ekey <= key->win.ekey) { memcpy(tmp, *pVal, valSize); @@ -4224,7 +4258,7 @@ int32_t streamStateStateAddIfNotExist_rocksdb(SStreamState* pState, SSessionKey* pCur = streamStateSessionSeekKeyNext_rocksdb(pState, key); } taosMemoryFreeClear(*pVal); - code = streamStateSessionGetKVByCur_rocksdb(pCur, key, pVal, pVLen); + code = streamStateSessionGetKVByCur_rocksdb(pState, pCur, key, pVal, pVLen); if (code == 0) { void* stateKey = (char*)(*pVal) + (valSize - keyDataLen); if (fn(pKeyData, stateKey) == true) { @@ -4251,6 +4285,10 @@ int32_t streamStatePutParTag_rocksdb(SStreamState* pState, int64_t groupId, cons int code = 0; char* dst = NULL; size_t size = 0; + if (pState->pResultRowStore.resultRowPut == NULL || pState->pExprSupp == NULL) { + STREAM_STATE_PUT_ROCKSDB(pState, "partag", &groupId, tag, tagLen); + return code; + } code = (pState->pResultRowStore.resultRowPut)(pState->pExprSupp, value, vLen, &dst, &size); if (code != 0) { return code; @@ -4277,7 +4315,7 @@ int32_t streamStateGetParTag_rocksdb(SStreamState* pState, int64_t groupId, void #endif // parname cfg int32_t streamStatePutParName_rocksdb(SStreamState* pState, int64_t groupId, const char tbname[TSDB_TABLE_NAME_LEN]) { - int code = 0; + int code = 0; STREAM_STATE_PUT_ROCKSDB(pState, "parname", &groupId, (char*)tbname, TSDB_TABLE_NAME_LEN); 
return code; } @@ -4289,12 +4327,12 @@ int32_t streamStateGetParName_rocksdb(SStreamState* pState, int64_t groupId, voi } int32_t streamDefaultPut_rocksdb(SStreamState* pState, const void* key, void* pVal, int32_t pVLen) { - int code = 0; + int code = 0; STREAM_STATE_PUT_ROCKSDB(pState, "default", key, pVal, pVLen); return code; } int32_t streamDefaultGet_rocksdb(SStreamState* pState, const void* key, void** pVal, int32_t* pVLen) { - int code = 0; + int code = 0; STREAM_STATE_GET_ROCKSDB(pState, "default", key, pVal, pVLen); return code; } @@ -4437,15 +4475,21 @@ int32_t streamStatePutBatch(SStreamState* pState, const char* cfKeyName, rocksdb int32_t streamStatePutBatchOptimize(SStreamState* pState, int32_t cfIdx, rocksdb_writebatch_t* pBatch, void* key, void* val, int32_t vlen, int64_t ttl, void* tmpBuf) { - char buf[128] = {0}; + int32_t code = 0; + char buf[128] = {0}; - char* dst = NULL; - size_t size = 0; - int32_t code = (pState->pResultRowStore.resultRowPut)(pState->pExprSupp, val, vlen, &dst, &size); - if (code != 0) { - return code; + char* dst = NULL; + size_t size = 0; + if (pState->pResultRowStore.resultRowPut == NULL || pState->pExprSupp == NULL) { + dst = val; + size = vlen; + return -1; + } else { + code = (pState->pResultRowStore.resultRowPut)(pState->pExprSupp, val, vlen, &dst, &size); + if (code != 0) { + return code; + } } - int32_t klen = ginitDict[cfIdx].enFunc((void*)key, buf); char* ttlV = tmpBuf; int32_t ttlVLen = ginitDict[cfIdx].enValueFunc(dst, size, ttl, &ttlV); diff --git a/source/libs/stream/src/streamSessionState.c b/source/libs/stream/src/streamSessionState.c index 7e3d8d59f9..bb8ea6c03c 100644 --- a/source/libs/stream/src/streamSessionState.c +++ b/source/libs/stream/src/streamSessionState.c @@ -284,7 +284,7 @@ _end: int32_t getSessionRowBuff(SStreamFileState* pFileState, void* pKey, int32_t keyLen, void** pVal, int32_t* pVLen, int32_t* pWinCode) { - SWinKey* pTmpkey = pKey; + SWinKey* pTmpkey = pKey; SSessionKey pWinKey = {.groupId = pTmpkey->groupId, .win.skey = pTmpkey->ts, .win.ekey = pTmpkey->ts}; return getSessionWinResultBuff(pFileState, &pWinKey, 0, pVal, pVLen, pWinCode); } @@ -343,7 +343,8 @@ _end: return code; } -int32_t getSessionFlushedBuff(SStreamFileState* pFileState, SSessionKey* pKey, void** pVal, int32_t* pVLen, int32_t* pWinCode) { +int32_t getSessionFlushedBuff(SStreamFileState* pFileState, SSessionKey* pKey, void** pVal, int32_t* pVLen, + int32_t* pWinCode) { int32_t code = TSDB_CODE_SUCCESS; int32_t lino = 0; SRowBuffPos* pNewPos = getNewRowPosForWrite(pFileState); @@ -353,7 +354,7 @@ int32_t getSessionFlushedBuff(SStreamFileState* pFileState, SSessionKey* pKey, v } pNewPos->needFree = true; pNewPos->beFlushed = true; - void* pBuff = NULL; + void* pBuff = NULL; (*pWinCode) = streamStateSessionGet_rocksdb(getStateFileStore(pFileState), pKey, &pBuff, pVLen); if ((*pWinCode) != TSDB_CODE_SUCCESS) { goto _end; @@ -575,7 +576,7 @@ static void transformCursor(SStreamFileState* pFileState, SStreamStateCur* pCur) static void checkAndTransformCursor(SStreamFileState* pFileState, const uint64_t groupId, SArray* pWinStates, SStreamStateCur** ppCur) { SSessionKey key = {.groupId = groupId}; - int32_t code = streamStateSessionGetKVByCur_rocksdb(*ppCur, &key, NULL, NULL); + int32_t code = streamStateSessionGetKVByCur_rocksdb(NULL, *ppCur, &key, NULL, NULL); if (taosArrayGetSize(pWinStates) > 0 && (code == TSDB_CODE_FAILED || sessionStateKeyCompare(&key, pWinStates, 0) >= 0)) { if (!(*ppCur)) { @@ -653,7 +654,7 @@ SStreamStateCur* 
countWinStateSeekKeyPrev(SStreamFileState* pFileState, const SS SSessionKey key = {0}; void* pVal = NULL; int len = 0; - int32_t code = streamStateSessionGetKVByCur_rocksdb(pCur, &key, &pVal, &len); + int32_t code = streamStateSessionGetKVByCur_rocksdb(NULL, pCur, &key, &pVal, &len); if (code == TSDB_CODE_FAILED) { streamStateFreeCur(pCur); return pBuffCur; @@ -667,7 +668,7 @@ SStreamStateCur* countWinStateSeekKeyPrev(SStreamFileState* pFileState, const SS } streamStateCurPrev(pFileStore, pCur); while (1) { - code = streamStateSessionGetKVByCur_rocksdb(pCur, &key, &pVal, &len); + code = streamStateSessionGetKVByCur_rocksdb(NULL, pCur, &key, &pVal, &len); if (code == TSDB_CODE_FAILED) { streamStateCurNext(pFileStore, pCur); return pCur; @@ -710,7 +711,7 @@ int32_t sessionWinStateGetKVByCur(SStreamStateCur* pCur, SSessionKey* pKey, void *pKey = *(SSessionKey*)(pPos->pKey); } else { void* pData = NULL; - code = streamStateSessionGetKVByCur_rocksdb(pCur, pKey, &pData, pVLen); + code = streamStateSessionGetKVByCur_rocksdb(NULL, pCur, pKey, &pData, pVLen); if (taosArrayGetSize(pWinStates) > 0 && (code == TSDB_CODE_FAILED || sessionStateRangeKeyCompare(pKey, pWinStates, 0) >= 0)) { transformCursor(pCur->pStreamFileState, pCur); @@ -915,7 +916,7 @@ _end: int32_t getCountWinStateFromDisc(SStreamState* pState, SSessionKey* pKey, void** pVal, int32_t* pVLen) { SStreamStateCur* pCur = streamStateSessionSeekKeyCurrentNext_rocksdb(pState, pKey); - int32_t code = streamStateSessionGetKVByCur_rocksdb(pCur, pKey, pVal, pVLen); + int32_t code = streamStateSessionGetKVByCur_rocksdb(pState, pCur, pKey, pVal, pVLen); streamStateFreeCur(pCur); if (code == TSDB_CODE_SUCCESS) { return code; @@ -923,7 +924,7 @@ int32_t getCountWinStateFromDisc(SStreamState* pState, SSessionKey* pKey, void** pCur = streamStateSessionSeekKeyPrev_rocksdb(pState, pKey); } - code = streamStateSessionGetKVByCur_rocksdb(pCur, pKey, pVal, pVLen); + code = streamStateSessionGetKVByCur_rocksdb(pState, pCur, pKey, pVal, pVLen); streamStateFreeCur(pCur); return code; } @@ -1060,7 +1061,8 @@ _end: return code; } -int32_t createCountWinResultBuff(SStreamFileState* pFileState, SSessionKey* pKey, COUNT_TYPE winCount, void** pVal, int32_t* pVLen) { +int32_t createCountWinResultBuff(SStreamFileState* pFileState, SSessionKey* pKey, COUNT_TYPE winCount, void** pVal, + int32_t* pVLen) { SSessionKey* pWinKey = pKey; const TSKEY gap = 0; int32_t code = TSDB_CODE_SUCCESS; @@ -1098,7 +1100,7 @@ int32_t createCountWinResultBuff(SStreamFileState* pFileState, SSessionKey* pKey QUERY_CHECK_CODE(code, lino, _end); } qDebug("===stream===0 get state win:%" PRId64 ",%" PRId64 " from disc, res %d", pWinKey->win.skey, - pWinKey->win.ekey, code_file); + pWinKey->win.ekey, code_file); } } else { code = addNewSessionWindow(pFileState, pWinStates, pWinKey, (SRowBuffPos**)pVal); diff --git a/source/libs/stream/src/tstreamFileState.c b/source/libs/stream/src/tstreamFileState.c index 7237f23671..6a102743cd 100644 --- a/source/libs/stream/src/tstreamFileState.c +++ b/source/libs/stream/src/tstreamFileState.c @@ -849,7 +849,7 @@ int32_t recoverSesssion(SStreamFileState* pFileState, int64_t ckId) { void* pVal = NULL; int32_t vlen = 0; SSessionKey key = {0}; - winRes = streamStateSessionGetKVByCur_rocksdb(pCur, &key, &pVal, &vlen); + winRes = streamStateSessionGetKVByCur_rocksdb(NULL, pCur, &key, &pVal, &vlen); if (winRes != TSDB_CODE_SUCCESS) { break; } From 193220aa88c695d89b43e8bb93ef6dfdd66d4c8a Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Thu, 10 Oct 2024 21:28:13 
+0800 Subject: [PATCH 17/72] add interface --- source/libs/stream/src/streamBackendRocksdb.c | 6 +++++- source/libs/stream/test/backendTest.cpp | 6 +++--- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/source/libs/stream/src/streamBackendRocksdb.c b/source/libs/stream/src/streamBackendRocksdb.c index d469580d04..f91c26638a 100644 --- a/source/libs/stream/src/streamBackendRocksdb.c +++ b/source/libs/stream/src/streamBackendRocksdb.c @@ -3307,7 +3307,7 @@ int32_t streamStateGet_rocksdb(SStreamState* pState, const SWinKey* key, void** int code = 0; SStateKey sKey = {.key = *key, .opNum = pState->number}; - char* tVal; + char* tVal = NULL; size_t tValLen = 0; STREAM_STATE_GET_ROCKSDB(pState, "state", &sKey, &tVal, &tValLen); if (code != 0) { @@ -3569,6 +3569,10 @@ int32_t streamStateFuncPut_rocksdb(SStreamState* pState, const STupleKey* key, c int code = 0; char* dst = NULL; size_t size = 0; + if (pState->pResultRowStore.resultRowPut == NULL || pState->pExprSupp == NULL) { + STREAM_STATE_PUT_ROCKSDB(pState, "func", key, (void*)value, (int32_t)vLen); + return code; + } code = (pState->pResultRowStore.resultRowPut)(pState->pExprSupp, value, vLen, &dst, &size); if (code != 0) { return code; diff --git a/source/libs/stream/test/backendTest.cpp b/source/libs/stream/test/backendTest.cpp index e7e7149882..1518d22fe9 100644 --- a/source/libs/stream/test/backendTest.cpp +++ b/source/libs/stream/test/backendTest.cpp @@ -228,17 +228,17 @@ void *backendOpen() { memset(&key, 0, sizeof(key)); char *val = NULL; int32_t vlen = 0; - code = streamStateSessionGetKVByCur_rocksdb(pCurr, &key, (void **)&val, &vlen); + code = streamStateSessionGetKVByCur_rocksdb(NULL, pCurr, &key, (void **)&val, &vlen); ASSERT(code == 0); pCurr = streamStateSessionSeekKeyPrev_rocksdb(p, &key); - code = streamStateSessionGetKVByCur_rocksdb(pCurr, &key, (void **)&val, &vlen); + code = streamStateSessionGetKVByCur_rocksdb(NULL, pCurr, &key, (void **)&val, &vlen); ASSERT(code == 0); ASSERT(key.groupId == 0 && key.win.ekey == tsArray[tsArray.size() - 2]); pCurr = streamStateSessionSeekKeyNext_rocksdb(p, &key); - code = streamStateSessionGetKVByCur_rocksdb(pCurr, &key, (void **)&val, &vlen); + code = streamStateSessionGetKVByCur_rocksdb(NULL, pCurr, &key, (void **)&val, &vlen); ASSERT(code == 0); ASSERT(vlen == strlen("Value")); ASSERT(key.groupId == 0 && key.win.skey == tsArray[tsArray.size() - 1]); From cf3db4f1a6b911710240d6a59c5790a8a1cc35ae Mon Sep 17 00:00:00 2001 From: Jing Sima Date: Fri, 11 Oct 2024 08:43:05 +0800 Subject: [PATCH 18/72] fix:[TD-32184] fix heap use after free. 
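The diff below keeps the rocksdb put/get paths usable when no result-row converter has been registered on the state: each call first checks pResultRowStore / pExprSupp and, if they are unset, writes or returns the caller's buffer verbatim. What follows is a minimal, self-contained C sketch of that optional-converter fallback; the names row_store_t, row_encode_fn and put_value are invented for illustration and are not the TDengine API.

/* Sketch only: hypothetical names, not the TDengine API. */
#include <stdlib.h>
#include <string.h>

typedef int (*row_encode_fn)(const void *src, size_t srcLen, void **dst, size_t *dstLen);

typedef struct {
  row_encode_fn encode; /* may be NULL when no converter was registered */
} row_store_t;

/* Encode through the registered converter, or fall back to a verbatim copy. */
static int put_value(const row_store_t *store, const void *val, size_t len,
                     void **out, size_t *outLen) {
  if (store == NULL || store->encode == NULL) { /* fallback: store the raw buffer */
    *out = malloc(len);
    if (*out == NULL) return -1;
    memcpy(*out, val, len);
    *outLen = len;
    return 0;
  }
  return store->encode(val, len, out, outLen); /* converted representation */
}

Making the converter optional is what lets state written by call sites that never register an expression context stay readable without a conversion step.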
--- source/libs/executor/src/executil.c | 35 +++++--------- .../executor/src/streamtimewindowoperator.c | 28 +++++++---- source/libs/stream/inc/streamBackendRocksdb.h | 2 +- source/libs/stream/src/streamBackendRocksdb.c | 47 ++++++++++++++++--- source/libs/stream/src/streamSessionState.c | 6 +-- source/libs/stream/src/streamState.c | 2 +- source/libs/stream/src/tstreamFileState.c | 5 +- 7 files changed, 77 insertions(+), 48 deletions(-) diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index 94b89ab64c..fd67468731 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -88,22 +88,20 @@ size_t getResultRowSize(SqlFunctionCtx* pCtx, int32_t numOfOutput) { rowSize += pCtx[i].resDataInfo.interBufSize; } - rowSize += (numOfOutput * sizeof(bool)); - // expand rowSize to mark if col is null for top/bottom result(saveTupleData) return rowSize; } // Convert buf read from rocksdb to result row int32_t getResultRowFromBuf(SExprSupp *pSup, const char* inBuf, size_t inBufSize, char **outBuf, size_t *outBufSize) { + if (inBuf == NULL || pSup == NULL) { + qError("invalid input parameters, inBuf:%p, pSup:%p", inBuf, pSup); + return TSDB_CODE_INVALID_PARA; + } SqlFunctionCtx *pCtx = pSup->pCtx; int32_t *offset = pSup->rowEntryInfoOffset; SResultRow *pResultRow = NULL; size_t processedSize = 0; int32_t code = TSDB_CODE_SUCCESS; - if (inBuf == NULL) { - qError("invalid input buffer, inBuf:%p", inBuf); - return TSDB_CODE_INVALID_PARA; - } // calculate the size of output buffer *outBufSize = getResultRowSize(pCtx, pSup->numOfExprs); @@ -116,6 +114,7 @@ int32_t getResultRowFromBuf(SExprSupp *pSup, const char* inBuf, size_t inBufSize (void)memcpy(pResultRow, inBuf, sizeof(SResultRow)); inBuf += sizeof(SResultRow); processedSize += sizeof(SResultRow); + for (int32_t i = 0; i < pSup->numOfExprs; ++i) { int32_t len = *(int32_t*)inBuf; inBuf += sizeof(int32_t); @@ -132,12 +131,6 @@ int32_t getResultRowFromBuf(SExprSupp *pSup, const char* inBuf, size_t inBufSize inBuf += len; processedSize += len; } - void *pos = getResultEntryInfo(pResultRow, pSup->numOfExprs - 1, offset) + - sizeof(SResultRowEntryInfo) + - pCtx[pSup->numOfExprs - 1].resDataInfo.interBufSize; - (void)memcpy(pos, inBuf, pSup->numOfExprs * sizeof(bool)); - inBuf += pSup->numOfExprs * sizeof(bool); - processedSize += pSup->numOfExprs * sizeof(bool); if (processedSize < inBufSize) { // stream stores extra data after result row @@ -147,7 +140,7 @@ int32_t getResultRowFromBuf(SExprSupp *pSup, const char* inBuf, size_t inBufSize qError("failed to reallocate memory for output buffer, size:%zu", *outBufSize + leftLen); return terrno; } - (void)memcpy(*outBuf + processedSize, inBuf, leftLen); + (void)memcpy(*outBuf + *outBufSize, inBuf, leftLen); inBuf += leftLen; processedSize += leftLen; *outBufSize += leftLen; @@ -157,15 +150,16 @@ int32_t getResultRowFromBuf(SExprSupp *pSup, const char* inBuf, size_t inBufSize // Convert result row to buf for rocksdb int32_t putResultRowToBuf(SExprSupp *pSup, const char* inBuf, size_t inBufSize, char **outBuf, size_t *outBufSize) { + if (pSup == NULL || inBuf == NULL || outBuf == NULL || outBufSize == NULL) { + qError("invalid input parameters, inBuf:%p, pSup:%p, outBufSize:%p, outBuf:%p", inBuf, pSup, outBufSize, outBuf); + return TSDB_CODE_INVALID_PARA; + } + SqlFunctionCtx *pCtx = pSup->pCtx; int32_t *offset = pSup->rowEntryInfoOffset; SResultRow *pResultRow = (SResultRow*)inBuf; size_t rowSize = getResultRowSize(pCtx, pSup->numOfExprs); - if 
(inBuf == NULL) { - qError("invalid input buffer, inBuf:%p", inBuf); - return TSDB_CODE_INVALID_PARA; - } if (rowSize > inBufSize) { qError("invalid input buffer size, rowSize:%zu, inBufSize:%zu", rowSize, inBufSize); return TSDB_CODE_INVALID_PARA; @@ -195,13 +189,6 @@ int32_t putResultRowToBuf(SExprSupp *pSup, const char* inBuf, size_t inBufSize, pBuf += len; } - // mark if col is null for top/bottom result(saveTupleData) - void *pos = getResultEntryInfo(pResultRow, pSup->numOfExprs - 1, offset) + - sizeof(SResultRowEntryInfo) + - pCtx[pSup->numOfExprs - 1].resDataInfo.interBufSize; - - (void)memcpy(pBuf, pos, pSup->numOfExprs * sizeof(bool)); - if (rowSize < inBufSize) { // stream stores extra data after result row size_t leftLen = inBufSize - rowSize; diff --git a/source/libs/executor/src/streamtimewindowoperator.c b/source/libs/executor/src/streamtimewindowoperator.c index fcd58fbe56..fc919dfe5f 100644 --- a/source/libs/executor/src/streamtimewindowoperator.c +++ b/source/libs/executor/src/streamtimewindowoperator.c @@ -2006,6 +2006,12 @@ int32_t createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiN pInfo->stateStore = pTaskInfo->storageAPI.stateStore; int32_t funResSize = getMaxFunResSize(&pOperator->exprSupp, numOfCols); pInfo->pState->pFileState = NULL; + + // used for backward compatibility of function's result info + pInfo->pState->pResultRowStore.resultRowGet = getResultRowFromBuf; + pInfo->pState->pResultRowStore.resultRowPut = putResultRowToBuf; + pInfo->pState->pExprSupp = &pOperator->exprSupp; + code = pAPI->stateStore.streamFileStateInit(tsStreamBufferSize, sizeof(SWinKey), pInfo->aggSup.resultRowSize, funResSize, compareTs, pInfo->pState, pInfo->twAggSup.deleteMark, GET_TASKID(pTaskInfo), @@ -2223,6 +2229,12 @@ int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, SExprSupp* pExpSup, in pSup->stateStore.streamStateSetNumber(pSup->pState, -1, tsIndex); int32_t funResSize = getMaxFunResSize(pExpSup, numOfOutput); pSup->pState->pFileState = NULL; + + // used for backward compatibility of function's result info + pSup->pState->pResultRowStore.resultRowGet = getResultRowFromBuf; + pSup->pState->pResultRowStore.resultRowPut = putResultRowToBuf; + pSup->pState->pExprSupp = pExpSup; + code = pSup->stateStore.streamFileStateInit(tsStreamBufferSize, sizeof(SSessionKey), pSup->resultRowSize, funResSize, sesionTs, pSup->pState, pTwAggSup->deleteMark, taskIdStr, pHandle->checkpointId, STREAM_STATE_BUFF_SORT, &pSup->pState->pFileState); @@ -2232,11 +2244,6 @@ int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, SExprSupp* pExpSup, in pSup->pResultRows = tSimpleHashInit(32, hashFn); QUERY_CHECK_NULL(pSup->pResultRows, code, lino, _end, terrno); - // used for backward compatibility of function's result info - pSup->pState->pResultRowStore.resultRowGet = getResultRowFromBuf; - pSup->pState->pResultRowStore.resultRowPut = putResultRowToBuf; - pSup->pState->pExprSupp = pExpSup; - for (int32_t i = 0; i < numOfOutput; ++i) { pExpSup->pCtx[i].saveHandle.pState = pSup->pState; } @@ -5390,17 +5397,18 @@ int32_t createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pInfo->stateStore = pTaskInfo->storageAPI.stateStore; pInfo->pState->pFileState = NULL; - code = pTaskInfo->storageAPI.stateStore.streamFileStateInit( - tsStreamBufferSize, sizeof(SWinKey), pInfo->aggSup.resultRowSize, funResSize, compareTs, pInfo->pState, - pInfo->twAggSup.deleteMark, GET_TASKID(pTaskInfo), pHandle->checkpointId, STREAM_STATE_BUFF_HASH, - &pInfo->pState->pFileState); - 
QUERY_CHECK_CODE(code, lino, _error); // used for backward compatibility of function's result info pInfo->pState->pResultRowStore.resultRowGet = getResultRowFromBuf; pInfo->pState->pResultRowStore.resultRowPut = putResultRowToBuf; pInfo->pState->pExprSupp = &pOperator->exprSupp; + code = pTaskInfo->storageAPI.stateStore.streamFileStateInit( + tsStreamBufferSize, sizeof(SWinKey), pInfo->aggSup.resultRowSize, funResSize, compareTs, pInfo->pState, + pInfo->twAggSup.deleteMark, GET_TASKID(pTaskInfo), pHandle->checkpointId, STREAM_STATE_BUFF_HASH, + &pInfo->pState->pFileState); + QUERY_CHECK_CODE(code, lino, _error); + pInfo->pOperator = pOperator; setOperatorInfo(pOperator, "StreamIntervalOperator", QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL, true, OP_NOT_OPENED, pInfo, pTaskInfo); diff --git a/source/libs/stream/inc/streamBackendRocksdb.h b/source/libs/stream/inc/streamBackendRocksdb.h index c4cf6a47cd..1e0801fb6b 100644 --- a/source/libs/stream/inc/streamBackendRocksdb.h +++ b/source/libs/stream/inc/streamBackendRocksdb.h @@ -169,7 +169,7 @@ int32_t streamStateGetFirst_rocksdb(SStreamState* pState, SWinKey* key); int32_t streamStateGetGroupKVByCur_rocksdb(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen); int32_t streamStateAddIfNotExist_rocksdb(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen); void streamStateCurPrev_rocksdb(SStreamStateCur* pCur); -int32_t streamStateGetKVByCur_rocksdb(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen); +int32_t streamStateGetKVByCur_rocksdb(SStreamState* pState, SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen); SStreamStateCur* streamStateGetAndCheckCur_rocksdb(SStreamState* pState, SWinKey* key); SStreamStateCur* streamStateSeekKeyNext_rocksdb(SStreamState* pState, const SWinKey* key); SStreamStateCur* streamStateSeekToLast_rocksdb(SStreamState* pState); diff --git a/source/libs/stream/src/streamBackendRocksdb.c b/source/libs/stream/src/streamBackendRocksdb.c index f91c26638a..c0bde10774 100644 --- a/source/libs/stream/src/streamBackendRocksdb.c +++ b/source/libs/stream/src/streamBackendRocksdb.c @@ -3319,7 +3319,9 @@ int32_t streamStateGet_rocksdb(SStreamState* pState, const SWinKey* key, void** *pVLen = tValLen; return code; } - code = (pState->pResultRowStore.resultRowGet)(pState->pExprSupp, tVal, tValLen, (char**)pVal, (size_t*)pVLen); + size_t pValLen = 0; + code = (pState->pResultRowStore.resultRowGet)(pState->pExprSupp, tVal, tValLen, (char**)pVal, &pValLen); + *pVLen = (int32_t)pValLen; taosMemoryFree(tVal); return code; } @@ -3376,7 +3378,7 @@ int32_t streamStateGetFirst_rocksdb(SStreamState* pState, SWinKey* key) { } SStreamStateCur* pCur = streamStateSeekKeyNext_rocksdb(pState, &tmp); - code = streamStateGetKVByCur_rocksdb(pCur, key, NULL, 0); + code = streamStateGetKVByCur_rocksdb(pState, pCur, key, NULL, 0); if (code != 0) { return code; } @@ -3420,7 +3422,8 @@ void streamStateCurPrev_rocksdb(SStreamStateCur* pCur) { rocksdb_iter_prev(pCur->iter); } } -int32_t streamStateGetKVByCur_rocksdb(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen) { +int32_t streamStateGetKVByCur_rocksdb(SStreamState* pState, SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, + int32_t* pVLen) { if (!pCur) return -1; SStateKey tkey; SStateKey* pKtmp = &tkey; @@ -3436,7 +3439,35 @@ int32_t streamStateGetKVByCur_rocksdb(SStreamStateCur* pCur, SWinKey* pKey, cons if (pVLen != NULL) { size_t vlen = 0; const char* valStr = rocksdb_iter_value(pCur->iter, 
&vlen); - *pVLen = valueDecode((void*)valStr, vlen, NULL, (char**)pVal); + char* val = NULL; + int32_t len = valueDecode((void*)valStr, vlen, NULL, (char**)val); + if (len <= 0) { + taosMemoryFree(val); + return -1; + } + + char* tVal = val; + size_t tVlen = len; + + if (pVal != NULL) { + if (pState != NULL && pState->pResultRowStore.resultRowGet != NULL && pState->pExprSupp != NULL) { + int code = + (pState->pResultRowStore.resultRowGet)(pState->pExprSupp, val, len, (char**)&tVal, (size_t*)&tVlen); + if (code != 0) { + taosMemoryFree(val); + return code; + } + taosMemoryFree(val); + *pVal = (char*)tVal; + } else { + stInfo("streamStateGetKVByCur_rocksdb, pState = %p, pResultRowStore = %p, pExprSupp = %p", pState, + pState->pResultRowStore.resultRowGet, pState->pExprSupp); + *pVal = (char*)tVal; + } + } else { + taosMemoryFree(val); + } + *pVLen = (int32_t)tVlen; } *pKey = pKtmp->key; @@ -3598,7 +3629,9 @@ int32_t streamStateFuncGet_rocksdb(SStreamState* pState, const STupleKey* key, v return code; } - code = (pState->pResultRowStore.resultRowGet)(pState->pExprSupp, tVal, tValLen, (char**)pVal, (size_t*)pVLen); + size_t pValLen = 0; + code = (pState->pResultRowStore.resultRowGet)(pState->pExprSupp, tVal, tValLen, (char**)pVal, &pValLen); + *pVLen = (int32_t)pValLen; taosMemoryFree(tVal); return code; @@ -3933,7 +3966,7 @@ int32_t streamStateSessionGetKVByCur_rocksdb(SStreamState* pState, SStreamStateC taosMemoryFree(val); } - if (pVLen != NULL) *pVLen = tVlen; + if (pVLen != NULL) *pVLen = (int32_t)tVlen; *pKey = pKTmp->key; return 0; @@ -4450,6 +4483,7 @@ void streamStateClearBatch(void* pBatch) { rocksdb_writebatch_clear((rocksdb_ void streamStateDestroyBatch(void* pBatch) { rocksdb_writebatch_destroy((rocksdb_writebatch_t*)pBatch); } int32_t streamStatePutBatch(SStreamState* pState, const char* cfKeyName, rocksdb_writebatch_t* pBatch, void* key, void* val, int32_t vlen, int64_t ttl) { + int32_t code = 0; STaskDbWrapper* wrapper = pState->pTdbState->pOwner->pBackend; TAOS_UNUSED(atomic_add_fetch_64(&wrapper->dataWritten, 1)); @@ -4487,7 +4521,6 @@ int32_t streamStatePutBatchOptimize(SStreamState* pState, int32_t cfIdx, rocksdb if (pState->pResultRowStore.resultRowPut == NULL || pState->pExprSupp == NULL) { dst = val; size = vlen; - return -1; } else { code = (pState->pResultRowStore.resultRowPut)(pState->pExprSupp, val, vlen, &dst, &size); if (code != 0) { diff --git a/source/libs/stream/src/streamSessionState.c b/source/libs/stream/src/streamSessionState.c index bb8ea6c03c..536636533f 100644 --- a/source/libs/stream/src/streamSessionState.c +++ b/source/libs/stream/src/streamSessionState.c @@ -576,7 +576,7 @@ static void transformCursor(SStreamFileState* pFileState, SStreamStateCur* pCur) static void checkAndTransformCursor(SStreamFileState* pFileState, const uint64_t groupId, SArray* pWinStates, SStreamStateCur** ppCur) { SSessionKey key = {.groupId = groupId}; - int32_t code = streamStateSessionGetKVByCur_rocksdb(NULL, *ppCur, &key, NULL, NULL); + int32_t code = streamStateSessionGetKVByCur_rocksdb(getStateFileStore(pFileState), *ppCur, &key, NULL, NULL); if (taosArrayGetSize(pWinStates) > 0 && (code == TSDB_CODE_FAILED || sessionStateKeyCompare(&key, pWinStates, 0) >= 0)) { if (!(*ppCur)) { @@ -654,7 +654,7 @@ SStreamStateCur* countWinStateSeekKeyPrev(SStreamFileState* pFileState, const SS SSessionKey key = {0}; void* pVal = NULL; int len = 0; - int32_t code = streamStateSessionGetKVByCur_rocksdb(NULL, pCur, &key, &pVal, &len); + int32_t code = 
streamStateSessionGetKVByCur_rocksdb(getStateFileStore(pFileState), pCur, &key, &pVal, &len); if (code == TSDB_CODE_FAILED) { streamStateFreeCur(pCur); return pBuffCur; @@ -711,7 +711,7 @@ int32_t sessionWinStateGetKVByCur(SStreamStateCur* pCur, SSessionKey* pKey, void *pKey = *(SSessionKey*)(pPos->pKey); } else { void* pData = NULL; - code = streamStateSessionGetKVByCur_rocksdb(NULL, pCur, pKey, &pData, pVLen); + code = streamStateSessionGetKVByCur_rocksdb(getStateFileStore(pCur->pStreamFileState), pCur, pKey, &pData, pVLen); if (taosArrayGetSize(pWinStates) > 0 && (code == TSDB_CODE_FAILED || sessionStateRangeKeyCompare(pKey, pWinStates, 0) >= 0)) { transformCursor(pCur->pStreamFileState, pCur); diff --git a/source/libs/stream/src/streamState.c b/source/libs/stream/src/streamState.c index 4c83f1b109..2e6a724912 100644 --- a/source/libs/stream/src/streamState.c +++ b/source/libs/stream/src/streamState.c @@ -302,7 +302,7 @@ SStreamStateCur* streamStateGetAndCheckCur(SStreamState* pState, SWinKey* key) { } int32_t streamStateGetKVByCur(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen) { - return streamStateGetKVByCur_rocksdb(pCur, pKey, pVal, pVLen); + return streamStateGetKVByCur_rocksdb(getStateFileStore(pCur->pStreamFileState), pCur, pKey, pVal, pVLen); } int32_t streamStateFillGetKVByCur(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen) { diff --git a/source/libs/stream/src/tstreamFileState.c b/source/libs/stream/src/tstreamFileState.c index 6a102743cd..424845e4f2 100644 --- a/source/libs/stream/src/tstreamFileState.c +++ b/source/libs/stream/src/tstreamFileState.c @@ -849,7 +849,7 @@ int32_t recoverSesssion(SStreamFileState* pFileState, int64_t ckId) { void* pVal = NULL; int32_t vlen = 0; SSessionKey key = {0}; - winRes = streamStateSessionGetKVByCur_rocksdb(NULL, pCur, &key, &pVal, &vlen); + winRes = streamStateSessionGetKVByCur_rocksdb(getStateFileStore(pFileState), pCur, &key, &pVal, &vlen); if (winRes != TSDB_CODE_SUCCESS) { break; } @@ -903,7 +903,7 @@ int32_t recoverSnapshot(SStreamFileState* pFileState, int64_t ckId) { QUERY_CHECK_CODE(code, lino, _end); } - winCode = streamStateGetKVByCur_rocksdb(pCur, pNewPos->pKey, (const void**)&pVal, &vlen); + winCode = streamStateGetKVByCur_rocksdb(getStateFileStore(pFileState), pCur, pNewPos->pKey, (const void**)&pVal, &vlen); if (winCode != TSDB_CODE_SUCCESS || pFileState->getTs(pNewPos->pKey) < pFileState->flushMark) { destroyRowBuffPos(pNewPos); SListNode* pNode = tdListPopTail(pFileState->usedBuffs); @@ -912,6 +912,7 @@ int32_t recoverSnapshot(SStreamFileState* pFileState, int64_t ckId) { break; } if (vlen != pFileState->rowSize) { + qError("row size mismatch, expect:%d, actual:%d", pFileState->rowSize, vlen); code = TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR; QUERY_CHECK_CODE(code, lino, _end); } From 9bd22e4e6162a308137c2bb373d298b332e7c7bf Mon Sep 17 00:00:00 2001 From: Jing Sima Date: Mon, 14 Oct 2024 14:05:58 +0800 Subject: [PATCH 19/72] fix:[TD-32184] result row do decode only when version is different. 
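The one-line change below gates the per-function decode on the stored result-row version, so rows already serialized with the current FUNCTION_RESULT_INFO_VERSION are used as-is and only rows from an older layout are converted on read. A small, self-contained C sketch of that version check follows, using invented names (CURRENT_VERSION, stored_row_t, load_entry) that do not mirror the real structures.

/* Sketch only: hypothetical types, not the TDengine structures. */
#include <stdint.h>

#define CURRENT_VERSION 1 /* stands in for the current serialization version */

typedef struct {
  int32_t version; /* layout version the row was written with */
} stored_row_t;

typedef int (*decode_fn)(const char *buf, void *entry);

/* Convert only when the stored layout differs from the current one. */
static int load_entry(const stored_row_t *row, const char *buf, void *entry, decode_fn decode) {
  if (row->version != CURRENT_VERSION && decode != NULL) {
    return decode(buf, entry); /* old layout: decode into the in-memory entry */
  }
  return 0; /* same layout: the serialized bytes are already usable */
}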
--- source/libs/executor/src/executil.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index fd67468731..a87f6f2789 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -119,7 +119,7 @@ int32_t getResultRowFromBuf(SExprSupp *pSup, const char* inBuf, size_t inBufSize int32_t len = *(int32_t*)inBuf; inBuf += sizeof(int32_t); processedSize += sizeof(int32_t); - if (pCtx->fpSet.decode) { + if (pResultRow->version != FUNCTION_RESULT_INFO_VERSION && pCtx->fpSet.decode) { code = pCtx->fpSet.decode(&pCtx[i], inBuf, getResultEntryInfo(pResultRow, i, offset), pResultRow->version); if (code != TSDB_CODE_SUCCESS) { qError("failed to decode result row, code:%d", code); From 418319ef29a134003adae60091e8d7018dd38f2e Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Mon, 14 Oct 2024 14:23:25 +0800 Subject: [PATCH 20/72] tetst:modify comments --- tests/army/frame/common.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/army/frame/common.py b/tests/army/frame/common.py index a91866c0e1..b816095817 100644 --- a/tests/army/frame/common.py +++ b/tests/army/frame/common.py @@ -1842,7 +1842,7 @@ class TDCom: def compare_result_files(self, file1, file2): try: - # 使用 subprocess.run 来执行 diff/fc 命令 + # use subprocess.run to execute diff/fc commands # print(file1, file2) if platform.system().lower() != 'windows': cmd='diff' @@ -1850,7 +1850,7 @@ class TDCom: else: cmd='fc' result = subprocess.run([cmd, file1, file2], text=True, capture_output=True) - # 如果输出不为空,则打印差异 + # if result is not empty, print the differences and files name. Otherwise, the files are identical. if result.stdout: tdLog.debug(f"Differences between {file1} and {file2}") tdLog.notice(f"\r\n{result.stdout}") From 8cd92624ae3d49d486b8258b056c365925dc228e Mon Sep 17 00:00:00 2001 From: dmchen Date: Mon, 14 Oct 2024 09:10:06 +0000 Subject: [PATCH 21/72] fix/TD-32547-memleak --- source/dnode/mgmt/exe/dmMain.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/source/dnode/mgmt/exe/dmMain.c b/source/dnode/mgmt/exe/dmMain.c index ddef0537f8..ba162bd84f 100644 --- a/source/dnode/mgmt/exe/dmMain.c +++ b/source/dnode/mgmt/exe/dmMain.c @@ -16,6 +16,7 @@ #define _DEFAULT_SOURCE #include "dmMgmt.h" #include "mnode.h" +#include "osFile.h" #include "tconfig.h" #include "tglobal.h" #include "version.h" @@ -415,6 +416,9 @@ int mainWindows(int argc, char **argv) { return code; } int ret = dmUpdateEncryptKey(global.encryptKey, toLogFile); + if (taosCloseFile(&pFile) != 0) { + encryptError("failed to close file:%p", pFile); + } taosCloseLog(); taosCleanupArgs(); return ret; From c1557c423f72a74e8c9e3ecc3674beb90f6a785d Mon Sep 17 00:00:00 2001 From: dmchen Date: Tue, 15 Oct 2024 01:12:52 +0000 Subject: [PATCH 22/72] fix/TD-32555-reset-actionpos-when-rollbak --- source/dnode/mnode/impl/src/mndTrans.c | 44 +++++++++++++++----------- source/util/src/terror.c | 2 +- 2 files changed, 27 insertions(+), 19 deletions(-) diff --git a/source/dnode/mnode/impl/src/mndTrans.c b/source/dnode/mnode/impl/src/mndTrans.c index fcf7905cd1..657601f5ae 100644 --- a/source/dnode/mnode/impl/src/mndTrans.c +++ b/source/dnode/mnode/impl/src/mndTrans.c @@ -1252,8 +1252,9 @@ int32_t mndTransProcessRsp(SRpcMsg *pRsp) { pAction->errCode = pRsp->code; pTrans->lastErrorNo = pRsp->code; - mInfo("trans:%d, %s:%d response is received, code:0x%x, accept:0x%x retry:0x%x", transId, - mndTransStr(pAction->stage), action, pRsp->code, 
pAction->acceptableCode, pAction->retryCode); + mInfo("trans:%d, %s:%d response is received, received code:0x%x(%s), accept:0x%x(%s) retry:0x%x(%s)", transId, + mndTransStr(pAction->stage), action, pRsp->code, tstrerror(pRsp->code), pAction->acceptableCode, + tstrerror(pAction->acceptableCode), pAction->retryCode, tstrerror(pAction->retryCode)); } else { mInfo("trans:%d, invalid action, index:%d, code:0x%x", transId, action, pRsp->code); } @@ -1469,8 +1470,8 @@ static int32_t mndTransExecuteActions(SMnode *pMnode, STrans *pTrans, SArray *pA static int32_t mndTransExecuteRedoActions(SMnode *pMnode, STrans *pTrans, bool topHalf) { int32_t code = mndTransExecuteActions(pMnode, pTrans, pTrans->redoActions, topHalf); if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS && code != TSDB_CODE_MND_TRANS_CTX_SWITCH) { - mError("trans:%d, failed to execute redoActions since:%s, code:0x%x, topHalf:%d", pTrans->id, terrstr(), terrno, - topHalf); + mError("trans:%d, failed to execute redoActions since:%s, code:0x%x, topHalf(TransContext):%d", pTrans->id, + terrstr(), terrno, topHalf); } return code; } @@ -1478,7 +1479,8 @@ static int32_t mndTransExecuteRedoActions(SMnode *pMnode, STrans *pTrans, bool t static int32_t mndTransExecuteUndoActions(SMnode *pMnode, STrans *pTrans, bool topHalf) { int32_t code = mndTransExecuteActions(pMnode, pTrans, pTrans->undoActions, topHalf); if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS && code != TSDB_CODE_MND_TRANS_CTX_SWITCH) { - mError("trans:%d, failed to execute undoActions since %s. topHalf:%d", pTrans->id, terrstr(), topHalf); + mError("trans:%d, failed to execute undoActions since %s. topHalf(TransContext):%d", pTrans->id, terrstr(), + topHalf); } return code; } @@ -1486,7 +1488,8 @@ static int32_t mndTransExecuteUndoActions(SMnode *pMnode, STrans *pTrans, bool t static int32_t mndTransExecuteCommitActions(SMnode *pMnode, STrans *pTrans, bool topHalf) { int32_t code = mndTransExecuteActions(pMnode, pTrans, pTrans->commitActions, topHalf); if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS && code != TSDB_CODE_MND_TRANS_CTX_SWITCH) { - mError("trans:%d, failed to execute commitActions since %s. topHalf:%d", pTrans->id, terrstr(), topHalf); + mError("trans:%d, failed to execute commitActions since %s. 
topHalf(TransContext):%d", pTrans->id, terrstr(), + topHalf); } return code; } @@ -1500,11 +1503,15 @@ static int32_t mndTransExecuteActionsSerial(SMnode *pMnode, STrans *pTrans, SArr return code; } - mInfo("trans:%d, execute %d actions serial, current action:%d", pTrans->id, numOfActions, pTrans->actionPos); + mInfo("trans:%d, execute %d actions serial, begin at action:%d, stage:%s", pTrans->id, numOfActions, + pTrans->actionPos, mndTransStr(pTrans->stage)); for (int32_t action = pTrans->actionPos; action < numOfActions; ++action) { STransAction *pAction = taosArrayGet(pActions, action); + mInfo("trans:%d, current action:%d, stage:%s, actionType(0:log,1:msg):%d", pTrans->id, pTrans->actionPos, + mndTransStr(pAction->stage), pAction->actionType); + code = mndTransExecSingleAction(pMnode, pTrans, pAction, topHalf); if (code == 0) { if (pAction->msgSent) { @@ -1536,8 +1543,8 @@ static int32_t mndTransExecuteActionsSerial(SMnode *pMnode, STrans *pTrans, SArr if (mndCannotExecuteTransAction(pMnode, topHalf)) { pTrans->lastErrorNo = code; pTrans->code = code; - mInfo("trans:%d, %s:%d, topHalf:%d, not execute next action, code:%s", pTrans->id, mndTransStr(pAction->stage), - action, topHalf, tstrerror(code)); + mInfo("trans:%d, %s:%d, topHalf(TransContext):%d, not execute next action, code:%s", pTrans->id, + mndTransStr(pAction->stage), action, topHalf, tstrerror(code)); break; } @@ -1561,7 +1568,8 @@ static int32_t mndTransExecuteActionsSerial(SMnode *pMnode, STrans *pTrans, SArr break; } else if (code == pAction->retryCode || code == TSDB_CODE_SYN_PROPOSE_NOT_READY || code == TSDB_CODE_SYN_RESTORING || code == TSDB_CODE_SYN_NOT_LEADER) { - mInfo("trans:%d, %s:%d receive code:0x%x and retry", pTrans->id, mndTransStr(pAction->stage), pAction->id, code); + mInfo("trans:%d, %s:%d receive code:0x%x(%s) and retry", pTrans->id, mndTransStr(pAction->stage), pAction->id, + code, tstrerror(code)); pTrans->lastErrorNo = code; taosMsleep(300); action--; @@ -1570,8 +1578,8 @@ static int32_t mndTransExecuteActionsSerial(SMnode *pMnode, STrans *pTrans, SArr terrno = code; pTrans->lastErrorNo = code; pTrans->code = code; - mInfo("trans:%d, %s:%d receive code:0x%x and wait another schedule, failedTimes:%d", pTrans->id, - mndTransStr(pAction->stage), pAction->id, code, pTrans->failedTimes); + mInfo("trans:%d, %s:%d receive code:0x%x(%s) and wait another schedule, failedTimes:%d", pTrans->id, + mndTransStr(pAction->stage), pAction->id, code, tstrerror(code), pTrans->failedTimes); break; } } @@ -1647,8 +1655,8 @@ static bool mndTransPerformRedoActionStage(SMnode *pMnode, STrans *pTrans, bool } else { continueExec = false; } - mInfo("trans:%d, cannot execute redo action stage, topHalf:%d, continueExec:%d, code:%s", pTrans->id, topHalf, - continueExec, tstrerror(code)); + mInfo("trans:%d, cannot execute redo action stage, topHalf(TransContext):%d, continueExec:%d, code:%s", pTrans->id, + topHalf, continueExec, tstrerror(code)); return continueExec; } @@ -1680,7 +1688,9 @@ static bool mndTransPerformRedoActionStage(SMnode *pMnode, STrans *pTrans, bool } pTrans->stage = TRN_STAGE_ROLLBACK; - mError("trans:%d, stage from redoAction to rollback since %s", pTrans->id, terrstr()); + pTrans->actionPos = 0; + mError("trans:%d, stage from redoAction to rollback since %s, and set actionPos to %d", pTrans->id, terrstr(), + pTrans->actionPos); continueExec = true; } else { mError("trans:%d, stage keep on redoAction since %s, failedTimes:%d", pTrans->id, terrstr(), pTrans->failedTimes); @@ -1773,8 +1783,6 @@ static bool 
mndTransPerformRollbackStage(SMnode *pMnode, STrans *pTrans, bool to if (code == 0) { pTrans->stage = TRN_STAGE_UNDO_ACTION; - pTrans->actionPos = 0; - mInfo("trans:%d, stage from rollback to undoAction, actionPos:%d", pTrans->id, pTrans->actionPos); continueExec = true; } else { pTrans->failedTimes++; @@ -1829,7 +1837,7 @@ void mndTransExecuteImp(SMnode *pMnode, STrans *pTrans, bool topHalf) { bool continueExec = true; while (continueExec) { - mInfo("trans:%d, continue to execute, stage:%s createTime:%" PRId64 " topHalf:%d", pTrans->id, + mInfo("trans:%d, continue to execute, stage:%s createTime:%" PRId64 " topHalf(TransContext):%d", pTrans->id, mndTransStr(pTrans->stage), pTrans->createdTime, topHalf); pTrans->lastExecTime = taosGetTimestampMs(); switch (pTrans->stage) { diff --git a/source/util/src/terror.c b/source/util/src/terror.c index 1c94a5f2e4..35f0bc73eb 100644 --- a/source/util/src/terror.c +++ b/source/util/src/terror.c @@ -314,7 +314,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_TRANS_CLOG_IS_NULL, "Transaction commitlog TAOS_DEFINE_ERROR(TSDB_CODE_MND_TRANS_NETWORK_UNAVAILL, "Unable to establish connection While execute transaction and will continue in the background") TAOS_DEFINE_ERROR(TSDB_CODE_MND_LAST_TRANS_NOT_FINISHED, "Last Transaction not finished") TAOS_DEFINE_ERROR(TSDB_CODE_MND_TRANS_SYNC_TIMEOUT, "Sync timeout While execute transaction and will continue in the background") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_TRANS_CTX_SWITCH, "Transaction context switch") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_TRANS_CTX_SWITCH, "Wrong transaction execution context") TAOS_DEFINE_ERROR(TSDB_CODE_MND_TRANS_CONFLICT_COMPACT, "Transaction not completed due to conflict with compact") TAOS_DEFINE_ERROR(TSDB_CODE_MND_TRANS_UNKNOW_ERROR, "Unknown transaction error") From 8663779def87d90d1702924f5d1c81a34633e73c Mon Sep 17 00:00:00 2001 From: dmchen Date: Tue, 15 Oct 2024 03:56:47 +0000 Subject: [PATCH 23/72] fix/wal-load-file-set --- source/libs/wal/src/walMeta.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/source/libs/wal/src/walMeta.c b/source/libs/wal/src/walMeta.c index 9ade5e5638..cb9f6e2dfe 100644 --- a/source/libs/wal/src/walMeta.c +++ b/source/libs/wal/src/walMeta.c @@ -253,6 +253,7 @@ static int32_t walRebuildFileInfoSet(SArray* metaLogList, SArray* actualLogList) int j = 0; // both of the lists in asc order + /* for (int i = 0; i < actualFileNum; i++) { SWalFileInfo* pLogInfo = taosArrayGet(actualLogList, i); while (j < metaFileNum) { @@ -268,6 +269,7 @@ static int32_t walRebuildFileInfoSet(SArray* metaLogList, SArray* actualLogList) } } } + */ taosArrayClear(metaLogList); @@ -400,6 +402,17 @@ static int32_t walTrimIdxFile(SWal* pWal, int32_t fileIdx) { TAOS_RETURN(TSDB_CODE_SUCCESS); } +void printFileSet(SArray* fileSet) { + int32_t sz = taosArrayGetSize(fileSet); + for (int32_t i = 0; i < sz; i++) { + SWalFileInfo* pFileInfo = taosArrayGet(fileSet, i); + wInfo("firstVer:%" PRId64 ", lastVer:%" PRId64 ", fileSize:%" PRId64 ", syncedOffset:%" PRId64 ", createTs:%" PRId64 + ", closeTs:%" PRId64, + pFileInfo->firstVer, pFileInfo->lastVer, pFileInfo->fileSize, pFileInfo->syncedOffset, pFileInfo->createTs, + pFileInfo->closeTs); + } +} + int32_t walCheckAndRepairMeta(SWal* pWal) { // load log files, get first/snapshot/last version info int32_t code = 0; @@ -460,6 +473,9 @@ int32_t walCheckAndRepairMeta(SWal* pWal) { taosArraySort(actualLog, compareWalFileInfo); + wInfo("vgId:%d, wal path:%s, actual log file num:%" PRId64, pWal->cfg.vgId, pWal->path, 
taosArrayGetSize(actualLog)); + printFileSet(actualLog); + int metaFileNum = taosArrayGetSize(pWal->fileInfoSet); int actualFileNum = taosArrayGetSize(actualLog); int64_t firstVerPrev = pWal->vers.firstVer; @@ -474,6 +490,10 @@ int32_t walCheckAndRepairMeta(SWal* pWal) { TAOS_RETURN(code); } + wInfo("vgId:%d, wal path:%s, meta log file num:%" PRId64, pWal->cfg.vgId, pWal->path, + taosArrayGetSize(pWal->fileInfoSet)); + printFileSet(pWal->fileInfoSet); + int32_t sz = taosArrayGetSize(pWal->fileInfoSet); // scan and determine the lastVer @@ -1124,6 +1144,10 @@ int32_t walLoadMeta(SWal* pWal) { (void)taosCloseFile(&pFile); taosMemoryFree(buf); + wInfo("vgId:%d, load meta file: %s, fileInfoSet size:%" PRId64, pWal->cfg.vgId, fnameStr, + taosArrayGetSize(pWal->fileInfoSet)); + printFileSet(pWal->fileInfoSet); + TAOS_RETURN(code); } From a00dfb52e10760423ae2f0b87fd9f57feb2f3d25 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Tue, 15 Oct 2024 14:00:15 +0800 Subject: [PATCH 24/72] fix:[TS-5528] insert error in sml --- source/client/inc/clientSml.h | 1 + source/client/src/clientSml.c | 8 ++++++-- source/client/src/clientSmlLine.c | 4 ++++ tests/system-test/2-query/sml.py | 4 ++++ utils/test/c/sml_test.c | 32 +++++++++++++++++++++++++++++++ 5 files changed, 47 insertions(+), 2 deletions(-) diff --git a/source/client/inc/clientSml.h b/source/client/inc/clientSml.h index 209c376f30..a6aca2fddf 100644 --- a/source/client/inc/clientSml.h +++ b/source/client/inc/clientSml.h @@ -199,6 +199,7 @@ typedef struct { SArray *preLineTagKV; SArray *maxTagKVs; SArray *maxColKVs; + SArray *escapedStringList; SSmlLineInfo preLine; STableMeta *currSTableMeta; diff --git a/source/client/src/clientSml.c b/source/client/src/clientSml.c index d5cca55701..80f583bbee 100644 --- a/source/client/src/clientSml.c +++ b/source/client/src/clientSml.c @@ -479,6 +479,7 @@ int32_t smlParseEndLine(SSmlHandle *info, SSmlLineInfo *elements, SSmlKv *kvTs) } clearColValArraySml(info->currTableDataCtx->pValues); + taosArrayClearP(info->escapedStringList, taosMemoryFree); if (unlikely(ret != TSDB_CODE_SUCCESS)) { smlBuildInvalidDataMsg(&info->msgBuf, "smlBuildCol error", NULL); return ret; @@ -1608,6 +1609,7 @@ void smlDestroyInfo(SSmlHandle *info) { taosArrayDestroy(info->valueJsonArray); taosArrayDestroyEx(info->preLineTagKV, freeSSmlKv); + taosArrayDestroyP(info->escapedStringList, taosMemoryFree); if (!info->dataFormat) { for (int i = 0; i < info->lineNum; i++) { @@ -1667,8 +1669,9 @@ int32_t smlBuildSmlInfo(TAOS *taos, SSmlHandle **handle) { info->tagJsonArray = taosArrayInit(8, POINTER_BYTES); info->valueJsonArray = taosArrayInit(8, POINTER_BYTES); info->preLineTagKV = taosArrayInit(8, sizeof(SSmlKv)); - - if (info->tagJsonArray == NULL || info->valueJsonArray == NULL || info->preLineTagKV == NULL) { + info->escapedStringList = taosArrayInit(8, POINTER_BYTES); + if (info->tagJsonArray == NULL || info->valueJsonArray == NULL || + info->preLineTagKV == NULL || info->escapedStringList == NULL) { uError("SML:0x%" PRIx64 " failed to allocate memory", info->id); code = terrno; goto FAILED; @@ -1949,6 +1952,7 @@ int32_t smlClearForRerun(SSmlHandle *info) { } } + taosArrayClearP(info->escapedStringList, taosMemoryFree); (void)memset(&info->preLine, 0, sizeof(SSmlLineInfo)); info->currSTableMeta = NULL; info->currTableDataCtx = NULL; diff --git a/source/client/src/clientSmlLine.c b/source/client/src/clientSmlLine.c index e620ca9b0c..fabda47ae1 100644 --- a/source/client/src/clientSmlLine.c +++ b/source/client/src/clientSmlLine.c @@ -451,6 
+451,10 @@ static int32_t smlParseColLine(SSmlHandle *info, char **sql, char *sqlEnd, SSmlL if (info->dataFormat) { bool isAligned = isSmlColAligned(info, cnt, &kv); + if (kv.type == TSDB_DATA_TYPE_BINARY && valueEscaped) { + taosArrayPush(info->escapedStringList, &kv.value); + kv.value = NULL; + } freeSSmlKv(&kv); if(!isAligned){ return TSDB_CODE_SUCCESS; diff --git a/tests/system-test/2-query/sml.py b/tests/system-test/2-query/sml.py index e28f3b1edd..4a8760cd67 100644 --- a/tests/system-test/2-query/sml.py +++ b/tests/system-test/2-query/sml.py @@ -105,6 +105,10 @@ class TDTestCase: tdSql.query(f"select * from ts3724.`stb2.`") tdSql.checkRows(1) + tdSql.query(f"select * from ts5528.device_log_yuelan_cs1") + tdSql.checkRows(2) + tdSql.checkData(0, 1, '{"deviceId":"星宇公司-861701069493741","headers":{"_uid":"4e3599eacd62834995c77b38ad95f88d","creatorId":"1199596756811550720","deviceNmae":"861701069493741","productId":"yuelan","productName":"悦蓝cat1穿戴设备"},"messageType":"REPORT_PROPERTY","properties":{"lat":35.265527067449185,"lng":118.49713144245987,"location":"118.49713144245987,35.265527067449185"},"timestamp":1728719963230}') + tdSql.checkData(1, 1, '{"deviceId":"星宇公司-861701069065507","headers":{"_uid":"9045d6b78b4ffaf1e2d244e912ebbff8","creatorId":"1199596756811550720","deviceNmae":"861701069065507","productId":"yuelan","productName":"悦蓝cat1穿戴设备"},"messageType":"REPORT_PROPERTY","properties":{"lat":36.788241914043425,"lng":119.15042325460891,"location":"119.15042325460891,36.788241914043425"},"timestamp":1728719964105}') # tdSql.query(f"select * from td24559.stb order by _ts") # tdSql.checkRows(4) # tdSql.checkData(0, 2, "POINT (4.343000 89.342000)") diff --git a/utils/test/c/sml_test.c b/utils/test/c/sml_test.c index a3830d2518..4f4f09bd85 100644 --- a/utils/test/c/sml_test.c +++ b/utils/test/c/sml_test.c @@ -2098,12 +2098,44 @@ int sml_td29373_Test() { return code; } +int sml_ts5528_test(){ + TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0); + + TAOS_RES *pRes = taos_query(taos, "drop database if exists ts5528"); + taos_free_result(pRes); + + pRes = taos_query(taos, "create database if not exists ts5528"); + taos_free_result(pRes); + + // check column name duplication + char *sql[] = { + "device_log_yuelan_cs1,deviceId=861701069493741 content=\"{\\\"deviceId\\\":\\\"星宇公司-861701069493741\\\",\\\"headers\\\":{\\\"_uid\\\":\\\"4e3599eacd62834995c77b38ad95f88d\\\",\\\"creatorId\\\":\\\"1199596756811550720\\\",\\\"deviceNmae\\\":\\\"861701069493741\\\",\\\"productId\\\":\\\"yuelan\\\",\\\"productName\\\":\\\"悦蓝cat1穿戴设备\\\"},\\\"messageType\\\":\\\"REPORT_PROPERTY\\\",\\\"properties\\\":{\\\"lat\\\":35.265527067449185,\\\"lng\\\":118.49713144245987,\\\"location\\\":\\\"118.49713144245987,35.265527067449185\\\"},\\\"timestamp\\\":1728719963230}\",createTime=1728719963230i64,id=\"4e3599eacd62834995c77b38ad95f88d\",messageId=\"\",timestamp=1728719963230i64,type=\"reportProperty\" 1728719963230", + "device_log_yuelan_cs1,deviceId=861701069065507 
content=\"{\\\"deviceId\\\":\\\"星宇公司-861701069065507\\\",\\\"headers\\\":{\\\"_uid\\\":\\\"9045d6b78b4ffaf1e2d244e912ebbff8\\\",\\\"creatorId\\\":\\\"1199596756811550720\\\",\\\"deviceNmae\\\":\\\"861701069065507\\\",\\\"productId\\\":\\\"yuelan\\\",\\\"productName\\\":\\\"悦蓝cat1穿戴设备\\\"},\\\"messageType\\\":\\\"REPORT_PROPERTY\\\",\\\"properties\\\":{\\\"lat\\\":36.788241914043425,\\\"lng\\\":119.15042325460891,\\\"location\\\":\\\"119.15042325460891,36.788241914043425\\\"},\\\"timestamp\\\":1728719964105}\",createTime=1728719964105i64,id=\"9045d6b78b4ffaf1e2d244e912ebbff8\",messageId=\"\",timestamp=1728719964105i64,type=\"reportProperty\" 1728719964105", + }; + pRes = taos_query(taos, "use ts5528"); + taos_free_result(pRes); + + for( int i = 0; i < 2; i++){ + int32_t totalRows = 0; + pRes = taos_schemaless_insert_raw(taos, sql[i], strlen(sql[i]), &totalRows, TSDB_SML_LINE_PROTOCOL, + TSDB_SML_TIMESTAMP_MILLI_SECONDS); + taos_free_result(pRes); + int code = taos_errno(pRes); + if (code != 0) { + return code; + } + } + printf("%s result success\n", __FUNCTION__); + return 0; +} int main(int argc, char *argv[]) { if (argc == 2) { taos_options(TSDB_OPTION_CONFIGDIR, argv[1]); } int ret = 0; + ret = sml_ts5528_test(); + ASSERT(!ret); ret = sml_td29691_Test(); ASSERT(ret); ret = sml_td29373_Test(); From 986f92d31766482b6aab5784d94980e71760236f Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Tue, 15 Oct 2024 14:30:36 +0800 Subject: [PATCH 25/72] fix:[TS-5528] insert error in sml --- utils/test/c/sml_test.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/utils/test/c/sml_test.c b/utils/test/c/sml_test.c index 4f4f09bd85..1d8d82ccb9 100644 --- a/utils/test/c/sml_test.c +++ b/utils/test/c/sml_test.c @@ -2119,12 +2119,14 @@ int sml_ts5528_test(){ int32_t totalRows = 0; pRes = taos_schemaless_insert_raw(taos, sql[i], strlen(sql[i]), &totalRows, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_MILLI_SECONDS); - taos_free_result(pRes); int code = taos_errno(pRes); + taos_free_result(pRes); if (code != 0) { + taos_close(taos); return code; } } + taos_close(taos); printf("%s result success\n", __FUNCTION__); return 0; } From f048de5b44c27e1b22c5992dd66fa643913126b4 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Tue, 15 Oct 2024 14:35:58 +0800 Subject: [PATCH 26/72] Revert "stmt2/interlace: separate interlace batch adding into exec2" This reverts commit bcfd74e697a3972acd1d001e482d9def2a01ee74. 
--- source/client/src/clientStmt2.c | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/source/client/src/clientStmt2.c b/source/client/src/clientStmt2.c index b78e0d0f56..0837154fce 100644 --- a/source/client/src/clientStmt2.c +++ b/source/client/src/clientStmt2.c @@ -1200,6 +1200,22 @@ static int stmtAddBatch2(TAOS_STMT2* stmt) { STMT_ERR_RET(stmtSwitchStatus(pStmt, STMT_ADD_BATCH)); + if (pStmt->sql.stbInterlaceMode) { + int64_t startUs2 = taosGetTimestampUs(); + pStmt->stat.addBatchUs += startUs2 - startUs; + + pStmt->sql.siInfo.tableColsReady = false; + + SStmtQNode* param = NULL; + STMT_ERR_RET(stmtAllocQNodeFromBuf(&pStmt->sql.siInfo.tbBuf, (void**)¶m)); + param->restoreTbCols = true; + param->next = NULL; + + stmtEnqueue(pStmt, param); + + return TSDB_CODE_SUCCESS; + } + STMT_ERR_RET(stmtCacheBlock(pStmt)); return TSDB_CODE_SUCCESS; @@ -1611,22 +1627,6 @@ int stmtExec2(TAOS_STMT2* stmt, int* affected_rows) { STMT_ERR_RET(stmtSwitchStatus(pStmt, STMT_EXECUTE)); - if (pStmt->sql.stbInterlaceMode) { - int64_t startUs2 = taosGetTimestampUs(); - pStmt->stat.addBatchUs += startUs2 - startUs; - - pStmt->sql.siInfo.tableColsReady = false; - - SStmtQNode* param = NULL; - STMT_ERR_RET(stmtAllocQNodeFromBuf(&pStmt->sql.siInfo.tbBuf, (void**)¶m)); - param->restoreTbCols = true; - param->next = NULL; - - stmtEnqueue(pStmt, param); - - return TSDB_CODE_SUCCESS; - } - if (STMT_TYPE_QUERY != pStmt->sql.type) { if (pStmt->sql.stbInterlaceMode) { int64_t startTs = taosGetTimestampUs(); From 7348eddc092f9e7f98c7ff449fe0cc443947e534 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Tue, 15 Oct 2024 14:51:07 +0800 Subject: [PATCH 27/72] stmt2/add batch: move interlace add batch into exec2 --- source/client/src/clientStmt2.c | 8 +++++-- source/libs/parser/src/parUtil.c | 40 ++++++++++++++++++-------------- 2 files changed, 28 insertions(+), 20 deletions(-) diff --git a/source/client/src/clientStmt2.c b/source/client/src/clientStmt2.c index 0837154fce..2f046b61d6 100644 --- a/source/client/src/clientStmt2.c +++ b/source/client/src/clientStmt2.c @@ -1419,10 +1419,10 @@ int stmtBindBatch2(TAOS_STMT2* stmt, TAOS_STMT2_BIND* bind, int32_t colIdx) { if (pStmt->sql.stbInterlaceMode) { STMT_ERR_RET(stmtAppendTablePostHandle(pStmt, param)); + } else { + STMT_ERR_RET(stmtAddBatch2(pStmt)); } - STMT_ERR_RET(stmtAddBatch2(pStmt)); - pStmt->stat.bindDataUs4 += taosGetTimestampUs() - startUs4; return TSDB_CODE_SUCCESS; @@ -1625,6 +1625,10 @@ int stmtExec2(TAOS_STMT2* stmt, int* affected_rows) { return pStmt->errCode; } + if (pStmt->sql.stbInterlaceMode) { + STMT_ERR_RET(stmtAddBatch2(pStmt)); + } + STMT_ERR_RET(stmtSwitchStatus(pStmt, STMT_EXECUTE)); if (STMT_TYPE_QUERY != pStmt->sql.type) { diff --git a/source/libs/parser/src/parUtil.c b/source/libs/parser/src/parUtil.c index 98676160cb..9c2977d289 100644 --- a/source/libs/parser/src/parUtil.c +++ b/source/libs/parser/src/parUtil.c @@ -247,7 +247,10 @@ int32_t generateSyntaxErrMsgExt(SMsgBuf* pBuf, int32_t errCode, const char* pFor } int32_t buildInvalidOperationMsg(SMsgBuf* pBuf, const char* msg) { - strncpy(pBuf->buf, msg, pBuf->len); + if (pBuf->buf) { + strncpy(pBuf->buf, msg, pBuf->len); + } + return TSDB_CODE_TSC_INVALID_OPERATION; } @@ -259,7 +262,6 @@ int32_t buildInvalidOperationMsgExt(SMsgBuf* pBuf, const char* pFormat, ...) 
{ return TSDB_CODE_TSC_INVALID_OPERATION; } - int32_t buildSyntaxErrMsg(SMsgBuf* pBuf, const char* additionalInfo, const char* sourceStr) { if (pBuf == NULL) return TSDB_CODE_TSC_SQL_SYNTAX_ERROR; const char* msgFormat1 = "syntax error near \'%s\'"; @@ -328,7 +330,7 @@ STableMeta* tableMetaDup(const STableMeta* pTableMeta) { STableMeta* p = taosMemoryMalloc(size + schemaExtSize); if (NULL == p) return NULL; - memcpy(p, pTableMeta, schemaExtSize+size); + memcpy(p, pTableMeta, schemaExtSize + size); if (hasSchemaExt) { p->schemaExt = (SSchemaExt*)(((char*)p) + size); } else { @@ -453,7 +455,7 @@ int32_t parseJsontoTagData(const char* json, SArray* pTagVals, STag** ppTag, voi // strcpy(val.colName, colName); val.pKey = jsonKey; retCode = taosHashPut(keyHash, jsonKey, keyLen, &keyLen, - CHAR_BYTES); // add key to hash to remove dumplicate, value is useless + CHAR_BYTES); // add key to hash to remove dumplicate, value is useless if (TSDB_CODE_SUCCESS != retCode) { goto end; } @@ -649,7 +651,7 @@ static int32_t buildTableReq(SHashObj* pTablesHash, SArray** pTables) { char* pKey = taosHashGetKey(p, &len); char fullName[TSDB_TABLE_FNAME_LEN] = {0}; strncpy(fullName, pKey, len); - SName name = {0}; + SName name = {0}; int32_t code = tNameFromString(&name, fullName, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE); if (TSDB_CODE_SUCCESS == code) { if (NULL == taosArrayPush(*pTables, &name)) { @@ -816,9 +818,10 @@ int32_t buildCatalogReq(const SParseMetaCache* pMetaCache, SCatalogReq* pCatalog return code; } -int32_t createSelectStmtImpl(bool isDistinct, SNodeList* pProjectionList, SNode* pTable, SNodeList* pHint, SNode** ppSelect) { +int32_t createSelectStmtImpl(bool isDistinct, SNodeList* pProjectionList, SNode* pTable, SNodeList* pHint, + SNode** ppSelect) { SSelectStmt* select = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_SELECT_STMT, (SNode**)&select); + int32_t code = nodesMakeNode(QUERY_NODE_SELECT_STMT, (SNode**)&select); if (NULL == select) { return code; } @@ -861,7 +864,7 @@ int32_t getMetaDataFromHash(const char* pKey, int32_t len, SHashObj* pHash, void static int32_t putTableDataToCache(const SArray* pTableReq, const SArray* pTableData, SHashObj** pTable) { int32_t ntables = taosArrayGetSize(pTableReq); for (int32_t i = 0; i < ntables; ++i) { - char fullName[TSDB_TABLE_FNAME_LEN]; + char fullName[TSDB_TABLE_FNAME_LEN]; int32_t code = tNameExtractFullName(taosArrayGet(pTableReq, i), fullName); if (TSDB_CODE_SUCCESS != code) { return code; @@ -892,7 +895,7 @@ static int32_t putDbTableDataToCache(const SArray* pDbReq, const SArray* pTableD STablesReq* pReq = taosArrayGet(pDbReq, i); int32_t ntables = taosArrayGetSize(pReq->pTables); for (int32_t j = 0; j < ntables; ++j) { - char fullName[TSDB_TABLE_FNAME_LEN]; + char fullName[TSDB_TABLE_FNAME_LEN]; int32_t code = tNameExtractFullName(taosArrayGet(pReq->pTables, j), fullName); if (TSDB_CODE_SUCCESS != code) { return code; @@ -1088,7 +1091,7 @@ int32_t buildTableMetaFromViewMeta(STableMeta** pMeta, SViewMeta* pViewMeta) { } int32_t getViewMetaFromCache(SParseMetaCache* pMetaCache, const SName* pName, STableMeta** pMeta) { - char fullName[TSDB_TABLE_FNAME_LEN]; + char fullName[TSDB_TABLE_FNAME_LEN]; int32_t code = tNameExtractFullName(pName, fullName); if (TSDB_CODE_SUCCESS != code) { return code; @@ -1139,7 +1142,7 @@ int32_t reserveTableVgroupInCacheExt(const SName* pName, SParseMetaCache* pMetaC } int32_t getTableVgroupFromCache(SParseMetaCache* pMetaCache, const SName* pName, SVgroupInfo* pVgroup) { - char fullName[TSDB_TABLE_FNAME_LEN]; + char 
fullName[TSDB_TABLE_FNAME_LEN]; int32_t code = tNameExtractFullName(pName, fullName); if (TSDB_CODE_SUCCESS != code) { return code; @@ -1299,7 +1302,7 @@ int32_t reserveTableCfgInCache(int32_t acctId, const char* pDb, const char* pTab return reserveTableReqInCache(acctId, pDb, pTable, &pMetaCache->pTableCfg); } -int32_t reserveTableTSMAInfoInCache(int32_t acctId, const char *pDb, const char *pTable, SParseMetaCache *pMetaCache) { +int32_t reserveTableTSMAInfoInCache(int32_t acctId, const char* pDb, const char* pTable, SParseMetaCache* pMetaCache) { return reserveTableReqInCache(acctId, pDb, pTable, &pMetaCache->pTableTSMAs); } @@ -1308,9 +1311,10 @@ int32_t reserveTSMAInfoInCache(int32_t acctId, const char* pDb, const char* pTsm } int32_t getTableIndexFromCache(SParseMetaCache* pMetaCache, const SName* pName, SArray** pIndexes) { - char fullName[TSDB_TABLE_FNAME_LEN]; + char fullName[TSDB_TABLE_FNAME_LEN]; int32_t code = tNameExtractFullName(pName, fullName); - if (TSDB_CODE_SUCCESS != code) return code;; + if (TSDB_CODE_SUCCESS != code) return code; + ; SArray* pSmaIndexes = NULL; code = getMetaDataFromHash(fullName, strlen(fullName), pMetaCache->pTableIndex, (void**)&pSmaIndexes); if (TSDB_CODE_SUCCESS == code && NULL != pSmaIndexes) { @@ -1323,7 +1327,7 @@ int32_t getTableIndexFromCache(SParseMetaCache* pMetaCache, const SName* pName, } int32_t getTableTsmasFromCache(SParseMetaCache* pMetaCache, const SName* pTbName, SArray** pTsmas) { - char tbFName[TSDB_TABLE_FNAME_LEN]; + char tbFName[TSDB_TABLE_FNAME_LEN]; int32_t code = tNameExtractFullName(pTbName, tbFName); if (TSDB_CODE_SUCCESS != code) { return code; @@ -1337,7 +1341,7 @@ int32_t getTableTsmasFromCache(SParseMetaCache* pMetaCache, const SName* pTbName } int32_t getTsmaFromCache(SParseMetaCache* pMetaCache, const SName* pTsmaName, STableTSMAInfo** pTsma) { - char tsmaFName[TSDB_TABLE_FNAME_LEN]; + char tsmaFName[TSDB_TABLE_FNAME_LEN]; int32_t code = tNameExtractFullName(pTsmaName, tsmaFName); if (TSDB_CODE_SUCCESS != code) { return code; @@ -1349,7 +1353,7 @@ int32_t getTsmaFromCache(SParseMetaCache* pMetaCache, const SName* pTsmaName, ST return TSDB_CODE_PAR_INTERNAL_ERROR; } *pTsma = taosArrayGetP(pTsmaRsp->pTsmas, 0); - } else if (code == TSDB_CODE_PAR_INTERNAL_ERROR){ + } else if (code == TSDB_CODE_PAR_INTERNAL_ERROR) { code = TSDB_CODE_MND_SMA_NOT_EXIST; } return code; @@ -1410,7 +1414,7 @@ err: } int32_t getTableCfgFromCache(SParseMetaCache* pMetaCache, const SName* pName, STableCfg** pOutput) { - char fullName[TSDB_TABLE_FNAME_LEN]; + char fullName[TSDB_TABLE_FNAME_LEN]; int32_t code = tNameExtractFullName(pName, fullName); if (TSDB_CODE_SUCCESS != code) { return code; From 490466747184737a44688eec38db16fd0b004020 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Tue, 15 Oct 2024 15:19:37 +0800 Subject: [PATCH 28/72] fix:[TS-5528] insert error in sml --- source/client/src/clientSml.c | 5 ++--- source/client/src/clientSmlLine.c | 7 ++++++- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/source/client/src/clientSml.c b/source/client/src/clientSml.c index 80f583bbee..46aaf29b6b 100644 --- a/source/client/src/clientSml.c +++ b/source/client/src/clientSml.c @@ -262,7 +262,7 @@ int32_t smlBuildSuperTableInfo(SSmlHandle *info, SSmlLineInfo *currElement, SSml return TSDB_CODE_SUCCESS; } -bool isSmlColAligned(SSmlHandle *info, int cnt, SSmlKv *kv) { +void isSmlColAligned(SSmlHandle *info, int cnt, SSmlKv *kv) { // cnt begin 0, add ts so + 2 if (unlikely(cnt + 2 > info->currSTableMeta->tableInfo.numOfColumns)) { goto 
END; @@ -288,12 +288,11 @@ bool isSmlColAligned(SSmlHandle *info, int cnt, SSmlKv *kv) { maxKV->length = kv->length; info->needModifySchema = true; } - return true; + return; END: info->dataFormat = false; info->reRun = true; - return false; } bool isSmlTagAligned(SSmlHandle *info, int cnt, SSmlKv *kv) { diff --git a/source/client/src/clientSmlLine.c b/source/client/src/clientSmlLine.c index fabda47ae1..c1f3431698 100644 --- a/source/client/src/clientSmlLine.c +++ b/source/client/src/clientSmlLine.c @@ -452,7 +452,10 @@ static int32_t smlParseColLine(SSmlHandle *info, char **sql, char *sqlEnd, SSmlL if (info->dataFormat) { bool isAligned = isSmlColAligned(info, cnt, &kv); if (kv.type == TSDB_DATA_TYPE_BINARY && valueEscaped) { - taosArrayPush(info->escapedStringList, &kv.value); + if (taosArrayPush(info->escapedStringList, &kv.value) == NULL){ + freeSSmlKv(&kv); + return terrno; + } kv.value = NULL; } freeSSmlKv(&kv); @@ -463,10 +466,12 @@ static int32_t smlParseColLine(SSmlHandle *info, char **sql, char *sqlEnd, SSmlL if (currElement->colArray == NULL) { currElement->colArray = taosArrayInit_s(sizeof(SSmlKv), 1); if (currElement->colArray == NULL) { + freeSSmlKv(&kv); return terrno; } } if (taosArrayPush(currElement->colArray, &kv) == NULL){ // reserve for timestamp + freeSSmlKv(&kv); return terrno; } } From c9f835e60d742e5608b902f11e0e910c4ccd0dd2 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Tue, 15 Oct 2024 15:25:28 +0800 Subject: [PATCH 29/72] fix:[TS-5528] insert error in sml --- source/client/src/clientSml.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/source/client/src/clientSml.c b/source/client/src/clientSml.c index 46aaf29b6b..80f583bbee 100644 --- a/source/client/src/clientSml.c +++ b/source/client/src/clientSml.c @@ -262,7 +262,7 @@ int32_t smlBuildSuperTableInfo(SSmlHandle *info, SSmlLineInfo *currElement, SSml return TSDB_CODE_SUCCESS; } -void isSmlColAligned(SSmlHandle *info, int cnt, SSmlKv *kv) { +bool isSmlColAligned(SSmlHandle *info, int cnt, SSmlKv *kv) { // cnt begin 0, add ts so + 2 if (unlikely(cnt + 2 > info->currSTableMeta->tableInfo.numOfColumns)) { goto END; @@ -288,11 +288,12 @@ void isSmlColAligned(SSmlHandle *info, int cnt, SSmlKv *kv) { maxKV->length = kv->length; info->needModifySchema = true; } - return; + return true; END: info->dataFormat = false; info->reRun = true; + return false; } bool isSmlTagAligned(SSmlHandle *info, int cnt, SSmlKv *kv) { From 056fbe5eb3ad9e92000c8d4e8c903fd4721a1e3f Mon Sep 17 00:00:00 2001 From: Jinqing Kuang Date: Tue, 15 Oct 2024 15:30:04 +0800 Subject: [PATCH 30/72] fix(query)[TD-30667]. 
Fix compilation with AVX instructions on older GCC versions --- include/util/tcompression.h | 2 +- source/util/src/tdecompress.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/include/util/tcompression.h b/include/util/tcompression.h index d32d20b727..1f09b750cb 100644 --- a/include/util/tcompression.h +++ b/include/util/tcompression.h @@ -156,9 +156,9 @@ int32_t getWordLength(char type); int32_t tsDecompressIntImpl_Hw(const char *const input, const int32_t nelements, char *const output, const char type); int32_t tsDecompressFloatImpAvx2(const char *input, int32_t nelements, char *output); int32_t tsDecompressDoubleImpAvx2(const char *input, int32_t nelements, char *output); -void tsDecompressTimestampAvx2(const char *input, int32_t nelements, char *output, bool bigEndian); #endif #ifdef __AVX512VL__ +void tsDecompressTimestampAvx2(const char *input, int32_t nelements, char *output, bool bigEndian); void tsDecompressTimestampAvx512(const char *const input, const int32_t nelements, char *const output, bool bigEndian); #endif diff --git a/source/util/src/tdecompress.c b/source/util/src/tdecompress.c index 60a1f1c938..81223d7311 100644 --- a/source/util/src/tdecompress.c +++ b/source/util/src/tdecompress.c @@ -449,7 +449,9 @@ int32_t tsDecompressDoubleImpAvx2(const char *input, const int32_t nelements, ch } return (int32_t)(out - output); } +#endif +#if __AVX512VL__ // decode two timestamps in one loop. void tsDecompressTimestampAvx2(const char *const input, const int32_t nelements, char *const output, bool bigEndian) { int64_t *ostream = (int64_t *)output; @@ -588,9 +590,7 @@ void tsDecompressTimestampAvx2(const char *const input, const int32_t nelements, } return; } -#endif -#if __AVX512VL__ void tsDecompressTimestampAvx512(const char *const input, const int32_t nelements, char *const output, bool UNUSED_PARAM(bigEndian)) { int64_t *ostream = (int64_t *)output; From ca7f490e6d6fbf07aa400c1c37d292180985924c Mon Sep 17 00:00:00 2001 From: xiao-77 Date: Tue, 15 Oct 2024 16:53:15 +0800 Subject: [PATCH 31/72] fix invaild snapshotVer while repair wal meta file --- include/libs/wal/wal.h | 1 + source/dnode/mnode/impl/src/mndMain.c | 1 + source/dnode/vnode/src/vnd/vnodeCfg.c | 1 + source/dnode/vnode/src/vnd/vnodeCommit.c | 1 + source/libs/wal/src/walMeta.c | 6 ++++++ 5 files changed, 10 insertions(+) diff --git a/include/libs/wal/wal.h b/include/libs/wal/wal.h index 74ab0bf484..f95b3f20ca 100644 --- a/include/libs/wal/wal.h +++ b/include/libs/wal/wal.h @@ -50,6 +50,7 @@ typedef struct { int32_t rollPeriod; // secs int64_t retentionSize; int64_t segSize; + int64_t committed; EWalType level; // wal level int32_t encryptAlgorithm; char encryptKey[ENCRYPT_KEY_LEN + 1]; diff --git a/source/dnode/mnode/impl/src/mndMain.c b/source/dnode/mnode/impl/src/mndMain.c index bee971b966..685ad2b7a5 100644 --- a/source/dnode/mnode/impl/src/mndMain.c +++ b/source/dnode/mnode/impl/src/mndMain.c @@ -515,6 +515,7 @@ static int32_t mndInitWal(SMnode *pMnode) { .fsyncPeriod = 0, .rollPeriod = -1, .segSize = -1, + .committed = -1, .retentionPeriod = 0, .retentionSize = 0, .level = TAOS_WAL_FSYNC, diff --git a/source/dnode/vnode/src/vnd/vnodeCfg.c b/source/dnode/vnode/src/vnd/vnodeCfg.c index d3acea4766..7c789e84ae 100644 --- a/source/dnode/vnode/src/vnd/vnodeCfg.c +++ b/source/dnode/vnode/src/vnd/vnodeCfg.c @@ -45,6 +45,7 @@ const SVnodeCfg vnodeCfgDefault = {.vgId = -1, .retentionPeriod = -1, .rollPeriod = 0, .segSize = 0, + .committed = 0, .retentionSize = -1, .level = TAOS_WAL_WRITE, .clearFiles 
= 0, diff --git a/source/dnode/vnode/src/vnd/vnodeCommit.c b/source/dnode/vnode/src/vnd/vnodeCommit.c index 4a4d305f25..dae2b3a5ec 100644 --- a/source/dnode/vnode/src/vnd/vnodeCommit.c +++ b/source/dnode/vnode/src/vnd/vnodeCommit.c @@ -257,6 +257,7 @@ int vnodeLoadInfo(const char *dir, SVnodeInfo *pInfo) { code = vnodeDecodeInfo(pData, pInfo); TSDB_CHECK_CODE(code, lino, _exit); + pInfo->config.walCfg.committed = pInfo->state.committed; _exit: if (code) { if (pFile) { diff --git a/source/libs/wal/src/walMeta.c b/source/libs/wal/src/walMeta.c index 9ade5e5638..8649581d5d 100644 --- a/source/libs/wal/src/walMeta.c +++ b/source/libs/wal/src/walMeta.c @@ -282,6 +282,12 @@ static int32_t walRebuildFileInfoSet(SArray* metaLogList, SArray* actualLogList) } static void walAlignVersions(SWal* pWal) { + if (pWal->cfg.committed > 0 && pWal->cfg.committed != pWal->vers.snapshotVer) { + wWarn("vgId:%d, snapshotVer:%" PRId64 " in wal is different from commited:%" PRId64 + ". in vnode/mnode. align with it.", + pWal->cfg.vgId, pWal->vers.snapshotVer, pWal->cfg.committed); + pWal->vers.snapshotVer = pWal->cfg.committed; + } if (pWal->vers.firstVer > pWal->vers.snapshotVer + 1) { wWarn("vgId:%d, firstVer:%" PRId64 " is larger than snapshotVer:%" PRId64 " + 1. align with it.", pWal->cfg.vgId, pWal->vers.firstVer, pWal->vers.snapshotVer); From 5c3283a6de8485fba5200f63edaa957c172c54b8 Mon Sep 17 00:00:00 2001 From: xsren <285808407@qq.com> Date: Tue, 15 Oct 2024 17:00:25 +0800 Subject: [PATCH 32/72] fix: not condition --- source/libs/scalar/src/filter.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/source/libs/scalar/src/filter.c b/source/libs/scalar/src/filter.c index e07ef69990..802bec00f8 100644 --- a/source/libs/scalar/src/filter.c +++ b/source/libs/scalar/src/filter.c @@ -4679,6 +4679,9 @@ EDealRes fltReviseRewriter(SNode **pNode, void *pContext) { cell = cell->pNext; } + if (node->condType == LOGIC_COND_TYPE_NOT) { + stat->scalarMode = true; + } return DEAL_RES_CONTINUE; } From e4373116b2e566ff0834b3b65f76abc1ab61abf3 Mon Sep 17 00:00:00 2001 From: dmchen Date: Tue, 15 Oct 2024 09:04:20 +0000 Subject: [PATCH 33/72] fix/wal-load-file-set --- source/libs/wal/src/walMeta.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/source/libs/wal/src/walMeta.c b/source/libs/wal/src/walMeta.c index cb9f6e2dfe..f84496eee2 100644 --- a/source/libs/wal/src/walMeta.c +++ b/source/libs/wal/src/walMeta.c @@ -253,7 +253,6 @@ static int32_t walRebuildFileInfoSet(SArray* metaLogList, SArray* actualLogList) int j = 0; // both of the lists in asc order - /* for (int i = 0; i < actualFileNum; i++) { SWalFileInfo* pLogInfo = taosArrayGet(actualLogList, i); while (j < metaFileNum) { @@ -269,7 +268,6 @@ static int32_t walRebuildFileInfoSet(SArray* metaLogList, SArray* actualLogList) } } } - */ taosArrayClear(metaLogList); @@ -553,6 +551,7 @@ int32_t walCheckAndRepairMeta(SWal* pWal) { // repair ts of files TAOS_CHECK_RETURN(walRepairLogFileTs(pWal, &updateMeta)); + printFileSet(pWal->fileInfoSet); // update meta file if (updateMeta) { TAOS_CHECK_RETURN(walSaveMeta(pWal)); From 47d39c4ab8d5295750992b7373f57bb4943ec4fd Mon Sep 17 00:00:00 2001 From: xiao-77 Date: Tue, 15 Oct 2024 17:23:28 +0800 Subject: [PATCH 34/72] fix wal test in ci --- source/libs/wal/test/walMetaTest.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/source/libs/wal/test/walMetaTest.cpp b/source/libs/wal/test/walMetaTest.cpp index 8bd4de0a89..a0285f1363 100644 --- a/source/libs/wal/test/walMetaTest.cpp +++ 
b/source/libs/wal/test/walMetaTest.cpp @@ -127,6 +127,7 @@ class WalRetentionEnv : public ::testing::Test { SWalCfg cfg; cfg.rollPeriod = -1; cfg.segSize = -1; + cfg.committed =-1; cfg.retentionPeriod = -1; cfg.retentionSize = 0; cfg.rollPeriod = 0; From 39c429182e04a034fbd1f5dd8a95ff88056a79b2 Mon Sep 17 00:00:00 2001 From: dmchen Date: Tue, 15 Oct 2024 10:46:25 +0000 Subject: [PATCH 35/72] fix/wal-load-file-set-fix-case --- source/libs/wal/src/walMeta.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/source/libs/wal/src/walMeta.c b/source/libs/wal/src/walMeta.c index f84496eee2..17830ff200 100644 --- a/source/libs/wal/src/walMeta.c +++ b/source/libs/wal/src/walMeta.c @@ -471,7 +471,8 @@ int32_t walCheckAndRepairMeta(SWal* pWal) { taosArraySort(actualLog, compareWalFileInfo); - wInfo("vgId:%d, wal path:%s, actual log file num:%" PRId64, pWal->cfg.vgId, pWal->path, taosArrayGetSize(actualLog)); + wInfo("vgId:%d, wal path:%s, actual log file num:%d", pWal->cfg.vgId, pWal->path, + (int32_t)taosArrayGetSize(actualLog)); printFileSet(actualLog); int metaFileNum = taosArrayGetSize(pWal->fileInfoSet); @@ -488,8 +489,8 @@ int32_t walCheckAndRepairMeta(SWal* pWal) { TAOS_RETURN(code); } - wInfo("vgId:%d, wal path:%s, meta log file num:%" PRId64, pWal->cfg.vgId, pWal->path, - taosArrayGetSize(pWal->fileInfoSet)); + wInfo("vgId:%d, wal path:%s, meta log file num:%d", pWal->cfg.vgId, pWal->path, + (int32_t)taosArrayGetSize(pWal->fileInfoSet)); printFileSet(pWal->fileInfoSet); int32_t sz = taosArrayGetSize(pWal->fileInfoSet); @@ -1143,8 +1144,8 @@ int32_t walLoadMeta(SWal* pWal) { (void)taosCloseFile(&pFile); taosMemoryFree(buf); - wInfo("vgId:%d, load meta file: %s, fileInfoSet size:%" PRId64, pWal->cfg.vgId, fnameStr, - taosArrayGetSize(pWal->fileInfoSet)); + wInfo("vgId:%d, load meta file: %s, fileInfoSet size:%d", pWal->cfg.vgId, fnameStr, + (int32_t)taosArrayGetSize(pWal->fileInfoSet)); printFileSet(pWal->fileInfoSet); TAOS_RETURN(code); From 200ca2cb10cca5c10f189310e54ee35791f91155 Mon Sep 17 00:00:00 2001 From: xsren <285808407@qq.com> Date: Tue, 15 Oct 2024 19:40:04 +0800 Subject: [PATCH 36/72] not test case --- tests/system-test/2-query/not.py | 133 +++++++++++++++++++++++++++++++ 1 file changed, 133 insertions(+) create mode 100644 tests/system-test/2-query/not.py diff --git a/tests/system-test/2-query/not.py b/tests/system-test/2-query/not.py new file mode 100644 index 0000000000..a0bd1d4e1d --- /dev/null +++ b/tests/system-test/2-query/not.py @@ -0,0 +1,133 @@ +from wsgiref.headers import tspecials +from util.log import * +from util.cases import * +from util.sql import * +from util.common import tdCom +import numpy as np + + +class TDTestCase: + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + self.dbname = "db" + self.rowNum = 10 + self.ts = 1537146000000 + + def notConditionTest(self): + dbname = "nottest" + stbname = "st1" + + tdsql = tdCom.newTdSql() + tdsql.execute(f"create database if not exists {dbname}") + + stype = ["INT", "INT UNSIGNED", "BIGINT", "BIGINT UNSIGNED", "DOUBLE", "FLOAT", "SMALLINT", "SMALLINT UNSIGNED", "TINYINT", "TINYINT UNSIGNED"] + + for type_name in stype: + tdsql.execute(f"drop table if exists {dbname}.{stbname}") + tdsql.execute(f"create table if not exists {dbname}.{stbname} (ts timestamp, v1 {type_name}) tags(t1 {type_name})") + tdsql.execute(f"insert into {dbname}.sub_1 using {dbname}.{stbname} tags(1) 
values({self.ts}, 10)") + tdsql.execute(f"insert into {dbname}.sub_2 using {dbname}.{stbname} tags(2) values({self.ts + 1000}, 20)") + tdsql.execute(f"insert into {dbname}.sub_3 using {dbname}.{stbname} tags(3) values({self.ts + 2000}, 30)") + + # Test case 1: NOT IN + tdsql.query(f"select t1, * from {dbname}.{stbname} where t1 not in (1, 2) order by t1") + tdsql.checkRows(1) + tdsql.checkData(0, 0, 3) + + # Test case 2: NOT BETWEEN + tdsql.query(f"select * from {dbname}.{stbname} where v1 not between 10 and 20 order by t1") + tdsql.checkRows(1) + tdsql.checkData(0, 1, 30) + tdsql.query(f"select * from {dbname}.{stbname} where not(v1 not between 10 and 20) order by t1") + tdsql.checkRows(2) + + # Test case 4: NOT EQUAL + tdsql.query(f"select * from {dbname}.{stbname} where v1 != 20 order by t1") + tdsql.checkRows(2) + tdsql.checkData(0, 1, 10) + tdsql.checkData(1, 1, 30) + + # Test case 8: NOT (v1 < 20 OR v1 > 30) + tdsql.query(f"select * from {dbname}.{stbname} where not (v1 < 20 or v1 > 30) order by t1") + tdsql.checkRows(2) + tdsql.checkData(0, 1, 20) + tdsql.checkData(1, 1, 30) + + tdsql.query(f"select * from {dbname}.{stbname} where not (v1 < 20 or v1 >= 30) order by t1") + tdsql.checkRows(1) + + # Test case 9: NOT (t1 != 1) + tdsql.query(f"select * from {dbname}.{stbname} where not (t1 != 1) order by t1") + tdsql.checkRows(1) + tdsql.checkData(0, 1, 10) + + tdsql.query(f"select * from {dbname}.{stbname} where (t1 != 1) or not (v1 == 20) order by t1") + tdsql.checkRows(3) + tdsql.checkData(0, 1, 10) + tdsql.checkData(1, 1, 20) + tdsql.checkData(2, 1, 30) + + tdsql.query(f"select * from {dbname}.{stbname} where not((t1 != 1) or not (v1 == 20)) order by t1") + tdsql.checkRows(0) + + tdsql.query(f"select * from {dbname}.{stbname} where not (t1 != 1) and not (v1 != 20) order by t1") + tdsql.checkRows(0) + + tdsql.query(f"select * from {dbname}.{stbname} where not(not (t1 != 1) and not (v1 != 20)) order by t1") + tdsql.checkRows(3) + + tdsql.query(f"select * from {dbname}.{stbname} where not (t1 != 1) and not (v1 != 10) order by t1") + tdsql.checkRows(1) + tdsql.checkData(0, 1, 10) + + tdsql.query(f"select * from {dbname}.{stbname} where not (t1 > 2) order by t1") + tdsql.checkRows(2) + tdsql.checkData(0, 1, 10) + tdsql.checkData(1, 1, 20) + + tdsql.query(f"select * from {dbname}.{stbname} where not (t1 == 2) order by t1") + tdsql.checkRows(2) + tdsql.checkData(0, 1, 10) + tdsql.checkData(1, 1, 30) + + tdsql.query(f"select * from {dbname}.{stbname} where not (v1 > 10 and v1 < 30) order by t1") + tdsql.checkRows(2) + tdsql.checkData(0, 1, 10) + tdsql.checkData(1, 1, 30) + + # tdsql.query(f"select * from {dbname}.{stbname} where not(not (v1 < 20 or v1 > 30)) order by t1") + # tdsql.checkRows(1) + # + # tdsql.checkData(0, 1, 20) + # tdsql.query(f"select * from {dbname}.{stbname} where not(not (v1 < 20 or v1 >= 30)) order by t1") + # tdsql.checkRows(2) + # + # tdsql.query(f"select * from {dbname}.{stbname} where not(not (t1 != 1)) order by t1") + # tdsql.checkRows(2) + # + # tdsql.query(f"select * from {dbname}.{stbname} where not(not (t1 > 2)) order by t1") + # tdsql.checkRows(1) + # + # tdsql.query(f"select * from {dbname}.{stbname} where not(not (t1 == 2)) order by t1") + # tdsql.checkRows(1) + # + # tdsql.query(f"select * from {dbname}.{stbname} where not(not (v1 > 10 and v1 < 30)) order by t1") + # tdsql.checkRows(1) + + def run(self): + dbname = "db" + tdSql.prepare() + + self.notConditionTest() + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) 
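+
+# NOTE: the not(not(...)) double-negation checks in notConditionTest are left
+# commented out at this point; they rely on the parser keeping nested NOT
+# logic conditions separate and are enabled once addParamToLogicConditionNode
+# (parAstCreater.c) stops merging LOGIC_COND_TYPE_NOT parameters.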
+ +tdCases.addWindows(__file__, TDTestCase()) + +tdCases.addLinux(__file__, TDTestCase()) From 2b83a20c69008dcbc532e88215dc96a4390e9caa Mon Sep 17 00:00:00 2001 From: Jing Sima Date: Mon, 14 Oct 2024 17:19:29 +0800 Subject: [PATCH 37/72] Revert "fix:[TD-32334] Generate correct time window when using interp with fill next and linear." This reverts commit 77e63d0922f6d230a314d28863744185faab8aa5. --- source/libs/executor/src/timesliceoperator.c | 20 ++-- tests/system-test/2-query/interp.py | 120 +++++++------------ 2 files changed, 50 insertions(+), 90 deletions(-) diff --git a/source/libs/executor/src/timesliceoperator.c b/source/libs/executor/src/timesliceoperator.c index 70bf26405e..2ea300ace8 100644 --- a/source/libs/executor/src/timesliceoperator.c +++ b/source/libs/executor/src/timesliceoperator.c @@ -278,7 +278,7 @@ static bool checkNullRow(SExprSupp* pExprSup, SSDataBlock* pSrcBlock, int32_t in } static bool genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp* pExprSup, SSDataBlock* pResBlock, - SSDataBlock* pSrcBlock, int32_t index, bool beforeTs, SExecTaskInfo* pTaskInfo, bool genAfterBlock) { + SSDataBlock* pSrcBlock, int32_t index, bool beforeTs, SExecTaskInfo* pTaskInfo) { int32_t code = TSDB_CODE_SUCCESS; int32_t lino = 0; int32_t rows = pResBlock->info.rows; @@ -427,7 +427,7 @@ static bool genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp break; } - if (start.key == INT64_MIN || end.key == INT64_MIN || genAfterBlock) { + if (start.key == INT64_MIN || end.key == INT64_MIN) { colDataSetNULL(pDst, rows); break; } @@ -463,13 +463,8 @@ static bool genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp break; } - if (genAfterBlock && rows == 0) { - hasInterp = false; - break; - } - SGroupKeys* pkey = taosArrayGet(pSliceInfo->pNextRow, srcSlot); - if (pkey->isNull == false && !genAfterBlock) { + if (pkey->isNull == false) { code = colDataSetVal(pDst, rows, pkey->pData, false); QUERY_CHECK_CODE(code, lino, _end); } else { @@ -841,7 +836,7 @@ static void doTimesliceImpl(SOperatorInfo* pOperator, STimeSliceOperatorInfo* pS int64_t nextTs = *(int64_t*)colDataGetData(pTsCol, i + 1); if (nextTs > pSliceInfo->current) { while (pSliceInfo->current < nextTs && pSliceInfo->current <= pSliceInfo->win.ekey) { - if (!genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock, pBlock, i, false, pTaskInfo, false) && + if (!genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock, pBlock, i, false, pTaskInfo) && pSliceInfo->fillType == TSDB_FILL_LINEAR) { break; } else { @@ -869,7 +864,7 @@ static void doTimesliceImpl(SOperatorInfo* pOperator, STimeSliceOperatorInfo* pS doKeepLinearInfo(pSliceInfo, pBlock, i); while (pSliceInfo->current < ts && pSliceInfo->current <= pSliceInfo->win.ekey) { - if (!genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock, pBlock, i, true, pTaskInfo, false) && + if (!genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock, pBlock, i, true, pTaskInfo) && pSliceInfo->fillType == TSDB_FILL_LINEAR) { break; } else { @@ -914,12 +909,13 @@ static void genInterpAfterDataBlock(STimeSliceOperatorInfo* pSliceInfo, SOperato SSDataBlock* pResBlock = pSliceInfo->pRes; SInterval* pInterval = &pSliceInfo->interval; - if (pSliceInfo->pPrevGroupKey == NULL) { + if (pSliceInfo->fillType == TSDB_FILL_NEXT || pSliceInfo->fillType == TSDB_FILL_LINEAR || + pSliceInfo->pPrevGroupKey == NULL) { return; } while (pSliceInfo->current <= pSliceInfo->win.ekey) { - (void)genInterpolationResult(pSliceInfo, 
&pOperator->exprSupp, pResBlock, NULL, index, false, pOperator->pTaskInfo, true); + (void)genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock, NULL, index, false, pOperator->pTaskInfo); pSliceInfo->current = taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision); } diff --git a/tests/system-test/2-query/interp.py b/tests/system-test/2-query/interp.py index 3cdf52725a..bcfc389d7b 100644 --- a/tests/system-test/2-query/interp.py +++ b/tests/system-test/2-query/interp.py @@ -907,7 +907,7 @@ class TDTestCase: ## {. . .} tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(next)") - tdSql.checkRows(13) + tdSql.checkRows(12) tdSql.checkData(0, 0, 5) tdSql.checkData(1, 0, 5) tdSql.checkData(2, 0, 10) @@ -920,7 +920,6 @@ class TDTestCase: tdSql.checkData(9, 0, 15) tdSql.checkData(10, 0, 15) tdSql.checkData(11, 0, 15) - tdSql.checkData(12, 0, None) ## {} ... tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:01', '2020-02-01 00:00:04') every(1s) fill(next)") @@ -958,12 +957,10 @@ class TDTestCase: ## ..{.} tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:13', '2020-02-01 00:00:17') every(1s) fill(next)") - tdSql.checkRows(5) + tdSql.checkRows(3) tdSql.checkData(0, 0, 15) tdSql.checkData(1, 0, 15) tdSql.checkData(2, 0, 15) - tdSql.checkData(3, 0, None) - tdSql.checkData(4, 0, None) ## ... {} tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(next)") @@ -1275,7 +1272,7 @@ class TDTestCase: tdSql.checkData(8, 1, True) tdSql.query(f"select _irowts,_isfilled,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(next)") - tdSql.checkRows(13) + tdSql.checkRows(12) tdSql.checkCols(3) tdSql.checkData(0, 0, '2020-02-01 00:00:04.000') @@ -1290,7 +1287,6 @@ class TDTestCase: tdSql.checkData(9, 0, '2020-02-01 00:00:13.000') tdSql.checkData(10, 0, '2020-02-01 00:00:14.000') tdSql.checkData(11, 0, '2020-02-01 00:00:15.000') - tdSql.checkData(12, 0, '2020-02-01 00:00:16.000') tdSql.checkData(0, 1, True) tdSql.checkData(1, 1, False) @@ -1304,7 +1300,6 @@ class TDTestCase: tdSql.checkData(9, 1, True) tdSql.checkData(10, 1, True) tdSql.checkData(11, 1, False) - tdSql.checkData(12, 1, True) tdSql.query(f"select _irowts,_isfilled,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:05', '2020-02-01 00:00:15') every(2s) fill(next)") tdSql.checkRows(6) @@ -1682,13 +1677,9 @@ class TDTestCase: ## | . 
| { | .} | tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(next)") - tdSql.checkRows(6) + tdSql.checkRows(2) tdSql.checkData(0, 0, 15) tdSql.checkData(1, 0, 15) - tdSql.checkData(2, 0, None) - tdSql.checkData(3, 0, None) - tdSql.checkData(4, 0, None) - tdSql.checkData(5, 0, None) # test fill linear @@ -2741,7 +2732,7 @@ class TDTestCase: tdSql.checkData(4, i, 15) tdSql.query(f"select interp(c0),interp(c1),interp(c2),interp(c3) from {dbname}.{tbname} range('2020-02-09 00:00:05', '2020-02-13 00:00:05') every(1d) fill(next)") - tdSql.checkRows(5) + tdSql.checkRows(3) tdSql.checkCols(4) for i in range (tdSql.queryCols): @@ -2837,7 +2828,7 @@ class TDTestCase: # test fill next tdSql.query(f"select _irowts,_isfilled,interp(c0) from {dbname}.{tbname2} range('2020-02-02 00:00:00', '2020-02-02 00:00:18') every(1s) fill(next)") - tdSql.checkRows(19) + tdSql.checkRows(18) tdSql.checkCols(3) tdSql.checkData(0, 0, '2020-02-02 00:00:00.000') @@ -2860,7 +2851,6 @@ class TDTestCase: tdSql.checkData(15, 2, None) tdSql.checkData(16, 2, None) tdSql.checkData(17, 2, None) - tdSql.checkData(18, 2, None) tdSql.checkData(17, 0, '2020-02-02 00:00:17.000') @@ -3091,7 +3081,7 @@ class TDTestCase: # test fill linear tdSql.query(f"select _irowts,_isfilled,interp(c0) from {dbname}.{tbname2} range('2020-02-02 00:00:00', '2020-02-02 00:00:18') every(1s) fill(linear)") - tdSql.checkRows(18) + tdSql.checkRows(17) tdSql.checkCols(3) tdSql.checkData(0, 0, '2020-02-02 00:00:01.000') @@ -3113,9 +3103,8 @@ class TDTestCase: tdSql.checkData(14, 2, None) tdSql.checkData(15, 2, None) tdSql.checkData(16, 2, None) - tdSql.checkData(17, 2, None) - tdSql.checkData(17, 0, '2020-02-02 00:00:18.000') + tdSql.checkData(16, 0, '2020-02-02 00:00:17.000') tdLog.printNoPrefix("==========step13:test error cases") @@ -3231,7 +3220,7 @@ class TDTestCase: tdSql.checkData(17, 1, True) tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname} range('2020-02-01 00:00:00', '2020-02-01 00:00:18') every(1s) fill(next)") - tdSql.checkRows(19) + tdSql.checkRows(18) tdSql.checkData(0, 0, '2020-02-01 00:00:00.000') tdSql.checkData(0, 1, True) @@ -3254,12 +3243,9 @@ class TDTestCase: tdSql.checkData(15, 2, 15) tdSql.checkData(16, 2, 17) tdSql.checkData(17, 2, 17) - tdSql.checkData(18, 2, None) tdSql.checkData(17, 0, '2020-02-01 00:00:17.000') tdSql.checkData(17, 1, False) - tdSql.checkData(18, 0, '2020-02-01 00:00:18.000') - tdSql.checkData(18, 1, True) tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname} range('2020-02-01 00:00:00', '2020-02-01 00:00:18') every(1s) fill(linear)") tdSql.checkRows(17) @@ -3376,24 +3362,24 @@ class TDTestCase: tdSql.query(f"select tbname, _irowts, _isfilled, interp(c0) from {dbname}.{stbname} partition by tbname range('2020-02-01 00:00:00', '2020-02-01 00:00:18') every(1s) fill(next)") - tdSql.checkRows(57) - for i in range(0, 19): + tdSql.checkRows(48) + for i in range(0, 14): tdSql.checkData(i, 0, 'ctb1') - for i in range(19, 38): + for i in range(14, 30): tdSql.checkData(i, 0, 'ctb2') - for i in range(38, 57): + for i in range(30, 48): tdSql.checkData(i, 0, 'ctb3') tdSql.checkData(0, 1, '2020-02-01 00:00:00.000') - tdSql.checkData(18, 1, '2020-02-01 00:00:18.000') + tdSql.checkData(13, 1, '2020-02-01 00:00:13.000') - tdSql.checkData(19, 1, '2020-02-01 00:00:00.000') - tdSql.checkData(37, 1, '2020-02-01 00:00:18.000') + tdSql.checkData(14, 1, '2020-02-01 00:00:00.000') + tdSql.checkData(29, 1, '2020-02-01 
00:00:15.000') - tdSql.checkData(38, 1, '2020-02-01 00:00:00.000') - tdSql.checkData(56, 1, '2020-02-01 00:00:18.000') + tdSql.checkData(30, 1, '2020-02-01 00:00:00.000') + tdSql.checkData(47, 1, '2020-02-01 00:00:17.000') for i in range(0, 2): tdSql.checkData(i, 3, 1) @@ -3404,33 +3390,24 @@ class TDTestCase: for i in range(8, 14): tdSql.checkData(i, 3, 13) - for i in range(14, 19): - tdSql.checkData(i, 3, None) - - for i in range(19, 23): + for i in range(14, 18): tdSql.checkData(i, 3, 3) - for i in range(23, 29): + for i in range(18, 24): tdSql.checkData(i, 3, 9) - for i in range(29, 35): + for i in range(24, 30): tdSql.checkData(i, 3, 15) - for i in range(35, 38): - tdSql.checkData(i, 3, None) - - for i in range(38, 44): + for i in range(30, 36): tdSql.checkData(i, 3, 5) - for i in range(44, 50): + for i in range(36, 42): tdSql.checkData(i, 3, 11) - for i in range(50, 56): + for i in range(42, 48): tdSql.checkData(i, 3, 17) - for i in range(56, 57): - tdSql.checkData(i, 3, None) - tdSql.query(f"select tbname, _irowts, _isfilled, interp(c0) from {dbname}.{stbname} partition by tbname range('2020-02-01 00:00:00', '2020-02-01 00:00:18') every(1s) fill(linear)") tdSql.checkRows(39) @@ -3473,7 +3450,7 @@ class TDTestCase: tdSql.checkRows(90) tdSql.query(f"select c0, _irowts, _isfilled, interp(c0) from {dbname}.{stbname} partition by c0 range('2020-02-01 00:00:00', '2020-02-01 00:00:18') every(1s) fill(next)") - tdSql.checkRows(171) + tdSql.checkRows(90) tdSql.query(f"select c0, _irowts, _isfilled, interp(c0) from {dbname}.{stbname} partition by c0 range('2020-02-01 00:00:00', '2020-02-01 00:00:18') every(1s) fill(linear)") tdSql.checkRows(9) @@ -3490,7 +3467,7 @@ class TDTestCase: tdSql.checkRows(48) tdSql.query(f"select t1, _irowts, _isfilled, interp(c0) from {dbname}.{stbname} partition by t1 range('2020-02-01 00:00:00', '2020-02-01 00:00:18') every(1s) fill(next)") - tdSql.checkRows(57) + tdSql.checkRows(48) tdSql.query(f"select t1, _irowts, _isfilled, interp(c0) from {dbname}.{stbname} partition by t1 range('2020-02-01 00:00:00', '2020-02-01 00:00:18') every(1s) fill(linear)") tdSql.checkRows(39) @@ -4386,7 +4363,7 @@ class TDTestCase: tdSql.query(f"select _irowts, _isfilled, interp(c0, 1) from {dbname}.{tbname_null} range('2020-02-02 00:00:01', '2020-02-02 00:00:11') every(1s) fill(next)") - tdSql.checkRows(11) + tdSql.checkRows(9) tdSql.checkData(0, 1, False) tdSql.checkData(1, 1, True) tdSql.checkData(2, 1, False) @@ -4396,8 +4373,6 @@ class TDTestCase: tdSql.checkData(6, 1, True) tdSql.checkData(7, 1, False) tdSql.checkData(8, 1, False) - tdSql.checkData(9, 1, True) - tdSql.checkData(10, 1, True) tdSql.checkData(0, 2, 1) tdSql.checkData(1, 2, 3) @@ -4408,13 +4383,11 @@ class TDTestCase: tdSql.checkData(6, 2, 8) tdSql.checkData(7, 2, 8) tdSql.checkData(8, 2, 9) - tdSql.checkData(9, 2, None) - tdSql.checkData(10, 2, None) tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_null} where c0 is not null range('2020-02-02 00:00:01', '2020-02-02 00:00:11') every(1s) fill(next)") - tdSql.checkRows(11) + tdSql.checkRows(9) tdSql.checkData(0, 1, False) tdSql.checkData(1, 1, True) tdSql.checkData(2, 1, False) @@ -4424,9 +4397,6 @@ class TDTestCase: tdSql.checkData(6, 1, True) tdSql.checkData(7, 1, False) tdSql.checkData(8, 1, False) - tdSql.checkData(9, 1, True) - tdSql.checkData(10, 1, True) - tdSql.checkData(0, 2, 1) tdSql.checkData(1, 2, 3) @@ -4437,8 +4407,6 @@ class TDTestCase: tdSql.checkData(6, 2, 8) tdSql.checkData(7, 2, 8) tdSql.checkData(8, 2, 9) - 
tdSql.checkData(9, 2, None) - tdSql.checkData(10, 2, None) # super table tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_null} range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(next)") @@ -4475,7 +4443,7 @@ class TDTestCase: tdSql.query(f"select _irowts, _isfilled, interp(c0, 1) from {dbname}.{stbname_null} range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(next)") - tdSql.checkRows(9) + tdSql.checkRows(8) tdSql.checkData(0, 1, False) tdSql.checkData(1, 1, True) tdSql.checkData(2, 1, True) @@ -4484,7 +4452,6 @@ class TDTestCase: tdSql.checkData(5, 1, True) tdSql.checkData(6, 1, False) tdSql.checkData(7, 1, False) - tdSql.checkData(8, 1, True) tdSql.checkData(0, 2, 1) tdSql.checkData(1, 2, 9) @@ -4494,12 +4461,11 @@ class TDTestCase: tdSql.checkData(5, 2, 13) tdSql.checkData(6, 2, 13) tdSql.checkData(7, 2, 15) - tdSql.checkData(8, 2, None) tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_null} where c0 is not null range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(next)") - tdSql.checkRows(9) + tdSql.checkRows(8) tdSql.checkData(0, 1, False) tdSql.checkData(1, 1, True) tdSql.checkData(2, 1, True) @@ -4508,7 +4474,6 @@ class TDTestCase: tdSql.checkData(5, 1, True) tdSql.checkData(6, 1, False) tdSql.checkData(7, 1, False) - tdSql.checkData(8, 1, True) tdSql.checkData(0, 2, 1) tdSql.checkData(1, 2, 9) @@ -4518,37 +4483,36 @@ class TDTestCase: tdSql.checkData(5, 2, 13) tdSql.checkData(6, 2, 13) tdSql.checkData(7, 2, 15) - tdSql.checkData(8, 2, None) tdSql.query(f"select tbname, _irowts, _isfilled, interp(c0, 1) from {dbname}.{stbname_null} partition by tbname range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(next)") - tdSql.checkRows(18) - for i in range(0, 9): + tdSql.checkRows(15) + for i in range(0, 7): tdSql.checkData(i, 0, 'ctb1_null') - for i in range(9, 18): + for i in range(7, 15): tdSql.checkData(i, 0, 'ctb2_null') tdSql.checkData(0, 1, '2020-02-01 00:00:01.000') - tdSql.checkData(8, 1, '2020-02-01 00:00:17.000') + tdSql.checkData(6, 1, '2020-02-01 00:00:13.000') - tdSql.checkData(9, 1, '2020-02-01 00:00:01.000') - tdSql.checkData(17, 1, '2020-02-01 00:00:17.000') + tdSql.checkData(7, 1, '2020-02-01 00:00:01.000') + tdSql.checkData(14, 1, '2020-02-01 00:00:15.000') tdSql.query(f"select tbname, _irowts, _isfilled, interp(c0) from {dbname}.{stbname_null} where c0 is not null partition by tbname range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(next)") - tdSql.checkRows(18) - for i in range(0, 9): + tdSql.checkRows(15) + for i in range(0, 7): tdSql.checkData(i, 0, 'ctb1_null') - for i in range(9, 18): + for i in range(7, 15): tdSql.checkData(i, 0, 'ctb2_null') tdSql.checkData(0, 1, '2020-02-01 00:00:01.000') - tdSql.checkData(8, 1, '2020-02-01 00:00:17.000') + tdSql.checkData(6, 1, '2020-02-01 00:00:13.000') - tdSql.checkData(9, 1, '2020-02-01 00:00:01.000') - tdSql.checkData(17, 1, '2020-02-01 00:00:17.000') + tdSql.checkData(7, 1, '2020-02-01 00:00:01.000') + tdSql.checkData(14, 1, '2020-02-01 00:00:15.000') # fill linear # normal table From 3310e8145620ff06332d262ad2411ada14bb36ab Mon Sep 17 00:00:00 2001 From: xiao-77 Date: Wed, 16 Oct 2024 10:15:12 +0800 Subject: [PATCH 38/72] make sure mnode can be started --- source/libs/wal/src/walMeta.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/source/libs/wal/src/walMeta.c b/source/libs/wal/src/walMeta.c index 8649581d5d..042024284c 100644 --- a/source/libs/wal/src/walMeta.c +++ 
b/source/libs/wal/src/walMeta.c @@ -288,6 +288,11 @@ static void walAlignVersions(SWal* pWal) { pWal->cfg.vgId, pWal->vers.snapshotVer, pWal->cfg.committed); pWal->vers.snapshotVer = pWal->cfg.committed; } + if (pWal->vers.snapshotVer < 0) { + wWarn("vgId:%d, snapshotVer:%" PRId64 " in wal is an invalid value. align it with firstVer:%" PRId64 ".", + pWal->cfg.vgId, pWal->vers.snapshotVer, pWal->vers.firstVer); + pWal->vers.snapshotVer = pWal->vers.firstVer; + } if (pWal->vers.firstVer > pWal->vers.snapshotVer + 1) { wWarn("vgId:%d, firstVer:%" PRId64 " is larger than snapshotVer:%" PRId64 " + 1. align with it.", pWal->cfg.vgId, pWal->vers.firstVer, pWal->vers.snapshotVer); From d0a0d578bd149e82f0623f35a52378393e229391 Mon Sep 17 00:00:00 2001 From: dmchen Date: Wed, 16 Oct 2024 02:16:31 +0000 Subject: [PATCH 39/72] fix/TS-5533-update-os-info-when-monitor --- source/dnode/mgmt/node_util/src/dmUtil.c | 1 + 1 file changed, 1 insertion(+) diff --git a/source/dnode/mgmt/node_util/src/dmUtil.c b/source/dnode/mgmt/node_util/src/dmUtil.c index b50c746c92..f8c0955745 100644 --- a/source/dnode/mgmt/node_util/src/dmUtil.c +++ b/source/dnode/mgmt/node_util/src/dmUtil.c @@ -74,6 +74,7 @@ void dmGetMonitorSystemInfo(SMonSysInfo *pInfo) { } pInfo->mem_total = tsTotalMemoryKB; pInfo->disk_engine = 0; + osUpdate(); pInfo->disk_used = tsDataSpace.size.used; pInfo->disk_total = tsDataSpace.size.total; code = taosGetCardInfoDelta(&pInfo->net_in, &pInfo->net_out); From f206837d48f86fb3102c2e40626a7cbe5cdc6c5f Mon Sep 17 00:00:00 2001 From: xiao-77 Date: Wed, 16 Oct 2024 10:30:27 +0800 Subject: [PATCH 40/72] modify log level while walLoadMeta failed --- source/libs/wal/src/walMgmt.c | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/source/libs/wal/src/walMgmt.c b/source/libs/wal/src/walMgmt.c index 3b23a2db80..d8a58efe4e 100644 --- a/source/libs/wal/src/walMgmt.c +++ b/source/libs/wal/src/walMgmt.c @@ -91,7 +91,8 @@ static int32_t walInitLock(SWal *pWal) { } SWal *walOpen(const char *path, SWalCfg *pCfg) { - SWal *pWal = taosMemoryCalloc(1, sizeof(SWal)); + int32_t code = 0; + SWal *pWal = taosMemoryCalloc(1, sizeof(SWal)); if (pWal == NULL) { terrno = TAOS_SYSTEM_ERROR(errno); return NULL; @@ -160,17 +161,20 @@ SWal *walOpen(const char *path, SWalCfg *pCfg) { pWal->writeHead.magic = WAL_MAGIC; // load meta - if (walLoadMeta(pWal) < 0) { - wInfo("vgId:%d, failed to load meta since %s", pWal->cfg.vgId, tstrerror(terrno)); + code = walLoadMeta(pWal); + if (code < 0) { + wWarn("vgId:%d, failed to load meta since %s", pWal->cfg.vgId, tstrerror(code)); } - if (walCheckAndRepairMeta(pWal) < 0) { - wError("vgId:%d, cannot open wal since repair meta file failed", pWal->cfg.vgId); + code = walCheckAndRepairMeta(pWal); + if (code < 0) { + wError("vgId:%d, cannot open wal since repair meta file failed since %s", pWal->cfg.vgId, tstrerror(code)); goto _err; } - if (walCheckAndRepairIdx(pWal) < 0) { - wError("vgId:%d, cannot open wal since repair idx file failed", pWal->cfg.vgId); + code = walCheckAndRepairIdx(pWal); + if (code < 0) { + wError("vgId:%d, cannot open wal since repair idx file failed since %s", pWal->cfg.vgId, tstrerror(code)); goto _err; } From dca0822b5d424ebadbbe52c0570dee7349812e74 Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Wed, 16 Oct 2024 10:47:02 +0800 Subject: [PATCH 41/72] feat: support query-QPS new feature --- tools/auto/testCompression/testCompression.py | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git 
a/tools/auto/testCompression/testCompression.py b/tools/auto/testCompression/testCompression.py index 281a097f8a..ee922a1a23 100644 --- a/tools/auto/testCompression/testCompression.py +++ b/tools/auto/testCompression/testCompression.py @@ -134,8 +134,6 @@ def getMatch(datatype, algo): def generateJsonFile(algo): - print(f"doTest algo: {algo} \n") - # replace datatype context = readFileContext(templateFile) # replace compress @@ -192,8 +190,6 @@ def findContextValue(context, label): ends = [',','}',']', 0] while context[end] not in ends: end += 1 - - print(f"start = {start} end={end}\n") return context[start:end] @@ -281,10 +277,10 @@ def testQuery(): # INFO: Spend 6.7350 second completed total queries: 10, the QPS of all threads: 1.485 speed = None - for i in range(20, len(lines)): + for i in range(0, len(lines)): # find second real + context = lines[i] pos = context.find("the QPS of all threads:") - context = lines[26] if pos == -1 : continue pos += 24 @@ -302,7 +298,6 @@ def doTest(algo, resultFile): print(f"doTest algo: {algo} \n") #cleanAndStartTaosd() - # json jsonFile = generateJsonFile(algo) From de7006743627a2e561f833c2117f17b8afce0791 Mon Sep 17 00:00:00 2001 From: Jinqing Kuang Date: Wed, 16 Oct 2024 10:48:41 +0800 Subject: [PATCH 42/72] fix(query)[TD-32564]. Fix memory leak in exceptional cases In function tsdbTFileSetInitRef, clear all FileObj stored in the variable lvl when an error occurs, and release the memory allocated for lvl itself. --- source/dnode/vnode/src/tsdb/tsdbFSet2.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbFSet2.c b/source/dnode/vnode/src/tsdb/tsdbFSet2.c index fc681f9753..a0ae58ac96 100644 --- a/source/dnode/vnode/src/tsdb/tsdbFSet2.c +++ b/source/dnode/vnode/src/tsdb/tsdbFSet2.c @@ -602,14 +602,14 @@ int32_t tsdbTFileSetInitRef(STsdb *pTsdb, const STFileSet *fset1, STFileSet **fs SSttLvl *lvl; code = tsdbSttLvlInitRef(pTsdb, lvl1, &lvl); if (code) { - taosMemoryFree(lvl); + tsdbSttLvlClear(&lvl); tsdbTFileSetClear(fset); return code; } code = TARRAY2_APPEND(fset[0]->lvlArr, lvl); if (code) { - taosMemoryFree(lvl); + tsdbSttLvlClear(&lvl); tsdbTFileSetClear(fset); return code; } From 2520eead4842472e3f03690041d02bcaca273269 Mon Sep 17 00:00:00 2001 From: dmchen Date: Wed, 16 Oct 2024 04:59:02 +0000 Subject: [PATCH 43/72] fix/TS-5533-update-os-info-when-monitor-fix-check --- source/dnode/mgmt/node_util/src/dmUtil.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/source/dnode/mgmt/node_util/src/dmUtil.c b/source/dnode/mgmt/node_util/src/dmUtil.c index f8c0955745..3a6c73a1bc 100644 --- a/source/dnode/mgmt/node_util/src/dmUtil.c +++ b/source/dnode/mgmt/node_util/src/dmUtil.c @@ -74,7 +74,10 @@ void dmGetMonitorSystemInfo(SMonSysInfo *pInfo) { } pInfo->mem_total = tsTotalMemoryKB; pInfo->disk_engine = 0; - osUpdate(); + code = osUpdate(); + if (code != 0) { + dError("failed to update os info since %s", tstrerror(code)); + } pInfo->disk_used = tsDataSpace.size.used; pInfo->disk_total = tsDataSpace.size.total; code = taosGetCardInfoDelta(&pInfo->net_in, &pInfo->net_out); From 48d9f2da65e8aea47a03e44f30084bbac478a06a Mon Sep 17 00:00:00 2001 From: xiao-77 Date: Wed, 16 Oct 2024 13:38:45 +0800 Subject: [PATCH 44/72] fix ci walTest --- source/libs/wal/src/walMeta.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/wal/src/walMeta.c b/source/libs/wal/src/walMeta.c index 042024284c..92ad760a20 100644 --- a/source/libs/wal/src/walMeta.c +++ 
b/source/libs/wal/src/walMeta.c @@ -288,7 +288,7 @@ static void walAlignVersions(SWal* pWal) { pWal->cfg.vgId, pWal->vers.snapshotVer, pWal->cfg.committed); pWal->vers.snapshotVer = pWal->cfg.committed; } - if (pWal->vers.snapshotVer < 0) { + if (pWal->vers.snapshotVer < 0 && pWal->vers.firstVer > 0) { wWarn("vgId:%d, snapshotVer:%" PRId64 " in wal is an invalid value. align it with firstVer:%" PRId64 ".", pWal->cfg.vgId, pWal->vers.snapshotVer, pWal->vers.firstVer); pWal->vers.snapshotVer = pWal->vers.firstVer; From 544a1828e6c094d4ed874c36893e44afc2986f29 Mon Sep 17 00:00:00 2001 From: dmchen Date: Wed, 16 Oct 2024 07:00:28 +0000 Subject: [PATCH 45/72] fix/TS-5532-add-more-log-status-msg --- source/dnode/mgmt/mgmt_dnode/src/dmHandle.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c b/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c index f1f3a3bee7..87b1ae0efa 100644 --- a/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c +++ b/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c @@ -123,6 +123,7 @@ void dmSendStatusReq(SDnodeMgmt *pMgmt) { int32_t code = 0; SStatusReq req = {0}; + dDebug("send status req to mnode, statusSeq:%d, begin to mgnt lock", pMgmt->statusSeq); (void)taosThreadRwlockRdlock(&pMgmt->pData->lock); req.sver = tsVersion; req.dnodeVer = pMgmt->pData->dnodeVer; @@ -161,14 +162,17 @@ void dmSendStatusReq(SDnodeMgmt *pMgmt) { memcpy(req.clusterCfg.charset, tsCharset, TD_LOCALE_LEN); (void)taosThreadRwlockUnlock(&pMgmt->pData->lock); + dDebug("send status req to mnode, statusSeq:%d, begin to get vnode loads", pMgmt->statusSeq); SMonVloadInfo vinfo = {0}; (*pMgmt->getVnodeLoadsFp)(&vinfo); req.pVloads = vinfo.pVloads; + dDebug("send status req to mnode, statusSeq:%d, begin to get mnode loads", pMgmt->statusSeq); SMonMloadInfo minfo = {0}; (*pMgmt->getMnodeLoadsFp)(&minfo); req.mload = minfo.load; + dDebug("send status req to mnode, statusSeq:%d, begin to get qnode loads", pMgmt->statusSeq); (*pMgmt->getQnodeLoadsFp)(&req.qload); pMgmt->statusSeq++; @@ -206,6 +210,7 @@ void dmSendStatusReq(SDnodeMgmt *pMgmt) { int8_t epUpdated = 0; (void)dmGetMnodeEpSet(pMgmt->pData, &epSet); + dDebug("send status req to mnode, statusSeq:%d, begin to send rpc msg", pMgmt->statusSeq); code = rpcSendRecvWithTimeout(pMgmt->msgCb.statusRpc, &epSet, &rpcMsg, &rpcRsp, &epUpdated, tsStatusInterval * 5 * 1000); if (code != 0) { From 9bf2c61d9357faae785e78fc7209a1b686c03745 Mon Sep 17 00:00:00 2001 From: factosea <285808407@qq.com> Date: Wed, 16 Oct 2024 16:15:45 +0800 Subject: [PATCH 46/72] not nest --- source/libs/parser/src/parAstCreater.c | 3 ++- tests/pytest/fulltest.sh | 1 + tests/pytest/regressiontest.sh | 1 + tests/system-test/2-query/not.py | 35 +++++++++++++------------- tests/system-test/runAllOne.sh | 5 ++++ 5 files changed, 26 insertions(+), 19 deletions(-) diff --git a/source/libs/parser/src/parAstCreater.c b/source/libs/parser/src/parAstCreater.c index 5db7e18fc5..3bb9e15182 100644 --- a/source/libs/parser/src/parAstCreater.c +++ b/source/libs/parser/src/parAstCreater.c @@ -887,7 +887,8 @@ _err: } static int32_t addParamToLogicConditionNode(SLogicConditionNode* pCond, SNode* pParam) { - if (QUERY_NODE_LOGIC_CONDITION == nodeType(pParam) && pCond->condType == ((SLogicConditionNode*)pParam)->condType) { + if (QUERY_NODE_LOGIC_CONDITION == nodeType(pParam) && pCond->condType == ((SLogicConditionNode*)pParam)->condType && + ((SLogicConditionNode*)pParam)->condType != LOGIC_COND_TYPE_NOT) { int32_t code = nodesListAppendList(pCond->pParameterList, 
((SLogicConditionNode*)pParam)->pParameterList); ((SLogicConditionNode*)pParam)->pParameterList = NULL; nodesDestroyNode(pParam); diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index 3df42cbf33..eb975ec46f 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -225,6 +225,7 @@ python3 test.py -f query/distinctOneColTb.py python3 ./test.py -f query/filter.py python3 ./test.py -f query/filterCombo.py python3 ./test.py -f query/queryNormal.py +python3 ./test.py -f query/not.py python3 ./test.py -f query/queryError.py python3 ./test.py -f query/filterAllIntTypes.py python3 ./test.py -f query/filterFloatAndDouble.py diff --git a/tests/pytest/regressiontest.sh b/tests/pytest/regressiontest.sh index b69ee37a55..e42d53ded1 100755 --- a/tests/pytest/regressiontest.sh +++ b/tests/pytest/regressiontest.sh @@ -139,6 +139,7 @@ python3 ./test.py -f query/querySort.py python3 ./test.py -f query/queryJoin.py python3 ./test.py -f query/filterCombo.py python3 ./test.py -f query/queryNormal.py +python3 ./test.py -f query/not.py python3 ./test.py -f query/select_last_crash.py python3 ./test.py -f query/queryNullValueTest.py python3 ./test.py -f query/queryInsertValue.py diff --git a/tests/system-test/2-query/not.py b/tests/system-test/2-query/not.py index a0bd1d4e1d..1254226db3 100644 --- a/tests/system-test/2-query/not.py +++ b/tests/system-test/2-query/not.py @@ -98,24 +98,23 @@ class TDTestCase: tdsql.checkData(0, 1, 10) tdsql.checkData(1, 1, 30) - # tdsql.query(f"select * from {dbname}.{stbname} where not(not (v1 < 20 or v1 > 30)) order by t1") - # tdsql.checkRows(1) - # - # tdsql.checkData(0, 1, 20) - # tdsql.query(f"select * from {dbname}.{stbname} where not(not (v1 < 20 or v1 >= 30)) order by t1") - # tdsql.checkRows(2) - # - # tdsql.query(f"select * from {dbname}.{stbname} where not(not (t1 != 1)) order by t1") - # tdsql.checkRows(2) - # - # tdsql.query(f"select * from {dbname}.{stbname} where not(not (t1 > 2)) order by t1") - # tdsql.checkRows(1) - # - # tdsql.query(f"select * from {dbname}.{stbname} where not(not (t1 == 2)) order by t1") - # tdsql.checkRows(1) - # - # tdsql.query(f"select * from {dbname}.{stbname} where not(not (v1 > 10 and v1 < 30)) order by t1") - # tdsql.checkRows(1) + tdsql.query(f"select * from {dbname}.{stbname} where not(not (v1 < 20 or v1 > 30)) order by t1") + tdsql.checkRows(1) + + tdsql.query(f"select * from {dbname}.{stbname} where not(not (v1 < 20 or v1 >= 30)) order by t1") + tdsql.checkRows(2) + + tdsql.query(f"select * from {dbname}.{stbname} where not(not (t1 != 1)) order by t1") + tdsql.checkRows(2) + + tdsql.query(f"select * from {dbname}.{stbname} where not(not (t1 > 2)) order by t1") + tdsql.checkRows(1) + + tdsql.query(f"select * from {dbname}.{stbname} where not(not (t1 == 2)) order by t1") + tdsql.checkRows(1) + + tdsql.query(f"select * from {dbname}.{stbname} where not(not (v1 > 10 and v1 < 30)) order by t1") + tdsql.checkRows(1) def run(self): dbname = "db" diff --git a/tests/system-test/runAllOne.sh b/tests/system-test/runAllOne.sh index 3bb128ea28..0d65fd616b 100644 --- a/tests/system-test/runAllOne.sh +++ b/tests/system-test/runAllOne.sh @@ -245,6 +245,8 @@ python3 ./test.py -f 2-query/min.py -P python3 ./test.py -f 2-query/min.py -P -R python3 ./test.py -f 2-query/normal.py -P python3 ./test.py -f 2-query/normal.py -P -R +python3 ./test.py -f 2-query/not.py -P +python3 ./test.py -f 2-query/not.py -P -R python3 ./test.py -f 2-query/mode.py -P python3 ./test.py -f 2-query/mode.py -P -R python3 ./test.py -f 2-query/Now.py 
-P @@ -427,6 +429,7 @@ python3 ./test.py -f 2-query/Today.py -P -Q 2 python3 ./test.py -f 2-query/max.py -P -Q 2 python3 ./test.py -f 2-query/min.py -P -Q 2 python3 ./test.py -f 2-query/normal.py -P -Q 2 +python3 ./test.py -f 2-query/not.py -P -Q 2 python3 ./test.py -f 2-query/mode.py -P -Q 2 python3 ./test.py -f 2-query/count.py -P -Q 2 python3 ./test.py -f 2-query/countAlwaysReturnValue.py -P -Q 2 @@ -526,6 +529,7 @@ python3 ./test.py -f 2-query/Today.py -P -Q 3 python3 ./test.py -f 2-query/max.py -P -Q 3 python3 ./test.py -f 2-query/min.py -P -Q 3 python3 ./test.py -f 2-query/normal.py -P -Q 3 +python3 ./test.py -f 2-query/not.py -P -Q 3 python3 ./test.py -f 2-query/mode.py -P -Q 3 python3 ./test.py -f 2-query/count.py -P -Q 3 python3 ./test.py -f 2-query/countAlwaysReturnValue.py -P -Q 3 @@ -624,6 +628,7 @@ python3 ./test.py -f 2-query/Today.py -P -Q 4 python3 ./test.py -f 2-query/max.py -P -Q 4 python3 ./test.py -f 2-query/min.py -P -Q 4 python3 ./test.py -f 2-query/normal.py -P -Q 4 +python3 ./test.py -f 2-query/not.py -P -Q 4 python3 ./test.py -f 2-query/mode.py -P -Q 4 python3 ./test.py -f 2-query/count.py -P -Q 4 python3 ./test.py -f 2-query/countAlwaysReturnValue.py -P -Q 4 From b56701e05c9e940346dde1463ebf42bad7b7d38a Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Wed, 16 Oct 2024 16:31:30 +0800 Subject: [PATCH 47/72] fix: remove invalid error code check and add meta data recover and compact function --- source/dnode/mgmt/exe/dmMain.c | 3 + source/dnode/vnode/src/inc/vnodeInt.h | 3 + source/dnode/vnode/src/meta/metaOpen.c | 171 +++++++++++++++++++++++- source/dnode/vnode/src/meta/metaTable.c | 3 - 4 files changed, 175 insertions(+), 5 deletions(-) diff --git a/source/dnode/mgmt/exe/dmMain.c b/source/dnode/mgmt/exe/dmMain.c index ba162bd84f..1089b0eced 100644 --- a/source/dnode/mgmt/exe/dmMain.c +++ b/source/dnode/mgmt/exe/dmMain.c @@ -182,6 +182,7 @@ static void dmSetSignalHandle() { } #endif } +extern bool generateNewMeta; static int32_t dmParseArgs(int32_t argc, char const *argv[]) { global.startTime = taosGetTimestampMs(); @@ -221,6 +222,8 @@ static int32_t dmParseArgs(int32_t argc, char const *argv[]) { global.dumpSdb = true; } else if (strcmp(argv[i], "-dTxn") == 0) { global.deleteTrans = true; + } else if (strcmp(argv[i], "-r") == 0) { + generateNewMeta = true; } else if (strcmp(argv[i], "-E") == 0) { if (i < argc - 1) { if (strlen(argv[++i]) >= PATH_MAX) { diff --git a/source/dnode/vnode/src/inc/vnodeInt.h b/source/dnode/vnode/src/inc/vnodeInt.h index 1bd4317234..fc98d6578b 100644 --- a/source/dnode/vnode/src/inc/vnodeInt.h +++ b/source/dnode/vnode/src/inc/vnodeInt.h @@ -81,6 +81,9 @@ typedef struct SCommitInfo SCommitInfo; typedef struct SCompactInfo SCompactInfo; typedef struct SQueryNode SQueryNode; +#define VNODE_META_TMP_DIR "meta.tmp" +#define VNODE_META_BACKUP_DIR "meta.backup" + #define VNODE_META_DIR "meta" #define VNODE_TSDB_DIR "tsdb" #define VNODE_TQ_DIR "tq" diff --git a/source/dnode/vnode/src/meta/metaOpen.c b/source/dnode/vnode/src/meta/metaOpen.c index f062505ac7..ef36521879 100644 --- a/source/dnode/vnode/src/meta/metaOpen.c +++ b/source/dnode/vnode/src/meta/metaOpen.c @@ -133,7 +133,7 @@ static void doScan(SMeta *pMeta) { } } -int32_t metaOpen(SVnode *pVnode, SMeta **ppMeta, int8_t rollback) { +static int32_t metaOpenImpl(SVnode *pVnode, SMeta **ppMeta, const char *metaDir, int8_t rollback) { SMeta *pMeta = NULL; int32_t code = 0; int32_t lino; @@ -144,7 +144,11 @@ int32_t metaOpen(SVnode *pVnode, SMeta **ppMeta, int8_t rollback) { // create handle 
vnodeGetPrimaryDir(pVnode->path, pVnode->diskPrimary, pVnode->pTfs, path, TSDB_FILENAME_LEN); offset = strlen(path); - snprintf(path + offset, TSDB_FILENAME_LEN - offset - 1, "%s%s", TD_DIRSEP, VNODE_META_DIR); + snprintf(path + offset, TSDB_FILENAME_LEN - offset - 1, "%s%s", TD_DIRSEP, metaDir); + + if (strncmp(metaDir, VNODE_META_TMP_DIR, strlen(VNODE_META_TMP_DIR)) == 0) { + taosRemoveDir(path); + } if ((pMeta = taosMemoryCalloc(1, sizeof(*pMeta) + strlen(path) + 1)) == NULL) { TSDB_CHECK_CODE(code = terrno, lino, _exit); @@ -245,6 +249,169 @@ _exit: return code; } +bool generateNewMeta = false; + +static int32_t metaGenerateNewMeta(SMeta **ppMeta) { + SMeta *pNewMeta = NULL; + SMeta *pMeta = *ppMeta; + SVnode *pVnode = pMeta->pVnode; + + metaInfo("vgId:%d start to generate new meta", TD_VID(pMeta->pVnode)); + + // Open a new meta for orgainzation + int32_t code = metaOpenImpl(pMeta->pVnode, &pNewMeta, VNODE_META_TMP_DIR, false); + if (code) { + return code; + } + + code = metaBegin(pNewMeta, META_BEGIN_HEAP_NIL); + if (code) { + return code; + } + + // i == 0, scan super table + // i == 1, scan normal table and child table + for (int i = 0; i < 2; i++) { + TBC *uidCursor = NULL; + int32_t counter = 0; + + code = tdbTbcOpen(pMeta->pUidIdx, &uidCursor, NULL); + if (code) { + metaError("vgId:%d failed to open uid index cursor, reason:%s", TD_VID(pVnode), tstrerror(code)); + return code; + } + + code = tdbTbcMoveToFirst(uidCursor); + if (code) { + metaError("vgId:%d failed to move to first, reason:%s", TD_VID(pVnode), tstrerror(code)); + tdbTbcClose(uidCursor); + return code; + } + + for (;;) { + const void *pKey; + int kLen; + const void *pVal; + int vLen; + + if (tdbTbcGet(uidCursor, &pKey, &kLen, &pVal, &vLen) < 0) { + break; + } + + tb_uid_t uid = *(tb_uid_t *)pKey; + SUidIdxVal *pUidIdxVal = (SUidIdxVal *)pVal; + if ((i == 0 && (pUidIdxVal->suid && pUidIdxVal->suid == uid)) // super table + || (i == 1 && (pUidIdxVal->suid == 0 || pUidIdxVal->suid != uid)) // normal table and child table + ) { + counter++; + if (i == 0) { + metaInfo("vgId:%d counter:%d new meta handle %s table uid:%" PRId64, TD_VID(pVnode), counter, "super", uid); + } else { + metaInfo("vgId:%d counter:%d new meta handle %s table uid:%" PRId64, TD_VID(pVnode), counter, + pUidIdxVal->suid == 0 ? 
"normal" : "child", uid); + } + + // fetch table entry + void *value = NULL; + int valueSize = 0; + if (tdbTbGet(pMeta->pTbDb, + &(STbDbKey){ + .version = pUidIdxVal->version, + .uid = uid, + }, + sizeof(uid), &value, &valueSize) == 0) { + SDecoder dc = {0}; + SMetaEntry me = {0}; + tDecoderInit(&dc, value, valueSize); + if (metaDecodeEntry(&dc, &me) == 0) { + if (metaHandleEntry(pNewMeta, &me) != 0) { + metaError("vgId:%d failed to handle entry, uid:%" PRId64, TD_VID(pVnode), uid); + } + } + tDecoderClear(&dc); + } + tdbFree(value); + } + + code = tdbTbcMoveToNext(uidCursor); + if (code) { + metaError("vgId:%d failed to move to next, reason:%s", TD_VID(pVnode), tstrerror(code)); + return code; + } + } + + tdbTbcClose(uidCursor); + } + + code = metaCommit(pNewMeta, pNewMeta->txn); + if (code) { + metaError("vgId:%d failed to commit, reason:%s", TD_VID(pVnode), tstrerror(code)); + return code; + } + + code = metaFinishCommit(pNewMeta, pNewMeta->txn); + if (code) { + metaError("vgId:%d failed to finish commit, reason:%s", TD_VID(pVnode), tstrerror(code)); + return code; + } + + if ((code = metaBegin(pNewMeta, META_BEGIN_HEAP_NIL)) != 0) { + metaError("vgId:%d failed to begin new meta, reason:%s", TD_VID(pVnode), tstrerror(code)); + } + metaClose(&pNewMeta); + metaInfo("vgId:%d finish to generate new meta", TD_VID(pVnode)); + return 0; +} + +int32_t metaOpen(SVnode *pVnode, SMeta **ppMeta, int8_t rollback) { + int32_t code = metaOpenImpl(pVnode, ppMeta, VNODE_META_DIR, rollback); + if (code) { + return code; + } + + if (generateNewMeta) { + // backup the old meta + char path[TSDB_FILENAME_LEN] = {0}; + char oldMetaPath[TSDB_FILENAME_LEN] = {0}; + char newMetaPath[TSDB_FILENAME_LEN] = {0}; + char backupMetaPath[TSDB_FILENAME_LEN] = {0}; + + vnodeGetPrimaryDir(pVnode->path, pVnode->diskPrimary, pVnode->pTfs, path, TSDB_FILENAME_LEN); + snprintf(oldMetaPath, sizeof(oldMetaPath) - 1, "%s%s%s", path, TD_DIRSEP, VNODE_META_DIR); + snprintf(newMetaPath, sizeof(newMetaPath) - 1, "%s%s%s", path, TD_DIRSEP, VNODE_META_TMP_DIR); + snprintf(backupMetaPath, sizeof(backupMetaPath) - 1, "%s%s%s", path, TD_DIRSEP, VNODE_META_BACKUP_DIR); + + if (taosCheckExistFile(backupMetaPath)) { + metaError("vgId:%d backup meta already exists, please check", TD_VID(pVnode)); + return TSDB_CODE_FAILED; + } + + code = metaGenerateNewMeta(ppMeta); + if (code) { + metaError("vgId:%d failed to generate new meta, reason:%s", TD_VID(pVnode), tstrerror(code)); + } + + metaClose(ppMeta); + if (taosRenameFile(oldMetaPath, backupMetaPath) != 0) { + metaError("vgId:%d failed to rename old meta to backup, reason:%s", TD_VID(pVnode), tstrerror(terrno)); + return terrno; + } + + // rename the new meta to old meta + if (taosRenameFile(newMetaPath, oldMetaPath) != 0) { + metaError("vgId:%d failed to rename new meta to old meta, reason:%s", TD_VID(pVnode), tstrerror(terrno)); + return terrno; + } + code = metaOpenImpl(pVnode, ppMeta, VNODE_META_DIR, false); + if (code) { + metaError("vgId:%d failed to open new meta, reason:%s", TD_VID(pVnode), tstrerror(code)); + return code; + } + } + + return TSDB_CODE_SUCCESS; +} + int32_t metaUpgrade(SVnode *pVnode, SMeta **ppMeta) { int32_t code = TSDB_CODE_SUCCESS; int32_t lino; diff --git a/source/dnode/vnode/src/meta/metaTable.c b/source/dnode/vnode/src/meta/metaTable.c index 08ee422126..21d12ef77d 100644 --- a/source/dnode/vnode/src/meta/metaTable.c +++ b/source/dnode/vnode/src/meta/metaTable.c @@ -2985,9 +2985,6 @@ static int metaUpdateTagIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry) { } } end: 
- if (terrno != 0) { - ret = terrno; - } tDecoderClear(&dc); tdbFree(pData); return ret; From 75650908ba622638ce98415b894d322769349934 Mon Sep 17 00:00:00 2001 From: dmchen Date: Wed, 16 Oct 2024 09:17:21 +0000 Subject: [PATCH 48/72] fix/TS-5533-revert-add-osupdate-when-monitor --- source/dnode/mgmt/node_util/src/dmUtil.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/source/dnode/mgmt/node_util/src/dmUtil.c b/source/dnode/mgmt/node_util/src/dmUtil.c index 3a6c73a1bc..b50c746c92 100644 --- a/source/dnode/mgmt/node_util/src/dmUtil.c +++ b/source/dnode/mgmt/node_util/src/dmUtil.c @@ -74,10 +74,6 @@ void dmGetMonitorSystemInfo(SMonSysInfo *pInfo) { } pInfo->mem_total = tsTotalMemoryKB; pInfo->disk_engine = 0; - code = osUpdate(); - if (code != 0) { - dError("failed to update os info since %s", tstrerror(code)); - } pInfo->disk_used = tsDataSpace.size.used; pInfo->disk_total = tsDataSpace.size.total; code = taosGetCardInfoDelta(&pInfo->net_in, &pInfo->net_out); From 1d018d0d287cc428cdf3656e964e6c492784fd1e Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Wed, 16 Oct 2024 18:05:35 +0800 Subject: [PATCH 49/72] enh: stmt2Perf add p90 p95 min max --- .../auto/stmt2Performance/json/template.json | 6 +- tools/auto/stmt2Performance/stmt2Perf.py | 110 +++++++++++++----- 2 files changed, 86 insertions(+), 30 deletions(-) diff --git a/tools/auto/stmt2Performance/json/template.json b/tools/auto/stmt2Performance/json/template.json index 659c5966a4..8c54c5be22 100644 --- a/tools/auto/stmt2Performance/json/template.json +++ b/tools/auto/stmt2Performance/json/template.json @@ -16,14 +16,14 @@ { "dbinfo": { "name": "dbrate", - "drop": "yes", - "vgroups": 2 + "vgroups": 1, + "drop": "yes" }, "super_tables": [ { "name": "meters", "child_table_exists": "no", - "childtable_count": 10, + "childtable_count": 1, "childtable_prefix": "d", "insert_mode": "@STMT_MODE", "interlace_rows": @INTERLACE_MODE, diff --git a/tools/auto/stmt2Performance/stmt2Perf.py b/tools/auto/stmt2Performance/stmt2Perf.py index e7a4d5ecbe..4d99f2483d 100644 --- a/tools/auto/stmt2Performance/stmt2Perf.py +++ b/tools/auto/stmt2Performance/stmt2Perf.py @@ -198,16 +198,20 @@ def findContextValue(context, label): def writeTemplateInfo(resultFile): # create info - context = readFileContext(templateFile) + context = readFileContext(templateFile) vgroups = findContextValue(context, "vgroups") childCount = findContextValue(context, "childtable_count") insertRows = findContextValue(context, "insert_rows") - line = f"vgroups = {vgroups}\nchildtable_count = {childCount}\ninsert_rows = {insertRows}\n\n" + bindVGroup = findContextValue(context, "thread_bind_vgroup") + nThread = findContextValue(context, "thread_count") + if bindVGroup.lower().find("yes") != -1: + nThread = vgroups + line = f"thread_bind_vgroup = {bindVGroup}\nvgroups = {vgroups}\nchildtable_count = {childCount}\ninsert_rows = {insertRows}\ninsertThreads = {nThread} \n\n" print(line) appendFileContext(resultFile, line) -def totalCompressRate(stmt, interlace, resultFile, writeSpeed, querySpeed): +def totalCompressRate(stmt, interlace, resultFile, spent, spentReal, writeSpeed, writeReal, min, avg, p90, p99, max, querySpeed): global Number # flush command = 'taos -s "flush database dbrate;"' @@ -220,7 +224,7 @@ def totalCompressRate(stmt, interlace, resultFile, writeSpeed, querySpeed): # read compress rate command = 'taos -s "show table distributed dbrate.meters\G;"' rets = runRetList(command) - print(rets) + #print(rets) str1 = rets[5] arr = str1.split(" ") @@ -234,46 
+238,88 @@ def totalCompressRate(stmt, interlace, resultFile, writeSpeed, querySpeed): str2 = arr[6] pos = str2.find("=[") rate = str2[pos+2:] - print("rate =" + rate) # total data file size #dataSize = getFolderSize(f"{dataDir}/vnode/") #dataSizeMB = int(dataSize/1024/1024) # appand to file - + + # %("No", "stmtMode", "interlaceRows", "spent", "spent-real", "writeSpeed", "write-real", "query-QPS", "dataSize", "rate") Number += 1 - context = "%10s %10s %15s %10s %10s %30s %15s\n"%( Number, stmt, interlace, str(totalSize)+" MB", rate+"%", writeSpeed + " Records/second", querySpeed) + ''' + context = "%2s %6s %10s %10s %10s %15s %15s %16s %16s %16s %16s %16s %8s %8s %8s\n"%( + Number, stmt, interlace, spent + "s", spentReal + "s", writeSpeed + " rows/s", writeReal + " rows/s", + min, avg, p90, p99, max, + querySpeed, str(totalSize) + " MB", rate + "%") + ''' + context = "%2s %8s %10s %10s %16s %16s %12s %12s %12s %12s %12s %12s %10s %10s %10s\n"%( + Number, stmt, interlace, spent + "s", spentReal + "s", writeSpeed + "r/s", writeReal + "r/s", + min, avg, p90, p99, max + "ms", + querySpeed, str(totalSize) + " MB", rate + "%") + showLog(context) appendFileContext(resultFile, context) +def cutEnd(line, start, endChar): + pos = line.find(endChar, start) + if pos == -1: + return line[start:] + return line[start : pos] + +def findValue(context, pos, key, endChar,command): + pos = context.find(key, pos) + if pos == -1: + print(f"error, run command={command} output not found \"{key}\" keyword. context={context}") + exit(1) + pos += len(key) + value = cutEnd(context, pos, endChar) + return (value, pos) + def testWrite(jsonFile): command = f"taosBenchmark -f {jsonFile}" output, context = run(command, 60000) + print(context) + # SUCC: Spent 0.960248 (real 0.947154) seconds to insert rows: 100000 with 1 thread(s) into dbrate 104139.76 (real 105579.45) records/second - # find second real - pos = context.find("(real ") + # spent + key = "Spent " + pos = -1 + pos1 = 0 + while pos1 != -1: # find last "Spent " + pos1 = context.find(key, pos1) + if pos1 != -1: + pos = pos1 # update last found + pos1 += len(key) if pos == -1: - print(f"error, run command={command} output not found first \"(real\" keyword. error={context}") + print(f"error, run command={command} output not found \"{key}\" keyword. context={context}") exit(1) - pos = context.find("(real ", pos + 5) + pos += len(key) + spent = cutEnd(context, pos, ".") + + # spent-real + spentReal, pos = findValue(context, pos, "(real ", ".", command) + + # writeSpeed + key = "into " + pos = context.find(key, pos) if pos == -1: - print(f"error, run command={command} output not found second \"(real\" keyword. error={context}") - exit(1) - - pos += 5 - length = len(context) - while pos < length and context[pos] == ' ': - pos += 1 - end = context.find(".", pos) - if end == -1: - print(f"error, run command={command} output not found second \".\" keyword. error={context}") + print(f"error, run command={command} output not found \"{key}\" keyword. 
context={context}") exit(1) + pos += len(key) + writeSpeed, pos = findValue(context, pos, " ", ".", command) + # writeReal + writeReal, pos = findValue(context, pos, "(real ", ".", command) - speed = context[pos: end] - #print(f"write pos ={pos} end={end} speed={speed}\n output={context} \n") - return speed + # delay + min, pos = findValue(context, pos, "min: ", ",", command) + avg, pos = findValue(context, pos, "avg: ", ",", command) + p90, pos = findValue(context, pos, "p90: ", ",", command) + p99, pos = findValue(context, pos, "p99: ", ",", command) + max, pos = findValue(context, pos, "max: ", "ms", command) + + return (spent, spentReal, writeSpeed, writeReal, min, avg, p90, p99, max) def testQuery(): command = f"taosBenchmark -f json/query.json" @@ -308,13 +354,13 @@ def doTest(stmt, interlace, resultFile): # run taosBenchmark t1 = time.time() - writeSpeed = testWrite(jsonFile) + spent, spentReal, writeSpeed, writeReal, min, avg, p90, p99, max = testWrite(jsonFile) t2 = time.time() # total write speed querySpeed = testQuery() # total compress rate - totalCompressRate(stmt, interlace, resultFile, writeSpeed, querySpeed) + totalCompressRate(stmt, interlace, resultFile, spent, spentReal, writeSpeed, writeReal, min, avg, p90, p99, max, querySpeed) def main(): @@ -333,7 +379,17 @@ def main(): # json info writeTemplateInfo(resultFile) # head - context = "\n%10s %10s %15s %10s %10s %30s %15s\n"%("No", "stmtMode", "interlaceRows", "dataSize", "rate", "writeSpeed", "query-QPS") + ''' + context = "%3s %8s %10s %10s %10s %15s %15s %10s %10s %10s %10s %10s %8s %8s %8s\n"%( + "No", "stmtMode", "interlace", "spent", "spent-real", "writeSpeed", "write-real", + "min", "avg", "p90", "p99", "max", + "query-QPS", "dataSize", "rate") + ''' + context = "%2s %8s %10s %10s %16s %16s %12s %12s %12s %12s %12s %12s %10s %10s %10s\n"%( + "No", "stmtMode", "interlace", "spent", "spent-real", "writeSpeed", "write-real", + "min", "avg", "p90", "p99", "max", + "query-QPS", "dataSize", "rate") + appendFileContext(resultFile, context) From 27c087e9aec5bd1a499a4c2e545c283d7536b727 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 16 Oct 2024 18:36:49 +0800 Subject: [PATCH 50/72] refactor: do some internal refactor. 
--- source/dnode/mnode/impl/src/mndStream.c | 3 ++- source/dnode/mnode/impl/src/mndStreamTransAct.c | 9 +++------ source/dnode/vnode/src/tsdb/tsdbRead2.c | 2 +- 3 files changed, 6 insertions(+), 8 deletions(-) diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index 69d3de25fc..a4327b777f 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -1783,7 +1783,7 @@ static int32_t mndProcessPauseStreamReq(SRpcMsg *pReq) { static int32_t mndProcessResumeStreamReq(SRpcMsg *pReq) { SMnode *pMnode = pReq->info.node; SStreamObj *pStream = NULL; - int32_t code = 0; + int32_t code = 0; if ((code = grantCheckExpire(TSDB_GRANT_STREAMS)) < 0) { return code; @@ -1811,6 +1811,7 @@ static int32_t mndProcessResumeStreamReq(SRpcMsg *pReq) { return 0; } + mInfo("stream:%s,%" PRId64 " start to resume stream from pause", resumeReq.name, pStream->uid); if (mndCheckDbPrivilegeByName(pMnode, pReq->info.conn.user, MND_OPER_WRITE_DB, pStream->targetDb) != 0) { sdbRelease(pMnode->pSdb, pStream); return -1; diff --git a/source/dnode/mnode/impl/src/mndStreamTransAct.c b/source/dnode/mnode/impl/src/mndStreamTransAct.c index 4e0bf97587..139ea4f147 100644 --- a/source/dnode/mnode/impl/src/mndStreamTransAct.c +++ b/source/dnode/mnode/impl/src/mndStreamTransAct.c @@ -61,7 +61,6 @@ static int32_t doSetPauseAction(SMnode *pMnode, STrans *pTrans, SStreamTask *pTa static int32_t doSetDropAction(SMnode *pMnode, STrans *pTrans, SStreamTask *pTask) { SVDropStreamTaskReq *pReq = taosMemoryCalloc(1, sizeof(SVDropStreamTaskReq)); if (pReq == NULL) { - // terrno = TSDB_CODE_OUT_OF_MEMORY; return terrno; } @@ -93,7 +92,6 @@ static int32_t doSetResumeAction(STrans *pTrans, SMnode *pMnode, SStreamTask *pT if (pReq == NULL) { mError("failed to malloc in resume stream, size:%" PRIzu ", code:%s", sizeof(SVResumeStreamTaskReq), tstrerror(TSDB_CODE_OUT_OF_MEMORY)); - // terrno = TSDB_CODE_OUT_OF_MEMORY; return terrno; } @@ -106,19 +104,18 @@ static int32_t doSetResumeAction(STrans *pTrans, SMnode *pMnode, SStreamTask *pT bool hasEpset = false; int32_t code = extractNodeEpset(pMnode, &epset, &hasEpset, pTask->id.taskId, pTask->info.nodeId); if (code != TSDB_CODE_SUCCESS || (!hasEpset)) { - terrno = code; taosMemoryFree(pReq); - return terrno; + return code; } code = setTransAction(pTrans, pReq, sizeof(SVResumeStreamTaskReq), TDMT_STREAM_TASK_RESUME, &epset, 0, TSDB_CODE_VND_INVALID_VGROUP_ID); if (code != 0) { taosMemoryFree(pReq); - return terrno; + return code; } mDebug("set the resume action for trans:%d", pTrans->id); - return 0; + return code; } static int32_t doSetDropActionFromId(SMnode *pMnode, STrans *pTrans, SOrphanTask* pTask) { diff --git a/source/dnode/vnode/src/tsdb/tsdbRead2.c b/source/dnode/vnode/src/tsdb/tsdbRead2.c index 44a39f4328..c4971e27cf 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead2.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead2.c @@ -5910,7 +5910,7 @@ int32_t tsdbGetTableSchema(SMeta* pMeta, int64_t uid, STSchema** pSchema, int64_ } else if (mr.me.type == TSDB_NORMAL_TABLE) { // do nothing } else { code = TSDB_CODE_INVALID_PARA; - tsdbError("invalid mr.me.type:%d %s, code:%s", mr.me.type, tstrerror(code)); + tsdbError("invalid mr.me.type:%d, code:%s", mr.me.type, tstrerror(code)); metaReaderClear(&mr); return code; } From e893547c1471325b9d93a273d23f36d55c346a3f Mon Sep 17 00:00:00 2001 From: dmchen Date: Wed, 16 Oct 2024 10:58:26 +0000 Subject: [PATCH 51/72] fix/TD-32583-remove-useless-timer-execution --- 
source/libs/sync/inc/syncEnv.h | 1 - source/libs/sync/src/syncMain.c | 15 ++++++++------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/source/libs/sync/inc/syncEnv.h b/source/libs/sync/inc/syncEnv.h index 0376920e8a..caf0e88457 100644 --- a/source/libs/sync/inc/syncEnv.h +++ b/source/libs/sync/inc/syncEnv.h @@ -24,7 +24,6 @@ extern "C" { #define TIMER_MAX_MS 0x7FFFFFFF #define PING_TIMER_MS 5000 -#define HEARTBEAT_TICK_NUM 20 typedef struct SSyncEnv { uint8_t isStart; diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c index 451e82c7d4..3d37cdb560 100644 --- a/source/libs/sync/src/syncMain.c +++ b/source/libs/sync/src/syncMain.c @@ -977,9 +977,10 @@ static int32_t syncHbTimerStart(SSyncNode* pSyncNode, SSyncTimer* pSyncTimer) { pData->logicClock = pSyncTimer->logicClock; pData->execTime = tsNow + pSyncTimer->timerMS; - sTrace("vgId:%d, start hb timer, rid:%" PRId64 " addr:%" PRId64, pSyncNode->vgId, pData->rid, pData->destId.addr); + sTrace("vgId:%d, start hb timer, rid:%" PRId64 " addr:%" PRId64 " at %d", pSyncNode->vgId, pData->rid, + pData->destId.addr, pSyncTimer->timerMS); - TAOS_CHECK_RETURN(taosTmrReset(pSyncTimer->timerCb, pSyncTimer->timerMS / HEARTBEAT_TICK_NUM, (void*)(pData->rid), + TAOS_CHECK_RETURN(taosTmrReset(pSyncTimer->timerCb, pSyncTimer->timerMS, (void*)(pData->rid), syncEnv()->pTimerManager, &pSyncTimer->pTimer)); } else { code = TSDB_CODE_SYN_INTERNAL_ERROR; @@ -2711,7 +2712,8 @@ static void syncNodeEqPeerHeartbeatTimer(void* param, void* tmrId) { return; } - sTrace("vgId:%d, eq peer hb timer, rid:%" PRId64 " addr:%" PRId64, pSyncNode->vgId, hbDataRid, pData->destId.addr); + sTrace("vgId:%d, peer hb timer execution, rid:%" PRId64 " addr:%" PRId64, pSyncNode->vgId, hbDataRid, + pData->destId.addr); if (pSyncNode->totalReplicaNum > 1) { int64_t timerLogicClock = atomic_load_64(&pSyncTimer->logicClock); @@ -2753,13 +2755,12 @@ static void syncNodeEqPeerHeartbeatTimer(void* param, void* tmrId) { if (ret != 0) { sError("vgId:%d, failed to send heartbeat since %s", pSyncNode->vgId, tstrerror(ret)); } - } else { } if (syncIsInit()) { - // sTrace("vgId:%d, reset peer hb timer", pSyncNode->vgId); - if ((code = taosTmrReset(syncNodeEqPeerHeartbeatTimer, pSyncTimer->timerMS / HEARTBEAT_TICK_NUM, - (void*)hbDataRid, syncEnv()->pTimerManager, &pSyncTimer->pTimer)) != 0) { + sTrace("vgId:%d, reset peer hb timer at %d", pSyncNode->vgId, pSyncTimer->timerMS); + if ((code = taosTmrReset(syncNodeEqPeerHeartbeatTimer, pSyncTimer->timerMS, (void*)hbDataRid, + syncEnv()->pTimerManager, &pSyncTimer->pTimer)) != 0) { sError("vgId:%d, reset peer hb timer error, %s", pSyncNode->vgId, tstrerror(code)); syncNodeRelease(pSyncNode); syncHbTimerDataRelease(pData); From 71a762db7526de763b615044ff7ffb893bacb4a3 Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Wed, 16 Oct 2024 19:32:41 +0800 Subject: [PATCH 52/72] add more error handle --- source/dnode/vnode/src/meta/metaOpen.c | 77 ++++++++++++++++---------- source/libs/tdb/src/db/tdbBtree.c | 3 + 2 files changed, 51 insertions(+), 29 deletions(-) diff --git a/source/dnode/vnode/src/meta/metaOpen.c b/source/dnode/vnode/src/meta/metaOpen.c index ef36521879..8f2c0b5a5e 100644 --- a/source/dnode/vnode/src/meta/metaOpen.c +++ b/source/dnode/vnode/src/meta/metaOpen.c @@ -364,13 +364,7 @@ static int32_t metaGenerateNewMeta(SMeta **ppMeta) { } int32_t metaOpen(SVnode *pVnode, SMeta **ppMeta, int8_t rollback) { - int32_t code = metaOpenImpl(pVnode, ppMeta, VNODE_META_DIR, rollback); - if (code) { - return code; - 
} - if (generateNewMeta) { - // backup the old meta char path[TSDB_FILENAME_LEN] = {0}; char oldMetaPath[TSDB_FILENAME_LEN] = {0}; char newMetaPath[TSDB_FILENAME_LEN] = {0}; @@ -381,32 +375,57 @@ int32_t metaOpen(SVnode *pVnode, SMeta **ppMeta, int8_t rollback) { snprintf(newMetaPath, sizeof(newMetaPath) - 1, "%s%s%s", path, TD_DIRSEP, VNODE_META_TMP_DIR); snprintf(backupMetaPath, sizeof(backupMetaPath) - 1, "%s%s%s", path, TD_DIRSEP, VNODE_META_BACKUP_DIR); - if (taosCheckExistFile(backupMetaPath)) { - metaError("vgId:%d backup meta already exists, please check", TD_VID(pVnode)); + bool oldMetaExist = taosCheckExistFile(oldMetaPath); + bool newMetaExist = taosCheckExistFile(newMetaPath); + bool backupMetaExist = taosCheckExistFile(backupMetaPath); + + if ((!backupMetaExist && !oldMetaExist && newMetaExist) // case 2 + || (backupMetaExist && !oldMetaExist && !newMetaExist) // case 4 + || (backupMetaExist && oldMetaExist && newMetaExist) // case 8 + ) { + metaError("vgId:%d invalid meta state, please check", TD_VID(pVnode)); return TSDB_CODE_FAILED; + } else if ((backupMetaExist && oldMetaExist && !newMetaExist) // case 7 + || (!backupMetaExist && !oldMetaExist && !newMetaExist) // case 1 + ) { + return metaOpenImpl(pVnode, ppMeta, VNODE_META_DIR, rollback); + } else if (backupMetaExist && !oldMetaExist && newMetaExist) { + if (taosRenameFile(newMetaPath, oldMetaPath) != 0) { + metaError("vgId:%d failed to rename new meta to old meta, reason:%s", TD_VID(pVnode), tstrerror(terrno)); + return terrno; + } + return metaOpenImpl(pVnode, ppMeta, VNODE_META_DIR, rollback); + } else { + int32_t code = metaOpenImpl(pVnode, ppMeta, VNODE_META_DIR, rollback); + if (code) { + return code; + } + + code = metaGenerateNewMeta(ppMeta); + if (code) { + metaError("vgId:%d failed to generate new meta, reason:%s", TD_VID(pVnode), tstrerror(code)); + } + + metaClose(ppMeta); + if (taosRenameFile(oldMetaPath, backupMetaPath) != 0) { + metaError("vgId:%d failed to rename old meta to backup, reason:%s", TD_VID(pVnode), tstrerror(terrno)); + return terrno; + } + + // rename the new meta to old meta + if (taosRenameFile(newMetaPath, oldMetaPath) != 0) { + metaError("vgId:%d failed to rename new meta to old meta, reason:%s", TD_VID(pVnode), tstrerror(terrno)); + return terrno; + } + code = metaOpenImpl(pVnode, ppMeta, VNODE_META_DIR, false); + if (code) { + metaError("vgId:%d failed to open new meta, reason:%s", TD_VID(pVnode), tstrerror(code)); + return code; + } } - code = metaGenerateNewMeta(ppMeta); - if (code) { - metaError("vgId:%d failed to generate new meta, reason:%s", TD_VID(pVnode), tstrerror(code)); - } - - metaClose(ppMeta); - if (taosRenameFile(oldMetaPath, backupMetaPath) != 0) { - metaError("vgId:%d failed to rename old meta to backup, reason:%s", TD_VID(pVnode), tstrerror(terrno)); - return terrno; - } - - // rename the new meta to old meta - if (taosRenameFile(newMetaPath, oldMetaPath) != 0) { - metaError("vgId:%d failed to rename new meta to old meta, reason:%s", TD_VID(pVnode), tstrerror(terrno)); - return terrno; - } - code = metaOpenImpl(pVnode, ppMeta, VNODE_META_DIR, false); - if (code) { - metaError("vgId:%d failed to open new meta, reason:%s", TD_VID(pVnode), tstrerror(code)); - return code; - } + } else { + return metaOpenImpl(pVnode, ppMeta, VNODE_META_DIR, rollback); } return TSDB_CODE_SUCCESS; diff --git a/source/libs/tdb/src/db/tdbBtree.c b/source/libs/tdb/src/db/tdbBtree.c index c688a6cc6a..2333a4a6a2 100644 --- a/source/libs/tdb/src/db/tdbBtree.c +++ 
b/source/libs/tdb/src/db/tdbBtree.c @@ -1446,6 +1446,9 @@ static int tdbBtreeDecodePayload(SPage *pPage, const SCell *pCell, int nHeader, return ret; } ofpCell = tdbPageGetCell(ofp, 0); + if (ofpCell == NULL) { + return TSDB_CODE_INVALID_DATA_FMT; + } if (nLeft <= ofp->maxLocal - sizeof(SPgno)) { bytes = nLeft; From d63040f2f77e48cd67593e29059009f36b731c34 Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Wed, 16 Oct 2024 19:41:29 +0800 Subject: [PATCH 53/72] TS-5459: add test case --- tests/system-test/2-query/slow_query_basic.py | 66 +++++++++++++++++++ tests/system-test/win-test-file | 1 + 2 files changed, 67 insertions(+) create mode 100644 tests/system-test/2-query/slow_query_basic.py diff --git a/tests/system-test/2-query/slow_query_basic.py b/tests/system-test/2-query/slow_query_basic.py new file mode 100644 index 0000000000..10595028a7 --- /dev/null +++ b/tests/system-test/2-query/slow_query_basic.py @@ -0,0 +1,66 @@ +import random +import string +from util.log import * +from util.cases import * +from util.sql import * +from util.common import * +from util.sqlset import * +import numpy as np + + +class TDTestCase: + updatecfgDict = {'slowLogThresholdTest': ''} + updatecfgDict["slowLogThresholdTest"] = 0 + + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + def getPath(self, tool="taosBenchmark"): + if (platform.system().lower() == 'windows'): + tool = tool + ".exe" + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + paths = [] + for root, dirs, files in os.walk(projPath): + if ((tool) in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + paths.append(os.path.join(root, tool)) + break + if (len(paths) == 0): + tdLog.exit("taosBenchmark not found!") + return + else: + tdLog.info("taosBenchmark found in %s" % paths[0]) + return paths[0] + + def taosBenchmark(self, param): + binPath = self.getPath() + cmd = f"{binPath} {param}" + tdLog.info(cmd) + os.system(cmd) + + def testSlowQuery(self): + self.taosBenchmark(" -d db -t 2 -v 2 -n 1000000 -y") + sql = "select count(*) from db.meters" + for i in range(10): + tdSql.query(sql) + tdSql.checkData(0, 0, 2 * 1000000) + + def run(self): + self.testSlowQuery() + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/system-test/win-test-file b/tests/system-test/win-test-file index e86047bca8..c3047efdd7 100644 --- a/tests/system-test/win-test-file +++ b/tests/system-test/win-test-file @@ -925,3 +925,4 @@ python3 ./test.py -f 99-TDcase/TD-20582.py python3 ./test.py -f 5-taos-tools/taosbenchmark/insertMix.py -N 3 python3 ./test.py -f 5-taos-tools/taosbenchmark/stt.py -N 3 python3 ./test.py -f eco-system/meta/database/keep_time_offset.py +python3 ./test.py -f 2-query/slow_query_basic.py From dd05353b74e2b7b84fdaf42adaacb164b53a0fd2 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 16 Oct 2024 22:07:37 +0800 Subject: [PATCH 54/72] refactor: do some internal refactor. 
--- include/libs/stream/tstream.h | 2 +- source/dnode/vnode/src/tqCommon/tqCommon.c | 2 +- source/libs/stream/src/streamCheckStatus.c | 2 +- source/libs/stream/src/streamCheckpoint.c | 3 ++- source/libs/stream/src/streamDispatch.c | 2 +- source/libs/stream/src/streamMeta.c | 5 +++-- source/libs/stream/src/streamSched.c | 4 ++-- source/libs/stream/src/streamTask.c | 3 +++ 8 files changed, 14 insertions(+), 9 deletions(-) diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index e6d750468e..58c1707e1f 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -754,7 +754,7 @@ int32_t streamMetaGetNumOfTasks(SStreamMeta* pMeta); int32_t streamMetaAcquireTaskNoLock(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, SStreamTask** pTask); int32_t streamMetaAcquireTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, SStreamTask** pTask); void streamMetaReleaseTask(SStreamMeta* pMeta, SStreamTask* pTask); -void streamMetaAcquireOneTask(SStreamTask* pTask); +int32_t streamMetaAcquireOneTask(SStreamTask* pTask); void streamMetaClear(SStreamMeta* pMeta); void streamMetaInitBackend(SStreamMeta* pMeta); int32_t streamMetaCommit(SStreamMeta* pMeta); diff --git a/source/dnode/vnode/src/tqCommon/tqCommon.c b/source/dnode/vnode/src/tqCommon/tqCommon.c index 3871011407..a00e92997c 100644 --- a/source/dnode/vnode/src/tqCommon/tqCommon.c +++ b/source/dnode/vnode/src/tqCommon/tqCommon.c @@ -692,7 +692,7 @@ int32_t tqStreamTaskProcessDropReq(SStreamMeta* pMeta, char* msg, int32_t msgLen STaskId id = {.streamId = pReq->streamId, .taskId = pReq->taskId}; SStreamTask** ppTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &id, sizeof(id)); if ((ppTask != NULL) && ((*ppTask) != NULL)) { - streamMetaAcquireOneTask(*ppTask); + int32_t unusedRetRef = streamMetaAcquireOneTask(*ppTask); SStreamTask* pTask = *ppTask; if (HAS_RELATED_FILLHISTORY_TASK(pTask)) { diff --git a/source/libs/stream/src/streamCheckStatus.c b/source/libs/stream/src/streamCheckStatus.c index 75bcc326b3..c1c54b3c0b 100644 --- a/source/libs/stream/src/streamCheckStatus.c +++ b/source/libs/stream/src/streamCheckStatus.c @@ -299,7 +299,7 @@ void streamTaskStartMonitorCheckRsp(SStreamTask* pTask) { return; } - /*SStreamTask* p = */ streamMetaAcquireOneTask(pTask); // add task ref here + int32_t unusedRetRef = streamMetaAcquireOneTask(pTask); // add task ref here streamTaskInitTaskCheckInfo(pInfo, &pTask->outputInfo, taosGetTimestampMs()); int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1); diff --git a/source/libs/stream/src/streamCheckpoint.c b/source/libs/stream/src/streamCheckpoint.c index e44bca123b..be914d9746 100644 --- a/source/libs/stream/src/streamCheckpoint.c +++ b/source/libs/stream/src/streamCheckpoint.c @@ -347,7 +347,8 @@ int32_t streamProcessCheckpointTriggerBlock(SStreamTask* pTask, SStreamDataBlock if (old == 0) { int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1); stDebug("s-task:%s start checkpoint-trigger monitor in 10s, ref:%d ", pTask->id.idStr, ref); - streamMetaAcquireOneTask(pTask); + + int32_t unusedRetRef = streamMetaAcquireOneTask(pTask); streamTmrStart(checkpointTriggerMonitorFn, 200, pTask, streamTimer, &pTmrInfo->tmrHandle, vgId, "trigger-recv-monitor"); pTmrInfo->launchChkptId = pActiveInfo->activeId; diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index 133663ac28..62d60ff664 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -1162,7 +1162,7 
@@ int32_t streamTaskSendCheckpointReadyMsg(SStreamTask* pTask) { if (old == 0) { int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1); stDebug("s-task:%s start checkpoint-ready monitor in 10s, ref:%d ", pTask->id.idStr, ref); - streamMetaAcquireOneTask(pTask); + int32_t unusedRetRef = streamMetaAcquireOneTask(pTask); streamTmrStart(chkptReadyMsgSendMonitorFn, 200, pTask, streamTimer, &pTmrInfo->tmrHandle, vgId, "chkpt-ready-monitor"); diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index 29152c6205..7e9b60b61a 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -753,9 +753,10 @@ int32_t streamMetaAcquireTask(SStreamMeta* pMeta, int64_t streamId, int32_t task return code; } -void streamMetaAcquireOneTask(SStreamTask* pTask) { +int32_t streamMetaAcquireOneTask(SStreamTask* pTask) { int32_t ref = atomic_add_fetch_32(&pTask->refCnt, 1); stTrace("s-task:%s acquire task, ref:%d", pTask->id.idStr, ref); + return ref; } void streamMetaReleaseTask(SStreamMeta* UNUSED_PARAM(pMeta), SStreamTask* pTask) { @@ -866,7 +867,7 @@ int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t t ppTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &id, sizeof(id)); if (ppTask) { pTask = *ppTask; - // it is an fill-history task, remove the related stream task's id that points to it + // it is a fill-history task, remove the related stream task's id that points to it if (pTask->info.fillHistory == 0) { int32_t ret = atomic_sub_fetch_32(&pMeta->numOfStreamTasks, 1); } diff --git a/source/libs/stream/src/streamSched.c b/source/libs/stream/src/streamSched.c index 095a5af6d4..cdaa603e38 100644 --- a/source/libs/stream/src/streamSched.c +++ b/source/libs/stream/src/streamSched.c @@ -22,7 +22,7 @@ static void streamTaskSchedHelper(void* param, void* tmrId); void streamSetupScheduleTrigger(SStreamTask* pTask) { int64_t delaySchema = pTask->info.delaySchedParam; if (delaySchema != 0 && pTask->info.fillHistory == 0) { - int32_t ref = atomic_add_fetch_32(&pTask->refCnt, 1); + int32_t ref = streamMetaAcquireOneTask(pTask); stDebug("s-task:%s setup scheduler trigger, ref:%d delay:%" PRId64 " ms", pTask->id.idStr, ref, pTask->info.delaySchedParam); @@ -80,7 +80,7 @@ void streamTaskResumeInFuture(SStreamTask* pTask) { pTask->status.schedIdleTime, ref); // add one ref count for task - streamMetaAcquireOneTask(pTask); + int32_t unusedRetRef = streamMetaAcquireOneTask(pTask); streamTmrStart(streamTaskResumeHelper, pTask->status.schedIdleTime, pTask, streamTimer, &pTask->schedInfo.pIdleTimer, pTask->pMeta->vgId, "resume-task-tmr"); } diff --git a/source/libs/stream/src/streamTask.c b/source/libs/stream/src/streamTask.c index 71a2ed3e4a..727701e03e 100644 --- a/source/libs/stream/src/streamTask.c +++ b/source/libs/stream/src/streamTask.c @@ -258,10 +258,12 @@ void tFreeStreamTask(SStreamTask* pTask) { if (pTask->inputq.queue) { streamQueueClose(pTask->inputq.queue, pTask->id.taskId); + pTask->inputq.queue = NULL; } if (pTask->outputq.queue) { streamQueueClose(pTask->outputq.queue, pTask->id.taskId); + pTask->outputq.queue = NULL; } if (pTask->exec.qmsg) { @@ -275,6 +277,7 @@ void tFreeStreamTask(SStreamTask* pTask) { if (pTask->exec.pWalReader != NULL) { walCloseReader(pTask->exec.pWalReader); + pTask->exec.pWalReader = NULL; } streamClearChkptReadyMsg(pTask->chkInfo.pActiveInfo); From 9c7b925613367bc795a30169e48ff6aa2c84014f Mon Sep 17 00:00:00 2001 From: Jinqing Kuang Date: Wed, 16 Oct 2024 18:20:19 +0800 Subject: 
[PATCH 55/72] fix(query)[TD-30667]. Check hardware support for AVX instructions Modify the CMake script to check for hardware support of AVX instructions directly, instead of relying on compiler flags for the verification. --- cmake/cmake.define | 41 +++++++++++++++++++++++++++++++++++++++-- 1 file changed, 39 insertions(+), 2 deletions(-) diff --git a/cmake/cmake.define b/cmake/cmake.define index 9fae397363..7bcd400fa6 100644 --- a/cmake/cmake.define +++ b/cmake/cmake.define @@ -169,11 +169,48 @@ ELSE () SET(COMPILER_SUPPORT_AVX512VL false) ELSE() CHECK_C_COMPILER_FLAG("-mfma" COMPILER_SUPPORT_FMA) - CHECK_C_COMPILER_FLAG("-mavx" COMPILER_SUPPORT_AVX) - CHECK_C_COMPILER_FLAG("-mavx2" COMPILER_SUPPORT_AVX2) CHECK_C_COMPILER_FLAG("-mavx512f" COMPILER_SUPPORT_AVX512F) CHECK_C_COMPILER_FLAG("-mavx512vbmi" COMPILER_SUPPORT_AVX512BMI) CHECK_C_COMPILER_FLAG("-mavx512vl" COMPILER_SUPPORT_AVX512VL) + + INCLUDE(CheckCSourceRuns) + SET(CMAKE_REQUIRED_FLAGS "-mavx") + check_c_source_runs(" + #include + int main() { + __m256d a, b, c; + double buf[4] = {0}; + a = _mm256_loadu_pd(buf); + b = _mm256_loadu_pd(buf); + c = _mm256_add_pd(a, b); + _mm256_storeu_pd(buf, c); + for (int i = 0; i < sizeof(buf) / sizeof(buf[0]); ++i) { + if (buf[i] != 0) { + return 1; + } + } + return 0; + } + " COMPILER_SUPPORT_AVX) + + SET(CMAKE_REQUIRED_FLAGS "-mavx2") + check_c_source_runs(" + #include + int main() { + __m256i a, b, c; + int buf[8] = {0}; + a = _mm256_loadu_si256((__m256i *)buf); + b = _mm256_loadu_si256((__m256i *)buf); + c = _mm256_and_si256(a, b); + _mm256_storeu_si256((__m256i *)buf, c); + for (int i = 0; i < sizeof(buf) / sizeof(buf[0]); ++i) { + if (buf[i] != 0) { + return 1; + } + } + return 0; + } + " COMPILER_SUPPORT_AVX2) ENDIF() IF (COMPILER_SUPPORT_SSE42) From 9bc38af7edf5dab39006537f034dd4338c91ab0d Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Thu, 17 Oct 2024 09:42:36 +0800 Subject: [PATCH 56/72] fix:[TD-32585]remove clean up app info in taos_cleanup --- source/client/src/clientMain.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c index a35c7c7a4c..1a66266000 100644 --- a/source/client/src/clientMain.c +++ b/source/client/src/clientMain.c @@ -84,7 +84,7 @@ void taos_cleanup(void) { taosCloseRef(id); nodesDestroyAllocatorSet(); - cleanupAppInfo(); +// cleanupAppInfo(); rpcCleanup(); tscDebug("rpc cleanup"); From 9aaab9c3b0b8b70dd393df4590482f3a22f11f07 Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao@163.com> Date: Thu, 17 Oct 2024 10:39:59 +0800 Subject: [PATCH 57/72] feat(stream):add max delay check --- source/libs/parser/src/parTranslater.c | 13 +++++++++++++ source/libs/stream/src/streamState.c | 1 + tests/script/tsim/stream/basic2.sim | 13 +++++++++++++ 3 files changed, 27 insertions(+) diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 5c6f619397..4c9c559457 100755 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -10609,6 +10609,19 @@ static int32_t checkStreamQuery(STranslateContext* pCxt, SCreateStreamStmt* pStm "Non window query only support scalar function, aggregate function is not allowed"); } + if (NULL != pStmt->pOptions->pDelay) { + SValueNode* pVal = (SValueNode*)pStmt->pOptions->pDelay; + int64_t minDelay = 0; + char* str = "5s"; + if (DEAL_RES_ERROR != translateValue(pCxt, pVal) && TSDB_CODE_SUCCESS == + parseNatualDuration(str, strlen(str), &minDelay, &pVal->unit, pVal->node.resType.precision, false)) 
{ + if (pVal->datum.i < minDelay) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, + "stream max delay must be bigger than 5 session"); + } + } + } + return TSDB_CODE_SUCCESS; } diff --git a/source/libs/stream/src/streamState.c b/source/libs/stream/src/streamState.c index 1994c882aa..45a36bd451 100644 --- a/source/libs/stream/src/streamState.c +++ b/source/libs/stream/src/streamState.c @@ -477,6 +477,7 @@ int32_t streamStateGetParName(SStreamState* pState, int64_t groupId, void** pVal if (!pStr) { if (onlyCache && tSimpleHashGetSize(pState->parNameMap) < MAX_TABLE_NAME_NUM) { (*pWinCode) = TSDB_CODE_FAILED; + goto _end; } (*pWinCode) = streamStateGetParName_rocksdb(pState, groupId, pVal); if ((*pWinCode) == TSDB_CODE_SUCCESS && tSimpleHashGetSize(pState->parNameMap) < MAX_TABLE_NAME_NUM) { diff --git a/tests/script/tsim/stream/basic2.sim b/tests/script/tsim/stream/basic2.sim index ad655f2d16..2bef1c5c4c 100644 --- a/tests/script/tsim/stream/basic2.sim +++ b/tests/script/tsim/stream/basic2.sim @@ -133,4 +133,17 @@ if $data13 != -111 then goto loop1 endi +print step 2==================== + +sql create database test vgroups 1 ; +sql use test; +sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); + +sql_error create stream streams1 trigger max_delay 4000a ignore update 0 ignore expired 0 into streamtST1 as select _wstart, count(*) from st interval(5s); +sql_error create stream streams2 trigger max_delay 4s ignore update 0 ignore expired 0 into streamtST2 as select _wstart, count(*) from st interval(5s); +sql create stream streams3 trigger max_delay 5000a ignore update 0 ignore expired 0 into streamtST3 as select _wstart, count(*) from st interval(5s); +sql create stream streams4 trigger max_delay 5s ignore update 0 ignore expired 0 into streamtST4 as select _wstart, count(*) from st interval(5s); + system sh/exec.sh -n dnode1 -s stop -x SIGINT From 8930252f97cab2342d1342c0f18af5a914c020fb Mon Sep 17 00:00:00 2001 From: xsren <285808407@qq.com> Date: Thu, 17 Oct 2024 11:22:24 +0800 Subject: [PATCH 58/72] fix: tag filed snprintf lenth error --- source/libs/command/src/command.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/command/src/command.c b/source/libs/command/src/command.c index 27a43f7523..95c73763bf 100644 --- a/source/libs/command/src/command.c +++ b/source/libs/command/src/command.c @@ -551,7 +551,7 @@ void appendTagFields(char* buf, int32_t* len, STableCfg* pCfg) { (int32_t)((pSchema->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE)); } - *len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, sizeof(type) - (VARSTR_HEADER_SIZE + *len), "%s`%s` %s", + *len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), "%s`%s` %s", ((i > 0) ? ", " : ""), pSchema->name, type); } } From c8e8cb06976734e7894a9dddd5ed9c811d5bd7e2 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 17 Oct 2024 11:48:00 +0800 Subject: [PATCH 59/72] refactor: do some internal refactor. 
--- include/libs/stream/tstream.h | 2 +- source/dnode/vnode/src/tq/tq.c | 6 +- source/dnode/vnode/src/tq/tqSink.c | 528 +++++++++++++++------------- source/libs/stream/src/streamTask.c | 2 +- 4 files changed, 296 insertions(+), 242 deletions(-) diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index 58c1707e1f..a189cee0bb 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -236,7 +236,7 @@ typedef struct { void* vnode; // not available to encoder and decoder FTbSink* tbSinkFunc; STSchema* pTSchema; - SSHashObj* pTblInfo; + SSHashObj* pTbInfo; } STaskSinkTb; typedef struct { diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index a37a9787c9..b75baea08d 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -746,13 +746,13 @@ int32_t tqBuildStreamTask(void* pTqObj, SStreamTask* pTask, int64_t nextProcessV return terrno; } - pOutputInfo->tbSink.pTblInfo = tSimpleHashInit(10240, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT)); - if (pOutputInfo->tbSink.pTblInfo == NULL) { + pOutputInfo->tbSink.pTbInfo = tSimpleHashInit(10240, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT)); + if (pOutputInfo->tbSink.pTbInfo == NULL) { tqError("vgId:%d failed init sink tableInfo, code:%s", vgId, tstrerror(terrno)); return terrno; } - tSimpleHashSetFreeFp(pOutputInfo->tbSink.pTblInfo, freePtr); + tSimpleHashSetFreeFp(pOutputInfo->tbSink.pTbInfo, freePtr); } if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) { diff --git a/source/dnode/vnode/src/tq/tqSink.c b/source/dnode/vnode/src/tq/tqSink.c index 6daa9213aa..be41f7e99e 100644 --- a/source/dnode/vnode/src/tq/tqSink.c +++ b/source/dnode/vnode/src/tq/tqSink.c @@ -18,6 +18,8 @@ #include "tmsg.h" #include "tq.h" +#define IS_NEW_SUBTB_RULE(_t) (((_t)->ver >= SSTREAM_TASK_SUBTABLE_CHANGED_VER) && ((_t)->subtableWithoutMd5 != 1)) + typedef struct STableSinkInfo { uint64_t uid; tstr name; @@ -35,16 +37,22 @@ static int32_t doConvertRows(SSubmitTbData* pTableData, const STSchema* pTSchema int64_t earlyTs, const char* id); static int32_t doWaitForDstTableCreated(SVnode* pVnode, SStreamTask* pTask, STableSinkInfo* pTableSinkInfo, const char* dstTableName, int64_t* uid); -static int32_t doPutIntoCache(SSHashObj* pSinkTableMap, STableSinkInfo* pTableSinkInfo, uint64_t groupId, - const char* id); -static int32_t doRemoveFromCache(SSHashObj* pSinkTableMap, uint64_t groupId, const char* id); + static bool isValidDstChildTable(SMetaReader* pReader, int32_t vgId, const char* ctbName, int64_t suid); static int32_t initCreateTableMsg(SVCreateTbReq* pCreateTableReq, uint64_t suid, const char* stbFullName, int32_t numOfTags); static int32_t createDefaultTagColName(SArray** pColNameList); -static int32_t setCreateTableMsgTableName(SVCreateTbReq* pCreateTableReq, SSDataBlock* pDataBlock, const char* stbFullName, - int64_t gid, bool newSubTableRule); -static int32_t doCreateSinkInfo(const char* pDstTableName, STableSinkInfo** pInfo); +static int32_t setCreateTableMsgTableName(SVCreateTbReq* pCreateTableReq, SSDataBlock* pDataBlock, + const char* stbFullName, int64_t gid, bool newSubTableRule); +static int32_t doCreateSinkTableInfo(const char* pDstTableName, STableSinkInfo** pInfo); +static int32_t doPutSinkTableInfoIntoCache(SSHashObj* pSinkTableMap, STableSinkInfo* pTableSinkInfo, uint64_t groupId, + const char* id); +static bool doGetSinkTableInfoFromCache(SSHashObj* pTableInfoMap, uint64_t groupId, STableSinkInfo** pInfo); +static int32_t 
doRemoveSinkTableInfoInCache(SSHashObj* pSinkTableMap, uint64_t groupId, const char* id); +static int32_t checkTagSchema(SStreamTask* pTask, SVnode* pVnode); +static void reubuildAndSendMultiResBlock(SStreamTask* pTask, const SArray* pBlocks, SVnode* pVnode, int64_t earlyTs); +static int32_t handleResultBlockMsg(SStreamTask* pTask, SSDataBlock* pDataBlock, int32_t index, SVnode* pVnode, + int64_t earlyTs); int32_t tqBuildDeleteReq(STQ* pTq, const char* stbFullName, const SSDataBlock* pDataBlock, SBatchDeleteReq* deleteReq, const char* pIdStr, bool newSubTableRule) { @@ -81,7 +89,8 @@ int32_t tqBuildDeleteReq(STQ* pTq, const char* stbFullName, const SSDataBlock* p memcpy(name, varDataVal(varTbName), varDataLen(varTbName)); name[varDataLen(varTbName)] = '\0'; - if (newSubTableRule && !isAutoTableName(name) && !alreadyAddGroupId(name, groupId) && groupId != 0 && stbFullName) { + if (newSubTableRule && !isAutoTableName(name) && !alreadyAddGroupId(name, groupId) && groupId != 0 && + stbFullName) { int32_t code = buildCtbNameAddGroupId(stbFullName, name, groupId, cap); if (code != TSDB_CODE_SUCCESS) { return code; @@ -161,16 +170,6 @@ end: return ret; } -static bool tqGetTableInfo(SSHashObj* pTableInfoMap, uint64_t groupId, STableSinkInfo** pInfo) { - void* pVal = tSimpleHashGet(pTableInfoMap, &groupId, sizeof(uint64_t)); - if (pVal) { - *pInfo = *(STableSinkInfo**)pVal; - return true; - } - - return false; -} - static int32_t tqPutReqToQueue(SVnode* pVnode, SVCreateTbBatchReq* pReqs) { void* buf = NULL; int32_t tlen = 0; @@ -201,7 +200,7 @@ int32_t initCreateTableMsg(SVCreateTbReq* pCreateTableReq, uint64_t suid, const int32_t code = tNameFromString(&name, stbFullName, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE); if (code == 0) { pCreateTableReq->ctb.stbName = taosStrdup((char*)tNameGetTableName(&name)); - if (pCreateTableReq->ctb.stbName == NULL) { // ignore this error code + if (pCreateTableReq->ctb.stbName == NULL) { // ignore this error code tqError("failed to duplicate the stb name:%s, failed to init create-table msg and create req table", stbFullName); code = terrno; } @@ -231,7 +230,7 @@ int32_t createDefaultTagColName(SArray** pColNameList) { } int32_t setCreateTableMsgTableName(SVCreateTbReq* pCreateTableReq, SSDataBlock* pDataBlock, const char* stbFullName, - int64_t gid, bool newSubTableRule) { + int64_t gid, bool newSubTableRule) { if (pDataBlock->info.parTbName[0]) { if (newSubTableRule && !isAutoTableName(pDataBlock->info.parTbName) && !alreadyAddGroupId(pDataBlock->info.parTbName, gid) && gid != 0 && stbFullName) { @@ -245,18 +244,17 @@ int32_t setCreateTableMsgTableName(SVCreateTbReq* pCreateTableReq, SSDataBlock* if (code != TSDB_CODE_SUCCESS) { return code; } -// tqDebug("gen name from:%s", pDataBlock->info.parTbName); + // tqDebug("gen name from:%s", pDataBlock->info.parTbName); } else { pCreateTableReq->name = taosStrdup(pDataBlock->info.parTbName); if (pCreateTableReq->name == NULL) { return terrno; } -// tqDebug("copy name:%s", pDataBlock->info.parTbName); + // tqDebug("copy name:%s", pDataBlock->info.parTbName); } } else { int32_t code = buildCtbNameByGroupId(stbFullName, gid, &pCreateTableReq->name); return code; -// tqDebug("gen name from stbFullName:%s gid:%"PRId64, stbFullName, gid); } return 0; @@ -264,16 +262,20 @@ int32_t setCreateTableMsgTableName(SVCreateTbReq* pCreateTableReq, SSDataBlock* static int32_t doBuildAndSendCreateTableMsg(SVnode* pVnode, char* stbFullName, SSDataBlock* pDataBlock, SStreamTask* pTask, int64_t suid) { - STSchema* pTSchema = 
pTask->outputInfo.tbSink.pTSchema; - int32_t rows = pDataBlock->info.rows; - SArray* tagArray = taosArrayInit(4, sizeof(STagVal)); - const char* id = pTask->id.idStr; - int32_t vgId = pTask->pMeta->vgId; - int32_t code = 0; + STSchema* pTSchema = pTask->outputInfo.tbSink.pTSchema; + int32_t rows = pDataBlock->info.rows; + SArray* tagArray = NULL; + const char* id = pTask->id.idStr; + int32_t vgId = pTask->pMeta->vgId; + int32_t code = 0; + STableSinkInfo* pInfo = NULL; + SVCreateTbBatchReq reqs = {0}; + SArray* crTblArray = NULL; tqDebug("s-task:%s build create %d table(s) msg", id, rows); - SVCreateTbBatchReq reqs = {0}; - SArray* crTblArray = reqs.pArray = taosArrayInit(1, sizeof(SVCreateTbReq)); + + tagArray = taosArrayInit(4, sizeof(STagVal)); + crTblArray = reqs.pArray = taosArrayInit(1, sizeof(SVCreateTbReq)); if ((NULL == reqs.pArray) || (tagArray == NULL)) { tqError("s-task:%s failed to init create table msg, code:%s", id, tstrerror(terrno)); code = terrno; @@ -291,6 +293,7 @@ static int32_t doBuildAndSendCreateTableMsg(SVnode* pVnode, char* stbFullName, S tqError("s-task:%s vgId:%d failed to init create table msg", id, vgId); continue; } + taosArrayClear(tagArray); if (size == 2) { @@ -356,8 +359,7 @@ static int32_t doBuildAndSendCreateTableMsg(SVnode* pVnode, char* stbFullName, S } } - code = setCreateTableMsgTableName(pCreateTbReq, pDataBlock, stbFullName, gid, - pTask->ver >= SSTREAM_TASK_SUBTABLE_CHANGED_VER && pTask->subtableWithoutMd5 != 1); + code = setCreateTableMsgTableName(pCreateTbReq, pDataBlock, stbFullName, gid, IS_NEW_SUBTB_RULE(pTask)); if (code) { goto _end; } @@ -368,16 +370,15 @@ static int32_t doBuildAndSendCreateTableMsg(SVnode* pVnode, char* stbFullName, S goto _end; } - STableSinkInfo* pInfo = NULL; - bool alreadyCached = tqGetTableInfo(pTask->outputInfo.tbSink.pTblInfo, gid, &pInfo); + bool alreadyCached = doGetSinkTableInfoFromCache(pTask->outputInfo.tbSink.pTbInfo, gid, &pInfo); if (!alreadyCached) { - code = doCreateSinkInfo(pCreateTbReq->name, &pInfo); + code = doCreateSinkTableInfo(pCreateTbReq->name, &pInfo); if (code) { tqError("vgId:%d failed to create sink tableInfo for table:%s, s-task:%s", vgId, pCreateTbReq->name, id); continue; } - code = doPutIntoCache(pTask->outputInfo.tbSink.pTblInfo, pInfo, gid, id); + code = doPutSinkTableInfoIntoCache(pTask->outputInfo.tbSink.pTbInfo, pInfo, gid, id); if (code) { tqError("vgId:%d failed to put sink tableInfo:%s into cache, s-task:%s", vgId, pCreateTbReq->name, id); } @@ -465,45 +466,45 @@ int32_t doMergeExistedRows(SSubmitTbData* pExisted, const SSubmitTbData* pNew, c k += 1; } else { - // check for the existance of primary key - if (pNewRow->numOfPKs == 0) { + // check for the existance of primary key + if (pNewRow->numOfPKs == 0) { + void* p = taosArrayPush(pFinal, &pNewRow); + if (p == NULL) { + return terrno; + } + + k += 1; + j += 1; + tRowDestroy(pOldRow); + } else { + numOfPk = pNewRow->numOfPKs; + + SRowKey kNew, kOld; + tRowGetKey(pNewRow, &kNew); + tRowGetKey(pOldRow, &kOld); + + int32_t ret = tRowKeyCompare(&kNew, &kOld); + if (ret <= 0) { void* p = taosArrayPush(pFinal, &pNewRow); if (p == NULL) { return terrno; } - k += 1; j += 1; - tRowDestroy(pOldRow); - } else { - numOfPk = pNewRow->numOfPKs; - - SRowKey kNew, kOld; - tRowGetKey(pNewRow, &kNew); - tRowGetKey(pOldRow, &kOld); - - int32_t ret = tRowKeyCompare(&kNew, &kOld); - if (ret <= 0) { - void* p = taosArrayPush(pFinal, &pNewRow); - if (p == NULL) { - return terrno; - } - - j += 1; - - if (ret == 0) { - k += 1; - tRowDestroy(pOldRow); - 
} - } else { - void* p = taosArrayPush(pFinal, &pOldRow); - if (p == NULL) { - return terrno; - } + if (ret == 0) { k += 1; + tRowDestroy(pOldRow); } + } else { + void* p = taosArrayPush(pFinal, &pOldRow); + if (p == NULL) { + return terrno; + } + + k += 1; } + } } } @@ -527,8 +528,8 @@ int32_t doMergeExistedRows(SSubmitTbData* pExisted, const SSubmitTbData* pNew, c taosArrayDestroy(pExisted->aRowP); pExisted->aRowP = pFinal; - tqTrace("s-task:%s rows merged, final rows:%d, pk:%d uid:%" PRId64 ", existed auto-create table:%d, new-block:%d", - id, (int32_t)taosArrayGetSize(pFinal), numOfPk, pExisted->uid, (pExisted->pCreateTbReq != NULL), + tqTrace("s-task:%s rows merged, final rows:%d, pk:%d uid:%" PRId64 ", existed auto-create table:%d, new-block:%d", id, + (int32_t)taosArrayGetSize(pFinal), numOfPk, pExisted->uid, (pExisted->pCreateTbReq != NULL), (pNew->pCreateTbReq != NULL)); tdDestroySVCreateTbReq(pNew->pCreateTbReq); @@ -727,7 +728,7 @@ int32_t doConvertRows(SSubmitTbData* pTableData, const STSchema* pTSchema, SSDat dataIndex++; } else { void* colData = colDataGetData(pColData, j); - if (IS_VAR_DATA_TYPE(pCol->type)) { // address copy, no value + if (IS_VAR_DATA_TYPE(pCol->type)) { // address copy, no value SValue sv = (SValue){.type = pCol->type, .nData = varDataLen(colData), .pData = (uint8_t*)varDataVal(colData)}; SColVal cv = COL_VAL_VALUE(pCol->colId, sv); @@ -806,7 +807,7 @@ int32_t doWaitForDstTableCreated(SVnode* pVnode, SStreamTask* pTask, STableSinkI return TSDB_CODE_SUCCESS; } -int32_t doCreateSinkInfo(const char* pDstTableName, STableSinkInfo** pInfo) { +int32_t doCreateSinkTableInfo(const char* pDstTableName, STableSinkInfo** pInfo) { int32_t nameLen = strlen(pDstTableName); (*pInfo) = taosMemoryCalloc(1, sizeof(STableSinkInfo) + nameLen + 1); if (*pInfo == NULL) { @@ -830,7 +831,7 @@ int32_t setDstTableDataUid(SVnode* pVnode, SStreamTask* pTask, SSDataBlock* pDat STableSinkInfo* pTableSinkInfo = NULL; int32_t code = 0; - bool alreadyCached = tqGetTableInfo(pTask->outputInfo.tbSink.pTblInfo, groupId, &pTableSinkInfo); + bool alreadyCached = doGetSinkTableInfoFromCache(pTask->outputInfo.tbSink.pTbInfo, groupId, &pTableSinkInfo); if (alreadyCached) { if (dstTableName[0] == 0) { // data block does not set the destination table name @@ -870,7 +871,7 @@ int32_t setDstTableDataUid(SVnode* pVnode, SStreamTask* pTask, SSDataBlock* pDat } } - code = doCreateSinkInfo(dstTableName, &pTableSinkInfo); + code = doCreateSinkTableInfo(dstTableName, &pTableSinkInfo); if (code == 0) { tqDebug("s-task:%s build new sinkTableInfo to add cache, dstTable:%s", id, dstTableName); } else { @@ -906,14 +907,14 @@ int32_t setDstTableDataUid(SVnode* pVnode, SStreamTask* pTask, SSDataBlock* pDat SArray* pTagArray = taosArrayInit(pTSchema->numOfCols + 1, sizeof(STagVal)); if (pTagArray == NULL) { + tqError("s-task:%s failed to build auto create submit msg in sink, vgId:%d, due to %s", id, vgId, + tstrerror(terrno)); return terrno; } pTableData->flags = SUBMIT_REQ_AUTO_CREATE_TABLE; - code = - buildAutoCreateTableReq(stbFullName, suid, pTSchema->numOfCols + 1, pDataBlock, pTagArray, - (pTask->ver >= SSTREAM_TASK_SUBTABLE_CHANGED_VER && pTask->subtableWithoutMd5 != 1), - &pTableData->pCreateTbReq); + code = buildAutoCreateTableReq(stbFullName, suid, pTSchema->numOfCols + 1, pDataBlock, pTagArray, + IS_NEW_SUBTB_RULE(pTask), &pTableData->pCreateTbReq); taosArrayDestroy(pTagArray); if (code) { @@ -923,12 +924,12 @@ int32_t setDstTableDataUid(SVnode* pVnode, SStreamTask* pTask, SSDataBlock* pDat } 
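
/*
 * Illustrative sketch (not part of the patch): the re-indented loop above in
 * doMergeExistedRows merges two row arrays that are already sorted by row key;
 * on a key tie (or when the rows carry no primary key at all) the new row wins
 * and the old row is destroyed. Row, rowKeyCompare, rowDestroy and mergeRows
 * below are simplified stand-ins, not the TDengine API.
 */
#include <stdint.h>
#include <stdlib.h>

typedef struct { int64_t key; } Row;

static int rowKeyCompare(const Row *a, const Row *b) {
  return (a->key < b->key) ? -1 : (a->key > b->key);
}

static void rowDestroy(Row *r) { free(r); }

/* merge newRows/oldRows (both sorted by key) into out; returns merged count */
static int mergeRows(Row **newRows, int nNew, Row **oldRows, int nOld, Row **out) {
  int j = 0, k = 0, n = 0;
  while (j < nNew && k < nOld) {
    int ret = rowKeyCompare(newRows[j], oldRows[k]);
    if (ret <= 0) {
      out[n++] = newRows[j++];                  /* new row goes first ...       */
      if (ret == 0) rowDestroy(oldRows[k++]);   /* ... and replaces a duplicate */
    } else {
      out[n++] = oldRows[k++];                  /* keep the smaller old row     */
    }
  }
  while (j < nNew) out[n++] = newRows[j++];
  while (k < nOld) out[n++] = oldRows[k++];
  return n;
}
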
pTableSinkInfo->uid = 0; - code = doPutIntoCache(pTask->outputInfo.tbSink.pTblInfo, pTableSinkInfo, groupId, id); + code = doPutSinkTableInfoIntoCache(pTask->outputInfo.tbSink.pTbInfo, pTableSinkInfo, groupId, id); } else { metaReaderClear(&mr); - tqError("s-task:%s vgId:%d dst-table:%s not auto-created, and not create in tsdb, discard data", id, - vgId, dstTableName); + tqError("s-task:%s vgId:%d dst-table:%s not auto-created, and not create in tsdb, discard data", id, vgId, + dstTableName); return TSDB_CODE_TDB_TABLE_NOT_EXIST; } } else { @@ -944,7 +945,7 @@ int32_t setDstTableDataUid(SVnode* pVnode, SStreamTask* pTask, SSDataBlock* pDat pTableSinkInfo->uid = mr.me.uid; metaReaderClear(&mr); - code = doPutIntoCache(pTask->outputInfo.tbSink.pTblInfo, pTableSinkInfo, groupId, id); + code = doPutSinkTableInfoIntoCache(pTask->outputInfo.tbSink.pTbInfo, pTableSinkInfo, groupId, id); } } } @@ -952,8 +953,8 @@ int32_t setDstTableDataUid(SVnode* pVnode, SStreamTask* pTask, SSDataBlock* pDat return code; } -int32_t tqSetDstTableDataPayload(uint64_t suid, const STSchema *pTSchema, int32_t blockIndex, SSDataBlock* pDataBlock, - SSubmitTbData* pTableData, int64_t earlyTs, const char* id) { +int32_t tqSetDstTableDataPayload(uint64_t suid, const STSchema* pTSchema, int32_t blockIndex, SSDataBlock* pDataBlock, + SSubmitTbData* pTableData, int64_t earlyTs, const char* id) { int32_t numOfRows = pDataBlock->info.rows; char* dstTableName = pDataBlock->info.parTbName; @@ -975,6 +976,43 @@ int32_t tqSetDstTableDataPayload(uint64_t suid, const STSchema *pTSchema, int32_ return code; } +int32_t checkTagSchema(SStreamTask* pTask, SVnode* pVnode) { + int32_t code = TSDB_CODE_SUCCESS; + const char* id = pTask->id.idStr; + STaskOutputInfo* pOutputInfo = &pTask->outputInfo; + int32_t vgId = pTask->pMeta->vgId; + + if (pTask->outputInfo.tbSink.pTagSchema == NULL) { + SMetaReader mer1 = {0}; + metaReaderDoInit(&mer1, pVnode->pMeta, META_READER_LOCK); + + code = metaReaderGetTableEntryByUid(&mer1, pOutputInfo->tbSink.stbUid); + if (code != TSDB_CODE_SUCCESS) { + tqError("s-task:%s vgId:%d failed to get the dst stable, failed to sink results", id, vgId); + metaReaderClear(&mer1); + return code; + } + + pOutputInfo->tbSink.pTagSchema = tCloneSSchemaWrapper(&mer1.me.stbEntry.schemaTag); + metaReaderClear(&mer1); + + if (pOutputInfo->tbSink.pTagSchema == NULL) { + tqError("s-task:%s failed to clone tag schema, code:%s, failed to sink results", id, tstrerror(terrno)); + return terrno; + } + + SSchemaWrapper* pTagSchema = pOutputInfo->tbSink.pTagSchema; + SSchema* pCol1 = &pTagSchema->pSchema[0]; + if (pTagSchema->nCols == 1 && pCol1->type == TSDB_DATA_TYPE_UBIGINT && strcmp(pCol1->name, "group_id") == 0) { + pOutputInfo->tbSink.autoCreateCtb = true; + } else { + pOutputInfo->tbSink.autoCreateCtb = false; + } + } + + return code; +} + void tqSinkDataIntoDstTable(SStreamTask* pTask, void* vnode, void* data) { const SArray* pBlocks = (const SArray*)data; SVnode* pVnode = (SVnode*)vnode; @@ -988,27 +1026,9 @@ void tqSinkDataIntoDstTable(SStreamTask* pTask, void* vnode, void* data) { int64_t earlyTs = tsdbGetEarliestTs(pVnode->pTsdb); STaskOutputInfo* pOutputInfo = &pTask->outputInfo; - if (pTask->outputInfo.tbSink.pTagSchema == NULL) { - SMetaReader mer1 = {0}; - metaReaderDoInit(&mer1, pVnode->pMeta, META_READER_LOCK); - - code = metaReaderGetTableEntryByUid(&mer1, pOutputInfo->tbSink.stbUid); - if (code != TSDB_CODE_SUCCESS) { - tqError("s-task:%s vgId:%d failed to get the dst stable, failed to sink results", id, vgId); - 
metaReaderClear(&mer1); - return; - } - - pOutputInfo->tbSink.pTagSchema = tCloneSSchemaWrapper(&mer1.me.stbEntry.schemaTag); - metaReaderClear(&mer1); - - SSchemaWrapper* pTagSchema = pOutputInfo->tbSink.pTagSchema; - SSchema* pCol1 = &pTagSchema->pSchema[0]; - if (pTagSchema->nCols == 1 && pCol1->type == TSDB_DATA_TYPE_UBIGINT && strcmp(pCol1->name, "group_id") == 0) { - pOutputInfo->tbSink.autoCreateCtb = true; - } else { - pOutputInfo->tbSink.autoCreateCtb = false; - } + code = checkTagSchema(pTask, pVnode); + if (code != TSDB_CODE_SUCCESS) { + return; } bool onlySubmitData = hasOnlySubmitData(pBlocks, numOfBlocks); @@ -1033,144 +1053,16 @@ void tqSinkDataIntoDstTable(SStreamTask* pTask, void* vnode, void* data) { } else if (pDataBlock->info.type == STREAM_CHECKPOINT) { continue; } else { - pTask->execInfo.sink.numOfBlocks += 1; - - SSubmitReq2 submitReq = {.aSubmitTbData = taosArrayInit(1, sizeof(SSubmitTbData))}; - if (submitReq.aSubmitTbData == NULL) { - code = terrno; - tqError("s-task:%s vgId:%d failed to prepare submit msg in sink task, code:%s", id, vgId, tstrerror(code)); - return; - } - - SSubmitTbData tbData = {.suid = suid, .uid = 0, .sver = pTSchema->version, .flags = TD_REQ_FROM_APP}; - code = setDstTableDataUid(pVnode, pTask, pDataBlock, stbFullName, &tbData); - if (code != TSDB_CODE_SUCCESS) { - tqError("vgId:%d s-task:%s dst-table not exist, stb:%s discard stream results", vgId, id, stbFullName); - continue; - } - - code = tqSetDstTableDataPayload(suid, pTSchema, i, pDataBlock, &tbData, earlyTs, id); - if (code != TSDB_CODE_SUCCESS || tbData.aRowP == NULL) { - if (tbData.pCreateTbReq != NULL) { - tdDestroySVCreateTbReq(tbData.pCreateTbReq); - (void) doRemoveFromCache(pTask->outputInfo.tbSink.pTblInfo, pDataBlock->info.id.groupId, id); - tbData.pCreateTbReq = NULL; - } - continue; - } - - void* p = taosArrayPush(submitReq.aSubmitTbData, &tbData); - if (p == NULL) { - tqDebug("vgId:%d, s-task:%s failed to build submit msg, data lost", vgId, id); - } - - code = doBuildAndSendSubmitMsg(pVnode, pTask, &submitReq, 1); - if (code) { // failed and continue - tqDebug("vgId:%d, s-task:%s submit msg failed, data lost", vgId, id); - } + code = handleResultBlockMsg(pTask, pDataBlock, i, pVnode, earlyTs); } } } else { tqDebug("vgId:%d, s-task:%s write %d stream resBlock(s) into table, merge submit msg", vgId, id, numOfBlocks); - SHashObj* pTableIndexMap = - taosHashInit(numOfBlocks, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); - - SSubmitReq2 submitReq = {.aSubmitTbData = taosArrayInit(1, sizeof(SSubmitTbData))}; - if (submitReq.aSubmitTbData == NULL) { - code = terrno; - tqError("s-task:%s vgId:%d failed to prepare submit msg in sink task, code:%s", id, vgId, tstrerror(code)); - taosHashCleanup(pTableIndexMap); + if (streamTaskShouldStop(pTask)) { return; } - bool hasSubmit = false; - for (int32_t i = 0; i < numOfBlocks; i++) { - if (streamTaskShouldStop(pTask)) { - taosHashCleanup(pTableIndexMap); - tDestroySubmitReq(&submitReq, TSDB_MSG_FLG_ENCODE); - return; - } - - SSDataBlock* pDataBlock = taosArrayGet(pBlocks, i); - if (pDataBlock == NULL) { - continue; - } - - if (pDataBlock->info.type == STREAM_CHECKPOINT) { - continue; - } - - hasSubmit = true; - pTask->execInfo.sink.numOfBlocks += 1; - uint64_t groupId = pDataBlock->info.id.groupId; - - SSubmitTbData tbData = {.suid = suid, .uid = 0, .sver = pTSchema->version, .flags = TD_REQ_FROM_APP}; - - int32_t* index = taosHashGet(pTableIndexMap, &groupId, sizeof(groupId)); - if (index == NULL) { // no data 
yet, append it - code = setDstTableDataUid(pVnode, pTask, pDataBlock, stbFullName, &tbData); - if (code != TSDB_CODE_SUCCESS) { - tqError("vgId:%d dst-table gid:%" PRId64 " not exist, discard stream results", vgId, groupId); - continue; - } - - code = tqSetDstTableDataPayload(suid, pTSchema, i, pDataBlock, &tbData, earlyTs, id); - if (code != TSDB_CODE_SUCCESS || tbData.aRowP == NULL) { - if (tbData.pCreateTbReq != NULL) { - tdDestroySVCreateTbReq(tbData.pCreateTbReq); - (void) doRemoveFromCache(pTask->outputInfo.tbSink.pTblInfo, groupId, id); - tbData.pCreateTbReq = NULL; - } - continue; - } - - void* p = taosArrayPush(submitReq.aSubmitTbData, &tbData); - if (p == NULL) { - tqError("vgId:%d, s-task:%s failed to build submit msg, data lost", vgId, id); - continue; - } - - int32_t size = (int32_t)taosArrayGetSize(submitReq.aSubmitTbData) - 1; - code = taosHashPut(pTableIndexMap, &groupId, sizeof(groupId), &size, sizeof(size)); - if (code) { - tqError("vgId:%d, s-task:%s failed to put group into index map, code:%s", vgId, id, tstrerror(code)); - continue; - } - } else { - code = tqSetDstTableDataPayload(suid, pTSchema, i, pDataBlock, &tbData, earlyTs, id); - if (code != TSDB_CODE_SUCCESS || tbData.aRowP == NULL) { - if (tbData.pCreateTbReq != NULL) { - tdDestroySVCreateTbReq(tbData.pCreateTbReq); - tbData.pCreateTbReq = NULL; - } - continue; - } - - SSubmitTbData* pExisted = taosArrayGet(submitReq.aSubmitTbData, *index); - if (pExisted == NULL) { - continue; - } - - code = doMergeExistedRows(pExisted, &tbData, id); - if (code != TSDB_CODE_SUCCESS) { - continue; - } - } - - pTask->execInfo.sink.numOfRows += pDataBlock->info.rows; - } - - taosHashCleanup(pTableIndexMap); - - if (hasSubmit) { - code = doBuildAndSendSubmitMsg(pVnode, pTask, &submitReq, numOfBlocks); - if (code) { // failed and continue - tqError("vgId:%d failed to build and send submit msg", vgId); - } - } else { - tDestroySubmitReq(&submitReq, TSDB_MSG_FLG_ENCODE); - tqDebug("vgId:%d, s-task:%s write results completed", vgId, id); - } + reubuildAndSendMultiResBlock(pTask, pBlocks, pVnode, earlyTs); } } @@ -1190,7 +1082,7 @@ bool hasOnlySubmitData(const SArray* pBlocks, int32_t numOfBlocks) { return true; } -int32_t doPutIntoCache(SSHashObj* pSinkTableMap, STableSinkInfo* pTableSinkInfo, uint64_t groupId, const char* id) { +int32_t doPutSinkTableInfoIntoCache(SSHashObj* pSinkTableMap, STableSinkInfo* pTableSinkInfo, uint64_t groupId, const char* id) { int32_t code = tSimpleHashPut(pSinkTableMap, &groupId, sizeof(uint64_t), &pTableSinkInfo, POINTER_BYTES); if (code != TSDB_CODE_SUCCESS) { taosMemoryFreeClear(pTableSinkInfo); @@ -1202,7 +1094,17 @@ int32_t doPutIntoCache(SSHashObj* pSinkTableMap, STableSinkInfo* pTableSinkInfo, return code; } -int32_t doRemoveFromCache(SSHashObj* pSinkTableMap, uint64_t groupId, const char* id) { +bool doGetSinkTableInfoFromCache(SSHashObj* pTableInfoMap, uint64_t groupId, STableSinkInfo** pInfo) { + void* pVal = tSimpleHashGet(pTableInfoMap, &groupId, sizeof(uint64_t)); + if (pVal) { + *pInfo = *(STableSinkInfo**)pVal; + return true; + } + + return false; +} + +int32_t doRemoveSinkTableInfoInCache(SSHashObj* pSinkTableMap, uint64_t groupId, const char* id) { if (tSimpleHashGetSize(pSinkTableMap) == 0) { return TSDB_CODE_SUCCESS; } @@ -1223,8 +1125,8 @@ int32_t doBuildAndSendDeleteMsg(SVnode* pVnode, char* stbFullName, SSDataBlock* return terrno; } - int32_t code = tqBuildDeleteReq(pVnode->pTq, stbFullName, pDataBlock, &deleteReq, pTask->id.idStr, - pTask->ver >= SSTREAM_TASK_SUBTABLE_CHANGED_VER 
&& pTask->subtableWithoutMd5 != 1); + int32_t code = + tqBuildDeleteReq(pVnode->pTq, stbFullName, pDataBlock, &deleteReq, pTask->id.idStr, IS_NEW_SUBTB_RULE(pTask)); if (code != TSDB_CODE_SUCCESS) { return code; } @@ -1262,3 +1164,155 @@ int32_t doBuildAndSendDeleteMsg(SVnode* pVnode, char* stbFullName, SSDataBlock* return TSDB_CODE_SUCCESS; } + +void reubuildAndSendMultiResBlock(SStreamTask* pTask, const SArray* pBlocks, SVnode* pVnode, int64_t earlyTs) { + int32_t code = 0; + const char* id = pTask->id.idStr; + int32_t vgId = pTask->pMeta->vgId; + int32_t numOfBlocks = taosArrayGetSize(pBlocks); + int64_t suid = pTask->outputInfo.tbSink.stbUid; + STSchema* pTSchema = pTask->outputInfo.tbSink.pTSchema; + char* stbFullName = pTask->outputInfo.tbSink.stbFullName; + + SHashObj* pTableIndexMap = + taosHashInit(numOfBlocks, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); + + SSubmitReq2 submitReq = {.aSubmitTbData = taosArrayInit(1, sizeof(SSubmitTbData))}; + if (submitReq.aSubmitTbData == NULL) { + code = terrno; + tqError("s-task:%s vgId:%d failed to prepare submit msg in sink task, code:%s", id, vgId, tstrerror(code)); + taosHashCleanup(pTableIndexMap); + return; + } + + bool hasSubmit = false; + for (int32_t i = 0; i < numOfBlocks; i++) { + SSDataBlock* pDataBlock = taosArrayGet(pBlocks, i); + if (pDataBlock == NULL) { + continue; + } + + if (pDataBlock->info.type == STREAM_CHECKPOINT) { + continue; + } + + hasSubmit = true; + pTask->execInfo.sink.numOfBlocks += 1; + uint64_t groupId = pDataBlock->info.id.groupId; + + SSubmitTbData tbData = {.suid = suid, .uid = 0, .sver = pTSchema->version, .flags = TD_REQ_FROM_APP}; + + int32_t* index = taosHashGet(pTableIndexMap, &groupId, sizeof(groupId)); + if (index == NULL) { // no data yet, append it + code = setDstTableDataUid(pVnode, pTask, pDataBlock, stbFullName, &tbData); + if (code != TSDB_CODE_SUCCESS) { + tqError("vgId:%d dst-table gid:%" PRId64 " not exist, discard stream results", vgId, groupId); + continue; + } + + code = tqSetDstTableDataPayload(suid, pTSchema, i, pDataBlock, &tbData, earlyTs, id); + if (code != TSDB_CODE_SUCCESS || tbData.aRowP == NULL) { + if (tbData.pCreateTbReq != NULL) { + tdDestroySVCreateTbReq(tbData.pCreateTbReq); + (void)doRemoveSinkTableInfoInCache(pTask->outputInfo.tbSink.pTbInfo, groupId, id); + tbData.pCreateTbReq = NULL; + } + continue; + } + + void* p = taosArrayPush(submitReq.aSubmitTbData, &tbData); + if (p == NULL) { + tqError("vgId:%d, s-task:%s failed to build submit msg, data lost", vgId, id); + continue; + } + + int32_t size = (int32_t)taosArrayGetSize(submitReq.aSubmitTbData) - 1; + code = taosHashPut(pTableIndexMap, &groupId, sizeof(groupId), &size, sizeof(size)); + if (code) { + tqError("vgId:%d, s-task:%s failed to put group into index map, code:%s", vgId, id, tstrerror(code)); + continue; + } + } else { + code = tqSetDstTableDataPayload(suid, pTSchema, i, pDataBlock, &tbData, earlyTs, id); + if (code != TSDB_CODE_SUCCESS || tbData.aRowP == NULL) { + if (tbData.pCreateTbReq != NULL) { + tdDestroySVCreateTbReq(tbData.pCreateTbReq); + tbData.pCreateTbReq = NULL; + } + continue; + } + + SSubmitTbData* pExisted = taosArrayGet(submitReq.aSubmitTbData, *index); + if (pExisted == NULL) { + continue; + } + + code = doMergeExistedRows(pExisted, &tbData, id); + if (code != TSDB_CODE_SUCCESS) { + continue; + } + } + + pTask->execInfo.sink.numOfRows += pDataBlock->info.rows; + } + + taosHashCleanup(pTableIndexMap); + + if (hasSubmit) { + code = doBuildAndSendSubmitMsg(pVnode, 
pTask, &submitReq, numOfBlocks); + if (code) { // failed and continue + tqError("vgId:%d failed to build and send submit msg", vgId); + } + } else { + tDestroySubmitReq(&submitReq, TSDB_MSG_FLG_ENCODE); + tqDebug("vgId:%d, s-task:%s write results completed", vgId, id); + } +} + +int32_t handleResultBlockMsg(SStreamTask* pTask, SSDataBlock* pDataBlock, int32_t index, SVnode* pVnode, int64_t earlyTs) { + int32_t code = 0; + STSchema* pTSchema = pTask->outputInfo.tbSink.pTSchema; + int64_t suid = pTask->outputInfo.tbSink.stbUid; + const char* id = pTask->id.idStr; + int32_t vgId = TD_VID(pVnode); + char* stbFullName = pTask->outputInfo.tbSink.stbFullName; + + pTask->execInfo.sink.numOfBlocks += 1; + + SSubmitReq2 submitReq = {.aSubmitTbData = taosArrayInit(1, sizeof(SSubmitTbData))}; + if (submitReq.aSubmitTbData == NULL) { + tqError("s-task:%s vgId:%d failed to prepare submit msg in sink task, code:%s", id, vgId, tstrerror(terrno)); + return terrno; + } + + SSubmitTbData tbData = {.suid = suid, .uid = 0, .sver = pTSchema->version, .flags = TD_REQ_FROM_APP}; + code = setDstTableDataUid(pVnode, pTask, pDataBlock, stbFullName, &tbData); + if (code != TSDB_CODE_SUCCESS) { + tqError("vgId:%d s-task:%s dst-table not exist, stb:%s discard stream results", vgId, id, stbFullName); + return code; + } + + code = tqSetDstTableDataPayload(suid, pTSchema, index, pDataBlock, &tbData, earlyTs, id); + if (code != TSDB_CODE_SUCCESS || tbData.aRowP == NULL) { + if (tbData.pCreateTbReq != NULL) { + tdDestroySVCreateTbReq(tbData.pCreateTbReq); + (void)doRemoveSinkTableInfoInCache(pTask->outputInfo.tbSink.pTbInfo, pDataBlock->info.id.groupId, id); + tbData.pCreateTbReq = NULL; + } + + return code; + } + + void* p = taosArrayPush(submitReq.aSubmitTbData, &tbData); + if (p == NULL) { + tqDebug("vgId:%d, s-task:%s failed to build submit msg, code:%s, data lost", vgId, id, tstrerror(terrno)); + return terrno; + } + + code = doBuildAndSendSubmitMsg(pVnode, pTask, &submitReq, 1); + if (code) { // failed and continue + tqDebug("vgId:%d, s-task:%s submit msg failed, code:%s data lost", vgId, id, tstrerror(code)); + } + + return code; +} diff --git a/source/libs/stream/src/streamTask.c b/source/libs/stream/src/streamTask.c index 727701e03e..b359cdfc81 100644 --- a/source/libs/stream/src/streamTask.c +++ b/source/libs/stream/src/streamTask.c @@ -289,7 +289,7 @@ void tFreeStreamTask(SStreamTask* pTask) { if (pTask->outputInfo.type == TASK_OUTPUT__TABLE) { tDeleteSchemaWrapper(pTask->outputInfo.tbSink.pSchemaWrapper); taosMemoryFree(pTask->outputInfo.tbSink.pTSchema); - tSimpleHashCleanup(pTask->outputInfo.tbSink.pTblInfo); + tSimpleHashCleanup(pTask->outputInfo.tbSink.pTbInfo); tDeleteSchemaWrapper(pTask->outputInfo.tbSink.pTagSchema); } else if (pTask->outputInfo.type == TASK_OUTPUT__SHUFFLE_DISPATCH) { taosArrayDestroy(pTask->outputInfo.shuffleDispatcher.dbInfo.pVgroupInfos); From 01cc9018cf23a051f4ecf468b8c534d4bce2f27e Mon Sep 17 00:00:00 2001 From: dmchen Date: Thu, 17 Oct 2024 03:48:12 +0000 Subject: [PATCH 60/72] feat/TS-4785-redo-multi-create-db --- source/dnode/mgmt/mgmt_vnode/src/vmHandle.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c index 70c873e0f5..834b43c380 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c @@ -415,14 +415,14 @@ int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { goto _OVER; } - // 
taosThreadMutexLock(&pMgmt->createLock); + taosThreadMutexLock(&pMgmt->createLock); code = vmWriteVnodeListToFile(pMgmt); if (code != 0) { code = terrno != 0 ? terrno : code; - // taosThreadMutexUnlock(&pMgmt->createLock); + taosThreadMutexUnlock(&pMgmt->createLock); goto _OVER; } - // taosThreadMutexUnlock(&pMgmt->createLock); + taosThreadMutexUnlock(&pMgmt->createLock); _OVER: if (code != 0) { @@ -1037,7 +1037,7 @@ SArray *vmGetMsgHandles() { if (dmSetMgmtHandle(pArray, TDMT_VND_COMPACT, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_TRIM, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_S3MIGRATE, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER; - if (dmSetMgmtHandle(pArray, TDMT_DND_CREATE_VNODE, vmPutMsgToMgmtQueue, 0) == NULL) goto _OVER; + if (dmSetMgmtHandle(pArray, TDMT_DND_CREATE_VNODE, vmPutMsgToMultiMgmtQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_DND_DROP_VNODE, vmPutMsgToMgmtQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_DND_ALTER_VNODE_TYPE, vmPutMsgToMgmtQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_DND_CHECK_VNODE_LEARNER_CATCHUP, vmPutMsgToMgmtQueue, 0) == NULL) goto _OVER; From 9d40a6d68bc37ac5c30e50d228ae54245d9de322 Mon Sep 17 00:00:00 2001 From: wade zhang <95411902+gccgdb1234@users.noreply.github.com> Date: Thu, 17 Oct 2024 12:04:05 +0800 Subject: [PATCH 61/72] Update 3.3.3.0.md --- docs/zh/28-releases/03-notes/3.3.3.0.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/28-releases/03-notes/3.3.3.0.md b/docs/zh/28-releases/03-notes/3.3.3.0.md index 405ca83d71..bb7bc0f831 100644 --- a/docs/zh/28-releases/03-notes/3.3.3.0.md +++ b/docs/zh/28-releases/03-notes/3.3.3.0.md @@ -10,7 +10,7 @@ description: 3.3.3.0 版本说明 4. TDengine支持macOS企业版客户端 [企业版] 5. taosX日志默认不写入syslog [企业版] 6. 服务端记录所有慢查询信息到log库 -7. show cluster machines 查询结果中添加服务端版本号 +7. show cluster machines 查询结果中添加服务端版本号 [企业版] 8. 删除保留关键字LEVEL/ENCODE/COMPRESS, 可以作为列名/表名/数据库名等使用 9. 禁止动态修改临时目录 10. round 函数:支持四舍五入的精度 From c60c3b238e216712f92f3b8553c4a912dc0f6465 Mon Sep 17 00:00:00 2001 From: dmchen Date: Thu, 17 Oct 2024 06:36:20 +0000 Subject: [PATCH 62/72] feat/TS-4785-redo-multi-create-db-fix-check --- source/dnode/mgmt/mgmt_vnode/src/vmHandle.c | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c index 834b43c380..7e950ef1be 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c @@ -415,14 +415,24 @@ int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { goto _OVER; } - taosThreadMutexLock(&pMgmt->createLock); + code = taosThreadMutexLock(&pMgmt->createLock); + if (code != 0) { + dError("vgId:%d, failed to lock since %s", req.vgId, tstrerror(code)); + goto _OVER; + } code = vmWriteVnodeListToFile(pMgmt); if (code != 0) { code = terrno != 0 ? 
terrno : code; - taosThreadMutexUnlock(&pMgmt->createLock); + int32_t ret = taosThreadMutexUnlock(&pMgmt->createLock); + if (ret != 0) { + dError("vgId:%d, failed to unlock since %s", req.vgId, tstrerror(ret)); + } goto _OVER; } - taosThreadMutexUnlock(&pMgmt->createLock); + int32_t ret = taosThreadMutexUnlock(&pMgmt->createLock); + if (ret != 0) { + dError("vgId:%d, failed to unlock since %s", req.vgId, tstrerror(ret)); + } _OVER: if (code != 0) { From d0618b6e8553a5f073266f82ab650dc63721399e Mon Sep 17 00:00:00 2001 From: Jing Sima Date: Wed, 16 Oct 2024 23:35:53 +0800 Subject: [PATCH 63/72] fix:[TD-32592] fix bug when percentile split bucket. --- source/libs/function/src/tpercentile.c | 39 ++++++++++++++------------ 1 file changed, 21 insertions(+), 18 deletions(-) diff --git a/source/libs/function/src/tpercentile.c b/source/libs/function/src/tpercentile.c index 29c48460c0..429ab52a8d 100644 --- a/source/libs/function/src/tpercentile.c +++ b/source/libs/function/src/tpercentile.c @@ -224,19 +224,18 @@ int32_t tBucketDoubleHash(tMemBucket *pBucket, const void *value, int32_t *index *index = -1; - if (v > pBucket->range.dMaxVal || v < pBucket->range.dMinVal) { + if (v > pBucket->range.dMaxVal || v < pBucket->range.dMinVal || isnan(v)) { return TSDB_CODE_SUCCESS; } // divide a range of [dMinVal, dMaxVal] into 1024 buckets double span = pBucket->range.dMaxVal - pBucket->range.dMinVal; - if (span < pBucket->numOfSlots) { - int32_t delta = (int32_t)(v - pBucket->range.dMinVal); - *index = (delta % pBucket->numOfSlots); + if (fabs(span) < DBL_EPSILON) { + *index = 0; } else { double slotSpan = span / pBucket->numOfSlots; *index = (int32_t)((v - pBucket->range.dMinVal) / slotSpan); - if (v == pBucket->range.dMaxVal) { + if (fabs(v - pBucket->range.dMaxVal) < DBL_EPSILON) { *index -= 1; } } @@ -583,48 +582,52 @@ int32_t getPercentileImpl(tMemBucket *pMemBucket, int32_t count, double fraction *result = getIdenticalDataVal(pMemBucket, i); return TSDB_CODE_SUCCESS; } - // try next round - pMemBucket->times += 1; - // qDebug("MemBucket:%p, start next round data bucketing, time:%d", pMemBucket, pMemBucket->times); - - pMemBucket->range = pSlot->range; - pMemBucket->total = 0; - - resetSlotInfo(pMemBucket); - - int32_t groupId = getGroupId(pMemBucket->numOfSlots, i, pMemBucket->times - 1); + tMemBucket *tmpBucket = NULL; + int32_t code = tMemBucketCreate(pMemBucket->bytes, pMemBucket->type, pSlot->range.dMinVal, pSlot->range.dMaxVal, + false, &tmpBucket); + if (TSDB_CODE_SUCCESS != code) { + tMemBucketDestroy(&tmpBucket); + return code; + } + int32_t groupId = getGroupId(pMemBucket->numOfSlots, i, pMemBucket->times); SArray* list; void *p = taosHashGet(pMemBucket->groupPagesMap, &groupId, sizeof(groupId)); if (p != NULL) { list = *(SArray **)p; if (list == NULL || list->size <= 0) { + tMemBucketDestroy(&tmpBucket); return -1; } } else { + tMemBucketDestroy(&tmpBucket); return -1; } for (int32_t f = 0; f < list->size; ++f) { int32_t *pageId = taosArrayGet(list, f); if (NULL == pageId) { + tMemBucketDestroy(&tmpBucket); return TSDB_CODE_OUT_OF_RANGE; } SFilePage *pg = getBufPage(pMemBucket->pBuffer, *pageId); if (pg == NULL) { + tMemBucketDestroy(&tmpBucket); return terrno; } - int32_t code = tMemBucketPut(pMemBucket, pg->data, (int32_t)pg->num); + code = tMemBucketPut(tmpBucket, pg->data, (int32_t)pg->num); if (code != TSDB_CODE_SUCCESS) { + tMemBucketDestroy(&tmpBucket); return code; } setBufPageDirty(pg, true); releaseBufPage(pMemBucket->pBuffer, pg); } - - return getPercentileImpl(pMemBucket, count 
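
/*
 * Illustrative sketch (not part of the patch): the getPercentileImpl change in
 * this hunk builds a fresh, narrower tMemBucket over the selected slot's range
 * and recurses into it, instead of resetting and reusing the shared bucket.
 * The standalone routine below models the same idea: bucket the values, find
 * the slot holding the requested rank, recurse on that slot's sub-range.
 * kthInRange and SLOTS are illustrative names only; the real code re-reads the
 * buffered rows so it can return an exact value, while this sketch merely
 * narrows the numeric range until it is tight enough.
 */
#define SLOTS 16

static double kthInRange(const double *v, int n, int k, double lo, double hi) {
  if (hi - lo < 1e-9) return lo;                /* range is narrow enough */

  int    cnt[SLOTS] = {0};
  double span = (hi - lo) / SLOTS;

  for (int i = 0; i < n; i++) {
    if (v[i] < lo || v[i] >= hi) continue;      /* half-open range: no double count */
    int s = (int)((v[i] - lo) / span);
    if (s >= SLOTS) s = SLOTS - 1;              /* guard against FP rounding */
    cnt[s]++;
  }

  int seen = 0;
  for (int s = 0; s < SLOTS; s++) {
    if (seen + cnt[s] > k) {                    /* rank k falls inside slot s */
      return kthInRange(v, n, k - seen, lo + s * span, lo + (s + 1) * span);
    }
    seen += cnt[s];
  }
  return hi;                                    /* rank k not reachable in [lo, hi) */
}

/* usage: median of n values known to lie in [minVal, maxVal]:
 *   double m = kthInRange(v, n, n / 2, minVal, maxVal + 1e-9);               */
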
- num, fraction, result); + code = getPercentileImpl(tmpBucket, count - num, fraction, result); + tMemBucketDestroy(&tmpBucket); + return code; } } else { num += pSlot->info.size; From 9017474e221541e8aba64b491d5318744ee53720 Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao@163.com> Date: Thu, 17 Oct 2024 15:35:32 +0800 Subject: [PATCH 64/72] adj ci --- tests/script/tsim/stream/windowClose.sim | 10 +++++----- tests/system-test/8-stream/max_delay_session.py | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/script/tsim/stream/windowClose.sim b/tests/script/tsim/stream/windowClose.sim index a9c55bd9de..8e4e666dc9 100644 --- a/tests/script/tsim/stream/windowClose.sim +++ b/tests/script/tsim/stream/windowClose.sim @@ -48,15 +48,15 @@ sql create table t1 using st tags(1); sql create table t2 using st tags(2); sql create stream stream2 trigger window_close into streamt2 as select _wstart, sum(a) from st interval(10s); -sql create stream stream3 trigger max_delay 1s into streamt3 as select _wstart, sum(a) from st interval(10s); +sql create stream stream3 trigger max_delay 5s into streamt3 as select _wstart, sum(a) from st interval(10s); sql create stream stream4 trigger window_close into streamt4 as select _wstart, sum(a) from t1 interval(10s); -sql create stream stream5 trigger max_delay 1s into streamt5 as select _wstart, sum(a) from t1 interval(10s); +sql create stream stream5 trigger max_delay 5s into streamt5 as select _wstart, sum(a) from t1 interval(10s); sql create stream stream6 trigger window_close into streamt6 as select _wstart, sum(a) from st session(ts, 10s); -sql create stream stream7 trigger max_delay 1s into streamt7 as select _wstart, sum(a) from st session(ts, 10s); +sql create stream stream7 trigger max_delay 5s into streamt7 as select _wstart, sum(a) from st session(ts, 10s); sql create stream stream8 trigger window_close into streamt8 as select _wstart, sum(a) from t1 session(ts, 10s); -sql create stream stream9 trigger max_delay 1s into streamt9 as select _wstart, sum(a) from t1 session(ts, 10s); +sql create stream stream9 trigger max_delay 5s into streamt9 as select _wstart, sum(a) from t1 session(ts, 10s); sql create stream stream10 trigger window_close into streamt10 as select _wstart, sum(a) from t1 state_window(b); -sql create stream stream11 trigger max_delay 1s into streamt11 as select _wstart, sum(a) from t1 state_window(b); +sql create stream stream11 trigger max_delay 5s into streamt11 as select _wstart, sum(a) from t1 state_window(b); run tsim/stream/checkTaskStatus.sim diff --git a/tests/system-test/8-stream/max_delay_session.py b/tests/system-test/8-stream/max_delay_session.py index 934fbbcac2..71827f03b4 100644 --- a/tests/system-test/8-stream/max_delay_session.py +++ b/tests/system-test/8-stream/max_delay_session.py @@ -92,7 +92,7 @@ class TDTestCase: def run(self): for fill_history_value in [None, 1]: for watermark in [None, random.randint(20, 30)]: - self.watermark_max_delay_session(session=random.randint(10, 15), watermark=watermark, max_delay=f"{random.randint(1, 3)}s", fill_history_value=fill_history_value) + self.watermark_max_delay_session(session=random.randint(10, 15), watermark=watermark, max_delay=f"{random.randint(5, 8)}s", fill_history_value=fill_history_value) def stop(self): tdSql.close() From e64f55f5b1252ed27b872721869efee8eabbe6fd Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao@163.com> Date: Thu, 17 Oct 2024 18:06:16 +0800 Subject: [PATCH 65/72] adj ci --- tests/script/tsim/stream/windowClose.sim | 56 
++++++++++++------------ 1 file changed, 28 insertions(+), 28 deletions(-) diff --git a/tests/script/tsim/stream/windowClose.sim b/tests/script/tsim/stream/windowClose.sim index 8e4e666dc9..2d4e6d7ea3 100644 --- a/tests/script/tsim/stream/windowClose.sim +++ b/tests/script/tsim/stream/windowClose.sim @@ -138,12 +138,12 @@ if $rows != 2 then goto loop1 endi -print step 1 max delay 2s +print step 1 max delay 5s sql create database test3 vgroups 4; sql use test3; sql create table t1(ts timestamp, a int, b int , c int, d double); -sql create stream stream13 trigger max_delay 2s into streamt13 as select _wstart, sum(a), now from t1 interval(10s); +sql create stream stream13 trigger max_delay 5s into streamt13 as select _wstart, sum(a), now from t1 interval(10s); run tsim/stream/checkTaskStatus.sim @@ -172,8 +172,8 @@ $now02 = $data02 $now12 = $data12 -print step1 max delay 2s......... sleep 3s -sleep 3000 +print step1 max delay 5s......... sleep 6s +sleep 6000 sql select * from streamt13; @@ -188,7 +188,7 @@ if $data12 != $now12 then return -1 endi -print step 2 max delay 2s +print step 2 max delay 5s sql create database test4 vgroups 4; sql use test4; @@ -197,7 +197,7 @@ sql create stable st(ts timestamp, a int, b int , c int, d double) tags(ta int,t sql create table t1 using st tags(1,1,1); sql create table t2 using st tags(2,2,2); -sql create stream stream14 trigger max_delay 2s into streamt14 as select _wstart, sum(a), now from st partition by tbname interval(10s); +sql create stream stream14 trigger max_delay 5s into streamt14 as select _wstart, sum(a), now from st partition by tbname interval(10s); run tsim/stream/checkTaskStatus.sim @@ -234,8 +234,8 @@ $now12 = $data12 $now22 = $data22 $now32 = $data32 -print step2 max delay 2s......... sleep 3s -sleep 3000 +print step2 max delay 5s......... sleep 6s +sleep 6000 sql select * from streamt14 order by 2; print $data00 $data01 $data02 @@ -264,8 +264,8 @@ if $data32 != $now32 then return -1 endi -print step2 max delay 2s......... sleep 3s -sleep 3000 +print step2 max delay 5s......... sleep 6s +sleep 6000 sql select * from streamt14 order by 2; print $data00 $data01 $data02 @@ -294,12 +294,12 @@ if $data32 != $now32 then return -1 endi -print step 2 max delay 2s +print step 2 max delay 5s sql create database test15 vgroups 4; sql use test15; sql create table t1(ts timestamp, a int, b int , c int, d double); -sql create stream stream15 trigger max_delay 2s into streamt13 as select _wstart, sum(a), now from t1 session(ts, 10s); +sql create stream stream15 trigger max_delay 5s into streamt13 as select _wstart, sum(a), now from t1 session(ts, 10s); run tsim/stream/checkTaskStatus.sim @@ -328,8 +328,8 @@ $now02 = $data02 $now12 = $data12 -print step1 max delay 2s......... sleep 3s -sleep 3000 +print step1 max delay 5s......... sleep 6s +sleep 6000 sql select * from streamt13; @@ -344,8 +344,8 @@ if $data12 != $now12 then return -1 endi -print step1 max delay 2s......... sleep 3s -sleep 3000 +print step1 max delay 5s......... 
sleep 6s +sleep 6000 sql select * from streamt13; @@ -362,12 +362,12 @@ endi print session max delay over -print step 3 max delay 2s +print step 3 max delay 5s sql create database test16 vgroups 4; sql use test16; sql create table t1(ts timestamp, a int, b int , c int, d double); -sql create stream stream16 trigger max_delay 2s into streamt13 as select _wstart, sum(a), now from t1 state_window(a); +sql create stream stream16 trigger max_delay 5s into streamt13 as select _wstart, sum(a), now from t1 state_window(a); run tsim/stream/checkTaskStatus.sim @@ -396,8 +396,8 @@ $now02 = $data02 $now12 = $data12 -print step1 max delay 2s......... sleep 3s -sleep 3000 +print step1 max delay 5s......... sleep 6s +sleep 6000 sql select * from streamt13; @@ -412,8 +412,8 @@ if $data12 != $now12 then return -1 endi -print step1 max delay 2s......... sleep 3s -sleep 3000 +print step1 max delay 5s......... sleep 6s +sleep 6000 sql select * from streamt13; @@ -430,12 +430,12 @@ endi print state max delay over -print step 4 max delay 2s +print step 4 max delay 5s sql create database test17 vgroups 4; sql use test17; sql create table t1(ts timestamp, a int, b int , c int, d double); -sql create stream stream17 trigger max_delay 2s into streamt13 as select _wstart, sum(a), now from t1 event_window start with a = 1 end with a = 9; +sql create stream stream17 trigger max_delay 5s into streamt13 as select _wstart, sum(a), now from t1 event_window start with a = 1 end with a = 9; run tsim/stream/checkTaskStatus.sim @@ -467,8 +467,8 @@ $now02 = $data02 $now12 = $data12 -print step1 max delay 2s......... sleep 3s -sleep 3000 +print step1 max delay 5s......... sleep 6s +sleep 6000 sql select * from streamt13; @@ -483,8 +483,8 @@ if $data12 != $now12 then return -1 endi -print step1 max delay 2s......... sleep 3s -sleep 3000 +print step1 max delay 5s......... 
sleep 6s +sleep 6000 sql select * from streamt13; From 6d5807859b7c45270293b2c0aeb475d91e583b8e Mon Sep 17 00:00:00 2001 From: dmchen Date: Thu, 17 Oct 2024 10:55:36 +0000 Subject: [PATCH 66/72] fix/TD-32594-set-stage-when-insert --- source/dnode/mnode/impl/src/mndTrans.c | 27 +++++++++++++++++++++----- 1 file changed, 22 insertions(+), 5 deletions(-) diff --git a/source/dnode/mnode/impl/src/mndTrans.c b/source/dnode/mnode/impl/src/mndTrans.c index 657601f5ae..4268d73746 100644 --- a/source/dnode/mnode/impl/src/mndTrans.c +++ b/source/dnode/mnode/impl/src/mndTrans.c @@ -474,6 +474,21 @@ static int32_t mndTransActionInsert(SSdb *pSdb, STrans *pTrans) { // pTrans->startFunc = 0; } + if (pTrans->stage == TRN_STAGE_COMMIT) { + pTrans->stage = TRN_STAGE_COMMIT_ACTION; + mInfo("trans:%d, stage from commit to commitAction since perform update action", pTrans->id); + } + + if (pTrans->stage == TRN_STAGE_ROLLBACK) { + pTrans->stage = TRN_STAGE_UNDO_ACTION; + mInfo("trans:%d, stage from rollback to undoAction since perform update action", pTrans->id); + } + + if (pTrans->stage == TRN_STAGE_PRE_FINISH) { + pTrans->stage = TRN_STAGE_FINISH; + mInfo("trans:%d, stage from pre-finish to finished since perform update action", pTrans->id); + } + return 0; } @@ -563,17 +578,17 @@ static int32_t mndTransActionUpdate(SSdb *pSdb, STrans *pOld, STrans *pNew) { if (pOld->stage == TRN_STAGE_COMMIT) { pOld->stage = TRN_STAGE_COMMIT_ACTION; - mTrace("trans:%d, stage from commit to commitAction since perform update action", pNew->id); + mInfo("trans:%d, stage from commit to commitAction since perform update action", pNew->id); } if (pOld->stage == TRN_STAGE_ROLLBACK) { pOld->stage = TRN_STAGE_UNDO_ACTION; - mTrace("trans:%d, stage from rollback to undoAction since perform update action", pNew->id); + mInfo("trans:%d, stage from rollback to undoAction since perform update action", pNew->id); } if (pOld->stage == TRN_STAGE_PRE_FINISH) { pOld->stage = TRN_STAGE_FINISH; - mTrace("trans:%d, stage from pre-finish to finished since perform update action", pNew->id); + mInfo("trans:%d, stage from pre-finish to finished since perform update action", pNew->id); } return 0; @@ -1295,7 +1310,7 @@ static void mndTransResetActions(SMnode *pMnode, STrans *pTrans, SArray *pArray) } } -// execute at bottom half +// execute in sync context static int32_t mndTransWriteSingleLog(SMnode *pMnode, STrans *pTrans, STransAction *pAction, bool topHalf) { if (pAction->rawWritten) return 0; if (topHalf) { @@ -1321,7 +1336,7 @@ static int32_t mndTransWriteSingleLog(SMnode *pMnode, STrans *pTrans, STransActi TAOS_RETURN(code); } -// execute at top half +// execute in trans context static int32_t mndTransSendSingleMsg(SMnode *pMnode, STrans *pTrans, STransAction *pAction, bool topHalf) { if (pAction->msgSent) return 0; if (mndCannotExecuteTransAction(pMnode, topHalf)) { @@ -1701,6 +1716,7 @@ static bool mndTransPerformRedoActionStage(SMnode *pMnode, STrans *pTrans, bool return continueExec; } +// in trans context static bool mndTransPerformCommitStage(SMnode *pMnode, STrans *pTrans, bool topHalf) { if (mndCannotExecuteTransAction(pMnode, topHalf)) return false; @@ -1775,6 +1791,7 @@ static bool mndTransPerformUndoActionStage(SMnode *pMnode, STrans *pTrans, bool return continueExec; } +// in trans context static bool mndTransPerformRollbackStage(SMnode *pMnode, STrans *pTrans, bool topHalf) { if (mndCannotExecuteTransAction(pMnode, topHalf)) return false; From 8eacb58d9451999fc62d4434a24aa3ca2f456e48 Mon Sep 17 00:00:00 2001 From: 54liuyao 
<54liuyao@163.com> Date: Fri, 18 Oct 2024 09:32:13 +0800 Subject: [PATCH 67/72] feat(stream):modify stream doc --- docs/zh/14-reference/03-taos-sql/14-stream.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/14-reference/03-taos-sql/14-stream.md b/docs/zh/14-reference/03-taos-sql/14-stream.md index d995c2a09b..3af8fa6921 100644 --- a/docs/zh/14-reference/03-taos-sql/14-stream.md +++ b/docs/zh/14-reference/03-taos-sql/14-stream.md @@ -153,7 +153,7 @@ SELECT * from information_schema.`ins_streams`; 由于窗口关闭是由事件时间决定的,如事件流中断、或持续延迟,则事件时间无法更新,可能导致无法得到最新的计算结果。 -因此,流式计算提供了以事件时间结合处理时间计算的 MAX_DELAY 触发模式。 +因此,流式计算提供了以事件时间结合处理时间计算的 MAX_DELAY 触发模式。MAX_DELAY最小时间是5s,如果低于5s,创建流计算时会报错。 MAX_DELAY 模式在窗口关闭时会立即触发计算。此外,当数据写入后,计算触发的时间超过 max delay 指定的时间,则立即触发计算 From 5adf31318ca94734530c512e58ea3c5278ca0aa3 Mon Sep 17 00:00:00 2001 From: dmchen Date: Fri, 18 Oct 2024 02:14:50 +0000 Subject: [PATCH 68/72] fix/TD-32597-move-kill-compact-to-write-thread --- source/dnode/mgmt/mgmt_mnode/src/mmHandle.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c b/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c index 7204cde8f7..2b50009b29 100644 --- a/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c +++ b/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c @@ -208,7 +208,7 @@ SArray *mmGetMsgHandles() { if (dmSetMgmtHandle(pArray, TDMT_MND_DROP_VIEW, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_MND_VIEW_META, mmPutMsgToReadQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_MND_STATIS, mmPutMsgToReadQueue, 0) == NULL) goto _OVER; - if (dmSetMgmtHandle(pArray, TDMT_MND_KILL_COMPACT, mmPutMsgToReadQueue, 0) == NULL) goto _OVER; + if (dmSetMgmtHandle(pArray, TDMT_MND_KILL_COMPACT, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_MND_CONFIG_CLUSTER, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_QUERY_COMPACT_PROGRESS_RSP, mmPutMsgToReadQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_MND_CREATE_ENCRYPT_KEY, mmPutMsgToReadQueue, 0) == NULL) goto _OVER; From dbd8d6891d0940a507a2a7e1e2fe714ba7ba8cf3 Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Fri, 18 Oct 2024 12:39:49 +0800 Subject: [PATCH 69/72] fix: subprocess.popen redirect to PIPE , pipe buffer while fill full case dead-lock --- tools/auto/stmt2Performance/json/query.json | 5 +- .../auto/stmt2Performance/json/template.json | 6 +- tools/auto/stmt2Performance/stmt2Perf.py | 64 +++++++++---------- tools/auto/testCompression/json/query.json | 5 +- tools/auto/testCompression/testCompression.py | 45 +++++++------ 5 files changed, 61 insertions(+), 64 deletions(-) diff --git a/tools/auto/stmt2Performance/json/query.json b/tools/auto/stmt2Performance/json/query.json index 70f1d90edc..a6e50daae2 100644 --- a/tools/auto/stmt2Performance/json/query.json +++ b/tools/auto/stmt2Performance/json/query.json @@ -12,11 +12,10 @@ "query_mode": "taosc", "specified_table_query": { "query_interval": 0, - "concurrent": 10, + "threads": 10, "sqls": [ { - "sql": "select count(*) from meters", - "result": "./query_result.txt" + "sql": "select count(*) from meters" } ] } diff --git a/tools/auto/stmt2Performance/json/template.json b/tools/auto/stmt2Performance/json/template.json index 8c54c5be22..6d015370e9 100644 --- a/tools/auto/stmt2Performance/json/template.json +++ b/tools/auto/stmt2Performance/json/template.json @@ -17,7 +17,9 @@ "dbinfo": { "name": "dbrate", "vgroups": 1, - "drop": "yes" 
+ "drop": "yes", + "wal_retention_size": 1, + "wal_retention_period": 1 }, "super_tables": [ { @@ -27,7 +29,7 @@ "childtable_prefix": "d", "insert_mode": "@STMT_MODE", "interlace_rows": @INTERLACE_MODE, - "insert_rows": 100000, + "insert_rows": 10000, "timestamp_step": 1, "start_timestamp": "2020-10-01 00:00:00.000", "auto_create_table": "no", diff --git a/tools/auto/stmt2Performance/stmt2Perf.py b/tools/auto/stmt2Performance/stmt2Perf.py index 4d99f2483d..ae66504c8a 100644 --- a/tools/auto/stmt2Performance/stmt2Perf.py +++ b/tools/auto/stmt2Performance/stmt2Perf.py @@ -34,28 +34,6 @@ def exec(command, show=True): print(f"exec {command}\n") return os.system(command) -# run return output and error -def run(command, timeout = 60, show=True): - if(show): - print(f"run {command} timeout={timeout}s\n") - - process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - process.wait(timeout) - - output = process.stdout.read().decode(encoding="gbk") - error = process.stderr.read().decode(encoding="gbk") - - return output, error - -# return list after run -def runRetList(command, timeout=10, first=True): - output,error = run(command, timeout) - if first: - return output.splitlines() - else: - return error.splitlines() - - def readFileContext(filename): file = open(filename) context = file.read() @@ -78,6 +56,27 @@ def appendFileContext(filename, context): except: print(f"appand file error context={context} .") +# run return output and error +def run(command, show=True): + # out to file + out = "out.txt" + err = "err.txt" + ret = exec(command + f" 1>{out} 2>{err}", True) + + # read from file + output = readFileContext(out) + error = readFileContext(err) + + return output, error + +# return list after run +def runRetList(command, first=True): + output,error = run(command) + if first: + return output.splitlines() + else: + return error.splitlines() + def getFolderSize(folder): total_size = 0 for dirpath, dirnames, filenames in os.walk(folder): @@ -134,8 +133,6 @@ def getMatch(datatype, algo): def generateJsonFile(stmt, interlace): - print(f"doTest stmt: {stmt} interlace_rows={interlace}\n") - # replace datatype context = readFileContext(templateFile) # replace compress @@ -204,9 +201,16 @@ def writeTemplateInfo(resultFile): insertRows = findContextValue(context, "insert_rows") bindVGroup = findContextValue(context, "thread_bind_vgroup") nThread = findContextValue(context, "thread_count") + batch = findContextValue(context, "num_of_records_per_req") + if bindVGroup.lower().find("yes") != -1: nThread = vgroups - line = f"thread_bind_vgroup = {bindVGroup}\nvgroups = {vgroups}\nchildtable_count = {childCount}\ninsert_rows = {insertRows}\ninsertThreads = {nThread} \n\n" + line = f"thread_bind_vgroup = {bindVGroup}\n" + line += f"vgroups = {vgroups}\n" + line += f"childtable_count = {childCount}\n" + line += f"insert_rows = {insertRows}\n" + line += f"insertThreads = {nThread}\n" + line += f"batchSize = {batch}\n\n" print(line) appendFileContext(resultFile, line) @@ -247,14 +251,8 @@ def totalCompressRate(stmt, interlace, resultFile, spent, spentReal, writeSpeed, # %("No", "stmtMode", "interlaceRows", "spent", "spent-real", "writeSpeed", "write-real", "query-QPS", "dataSize", "rate") Number += 1 - ''' - context = "%2s %6s %10s %10s %10s %15s %15s %16s %16s %16s %16s %16s %8s %8s %8s\n"%( - Number, stmt, interlace, spent + "s", spentReal + "s", writeSpeed + " rows/s", writeReal + " rows/s", - min, avg, p90, p99, max, - querySpeed, str(totalSize) + " MB", rate + "%") - ''' 
context = "%2s %8s %10s %10s %16s %16s %12s %12s %12s %12s %12s %12s %10s %10s %10s\n"%( - Number, stmt, interlace, spent + "s", spentReal + "s", writeSpeed + "r/s", writeReal + "r/s", + Number, stmt, interlace, spent + "s", spentReal + "s", writeSpeed + " r/s", writeReal + " r/s", min, avg, p90, p99, max + "ms", querySpeed, str(totalSize) + " MB", rate + "%") @@ -323,7 +321,7 @@ def testWrite(jsonFile): def testQuery(): command = f"taosBenchmark -f json/query.json" - lines = runRetList(command, 60000) + lines = runRetList(command) # INFO: Spend 6.7350 second completed total queries: 10, the QPS of all threads: 1.485 speed = None diff --git a/tools/auto/testCompression/json/query.json b/tools/auto/testCompression/json/query.json index e810c1009f..35c39e831c 100644 --- a/tools/auto/testCompression/json/query.json +++ b/tools/auto/testCompression/json/query.json @@ -12,11 +12,10 @@ "query_mode": "taosc", "specified_table_query": { "query_interval": 0, - "concurrent": 10, + "threads": 10, "sqls": [ { - "sql": "select * from meters", - "result": "./query_res0.txt" + "sql": "select * from meters" } ] } diff --git a/tools/auto/testCompression/testCompression.py b/tools/auto/testCompression/testCompression.py index ee922a1a23..1a0d714c44 100644 --- a/tools/auto/testCompression/testCompression.py +++ b/tools/auto/testCompression/testCompression.py @@ -34,28 +34,6 @@ def exec(command, show=True): print(f"exec {command}\n") return os.system(command) -# run return output and error -def run(command, timeout = 60, show=True): - if(show): - print(f"run {command} timeout={timeout}s\n") - - process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - process.wait(timeout) - - output = process.stdout.read().decode(encoding="gbk") - error = process.stderr.read().decode(encoding="gbk") - - return output, error - -# return list after run -def runRetList(command, timeout=10, first=True): - output,error = run(command, timeout) - if first: - return output.splitlines() - else: - return error.splitlines() - - def readFileContext(filename): file = open(filename) context = file.read() @@ -78,6 +56,27 @@ def appendFileContext(filename, context): except: print(f"appand file error context={context} .") +# run return output and error +def run(command, show=True): + # out to file + out = "out.txt" + err = "err.txt" + ret = exec(command + f" 1>{out} 2>{err}", True) + + # read from file + output = readFileContext(out) + error = readFileContext(err) + + return output, error + +# return list after run +def runRetList(command, first=True): + output,error = run(command) + if first: + return output.splitlines() + else: + return error.splitlines() + def getFolderSize(folder): total_size = 0 for dirpath, dirnames, filenames in os.walk(folder): @@ -273,7 +272,7 @@ def testWrite(jsonFile): def testQuery(): command = f"taosBenchmark -f json/query.json" - lines = runRetList(command, 60000) + lines = runRetList(command) # INFO: Spend 6.7350 second completed total queries: 10, the QPS of all threads: 1.485 speed = None From 528465ae215fc6c53ed38916bde6229dbf8e0555 Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Sat, 19 Oct 2024 17:14:19 +0800 Subject: [PATCH 70/72] fix: flush database put to write end --- tools/auto/testCompression/testCompression.py | 64 +++++++++++-------- 1 file changed, 37 insertions(+), 27 deletions(-) diff --git a/tools/auto/testCompression/testCompression.py b/tools/auto/testCompression/testCompression.py index 1a0d714c44..4314817067 100644 --- 
a/tools/auto/testCompression/testCompression.py +++ b/tools/auto/testCompression/testCompression.py @@ -195,48 +195,55 @@ def findContextValue(context, label): def writeTemplateInfo(resultFile): # create info context = readFileContext(templateFile) + dbname = findContextValue(context, "name") vgroups = findContextValue(context, "vgroups") childCount = findContextValue(context, "childtable_count") insertRows = findContextValue(context, "insert_rows") line = f"vgroups = {vgroups}\nchildtable_count = {childCount}\ninsert_rows = {insertRows}\n\n" print(line) appendFileContext(resultFile, line) + return dbname def totalCompressRate(algo, resultFile, writeSpeed, querySpeed): global Number - # flush - command = 'taos -s "flush database dbrate;"' - rets = exec(command) - command = 'taos -s "compact database dbrate;"' - rets = exec(command) - waitCompactFinish(60) + loop = 30 - # read compress rate - command = 'taos -s "show table distributed dbrate.meters\G;"' - rets = runRetList(command) - print(rets) - str1 = rets[5] - arr = str1.split(" ") + while loop > 0: + loop -= 1 - # Total_Size KB - str2 = arr[2] - pos = str2.find("=[") - totalSize = int(float(str2[pos+2:])/1024) + # flush database + command = 'taos -s "flush database dbrate;"' + exec(command) + time.sleep(1) - # Compression_Ratio - str2 = arr[6] - pos = str2.find("=[") - rate = str2[pos+2:] - print("rate =" + rate) + # read compress rate + command = 'taos -s "show table distributed dbrate.meters\G;"' + rets = runRetList(command) + print(rets) - # total data file size - #dataSize = getFolderSize(f"{dataDir}/vnode/") - #dataSizeMB = int(dataSize/1024/1024) + str1 = rets[5] + arr = str1.split(" ") - # appand to file - + # Total_Size KB + str2 = arr[2] + pos = str2.find("=[") + totalSize = int(float(str2[pos+2:])/1024) + + # Compression_Ratio + str2 = arr[6] + pos = str2.find("=[") + rate = str2[pos+2:] + print("rate =" + rate) + if rate != "0.00": + break + + # total data file size + #dataSize = getFolderSize(f"{dataDir}/vnode/") + #dataSizeMB = int(dataSize/1024/1024) + + # appand to file Number += 1 context = "%10s %10s %10s %10s %30s %15s\n"%( Number, algo, str(totalSize)+" MB", rate+"%", writeSpeed + " Records/second", querySpeed) showLog(context) @@ -268,6 +275,10 @@ def testWrite(jsonFile): speed = context[pos: end] #print(f"write pos ={pos} end={end} speed={speed}\n output={context} \n") + + # flush database + command = 'taos -s "flush database dbrate;"' + exec(command) return speed def testQuery(): @@ -295,7 +306,6 @@ def testQuery(): def doTest(algo, resultFile): print(f"doTest algo: {algo} \n") - #cleanAndStartTaosd() # json jsonFile = generateJsonFile(algo) From 98702d8fb87750aa5f8bf8a8be32c3d7cf0e5d48 Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Sun, 20 Oct 2024 20:36:24 +0800 Subject: [PATCH 71/72] fix: query time from 20 reduce to 5 --- tools/auto/stmt2Performance/json/query.json | 2 +- tools/auto/testCompression/json/query.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/auto/stmt2Performance/json/query.json b/tools/auto/stmt2Performance/json/query.json index a6e50daae2..3b6750be13 100644 --- a/tools/auto/stmt2Performance/json/query.json +++ b/tools/auto/stmt2Performance/json/query.json @@ -8,7 +8,7 @@ "confirm_parameter_prompt": "no", "continue_if_fail": "yes", "databases": "dbrate", - "query_times": 20, + "query_times": 5, "query_mode": "taosc", "specified_table_query": { "query_interval": 0, diff --git a/tools/auto/testCompression/json/query.json 
b/tools/auto/testCompression/json/query.json index 35c39e831c..12bba6af9b 100644 --- a/tools/auto/testCompression/json/query.json +++ b/tools/auto/testCompression/json/query.json @@ -8,7 +8,7 @@ "confirm_parameter_prompt": "no", "continue_if_fail": "yes", "databases": "dbrate", - "query_times": 20, + "query_times": 5, "query_mode": "taosc", "specified_table_query": { "query_interval": 0, From 7a4275651172fa60868caa64bc5406996b506e2f Mon Sep 17 00:00:00 2001 From: xsren <285808407@qq.com> Date: Sun, 20 Oct 2024 22:36:52 +0800 Subject: [PATCH 72/72] replace snprintf with tsnprintf --- include/os/osSystem.h | 10 +++---- source/client/src/clientMain.c | 26 ++++++++--------- source/dnode/mnode/impl/src/mndSma.c | 2 +- source/libs/command/inc/commandInt.h | 10 +++---- source/libs/command/src/command.c | 2 +- source/libs/executor/src/groupcacheoperator.c | 4 +-- source/libs/function/src/builtinsimpl.c | 20 ++++++------- source/libs/scalar/src/filter.c | 28 +++++++++---------- source/libs/scalar/src/sclfunc.c | 8 +++--- source/libs/scalar/src/sclvector.c | 6 ++-- source/os/src/osTimezone.c | 2 +- utils/test/c/tmqSim.c | 4 +-- 12 files changed, 61 insertions(+), 61 deletions(-) diff --git a/include/os/osSystem.h b/include/os/osSystem.h index 44910ba94d..fe181d291a 100644 --- a/include/os/osSystem.h +++ b/include/os/osSystem.h @@ -89,9 +89,9 @@ int32_t taosResetTerminalMode(); snprintf(array[size], STACKSIZE, "0x%lx : (%s+0x%lx) [0x%lx]\n", (long)pc, fname, (long)offset, (long)pc); \ } \ if (ignoreNum < size && size > 0) { \ - offset = snprintf(buf, bufSize - 1, "obtained %d stack frames\n", (ignoreNum > 0) ? size - ignoreNum : size); \ + offset = tsnprintf(buf, bufSize - 1, "obtained %d stack frames\n", (ignoreNum > 0) ? size - ignoreNum : size); \ for (int32_t i = (ignoreNum > 0) ? ignoreNum : 0; i < size; i++) { \ - offset += snprintf(buf + offset, bufSize - 1 - offset, "frame:%d, %s\n", (ignoreNum > 0) ? i - ignoreNum : i, \ + offset += tsnprintf(buf + offset, bufSize - 1 - offset, "frame:%d, %s\n", (ignoreNum > 0) ? i - ignoreNum : i, \ array[i]); \ } \ } \ @@ -140,9 +140,9 @@ int32_t taosResetTerminalMode(); char **strings = backtrace_symbols(array, size); \ int32_t offset = 0; \ if (strings != NULL) { \ - offset = snprintf(buf, bufSize - 1, "obtained %d stack frames\n", (ignoreNum > 0) ? size - ignoreNum : size); \ + offset = tsnprintf(buf, bufSize - 1, "obtained %d stack frames\n", (ignoreNum > 0) ? size - ignoreNum : size); \ for (int32_t i = (ignoreNum > 0) ? ignoreNum : 0; i < size; i++) { \ - offset += snprintf(buf + offset, bufSize - 1 - offset, "frame:%d, %s\n", (ignoreNum > 0) ? i - ignoreNum : i, \ + offset += tsnprintf(buf + offset, bufSize - 1 - offset, "frame:%d, %s\n", (ignoreNum > 0) ? i - ignoreNum : i, \ strings[i]); \ } \ } \ @@ -193,7 +193,7 @@ int32_t taosResetTerminalMode(); snprintf(buf, bufSize - 1, "obtained %d stack frames\n", (ignoreNum > 0) ? frames - ignoreNum : frames); \ for (i = (ignoreNum > 0) ? ignoreNum : 0; i < frames; i++) { \ SymFromAddr(process, (DWORD64)(stack[i]), 0, symbol); \ - offset += snprintf(buf + offset, bufSize - 1 - offset, "frame:%i, %s - 0x%0X\n", \ + offset += tsnprintf(buf + offset, bufSize - 1 - offset, "frame:%i, %s - 0x%0X\n", \ (ignoreNum > 0) ? 
i - ignoreNum : i, symbol->Name, symbol->Address); \ } \ } \ diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c index 1a66266000..64631fd754 100644 --- a/source/client/src/clientMain.c +++ b/source/client/src/clientMain.c @@ -492,53 +492,53 @@ int taos_print_row_with_size(char *str, uint32_t size, TAOS_ROW row, TAOS_FIELD } if (row[i] == NULL) { - len += snprintf(str + len, size - len, "%s", TSDB_DATA_NULL_STR); + len += tsnprintf(str + len, size - len, "%s", TSDB_DATA_NULL_STR); continue; } switch (fields[i].type) { case TSDB_DATA_TYPE_TINYINT: - len += snprintf(str + len, size - len, "%d", *((int8_t *)row[i])); + len += tsnprintf(str + len, size - len, "%d", *((int8_t *)row[i])); break; case TSDB_DATA_TYPE_UTINYINT: - len += snprintf(str + len, size - len, "%u", *((uint8_t *)row[i])); + len += tsnprintf(str + len, size - len, "%u", *((uint8_t *)row[i])); break; case TSDB_DATA_TYPE_SMALLINT: - len += snprintf(str + len, size - len, "%d", *((int16_t *)row[i])); + len += tsnprintf(str + len, size - len, "%d", *((int16_t *)row[i])); break; case TSDB_DATA_TYPE_USMALLINT: - len += snprintf(str + len, size - len, "%u", *((uint16_t *)row[i])); + len += tsnprintf(str + len, size - len, "%u", *((uint16_t *)row[i])); break; case TSDB_DATA_TYPE_INT: - len += snprintf(str + len, size - len, "%d", *((int32_t *)row[i])); + len += tsnprintf(str + len, size - len, "%d", *((int32_t *)row[i])); break; case TSDB_DATA_TYPE_UINT: - len += snprintf(str + len, size - len, "%u", *((uint32_t *)row[i])); + len += tsnprintf(str + len, size - len, "%u", *((uint32_t *)row[i])); break; case TSDB_DATA_TYPE_BIGINT: - len += snprintf(str + len, size - len, "%" PRId64, *((int64_t *)row[i])); + len += tsnprintf(str + len, size - len, "%" PRId64, *((int64_t *)row[i])); break; case TSDB_DATA_TYPE_UBIGINT: - len += snprintf(str + len, size - len, "%" PRIu64, *((uint64_t *)row[i])); + len += tsnprintf(str + len, size - len, "%" PRIu64, *((uint64_t *)row[i])); break; case TSDB_DATA_TYPE_FLOAT: { float fv = 0; fv = GET_FLOAT_VAL(row[i]); - len += snprintf(str + len, size - len, "%f", fv); + len += tsnprintf(str + len, size - len, "%f", fv); } break; case TSDB_DATA_TYPE_DOUBLE: { double dv = 0; dv = GET_DOUBLE_VAL(row[i]); - len += snprintf(str + len, size - len, "%lf", dv); + len += tsnprintf(str + len, size - len, "%lf", dv); } break; case TSDB_DATA_TYPE_VARBINARY: { @@ -576,11 +576,11 @@ int taos_print_row_with_size(char *str, uint32_t size, TAOS_ROW row, TAOS_FIELD } break; case TSDB_DATA_TYPE_TIMESTAMP: - len += snprintf(str + len, size - len, "%" PRId64, *((int64_t *)row[i])); + len += tsnprintf(str + len, size - len, "%" PRId64, *((int64_t *)row[i])); break; case TSDB_DATA_TYPE_BOOL: - len += snprintf(str + len, size - len, "%d", *((int8_t *)row[i])); + len += tsnprintf(str + len, size - len, "%d", *((int8_t *)row[i])); default: break; } diff --git a/source/dnode/mnode/impl/src/mndSma.c b/source/dnode/mnode/impl/src/mndSma.c index fa2538f245..a54c7f1b14 100644 --- a/source/dnode/mnode/impl/src/mndSma.c +++ b/source/dnode/mnode/impl/src/mndSma.c @@ -2252,7 +2252,7 @@ static int32_t mndRetrieveTSMA(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlo if (nodeType(pFunc) == QUERY_NODE_FUNCTION) { SFunctionNode *pFuncNode = (SFunctionNode *)pFunc; if (!fmIsTSMASupportedFunc(pFuncNode->funcId)) continue; - len += snprintf(start, TSDB_MAX_SAVED_SQL_LEN - len, "%s%s", start != buf + VARSTR_HEADER_SIZE ? 
"," : "", + len += tsnprintf(start, TSDB_MAX_SAVED_SQL_LEN - len, "%s%s", start != buf + VARSTR_HEADER_SIZE ? "," : "", ((SExprNode *)pFunc)->userAlias); if (len >= TSDB_MAX_SAVED_SQL_LEN) { len = TSDB_MAX_SAVED_SQL_LEN; diff --git a/source/libs/command/inc/commandInt.h b/source/libs/command/inc/commandInt.h index e433d61860..feb1b3cc19 100644 --- a/source/libs/command/inc/commandInt.h +++ b/source/libs/command/inc/commandInt.h @@ -188,17 +188,17 @@ do { \ #define EXPLAIN_ROW_NEW(level, ...) \ do { \ if (isVerboseLine) { \ - tlen = snprintf(tbuf + VARSTR_HEADER_SIZE, TSDB_EXPLAIN_RESULT_ROW_SIZE - VARSTR_HEADER_SIZE, "%*s", (level) * 3 + 3, ""); \ + tlen = tsnprintf(tbuf + VARSTR_HEADER_SIZE, TSDB_EXPLAIN_RESULT_ROW_SIZE - VARSTR_HEADER_SIZE, "%*s", (level) * 3 + 3, ""); \ } else { \ - tlen = snprintf(tbuf + VARSTR_HEADER_SIZE, TSDB_EXPLAIN_RESULT_ROW_SIZE - VARSTR_HEADER_SIZE, "%*s%s", (level) * 3, "", "-> "); \ + tlen = tsnprintf(tbuf + VARSTR_HEADER_SIZE, TSDB_EXPLAIN_RESULT_ROW_SIZE - VARSTR_HEADER_SIZE, "%*s%s", (level) * 3, "", "-> "); \ } \ - tlen += snprintf(tbuf + VARSTR_HEADER_SIZE + tlen, TSDB_EXPLAIN_RESULT_ROW_SIZE - VARSTR_HEADER_SIZE - tlen, __VA_ARGS__); \ + tlen += tsnprintf(tbuf + VARSTR_HEADER_SIZE + tlen, TSDB_EXPLAIN_RESULT_ROW_SIZE - VARSTR_HEADER_SIZE - tlen, __VA_ARGS__); \ } while (0) -#define EXPLAIN_ROW_APPEND(...) tlen += snprintf(tbuf + VARSTR_HEADER_SIZE + tlen, TSDB_EXPLAIN_RESULT_ROW_SIZE - VARSTR_HEADER_SIZE - tlen, __VA_ARGS__) +#define EXPLAIN_ROW_APPEND(...) tlen += tsnprintf(tbuf + VARSTR_HEADER_SIZE + tlen, TSDB_EXPLAIN_RESULT_ROW_SIZE - VARSTR_HEADER_SIZE - tlen, __VA_ARGS__) #define EXPLAIN_ROW_END() do { varDataSetLen(tbuf, tlen); tlen += VARSTR_HEADER_SIZE; isVerboseLine = true; } while (0) -#define EXPLAIN_SUM_ROW_NEW(...) tlen = snprintf(tbuf + VARSTR_HEADER_SIZE, TSDB_EXPLAIN_RESULT_ROW_SIZE - VARSTR_HEADER_SIZE, __VA_ARGS__) +#define EXPLAIN_SUM_ROW_NEW(...) 
tlen = tsnprintf(tbuf + VARSTR_HEADER_SIZE, TSDB_EXPLAIN_RESULT_ROW_SIZE - VARSTR_HEADER_SIZE, __VA_ARGS__) #define EXPLAIN_SUM_ROW_END() do { varDataSetLen(tbuf, tlen); tlen += VARSTR_HEADER_SIZE; } while (0) #define EXPLAIN_ROW_APPEND_LIMIT_IMPL(_pLimit, sl) do { \ diff --git a/source/libs/command/src/command.c b/source/libs/command/src/command.c index 95c73763bf..b2417a8597 100644 --- a/source/libs/command/src/command.c +++ b/source/libs/command/src/command.c @@ -515,7 +515,7 @@ void appendColumnFields(char* buf, int32_t* len, STableCfg* pCfg) { TSDB_DATA_TYPE_GEOMETRY == pSchema->type) { typeLen += tsnprintf(type + typeLen, LTYPE_LEN - typeLen, "(%d)", (int32_t)(pSchema->bytes - VARSTR_HEADER_SIZE)); } else if (TSDB_DATA_TYPE_NCHAR == pSchema->type) { - typeLen += snprintf(type + typeLen, LTYPE_LEN - typeLen, "(%d)", + typeLen += tsnprintf(type + typeLen, LTYPE_LEN - typeLen, "(%d)", (int32_t)((pSchema->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE)); } diff --git a/source/libs/executor/src/groupcacheoperator.c b/source/libs/executor/src/groupcacheoperator.c index 648a2ea6d2..10b372319b 100644 --- a/source/libs/executor/src/groupcacheoperator.c +++ b/source/libs/executor/src/groupcacheoperator.c @@ -84,9 +84,9 @@ static void logGroupCacheExecInfo(SGroupCacheOperatorInfo* pGrpCacheOperator) { if (NULL == buf) { return; } - int32_t offset = snprintf(buf, bufSize, "groupCache exec info, downstreamBlkNum:"); + int32_t offset = tsnprintf(buf, bufSize, "groupCache exec info, downstreamBlkNum:"); for (int32_t i = 0; i < pGrpCacheOperator->downstreamNum; ++i) { - offset += snprintf(buf + offset, bufSize, " %" PRId64 , pGrpCacheOperator->execInfo.pDownstreamBlkNum[i]); + offset += tsnprintf(buf + offset, bufSize, " %" PRId64 , pGrpCacheOperator->execInfo.pDownstreamBlkNum[i]); } qDebug("%s", buf); taosMemoryFree(buf); diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index 9f50e705ca..08b25a23c5 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -4734,10 +4734,10 @@ int32_t histogramFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { int32_t len; char buf[512] = {0}; if (!pInfo->normalized) { - len = snprintf(varDataVal(buf), sizeof(buf) - VARSTR_HEADER_SIZE, "{\"lower_bin\":%g, \"upper_bin\":%g, \"count\":%" PRId64 "}", + len = tsnprintf(varDataVal(buf), sizeof(buf) - VARSTR_HEADER_SIZE, "{\"lower_bin\":%g, \"upper_bin\":%g, \"count\":%" PRId64 "}", pInfo->bins[i].lower, pInfo->bins[i].upper, pInfo->bins[i].count); } else { - len = snprintf(varDataVal(buf), sizeof(buf) - VARSTR_HEADER_SIZE, "{\"lower_bin\":%g, \"upper_bin\":%g, \"count\":%lf}", pInfo->bins[i].lower, + len = tsnprintf(varDataVal(buf), sizeof(buf) - VARSTR_HEADER_SIZE, "{\"lower_bin\":%g, \"upper_bin\":%g, \"count\":%lf}", pInfo->bins[i].lower, pInfo->bins[i].upper, pInfo->bins[i].percentage); } varDataSetLen(buf, len); @@ -6365,7 +6365,7 @@ int32_t blockDistFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { compRatio = pData->totalSize * 100 / (double)totalRawSize; } - int32_t len = snprintf(varDataVal(st), sizeof(st) - VARSTR_HEADER_SIZE, + int32_t len = tsnprintf(varDataVal(st), sizeof(st) - VARSTR_HEADER_SIZE, "Total_Blocks=[%d] Total_Size=[%.2f KiB] Average_size=[%.2f KiB] Compression_Ratio=[%.2f %c]", pData->numOfBlocks, pData->totalSize / 1024.0, averageSize / 1024.0, compRatio, '%'); @@ -6380,7 +6380,7 @@ int32_t blockDistFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { avgRows = pData->totalRows / 
pData->numOfBlocks; } - len = snprintf(varDataVal(st), sizeof(st) - VARSTR_HEADER_SIZE, "Block_Rows=[%" PRId64 "] MinRows=[%d] MaxRows=[%d] AvgRows=[%" PRId64 "]", + len = tsnprintf(varDataVal(st), sizeof(st) - VARSTR_HEADER_SIZE, "Block_Rows=[%" PRId64 "] MinRows=[%d] MaxRows=[%d] AvgRows=[%" PRId64 "]", pData->totalRows, pData->minRows, pData->maxRows, avgRows); varDataSetLen(st, len); code = colDataSetVal(pColInfo, row++, st, false); @@ -6388,14 +6388,14 @@ int32_t blockDistFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { return code; } - len = snprintf(varDataVal(st), sizeof(st) - VARSTR_HEADER_SIZE, "Inmem_Rows=[%d] Stt_Rows=[%d] ", pData->numOfInmemRows, pData->numOfSttRows); + len = tsnprintf(varDataVal(st), sizeof(st) - VARSTR_HEADER_SIZE, "Inmem_Rows=[%d] Stt_Rows=[%d] ", pData->numOfInmemRows, pData->numOfSttRows); varDataSetLen(st, len); code = colDataSetVal(pColInfo, row++, st, false); if (TSDB_CODE_SUCCESS != code) { return code; } - len = snprintf(varDataVal(st), sizeof(st) - VARSTR_HEADER_SIZE, "Total_Tables=[%d] Total_Filesets=[%d] Total_Vgroups=[%d]", pData->numOfTables, + len = tsnprintf(varDataVal(st), sizeof(st) - VARSTR_HEADER_SIZE, "Total_Tables=[%d] Total_Filesets=[%d] Total_Vgroups=[%d]", pData->numOfTables, pData->numOfFiles, pData->numOfVgroups); varDataSetLen(st, len); @@ -6404,7 +6404,7 @@ int32_t blockDistFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { return code; } - len = snprintf(varDataVal(st), sizeof(st) - VARSTR_HEADER_SIZE, + len = tsnprintf(varDataVal(st), sizeof(st) - VARSTR_HEADER_SIZE, "--------------------------------------------------------------------------------"); varDataSetLen(st, len); code = colDataSetVal(pColInfo, row++, st, false); @@ -6431,7 +6431,7 @@ int32_t blockDistFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { int32_t bucketRange = ceil(((double) (pData->defMaxRows - pData->defMinRows)) / numOfBuckets); for (int32_t i = 0; i < tListLen(pData->blockRowsHisto); ++i) { - len = snprintf(varDataVal(st), sizeof(st) - VARSTR_HEADER_SIZE, "%04d |", pData->defMinRows + bucketRange * (i + 1)); + len = tsnprintf(varDataVal(st), sizeof(st) - VARSTR_HEADER_SIZE, "%04d |", pData->defMinRows + bucketRange * (i + 1)); int32_t num = 0; if (pData->blockRowsHisto[i] > 0) { @@ -6439,13 +6439,13 @@ int32_t blockDistFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { } for (int32_t j = 0; j < num; ++j) { - int32_t x = snprintf(varDataVal(st) + len, sizeof(st) - VARSTR_HEADER_SIZE - len, "%c", '|'); + int32_t x = tsnprintf(varDataVal(st) + len, sizeof(st) - VARSTR_HEADER_SIZE - len, "%c", '|'); len += x; } if (pData->blockRowsHisto[i] > 0) { double v = pData->blockRowsHisto[i] * 100.0 / pData->numOfBlocks; - len += snprintf(varDataVal(st) + len, sizeof(st) - VARSTR_HEADER_SIZE - len, " %d (%.2f%c)", pData->blockRowsHisto[i], v, '%'); + len += tsnprintf(varDataVal(st) + len, sizeof(st) - VARSTR_HEADER_SIZE - len, " %d (%.2f%c)", pData->blockRowsHisto[i], v, '%'); } varDataSetLen(st, len); diff --git a/source/libs/scalar/src/filter.c b/source/libs/scalar/src/filter.c index 802bec00f8..03bc2b544b 100644 --- a/source/libs/scalar/src/filter.c +++ b/source/libs/scalar/src/filter.c @@ -1769,36 +1769,36 @@ int32_t fltConverToStr(char *str, int32_t strMaxLen, int type, void *buf, int32_ switch (type) { case TSDB_DATA_TYPE_NULL: - n = snprintf(str, strMaxLen, "null"); + n = tsnprintf(str, strMaxLen, "null"); break; case TSDB_DATA_TYPE_BOOL: - n = snprintf(str, strMaxLen, (*(int8_t *)buf) ? 
"true" : "false"); + n = tsnprintf(str, strMaxLen, (*(int8_t *)buf) ? "true" : "false"); break; case TSDB_DATA_TYPE_TINYINT: - n = snprintf(str, strMaxLen, "%d", *(int8_t *)buf); + n = tsnprintf(str, strMaxLen, "%d", *(int8_t *)buf); break; case TSDB_DATA_TYPE_SMALLINT: - n = snprintf(str, strMaxLen, "%d", *(int16_t *)buf); + n = tsnprintf(str, strMaxLen, "%d", *(int16_t *)buf); break; case TSDB_DATA_TYPE_INT: - n = snprintf(str, strMaxLen, "%d", *(int32_t *)buf); + n = tsnprintf(str, strMaxLen, "%d", *(int32_t *)buf); break; case TSDB_DATA_TYPE_BIGINT: case TSDB_DATA_TYPE_TIMESTAMP: - n = snprintf(str, strMaxLen, "%" PRId64, *(int64_t *)buf); + n = tsnprintf(str, strMaxLen, "%" PRId64, *(int64_t *)buf); break; case TSDB_DATA_TYPE_FLOAT: - n = snprintf(str, strMaxLen, "%e", GET_FLOAT_VAL(buf)); + n = tsnprintf(str, strMaxLen, "%e", GET_FLOAT_VAL(buf)); break; case TSDB_DATA_TYPE_DOUBLE: - n = snprintf(str, strMaxLen, "%e", GET_DOUBLE_VAL(buf)); + n = tsnprintf(str, strMaxLen, "%e", GET_DOUBLE_VAL(buf)); break; case TSDB_DATA_TYPE_BINARY: @@ -1817,19 +1817,19 @@ int32_t fltConverToStr(char *str, int32_t strMaxLen, int type, void *buf, int32_ break; case TSDB_DATA_TYPE_UTINYINT: - n = snprintf(str, strMaxLen, "%d", *(uint8_t *)buf); + n = tsnprintf(str, strMaxLen, "%d", *(uint8_t *)buf); break; case TSDB_DATA_TYPE_USMALLINT: - n = snprintf(str, strMaxLen, "%d", *(uint16_t *)buf); + n = tsnprintf(str, strMaxLen, "%d", *(uint16_t *)buf); break; case TSDB_DATA_TYPE_UINT: - n = snprintf(str, strMaxLen, "%u", *(uint32_t *)buf); + n = tsnprintf(str, strMaxLen, "%u", *(uint32_t *)buf); break; case TSDB_DATA_TYPE_UBIGINT: - n = snprintf(str, strMaxLen, "%" PRIu64, *(uint64_t *)buf); + n = tsnprintf(str, strMaxLen, "%" PRIu64, *(uint64_t *)buf); break; default: @@ -1886,7 +1886,7 @@ int32_t filterDumpInfoToString(SFilterInfo *info, const char *msg, int32_t optio SFilterField *left = FILTER_UNIT_LEFT_FIELD(info, unit); SColumnNode *refNode = (SColumnNode *)left->desc; if (unit->compare.optr <= OP_TYPE_JSON_CONTAINS) { - len += snprintf(str, sizeof(str), "UNIT[%d] => [%d][%d] %s [", i, refNode->dataBlockId, refNode->slotId, + len += tsnprintf(str, sizeof(str), "UNIT[%d] => [%d][%d] %s [", i, refNode->dataBlockId, refNode->slotId, operatorTypeStr(unit->compare.optr)); } @@ -1912,7 +1912,7 @@ int32_t filterDumpInfoToString(SFilterInfo *info, const char *msg, int32_t optio (void)strncat(str, " && ", sizeof(str) - len - 1); len += 4; if (unit->compare.optr2 <= OP_TYPE_JSON_CONTAINS) { - len += snprintf(str + len, sizeof(str) - len, "[%d][%d] %s [", refNode->dataBlockId, + len += tsnprintf(str + len, sizeof(str) - len, "[%d][%d] %s [", refNode->dataBlockId, refNode->slotId, operatorTypeStr(unit->compare.optr2)); } diff --git a/source/libs/scalar/src/sclfunc.c b/source/libs/scalar/src/sclfunc.c index 341ce760f5..95c217f867 100644 --- a/source/libs/scalar/src/sclfunc.c +++ b/source/libs/scalar/src/sclfunc.c @@ -2068,7 +2068,7 @@ int32_t castFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutp case TSDB_DATA_TYPE_GEOMETRY: { if (inputType == TSDB_DATA_TYPE_BOOL) { // NOTE: snprintf will append '\0' at the end of string - int32_t len = snprintf(varDataVal(output), outputLen + TSDB_NCHAR_SIZE - VARSTR_HEADER_SIZE, "%.*s", + int32_t len = tsnprintf(varDataVal(output), outputLen + TSDB_NCHAR_SIZE - VARSTR_HEADER_SIZE, "%.*s", (int32_t)(outputLen - VARSTR_HEADER_SIZE), *(int8_t *)input ? 
"true" : "false"); varDataSetLen(output, len); } else if (inputType == TSDB_DATA_TYPE_BINARY) { @@ -2109,7 +2109,7 @@ int32_t castFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutp int32_t len; if (inputType == TSDB_DATA_TYPE_BOOL) { char tmp[8] = {0}; - len = snprintf(tmp, sizeof(tmp), "%.*s", outputCharLen, *(int8_t *)input ? "true" : "false"); + len = tsnprintf(tmp, sizeof(tmp), "%.*s", outputCharLen, *(int8_t *)input ? "true" : "false"); bool ret = taosMbsToUcs4(tmp, len, (TdUcs4 *)varDataVal(output), outputLen - VARSTR_HEADER_SIZE, &len); if (!ret) { code = TSDB_CODE_SCALAR_CONVERT_ERROR; @@ -4515,10 +4515,10 @@ int32_t histogramScalarFunction(SScalarParam *pInput, int32_t inputNum, SScalarP int32_t len; char buf[512] = {0}; if (!normalized) { - len = snprintf(varDataVal(buf), sizeof(buf) - VARSTR_HEADER_SIZE, "{\"lower_bin\":%g, \"upper_bin\":%g, \"count\":%" PRId64 "}", + len = tsnprintf(varDataVal(buf), sizeof(buf) - VARSTR_HEADER_SIZE, "{\"lower_bin\":%g, \"upper_bin\":%g, \"count\":%" PRId64 "}", bins[k].lower, bins[k].upper, bins[k].count); } else { - len = snprintf(varDataVal(buf), sizeof(buf) - VARSTR_HEADER_SIZE, "{\"lower_bin\":%g, \"upper_bin\":%g, \"count\":%lf}", + len = tsnprintf(varDataVal(buf), sizeof(buf) - VARSTR_HEADER_SIZE, "{\"lower_bin\":%g, \"upper_bin\":%g, \"count\":%lf}", bins[k].lower, bins[k].upper, bins[k].percentage); } varDataSetLen(buf, len); diff --git a/source/libs/scalar/src/sclvector.c b/source/libs/scalar/src/sclvector.c index a7c842172a..a086d1f367 100644 --- a/source/libs/scalar/src/sclvector.c +++ b/source/libs/scalar/src/sclvector.c @@ -734,7 +734,7 @@ int32_t vectorConvertToVarData(SSclVectorConvCtx *pCtx) { int64_t value = 0; GET_TYPED_DATA(value, int64_t, pCtx->inType, colDataGetData(pInputCol, i)); - int32_t len = snprintf(varDataVal(tmp), sizeof(tmp) - VARSTR_HEADER_SIZE, "%" PRId64, value); + int32_t len = tsnprintf(varDataVal(tmp), sizeof(tmp) - VARSTR_HEADER_SIZE, "%" PRId64, value); varDataLen(tmp) = len; if (pCtx->outType == TSDB_DATA_TYPE_NCHAR) { SCL_ERR_RET(varToNchar(tmp, pCtx->pOut, i, NULL)); @@ -751,7 +751,7 @@ int32_t vectorConvertToVarData(SSclVectorConvCtx *pCtx) { uint64_t value = 0; GET_TYPED_DATA(value, uint64_t, pCtx->inType, colDataGetData(pInputCol, i)); - int32_t len = snprintf(varDataVal(tmp), sizeof(tmp) - VARSTR_HEADER_SIZE, "%" PRIu64, value); + int32_t len = tsnprintf(varDataVal(tmp), sizeof(tmp) - VARSTR_HEADER_SIZE, "%" PRIu64, value); varDataLen(tmp) = len; if (pCtx->outType == TSDB_DATA_TYPE_NCHAR) { SCL_ERR_RET(varToNchar(tmp, pCtx->pOut, i, NULL)); @@ -768,7 +768,7 @@ int32_t vectorConvertToVarData(SSclVectorConvCtx *pCtx) { double value = 0; GET_TYPED_DATA(value, double, pCtx->inType, colDataGetData(pInputCol, i)); - int32_t len = snprintf(varDataVal(tmp), sizeof(tmp) - VARSTR_HEADER_SIZE, "%lf", value); + int32_t len = tsnprintf(varDataVal(tmp), sizeof(tmp) - VARSTR_HEADER_SIZE, "%lf", value); varDataLen(tmp) = len; if (pCtx->outType == TSDB_DATA_TYPE_NCHAR) { SCL_ERR_RET(varToNchar(tmp, pCtx->pOut, i, NULL)); diff --git a/source/os/src/osTimezone.c b/source/os/src/osTimezone.c index 5eded97cde..89c7ce9e31 100644 --- a/source/os/src/osTimezone.c +++ b/source/os/src/osTimezone.c @@ -805,7 +805,7 @@ int32_t taosSetSystemTimezone(const char *inTimezoneStr, char *outTimezoneStr, i char *ppp = strchr(inTimezoneStr, ','); int indexStr; if (pp == NULL || ppp == NULL) { - indexStr = snprintf(winStr, sizeof(winStr), "TZ=UTC"); + indexStr = tsnprintf(winStr, sizeof(winStr), "TZ=UTC"); } else { 
memcpy(winStr, "TZ=", 3); pp++; diff --git a/utils/test/c/tmqSim.c b/utils/test/c/tmqSim.c index e2a09c4259..c045629d1f 100644 --- a/utils/test/c/tmqSim.c +++ b/utils/test/c/tmqSim.c @@ -1275,10 +1275,10 @@ void* ombProduceThreadFunc(void* param) { msgsOfSql = remainder; } int len = 0; - len += snprintf(sqlBuf + len, MAX_SQL_LEN - len, "insert into %s values ", ctbName); + len += tsnprintf(sqlBuf + len, MAX_SQL_LEN - len, "insert into %s values ", ctbName); for (int j = 0; j < msgsOfSql; j++) { int64_t timeStamp = taosGetTimestampNs(); - len += snprintf(sqlBuf + len, MAX_SQL_LEN - len, "(%" PRId64 ", \"%s\")", timeStamp, g_payload); + len += tsnprintf(sqlBuf + len, MAX_SQL_LEN - len, "(%" PRId64 ", \"%s\")", timeStamp, g_payload); sendMsgs++; pInfo->totalProduceMsgs++; }