From f8111bbf2d39d0a46d5f19da516965fe529ae340 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 23:56:28 +0000 Subject: [PATCH 01/45] build(deps): bump golang.org/x/crypto in /tools/keeper Bumps [golang.org/x/crypto](https://github.com/golang/crypto) from 0.21.0 to 0.31.0. - [Commits](https://github.com/golang/crypto/compare/v0.21.0...v0.31.0) --- updated-dependencies: - dependency-name: golang.org/x/crypto dependency-type: indirect ... Signed-off-by: dependabot[bot] --- tools/keeper/go.mod | 6 +++--- tools/keeper/go.sum | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/tools/keeper/go.mod b/tools/keeper/go.mod index f8edf2709b..00a399768d 100644 --- a/tools/keeper/go.mod +++ b/tools/keeper/go.mod @@ -70,10 +70,10 @@ require ( github.com/ugorji/go/codec v1.2.12 // indirect github.com/yusufpapurcu/wmi v1.2.2 // indirect golang.org/x/arch v0.7.0 // indirect - golang.org/x/crypto v0.21.0 // indirect + golang.org/x/crypto v0.31.0 // indirect golang.org/x/net v0.23.0 // indirect - golang.org/x/sys v0.24.0 // indirect - golang.org/x/text v0.14.0 // indirect + golang.org/x/sys v0.28.0 // indirect + golang.org/x/text v0.21.0 // indirect google.golang.org/protobuf v1.33.0 // indirect gopkg.in/ini.v1 v1.66.4 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/tools/keeper/go.sum b/tools/keeper/go.sum index 8f6e9bd13a..f879c731ba 100644 --- a/tools/keeper/go.sum +++ b/tools/keeper/go.sum @@ -424,8 +424,8 @@ golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= -golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -573,8 +573,8 @@ golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -583,8 +583,8 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= From 48437191ee08f9c17a5669e9f088ad1e5d405cba Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 19 Dec 2024 16:26:17 +0800 Subject: [PATCH 02/45] fix(analytics): fix check return value error. --- source/util/src/tanalytics.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/source/util/src/tanalytics.c b/source/util/src/tanalytics.c index 68bbbb7e99..3812295381 100644 --- a/source/util/src/tanalytics.c +++ b/source/util/src/tanalytics.c @@ -478,11 +478,13 @@ static int32_t taosAnalJsonBufWriteStrUseCol(SAnalyticBuf *pBuf, const char *buf } if (pBuf->bufType == ANALYTICS_BUF_TYPE_JSON) { - if (taosWriteFile(pBuf->filePtr, buf, bufLen) != bufLen) { + int32_t ret = taosWriteFile(pBuf->filePtr, buf, bufLen); + if (ret < 0) { return terrno; } } else { - if (taosWriteFile(pBuf->pCols[colIndex].filePtr, buf, bufLen) != bufLen) { + int32_t ret = taosWriteFile(pBuf->pCols[colIndex].filePtr, buf, bufLen); + if (ret < 0) { return terrno; } } From 8b90e0cd8fdca5468945206a2bcc3bf2efed6662 Mon Sep 17 00:00:00 2001 From: dmchen Date: Thu, 19 Dec 2024 09:11:55 +0000 Subject: [PATCH 03/45] fix/TD-33284-compact-coverage-add-kill-compact --- tests/system-test/0-others/compact.py | 84 +++++++++++++++++++++++++++ 1 file changed, 84 insertions(+) create mode 100644 tests/system-test/0-others/compact.py diff --git a/tests/system-test/0-others/compact.py b/tests/system-test/0-others/compact.py new file mode 100644 index 0000000000..e51926cc28 --- /dev/null +++ b/tests/system-test/0-others/compact.py @@ -0,0 +1,84 @@ +from util.log import * +from util.cases import * +from util.dnodes import * +from util.sql import * + +import socket +import taos + +class TDTestCase: + def init(self, conn, logSql, replicaVar=1): + tdLog.debug(f"start to excute {__file__}") + self.replicaVar = int(replicaVar) + + def run(self): + + tdSql.query("CREATE DATABASE power KEEP 365 DURATION 10 BUFFER 16 WAL_LEVEL 1 vgroups 1 replica 1;") + + tdSql.query("CREATE DATABASE power1 KEEP 365 DURATION 10 BUFFER 16 WAL_LEVEL 1 vgroups 1 replica 1;") + + #first + tdSql.query("compact database power;") + + tdLog.info("compact id:%d"%tdSql.queryResult[0][1]) + + tdSql.query("show compact %d;"%tdSql.queryResult[0][1]) + + tdLog.info("detail:%d"%tdSql.queryRows) + + #second + tdSql.query("compact database power1;") + + tdLog.info("compact id:%d"%tdSql.queryResult[0][1]) + + tdSql.query("show compact %d;"%tdSql.queryResult[0][1]) + + tdLog.info("detail:%d"%tdSql.queryRows) + + + #kill + tdSql.query("show compacts;") + 
number1 = tdSql.queryResult[0][0] + number2 = tdSql.queryResult[1][0] + + #first + tdLog.info("kill compact %d;"%number1) + tdSql.query("kill compact %d;"%number1) + + #second + tdLog.info("kill compact %d;"%number2) + tdSql.query("kill compact %d;"%number2) + + + #show + count = 0 + tdLog.info("query progress") + while count < 50: + tdSql.query("show compact %d;"%number1) + + row1 = tdSql.queryRows + + tdSql.query("show compact %d;"%number2) + + row2 = tdSql.queryRows + + tdLog.info("compact%d:detail count:%d"%(number1, row1)) + tdLog.info("compact%d:detail count:%d"%(number2, row2)) + + if row1 == 0 and row2 == 0 : + break + + time.sleep(1) + + count +=1 + tdLog.info("loop%d"%count) + + #self.Fun.executeSQL("kill compact %d"%tdSql.queryResult[0][1]) + + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file From 290fb82e61d378a1f53e24f49b592355de583c27 Mon Sep 17 00:00:00 2001 From: dmchen Date: Thu, 19 Dec 2024 09:31:53 +0000 Subject: [PATCH 04/45] add-compact-coverage-case --- tests/parallel_test/cases.task | 2 +- tests/system-test/0-others/compact.py | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 00a17c8c96..dbcf002bf6 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -402,7 +402,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/persisit_config.py ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/qmemCtrl.py ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/compact_vgroups.py - +,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/compact.py -N 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/composite_primary_key_create.py ,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/composite_primary_key_insert.py diff --git a/tests/system-test/0-others/compact.py b/tests/system-test/0-others/compact.py index e51926cc28..eb2938e399 100644 --- a/tests/system-test/0-others/compact.py +++ b/tests/system-test/0-others/compact.py @@ -71,9 +71,10 @@ class TDTestCase: time.sleep(1) count +=1 - tdLog.info("loop%d"%count) + #tdLog.info("loop%d"%count) - #self.Fun.executeSQL("kill compact %d"%tdSql.queryResult[0][1]) + if row1 != 0 or row2 != 0: + tdLog.exit("compact failed") def stop(self): From 9715bb59fe9e3608fba7ab350f5ad1d728a50c6c Mon Sep 17 00:00:00 2001 From: dmchen Date: Fri, 20 Dec 2024 11:33:26 +0800 Subject: [PATCH 05/45] fix/TD-33294-compact-coverage-fix-case --- source/dnode/mnode/impl/src/mndCompact.c | 9 +++++++-- source/dnode/vnode/src/vnd/vnodeSvr.c | 4 ++-- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/source/dnode/mnode/impl/src/mndCompact.c b/source/dnode/mnode/impl/src/mndCompact.c index e8f7202986..33a6ddcc5d 100644 --- a/source/dnode/mnode/impl/src/mndCompact.c +++ b/source/dnode/mnode/impl/src/mndCompact.c @@ -348,6 +348,7 @@ static void *mndBuildKillCompactReq(SMnode *pMnode, SVgObj *pVgroup, int32_t *pC req.compactId = compactId; req.vgId = pVgroup->vgId; req.dnodeId = dnodeid; + terrno = 0; mInfo("vgId:%d, build compact vnode config req", pVgroup->vgId); int32_t contLen = tSerializeSVKillCompactReq(NULL, 0, &req); @@ -367,8 +368,10 @@ static void *mndBuildKillCompactReq(SMnode *pMnode, SVgObj *pVgroup, int32_t *pC pHead->contLen = htonl(contLen); pHead->vgId = htonl(pVgroup->vgId); - if ((contLen = tSerializeSVKillCompactReq((char *)pReq 
+ sizeof(SMsgHead), contLen, &req)) < 0) { - terrno = contLen; + mTrace("vgId:%d, build compact vnode config req, contLen:%d", pVgroup->vgId, contLen); + int32_t ret = 0; + if ((ret = tSerializeSVKillCompactReq((char *)pReq + sizeof(SMsgHead), contLen, &req)) < 0) { + terrno = ret; return NULL; } *pContLen = contLen; @@ -401,6 +404,8 @@ static int32_t mndAddKillCompactAction(SMnode *pMnode, STrans *pTrans, SVgObj *p action.contLen = contLen; action.msgType = TDMT_VND_KILL_COMPACT; + mTrace("trans:%d, kill compact msg len:%d", pTrans->id, contLen); + if ((code = mndTransAppendRedoAction(pTrans, &action)) != 0) { taosMemoryFree(pReq); TAOS_RETURN(code); diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index e82209e03f..6291e0c9c7 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -607,9 +607,9 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t ver, SRpcMsg } vDebug("vgId:%d, start to process write request %s, index:%" PRId64 ", applied:%" PRId64 ", state.applyTerm:%" PRId64 - ", conn.applyTerm:%" PRId64, + ", conn.applyTerm:%" PRId64 ", contLen:%d", TD_VID(pVnode), TMSG_INFO(pMsg->msgType), ver, pVnode->state.applied, pVnode->state.applyTerm, - pMsg->info.conn.applyTerm); + pMsg->info.conn.applyTerm, pMsg->contLen); if (!(pVnode->state.applyTerm <= pMsg->info.conn.applyTerm)) { return terrno = TSDB_CODE_INTERNAL_ERROR; From d927e8c31a35f719c66c9bab0f4887839b8fe132 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Fri, 20 Dec 2024 16:50:49 +0800 Subject: [PATCH 06/45] fix:[TD-33272]refactor code --- source/dnode/vnode/src/tq/tqOffset.c | 29 +- source/dnode/vnode/src/tq/tqSnapshot.c | 226 +++++++------- source/libs/parser/src/parInsertSml.c | 391 +++++++++---------------- source/os/src/osString.c | 2 - 4 files changed, 260 insertions(+), 388 deletions(-) diff --git a/source/dnode/vnode/src/tq/tqOffset.c b/source/dnode/vnode/src/tq/tqOffset.c index c42959971b..4d90091701 100644 --- a/source/dnode/vnode/src/tq/tqOffset.c +++ b/source/dnode/vnode/src/tq/tqOffset.c @@ -40,13 +40,11 @@ int32_t tqOffsetRestoreFromFile(STQ* pTq, char* name) { return TSDB_CODE_INVALID_MSG; } int32_t code = TDB_CODE_SUCCESS; + int32_t lino = 0; void* pMemBuf = NULL; TdFilePtr pFile = taosOpenFile(name, TD_FILE_READ); - if (pFile == NULL) { - code = TDB_CODE_SUCCESS; - goto END; - } + TSDB_CHECK_NULL(pFile, code, lino, END, TDB_CODE_SUCCESS); int64_t ret = 0; int32_t size = 0; @@ -60,24 +58,16 @@ int32_t tqOffsetRestoreFromFile(STQ* pTq, char* name) { } total += INT_BYTES; size = htonl(size); - if (size <= 0) { - code = TSDB_CODE_INVALID_MSG; - goto END; - } - pMemBuf = taosMemoryCalloc(1, size); - if (pMemBuf == NULL) { - code = terrno; - goto END; - } + TSDB_CHECK_CONDITION(size > 0, code, lino, END, TSDB_CODE_INVALID_MSG); - if (taosReadFile(pFile, pMemBuf, size) != size) { - terrno = TSDB_CODE_INVALID_MSG; - goto END; - } + pMemBuf = taosMemoryCalloc(1, size); + TSDB_CHECK_NULL(pMemBuf, code, lino, END, terrno); + TSDB_CHECK_CONDITION(taosReadFile(pFile, pMemBuf, size) == size, code, lino, END, TSDB_CODE_INVALID_MSG); total += size; STqOffset offset = {0}; - TQ_ERR_GO_TO_END(tqMetaDecodeOffsetInfo(&offset, pMemBuf, size)); + code = tqMetaDecodeOffsetInfo(&offset, pMemBuf, size); + TSDB_CHECK_CODE(code, lino, END); code = taosHashPut(pTq->pOffset, offset.subKey, strlen(offset.subKey), &offset, sizeof(STqOffset)); if (code != TDB_CODE_SUCCESS) { tDeleteSTqOffset(&offset); @@ -100,6 +90,9 @@ int32_t 
tqOffsetRestoreFromFile(STQ* pTq, char* name) { } END: + if (code != 0){ + tqError("%s failed at %d since %s", __func__, lino, tstrerror(code)); + } taosCloseFile(&pFile); taosMemoryFree(pMemBuf); diff --git a/source/dnode/vnode/src/tq/tqSnapshot.c b/source/dnode/vnode/src/tq/tqSnapshot.c index 219ea4b6b4..ddbbba57a0 100644 --- a/source/dnode/vnode/src/tq/tqSnapshot.c +++ b/source/dnode/vnode/src/tq/tqSnapshot.c @@ -27,18 +27,16 @@ struct STqSnapReader { }; int32_t tqSnapReaderOpen(STQ* pTq, int64_t sver, int64_t ever, int8_t type, STqSnapReader** ppReader) { - if (pTq == NULL || ppReader == NULL) { - return TSDB_CODE_INVALID_MSG; - } - int32_t code = 0; + int code = TSDB_CODE_SUCCESS; + int32_t lino = 0; STqSnapReader* pReader = NULL; + TSDB_CHECK_NULL(pTq, code, lino, end, TSDB_CODE_INVALID_MSG); + TSDB_CHECK_NULL(ppReader, code, lino, end, TSDB_CODE_INVALID_MSG); // alloc pReader = (STqSnapReader*)taosMemoryCalloc(1, sizeof(STqSnapReader)); - if (pReader == NULL) { - code = terrno; - goto _err; - } + TSDB_CHECK_NULL(pReader, code, lino, end, terrno); + pReader->pTq = pTq; pReader->sver = sver; pReader->ever = ever; @@ -54,28 +52,21 @@ int32_t tqSnapReaderOpen(STQ* pTq, int64_t sver, int64_t ever, int8_t type, STqS pTb = pTq->pOffsetStore; } else { code = TSDB_CODE_INVALID_MSG; - goto _err; + goto end; } code = tdbTbcOpen(pTb, &pReader->pCur, NULL); - if (code) { - taosMemoryFree(pReader); - goto _err; - } - + TSDB_CHECK_CODE(code, lino, end); code = tdbTbcMoveToFirst(pReader->pCur); - if (code) { - taosMemoryFree(pReader); - goto _err; + TSDB_CHECK_CODE(code, lino, end); + tqInfo("vgId:%d, vnode tq snapshot reader opene success", TD_VID(pTq->pVnode)); + *ppReader = pReader; + +end: + if (code != 0){ + tqError("%s failed at %d, vnode tq snapshot reader open failed since %s", __func__, lino, tstrerror(code)); + taosMemoryFreeClear(pReader); } - tqInfo("vgId:%d, vnode snapshot tq reader opened", TD_VID(pTq->pVnode)); - - *ppReader = pReader; - return code; - -_err: - tqError("vgId:%d, vnode snapshot tq reader open failed since %s", TD_VID(pTq->pVnode), tstrerror(code)); - *ppReader = NULL; return code; } @@ -84,45 +75,37 @@ void tqSnapReaderClose(STqSnapReader** ppReader) { return; } tdbTbcClose((*ppReader)->pCur); - taosMemoryFree(*ppReader); - *ppReader = NULL; + taosMemoryFreeClear(*ppReader); } int32_t tqSnapRead(STqSnapReader* pReader, uint8_t** ppData) { - if (pReader == NULL || ppData == NULL) { - return TSDB_CODE_INVALID_MSG; - } - int32_t code = 0; - void* pKey = NULL; - void* pVal = NULL; - int32_t kLen = 0; - int32_t vLen = 0; + int code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + void* pKey = NULL; + void* pVal = NULL; + int32_t kLen = 0; + int32_t vLen = 0; + TSDB_CHECK_NULL(pReader, code, lino, end, TSDB_CODE_INVALID_MSG); + TSDB_CHECK_NULL(ppData, code, lino, end, TSDB_CODE_INVALID_MSG); - if (tdbTbcNext(pReader->pCur, &pKey, &kLen, &pVal, &vLen)) { - goto _exit; - } + code = tdbTbcNext(pReader->pCur, &pKey, &kLen, &pVal, &vLen); + TSDB_CHECK_CODE(code, lino, end); *ppData = taosMemoryMalloc(sizeof(SSnapDataHdr) + vLen); - if (*ppData == NULL) { - code = TAOS_GET_TERRNO(TSDB_CODE_OUT_OF_MEMORY); - goto _err; - } + TSDB_CHECK_NULL(*ppData, code, lino, end, terrno); SSnapDataHdr* pHdr = (SSnapDataHdr*)(*ppData); pHdr->type = pReader->type; pHdr->size = vLen; (void)memcpy(pHdr->data, pVal, vLen); + tqInfo("vgId:%d, vnode tq snapshot read data, vLen:%d", TD_VID(pReader->pTq->pVnode), vLen); -_exit: +end: + if (code != 0) { + tqError("%s failed at %d, vnode tq snapshot read data 
failed since %s", __func__, lino, tstrerror(code)); + } tdbFree(pKey); tdbFree(pVal); - tqInfo("vgId:%d, vnode snapshot tq read data, vLen:%d", TD_VID(pReader->pTq->pVnode), vLen); - return code; - -_err: - tdbFree(pKey); - tdbFree(pVal); - tqError("vgId:%d, vnode snapshot tq read data failed since %s", TD_VID(pReader->pTq->pVnode), tstrerror(code)); return code; } @@ -135,135 +118,148 @@ struct STqSnapWriter { }; int32_t tqSnapWriterOpen(STQ* pTq, int64_t sver, int64_t ever, STqSnapWriter** ppWriter) { - if (pTq == NULL || ppWriter == NULL) { - return TSDB_CODE_INVALID_MSG; - } - int32_t code = 0; + int code = TSDB_CODE_SUCCESS; + int32_t lino = 0; STqSnapWriter* pWriter = NULL; + TSDB_CHECK_NULL(pTq, code, lino, end, TSDB_CODE_INVALID_MSG); + TSDB_CHECK_NULL(ppWriter, code, lino, end, TSDB_CODE_INVALID_MSG); + // alloc pWriter = (STqSnapWriter*)taosMemoryCalloc(1, sizeof(*pWriter)); - if (pWriter == NULL) { - code = TAOS_GET_TERRNO(TSDB_CODE_OUT_OF_MEMORY); - ; - goto _err; - } + TSDB_CHECK_NULL(pWriter, code, lino, end, terrno); pWriter->pTq = pTq; pWriter->sver = sver; pWriter->ever = ever; code = tdbBegin(pTq->pMetaDB, &pWriter->txn, tdbDefaultMalloc, tdbDefaultFree, NULL, 0); - if (code < 0) { - taosMemoryFree(pWriter); - goto _err; - } - + TSDB_CHECK_CODE(code, lino, end); + tqInfo("vgId:%d, tq snapshot writer opene success", TD_VID(pTq->pVnode)); *ppWriter = pWriter; - return code; -_err: - tqError("vgId:%d, tq snapshot writer open failed since %s", TD_VID(pTq->pVnode), tstrerror(code)); - *ppWriter = NULL; +end: + if (code != 0){ + tqError("%s failed at %d tq snapshot writer open failed since %s", __func__, lino, tstrerror(code)); + taosMemoryFreeClear(pWriter); + } return code; } int32_t tqSnapWriterClose(STqSnapWriter** ppWriter, int8_t rollback) { - if (ppWriter == NULL || *ppWriter == NULL) { - return TSDB_CODE_INVALID_MSG; - } - int32_t code = 0; - STqSnapWriter* pWriter = *ppWriter; - STQ* pTq = pWriter->pTq; + int code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + STqSnapWriter* pWriter = NULL; + + TSDB_CHECK_NULL(ppWriter, code, lino, end, TSDB_CODE_INVALID_MSG); + TSDB_CHECK_NULL(*ppWriter, code, lino, end, TSDB_CODE_INVALID_MSG); + pWriter = *ppWriter; if (rollback) { tdbAbort(pWriter->pTq->pMetaDB, pWriter->txn); } else { code = tdbCommit(pWriter->pTq->pMetaDB, pWriter->txn); - if (code) goto _err; + TSDB_CHECK_CODE(code, lino, end); + code = tdbPostCommit(pWriter->pTq->pMetaDB, pWriter->txn); - if (code) goto _err; + TSDB_CHECK_CODE(code, lino, end); } + tqInfo("vgId:%d, tq snapshot writer close success", TD_VID(pWriter->pTq->pVnode)); + taosMemoryFreeClear(*ppWriter); - taosMemoryFree(pWriter); - *ppWriter = NULL; - - return code; - -_err: - tqError("vgId:%d, tq snapshot writer close failed since %s", TD_VID(pTq->pVnode), tstrerror(code)); +end: + if (code != 0){ + tqError("%s failed at %d, tq snapshot writer close failed since %s", __func__, lino, tstrerror(code)); + } return code; } -int32_t tqSnapHandleWrite(STqSnapWriter* pWriter, uint8_t* pData, uint32_t nData) { - if (pWriter == NULL || pData == NULL || nData < sizeof(SSnapDataHdr)) { - return TSDB_CODE_INVALID_MSG; +static int32_t tqWriteCheck(STqSnapWriter* pWriter, uint8_t* pData, uint32_t nData){ + int code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + TSDB_CHECK_NULL(pWriter, code, lino, end, TSDB_CODE_INVALID_MSG); + TSDB_CHECK_NULL(pData, code, lino, end, TSDB_CODE_INVALID_MSG); + TSDB_CHECK_CONDITION(nData >= sizeof(SSnapDataHdr), code, lino, end, TSDB_CODE_INVALID_MSG); +end: + if (code != 0){ + tqError("%s 
failed at %d failed since %s", __func__, lino, tstrerror(code)); } - int32_t code = 0; - STQ* pTq = pWriter->pTq; + return code; +} +int32_t tqSnapHandleWrite(STqSnapWriter* pWriter, uint8_t* pData, uint32_t nData) { + int code = TSDB_CODE_SUCCESS; + int32_t lino = 0; SDecoder decoder = {0}; SDecoder* pDecoder = &decoder; STqHandle handle = {0}; + code = tqWriteCheck(pWriter, pData, nData); + TSDB_CHECK_CODE(code, lino, end); + STQ* pTq = pWriter->pTq; tDecoderInit(pDecoder, pData + sizeof(SSnapDataHdr), nData - sizeof(SSnapDataHdr)); code = tDecodeSTqHandle(pDecoder, &handle); - if (code) goto end; + TSDB_CHECK_CODE(code, lino, end); + taosWLockLatch(&pTq->lock); code = tqMetaSaveInfo(pTq, pTq->pExecStore, handle.subKey, strlen(handle.subKey), pData + sizeof(SSnapDataHdr), nData - sizeof(SSnapDataHdr)); taosWUnLockLatch(&pTq->lock); + TSDB_CHECK_CODE(code, lino, end); + tqInfo("vgId:%d, vnode tq snapshot write success", TD_VID(pTq->pVnode)); end: tDecoderClear(pDecoder); tqDestroyTqHandle(&handle); - tqInfo("vgId:%d, vnode snapshot tq write result:%d", TD_VID(pTq->pVnode), code); + if (code != 0){ + tqError("%s failed at %d, vnode tq snapshot write failed since %s", __func__, lino, tstrerror(code)); + } return code; } int32_t tqSnapCheckInfoWrite(STqSnapWriter* pWriter, uint8_t* pData, uint32_t nData) { - if (pWriter == NULL || pData == NULL || nData < sizeof(SSnapDataHdr)) { - return TSDB_CODE_INVALID_MSG; - } - int32_t code = 0; + int code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + + code = tqWriteCheck(pWriter, pData, nData); + TSDB_CHECK_CODE(code, lino, end); + STQ* pTq = pWriter->pTq; STqCheckInfo info = {0}; code = tqMetaDecodeCheckInfo(&info, pData + sizeof(SSnapDataHdr), nData - sizeof(SSnapDataHdr)); - if (code != 0) { - goto _err; - } + TSDB_CHECK_CODE(code, lino, end); code = tqMetaSaveInfo(pTq, pTq->pCheckStore, &info.topic, strlen(info.topic), pData + sizeof(SSnapDataHdr), nData - sizeof(SSnapDataHdr)); tDeleteSTqCheckInfo(&info); - if (code) goto _err; + TSDB_CHECK_CODE(code, lino, end); + tqInfo("vgId:%d, vnode tq check info write success", TD_VID(pTq->pVnode)); - return code; - -_err: - tqError("vgId:%d, vnode check info tq write failed since %s", TD_VID(pTq->pVnode), tstrerror(code)); +end: + if (code != 0){ + tqError("%s failed at %d, vnode tq check info write failed since %s", __func__, lino, tstrerror(code)); + } return code; } int32_t tqSnapOffsetWrite(STqSnapWriter* pWriter, uint8_t* pData, uint32_t nData) { - if (pWriter == NULL || pData == NULL || nData < sizeof(SSnapDataHdr)) { - return TSDB_CODE_INVALID_MSG; - } - int32_t code = 0; - STQ* pTq = pWriter->pTq; + int code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + code = tqWriteCheck(pWriter, pData, nData); + TSDB_CHECK_CODE(code, lino, end); + STQ* pTq = pWriter->pTq; STqOffset info = {0}; code = tqMetaDecodeOffsetInfo(&info, pData + sizeof(SSnapDataHdr), nData - sizeof(SSnapDataHdr)); - if (code != 0) { - goto _err; - } + TSDB_CHECK_CODE(code, lino, end); code = tqMetaSaveInfo(pTq, pTq->pOffsetStore, info.subKey, strlen(info.subKey), pData + sizeof(SSnapDataHdr), nData - sizeof(SSnapDataHdr)); tDeleteSTqOffset(&info); - if (code) goto _err; + TSDB_CHECK_CODE(code, lino, end); + tqInfo("vgId:%d, vnode tq offset write success", TD_VID(pTq->pVnode)); - return code; - -_err: - tqError("vgId:%d, vnode check info tq write failed since %s", TD_VID(pTq->pVnode), tstrerror(code)); +end: + if (code != 0){ + tqError("%s failed at %d, vnode tq offset write failed since %s", __func__, lino, tstrerror(code)); + } return code; 
} diff --git a/source/libs/parser/src/parInsertSml.c b/source/libs/parser/src/parInsertSml.c index 22d1f7edda..d56cf7916f 100644 --- a/source/libs/parser/src/parInsertSml.c +++ b/source/libs/parser/src/parInsertSml.c @@ -20,40 +20,32 @@ int32_t qCreateSName(SName* pName, const char* pTableName, int32_t acctId, char* dbName, char* msgBuf, int32_t msgBufLen) { - SMsgBuf msg = {.buf = msgBuf, .len = msgBufLen}; - SToken sToken; - int32_t code = 0; - char* tbName = NULL; + SMsgBuf msg = {.buf = msgBuf, .len = msgBufLen}; + SToken sToken = {0}; + int code = TSDB_CODE_SUCCESS; + int32_t lino = 0; NEXT_TOKEN(pTableName, sToken); - - if (sToken.n == 0) { - return buildInvalidOperationMsg(&msg, "empty table name"); - } - + TSDB_CHECK_CONDITION(sToken.n != 0, code, lino, end, TSDB_CODE_TSC_INVALID_OPERATION); code = insCreateSName(pName, &sToken, acctId, dbName, &msg); - if (code) { - return code; - } - + TSDB_CHECK_CODE(code, lino, end); NEXT_TOKEN(pTableName, sToken); + TSDB_CHECK_CONDITION(sToken.n <= 0, code, lino, end, TSDB_CODE_TSC_INVALID_OPERATION); - if (sToken.n > 0) { - return buildInvalidOperationMsg(&msg, "table name format is wrong"); +end: + if (code != 0){ + uError("%s failed at %d since %s", __func__, lino, tstrerror(code)); } - return TSDB_CODE_SUCCESS; } static int32_t smlBoundColumnData(SArray* cols, SBoundColInfo* pBoundInfo, SSchema* pSchema, bool isTag) { + int code = TSDB_CODE_SUCCESS; + int32_t lino = 0; bool* pUseCols = taosMemoryCalloc(pBoundInfo->numOfCols, sizeof(bool)); - if (NULL == pUseCols) { - return terrno; - } - + TSDB_CHECK_NULL(pUseCols, code, lino, end, terrno); pBoundInfo->numOfBound = 0; int16_t lastColIdx = -1; // last column found - int32_t code = TSDB_CODE_SUCCESS; for (int i = 0; i < taosArrayGetSize(cols); ++i) { SSmlKv* kv = taosArrayGet(cols, i); @@ -65,16 +57,9 @@ static int32_t smlBoundColumnData(SArray* cols, SBoundColInfo* pBoundInfo, SSche index = insFindCol(&sToken, 0, t, pSchema); } - if (index < 0) { - uError("smlBoundColumnData. index:%d", index); - code = TSDB_CODE_SML_INVALID_DATA; - goto end; - } - if (pUseCols[index]) { - uError("smlBoundColumnData. already set. 
index:%d", index); - code = TSDB_CODE_SML_INVALID_DATA; - goto end; - } + TSDB_CHECK_CONDITION(index >= 0, code, lino, end, TSDB_CODE_SML_INVALID_DATA); + TSDB_CHECK_CONDITION(!pUseCols[index], code, lino, end, TSDB_CODE_SML_INVALID_DATA); + lastColIdx = index; pUseCols[index] = true; pBoundInfo->pColIndex[pBoundInfo->numOfBound] = index; @@ -82,11 +67,30 @@ static int32_t smlBoundColumnData(SArray* cols, SBoundColInfo* pBoundInfo, SSche } end: + if (code != 0){ + uError("%s failed at %d since %s", __func__, lino, tstrerror(code)); + } taosMemoryFree(pUseCols); - return code; } +static int32_t smlMbsToUcs4(const char* mbs, size_t mbsLen, void** result, int32_t* resultLen, int32_t maxLen, void* charsetCxt){ + int code = TSDB_CODE_SUCCESS; + void* pUcs4 = NULL; + int32_t lino = 0; + pUcs4 = taosMemoryCalloc(1, maxLen); + TSDB_CHECK_NULL(pUcs4, code, lino, end, terrno); + TSDB_CHECK_CONDITION(taosMbsToUcs4(mbs, mbsLen, (TdUcs4*)pUcs4, maxLen, resultLen, charsetCxt), code, lino, end, terrno); + *result = pUcs4; + pUcs4 = NULL; + +end: + if (code != 0){ + uError("%s failed at %d since %s", __func__, lino, tstrerror(code)); + } + taosMemoryFree(pUcs4); + return code; +} /** * @brief No json tag for schemaless * @@ -99,75 +103,39 @@ end: */ static int32_t smlBuildTagRow(SArray* cols, SBoundColInfo* tags, SSchema* pSchema, STag** ppTag, SArray** tagName, SMsgBuf* msg, void* charsetCxt) { - SArray* pTagArray = taosArrayInit(tags->numOfBound, sizeof(STagVal)); - if (!pTagArray) { - return terrno; - } + int code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + SArray* pTagArray = taosArrayInit(tags->numOfBound, sizeof(STagVal)); + TSDB_CHECK_NULL(pTagArray, code, lino, end, terrno); *tagName = taosArrayInit(8, TSDB_COL_NAME_LEN); - if (!*tagName) { - return terrno; - } + TSDB_CHECK_NULL(*tagName, code, lino, end, terrno); - int32_t code = TSDB_CODE_SUCCESS; for (int i = 0; i < tags->numOfBound; ++i) { SSchema* pTagSchema = &pSchema[tags->pColIndex[i]]; SSmlKv* kv = taosArrayGet(cols, i); - if (kv == NULL){ - code = terrno; - uError("SML smlBuildTagRow error kv is null"); - goto end; - } - if (kv->keyLen != strlen(pTagSchema->name) || memcmp(kv->key, pTagSchema->name, kv->keyLen) != 0 || - kv->type != pTagSchema->type) { - code = TSDB_CODE_SML_INVALID_DATA; - uError("SML smlBuildTagRow error col not same %s", pTagSchema->name); - goto end; - } - - if (taosArrayPush(*tagName, pTagSchema->name) == NULL){ - code = terrno; - uError("SML smlBuildTagRow error push tag name"); - goto end; - } + TSDB_CHECK_NULL(kv, code, lino, end, terrno); + bool cond = (kv->keyLen == strlen(pTagSchema->name) && memcmp(kv->key, pTagSchema->name, kv->keyLen) == 0 && kv->type == pTagSchema->type); + TSDB_CHECK_CONDITION(cond, code, lino, end, TSDB_CODE_SML_INVALID_DATA); + TSDB_CHECK_NULL(taosArrayPush(*tagName, pTagSchema->name), code, lino, end, terrno); STagVal val = {.cid = pTagSchema->colId, .type = pTagSchema->type}; - // strcpy(val.colName, pTagSchema->name); if (pTagSchema->type == TSDB_DATA_TYPE_BINARY || pTagSchema->type == TSDB_DATA_TYPE_VARBINARY || pTagSchema->type == TSDB_DATA_TYPE_GEOMETRY) { val.pData = (uint8_t*)kv->value; val.nData = kv->length; } else if (pTagSchema->type == TSDB_DATA_TYPE_NCHAR) { - int32_t output = 0; - void* p = taosMemoryCalloc(1, kv->length * TSDB_NCHAR_SIZE); - if (p == NULL) { - code = terrno; - goto end; - } - if (!taosMbsToUcs4(kv->value, kv->length, (TdUcs4*)(p), kv->length * TSDB_NCHAR_SIZE, &output, charsetCxt)) { - if (terrno == TAOS_SYSTEM_ERROR(E2BIG)) { - taosMemoryFree(p); - code 
= generateSyntaxErrMsg(msg, TSDB_CODE_PAR_VALUE_TOO_LONG, pTagSchema->name); - goto end; - } - char buf[512] = {0}; - (void)snprintf(buf, tListLen(buf), " taosMbsToUcs4 error:%s", strerror(terrno)); - taosMemoryFree(p); - code = buildSyntaxErrMsg(msg, buf, kv->value); - goto end; - } - val.pData = p; - val.nData = output; + code = smlMbsToUcs4(kv->value, kv->length, (void**)&val.pData, &val.nData, kv->length * TSDB_NCHAR_SIZE, charsetCxt); + TSDB_CHECK_CODE(code, lino, end); } else { (void)memcpy(&val.i64, &(kv->value), kv->length); } - if (taosArrayPush(pTagArray, &val) == NULL){ - code = terrno; - uError("SML smlBuildTagRow error push tag array"); - goto end; - } + TSDB_CHECK_NULL(taosArrayPush(pTagArray, &val), code, lino, end, terrno); } - code = tTagNew(pTagArray, 1, false, ppTag); + end: + if (code != 0){ + uError("%s failed at %d since %s", __func__, lino, tstrerror(code)); + } for (int i = 0; i < taosArrayGetSize(pTagArray); ++i) { STagVal* p = (STagVal*)taosArrayGet(pTagArray, i); if (p->type == TSDB_DATA_TYPE_NCHAR) { @@ -179,18 +147,20 @@ end: } int32_t smlInitTableDataCtx(SQuery* query, STableMeta* pTableMeta, STableDataCxt** cxt) { + int ret = TSDB_CODE_SUCCESS; + int32_t lino = 0; SVCreateTbReq* pCreateTbReq = NULL; - int ret = insGetTableDataCxt(((SVnodeModifyOpStmt*)(query->pRoot))->pTableBlockHashObj, &pTableMeta->uid, + ret = insGetTableDataCxt(((SVnodeModifyOpStmt*)(query->pRoot))->pTableBlockHashObj, &pTableMeta->uid, sizeof(pTableMeta->uid), pTableMeta, &pCreateTbReq, cxt, false, false); - if (ret != TSDB_CODE_SUCCESS) { - return ret; - } - + TSDB_CHECK_CODE(ret, lino, end); ret = initTableColSubmitData(*cxt); - if (ret != TSDB_CODE_SUCCESS) { - return ret; + TSDB_CHECK_CODE(ret, lino, end); + +end: + if (ret != 0){ + uError("%s failed at %d since %s", __func__, lino, tstrerror(ret)); } - return TSDB_CODE_SUCCESS; + return ret; } void clearColValArraySml(SArray* pCols) { @@ -207,78 +177,51 @@ void clearColValArraySml(SArray* pCols) { } int32_t smlBuildRow(STableDataCxt* pTableCxt) { - SRow** pRow = taosArrayReserve(pTableCxt->pData->aRowP, 1); - if (pRow == NULL){ - return terrno; - } - int ret = tRowBuild(pTableCxt->pValues, pTableCxt->pSchema, pRow); - if (TSDB_CODE_SUCCESS != ret) { - return ret; - } + int ret = TSDB_CODE_SUCCESS; + int32_t lino = 0; + SRow** pRow = taosArrayReserve(pTableCxt->pData->aRowP, 1); + TSDB_CHECK_NULL(pRow, ret, lino, end, terrno); + ret = tRowBuild(pTableCxt->pValues, pTableCxt->pSchema, pRow); + TSDB_CHECK_CODE(ret, lino, end); SRowKey key; tRowGetKey(*pRow, &key); insCheckTableDataOrder(pTableCxt, &key); - return TSDB_CODE_SUCCESS; +end: + if (ret != 0){ + uError("%s failed at %d since %s", __func__, lino, tstrerror(ret)); + } + return ret; } int32_t smlBuildCol(STableDataCxt* pTableCxt, SSchema* schema, void* data, int32_t index, void* charsetCxt) { int ret = TSDB_CODE_SUCCESS; + int32_t lino = 0; SSchema* pColSchema = schema + index; SColVal* pVal = taosArrayGet(pTableCxt->pValues, index); - if (pVal == NULL) { - return TSDB_CODE_SUCCESS; - } + TSDB_CHECK_NULL(pVal, ret, lino, end, TSDB_CODE_SUCCESS); SSmlKv* kv = (SSmlKv*)data; if (kv->keyLen != strlen(pColSchema->name) || memcmp(kv->key, pColSchema->name, kv->keyLen) != 0 || kv->type != pColSchema->type) { ret = TSDB_CODE_SML_INVALID_DATA; char* tmp = taosMemoryCalloc(kv->keyLen + 1, 1); - if (tmp) { - (void)memcpy(tmp, kv->key, kv->keyLen); - uInfo("SML data(name:%s type:%s) is not same like the db data(name:%s type:%s)", tmp, tDataTypes[kv->type].name, - pColSchema->name, 
tDataTypes[pColSchema->type].name); - taosMemoryFree(tmp); - } else { - uError("SML smlBuildCol out of memory"); - ret = terrno; - } + TSDB_CHECK_NULL(tmp, ret, lino, end, terrno); + (void)memcpy(tmp, kv->key, kv->keyLen); + uInfo("SML data(name:%s type:%s) is not same like the db data(name:%s type:%s)", tmp, tDataTypes[kv->type].name, + pColSchema->name, tDataTypes[pColSchema->type].name); + taosMemoryFree(tmp); goto end; } if (kv->type == TSDB_DATA_TYPE_NCHAR) { - int32_t len = 0; - int64_t size = pColSchema->bytes - VARSTR_HEADER_SIZE; - if (size <= 0) { - ret = TSDB_CODE_SML_INVALID_DATA; - goto end; - } - char* pUcs4 = taosMemoryCalloc(1, size); - if (NULL == pUcs4) { - ret = terrno; - goto end; - } - if (!taosMbsToUcs4(kv->value, kv->length, (TdUcs4*)pUcs4, size, &len, charsetCxt)) { - if (terrno == TAOS_SYSTEM_ERROR(E2BIG)) { - taosMemoryFree(pUcs4); - ret = TSDB_CODE_PAR_VALUE_TOO_LONG; - goto end; - } - taosMemoryFree(pUcs4); - ret = TSDB_CODE_TSC_INVALID_VALUE; - goto end; - } - pVal->value.pData = pUcs4; - pVal->value.nData = len; + ret = smlMbsToUcs4(kv->value, kv->length, (void**)&pVal->value.pData, &pVal->value.nData, pColSchema->bytes - VARSTR_HEADER_SIZE, charsetCxt); + TSDB_CHECK_CODE(ret, lino, end); } else if (kv->type == TSDB_DATA_TYPE_BINARY) { pVal->value.nData = kv->length; pVal->value.pData = (uint8_t*)kv->value; } else if (kv->type == TSDB_DATA_TYPE_GEOMETRY || kv->type == TSDB_DATA_TYPE_VARBINARY) { pVal->value.nData = kv->length; pVal->value.pData = taosMemoryMalloc(kv->length); - if (!pVal->value.pData) { - ret = terrno; - uError("SML smlBuildCol malloc failed %s:%d, err: %s", __func__, __LINE__, tstrerror(ret)); - goto end; - } + TSDB_CHECK_NULL(pVal->value.pData, ret, lino, end, terrno); + (void)memcpy(pVal->value.pData, (uint8_t*)kv->value, kv->length); } else { (void)memcpy(&pVal->value.val, &(kv->value), kv->length); @@ -286,12 +229,17 @@ int32_t smlBuildCol(STableDataCxt* pTableCxt, SSchema* schema, void* data, int32 pVal->flag = CV_FLAG_VALUE; end: + if (ret != 0){ + uError("%s failed at %d since %s", __func__, lino, tstrerror(ret)); + } return ret; } int32_t smlBindData(SQuery* query, bool dataFormat, SArray* tags, SArray* colsSchema, SArray* cols, STableMeta* pTableMeta, char* tableName, const char* sTableName, int32_t sTableNameLen, int32_t ttl, char* msgBuf, int32_t msgBufLen, void* charsetCxt) { + int32_t lino = 0; + int32_t ret = 0; SMsgBuf pBuf = {.buf = msgBuf, .len = msgBufLen}; SSchema* pTagsSchema = getTableTagSchema(pTableMeta); @@ -299,50 +247,32 @@ int32_t smlBindData(SQuery* query, bool dataFormat, SArray* tags, SArray* colsSc SVCreateTbReq* pCreateTblReq = NULL; SArray* tagName = NULL; - int ret = insInitBoundColsInfo(getNumOfTags(pTableMeta), &bindTags); - if (ret != TSDB_CODE_SUCCESS) { - ret = buildInvalidOperationMsg(&pBuf, "init bound cols error"); - goto end; - } + ret = insInitBoundColsInfo(getNumOfTags(pTableMeta), &bindTags); + TSDB_CHECK_CODE(ret, lino, end); ret = smlBoundColumnData(tags, &bindTags, pTagsSchema, true); - if (ret != TSDB_CODE_SUCCESS) { - ret = buildInvalidOperationMsg(&pBuf, "bound tags error"); - goto end; - } + TSDB_CHECK_CODE(ret, lino, end); STag* pTag = NULL; - ret = smlBuildTagRow(tags, &bindTags, pTagsSchema, &pTag, &tagName, &pBuf, charsetCxt); - if (ret != TSDB_CODE_SUCCESS) { - goto end; - } + TSDB_CHECK_CODE(ret, lino, end); pCreateTblReq = taosMemoryCalloc(1, sizeof(SVCreateTbReq)); - if (NULL == pCreateTblReq) { - ret = terrno; - goto end; - } + TSDB_CHECK_NULL(pCreateTblReq, ret, lino, end, 
terrno); + ret = insBuildCreateTbReq(pCreateTblReq, tableName, pTag, pTableMeta->suid, NULL, tagName, pTableMeta->tableInfo.numOfTags, ttl); - if (TSDB_CODE_SUCCESS != ret) { - goto end; - } + TSDB_CHECK_CODE(ret, lino, end); pCreateTblReq->ctb.stbName = taosMemoryCalloc(1, sTableNameLen + 1); - if (pCreateTblReq->ctb.stbName == NULL){ - ret = terrno; - goto end; - } + TSDB_CHECK_NULL(pCreateTblReq->ctb.stbName, ret, lino, end, terrno); + (void)memcpy(pCreateTblReq->ctb.stbName, sTableName, sTableNameLen); if (dataFormat) { STableDataCxt** pTableCxt = (STableDataCxt**)taosHashGet(((SVnodeModifyOpStmt*)(query->pRoot))->pTableBlockHashObj, &pTableMeta->uid, sizeof(pTableMeta->uid)); - if (NULL == pTableCxt) { - ret = buildInvalidOperationMsg(&pBuf, "dataformat true. get tableDataCtx error"); - goto end; - } + TSDB_CHECK_NULL(pTableCxt, ret, lino, end, TSDB_CODE_TSC_INVALID_OPERATION); (*pTableCxt)->pData->flags |= SUBMIT_REQ_AUTO_CREATE_TABLE; (*pTableCxt)->pData->pCreateTbReq = pCreateTblReq; (*pTableCxt)->pMeta->uid = pTableMeta->uid; @@ -354,86 +284,47 @@ int32_t smlBindData(SQuery* query, bool dataFormat, SArray* tags, SArray* colsSc STableDataCxt* pTableCxt = NULL; ret = insGetTableDataCxt(((SVnodeModifyOpStmt*)(query->pRoot))->pTableBlockHashObj, &pTableMeta->uid, sizeof(pTableMeta->uid), pTableMeta, &pCreateTblReq, &pTableCxt, false, false); - if (ret != TSDB_CODE_SUCCESS) { - ret = buildInvalidOperationMsg(&pBuf, "insGetTableDataCxt error"); - goto end; - } + TSDB_CHECK_CODE(ret, lino, end); SSchema* pSchema = getTableColumnSchema(pTableMeta); ret = smlBoundColumnData(colsSchema, &pTableCxt->boundColsInfo, pSchema, false); - if (ret != TSDB_CODE_SUCCESS) { - ret = buildInvalidOperationMsg(&pBuf, "bound cols error"); - goto end; - } + TSDB_CHECK_CODE(ret, lino, end); ret = initTableColSubmitData(pTableCxt); - if (ret != TSDB_CODE_SUCCESS) { - ret = buildInvalidOperationMsg(&pBuf, "initTableColSubmitData error"); - goto end; - } + TSDB_CHECK_CODE(ret, lino, end); int32_t rowNum = taosArrayGetSize(cols); - if (rowNum <= 0) { - ret = buildInvalidOperationMsg(&pBuf, "cols size <= 0"); - goto end; - } + TSDB_CHECK_CONDITION(rowNum > 0, ret, lino, end, TSDB_CODE_TSC_INVALID_OPERATION); for (int32_t r = 0; r < rowNum; ++r) { void* rowData = taosArrayGetP(cols, r); - if (rowData == NULL) { - ret = terrno; - goto end; - } + TSDB_CHECK_NULL(rowData, ret, lino, end, terrno); + // 1. 
set the parsed value from sql string for (int c = 0; c < pTableCxt->boundColsInfo.numOfBound; ++c) { SSchema* pColSchema = &pSchema[pTableCxt->boundColsInfo.pColIndex[c]]; SColVal* pVal = taosArrayGet(pTableCxt->pValues, pTableCxt->boundColsInfo.pColIndex[c]); - if (pVal == NULL) { - ret = terrno; - goto end; - } + TSDB_CHECK_NULL(pVal, ret, lino, end, terrno); void** p = taosHashGet(rowData, pColSchema->name, strlen(pColSchema->name)); if (p == NULL) { continue; } SSmlKv* kv = *(SSmlKv**)p; - if (kv->type != pColSchema->type) { - ret = buildInvalidOperationMsg(&pBuf, "kv type not equal to col type"); - goto end; - } + TSDB_CHECK_CONDITION(kv->type == pColSchema->type, ret, lino, end, TSDB_CODE_TSC_INVALID_OPERATION); + if (pColSchema->type == TSDB_DATA_TYPE_TIMESTAMP) { kv->i = convertTimePrecision(kv->i, TSDB_TIME_PRECISION_NANO, pTableMeta->tableInfo.precision); } if (kv->type == TSDB_DATA_TYPE_NCHAR) { - int32_t len = 0; - char* pUcs4 = taosMemoryCalloc(1, pColSchema->bytes - VARSTR_HEADER_SIZE); - if (NULL == pUcs4) { - ret = terrno; - goto end; - } - if (!taosMbsToUcs4(kv->value, kv->length, (TdUcs4*)pUcs4, pColSchema->bytes - VARSTR_HEADER_SIZE, &len, charsetCxt)) { - if (terrno == TAOS_SYSTEM_ERROR(E2BIG)) { - uError("sml bind taosMbsToUcs4 error, kv length:%d, bytes:%d, kv->value:%s", (int)kv->length, - pColSchema->bytes, kv->value); - (void)buildInvalidOperationMsg(&pBuf, "value too long"); - ret = TSDB_CODE_PAR_VALUE_TOO_LONG; - goto end; - } - ret = buildInvalidOperationMsg(&pBuf, strerror(terrno)); - goto end; - } - pVal->value.pData = pUcs4; - pVal->value.nData = len; + ret = smlMbsToUcs4(kv->value, kv->length, (void**)&pVal->value.pData, &pVal->value.nData, pColSchema->bytes - VARSTR_HEADER_SIZE, charsetCxt); + TSDB_CHECK_CODE(ret, lino, end); } else if (kv->type == TSDB_DATA_TYPE_BINARY) { pVal->value.nData = kv->length; pVal->value.pData = (uint8_t*)kv->value; } else if (kv->type == TSDB_DATA_TYPE_GEOMETRY || kv->type == TSDB_DATA_TYPE_VARBINARY) { pVal->value.nData = kv->length; pVal->value.pData = taosMemoryMalloc(kv->length); - if (NULL == pVal->value.pData) { - ret = terrno; - goto end; - } + TSDB_CHECK_NULL(pVal->value.pData, ret, lino, end, terrno); (void)memcpy(pVal->value.pData, (uint8_t*)kv->value, kv->length); } else { (void)memcpy(&pVal->value.val, &(kv->value), kv->length); @@ -442,22 +333,20 @@ int32_t smlBindData(SQuery* query, bool dataFormat, SArray* tags, SArray* colsSc } SRow** pRow = taosArrayReserve(pTableCxt->pData->aRowP, 1); - if (NULL == pRow) { - ret = terrno; - goto end; - } + TSDB_CHECK_NULL(pRow, ret, lino, end, terrno); ret = tRowBuild(pTableCxt->pValues, pTableCxt->pSchema, pRow); - if (TSDB_CODE_SUCCESS != ret) { - ret = buildInvalidOperationMsg(&pBuf, "tRowBuild error"); - goto end; - } - SRowKey key; + TSDB_CHECK_CODE(ret, lino, end); + SRowKey key = {0}; tRowGetKey(*pRow, &key); insCheckTableDataOrder(pTableCxt, &key); clearColValArraySml(pTableCxt->pValues); } end: + if (ret != 0){ + uError("%s failed at %d since %s", __func__, lino, tstrerror(ret)); + buildInvalidOperationMsg(&pBuf, tstrerror(ret)); + } insDestroyBoundColInfo(&bindTags); tdDestroySVCreateTbReq(pCreateTblReq); taosMemoryFree(pCreateTblReq); @@ -466,29 +355,21 @@ end: } int32_t smlInitHandle(SQuery** query) { + int32_t lino = 0; + int32_t code = 0; *query = NULL; SQuery* pQuery = NULL; SVnodeModifyOpStmt* stmt = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_QUERY, (SNode**)&pQuery); - if (code != 0) { - uError("SML create pQuery error"); - goto END; - } + code = 
nodesMakeNode(QUERY_NODE_QUERY, (SNode**)&pQuery); + TSDB_CHECK_CODE(code, lino, end); pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE; pQuery->haveResultSet = false; pQuery->msgType = TDMT_VND_SUBMIT; code = nodesMakeNode(QUERY_NODE_VNODE_MODIFY_STMT, (SNode**)&stmt); - if (code != 0) { - uError("SML create SVnodeModifyOpStmt error"); - goto END; - } + TSDB_CHECK_CODE(code, lino, end); stmt->pTableBlockHashObj = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK); - if (stmt->pTableBlockHashObj == NULL){ - uError("SML create pTableBlockHashObj error"); - code = terrno; - goto END; - } + TSDB_CHECK_NULL(stmt->pTableBlockHashObj, code, lino, end, terrno); stmt->freeHashFunc = insDestroyTableDataCxtHashMap; stmt->freeArrayFunc = insDestroyVgroupDataCxtList; @@ -496,24 +377,28 @@ int32_t smlInitHandle(SQuery** query) { *query = pQuery; return code; -END: +end: + if (code != 0) { + uError("%s failed at %d since %s", __func__, lino, tstrerror(code)); + } nodesDestroyNode((SNode*)stmt); qDestroyQuery(pQuery); return code; } int32_t smlBuildOutput(SQuery* handle, SHashObj* pVgHash) { + int32_t lino = 0; + int32_t code = 0; + SVnodeModifyOpStmt* pStmt = (SVnodeModifyOpStmt*)(handle)->pRoot; - // merge according to vgId - int32_t code = insMergeTableDataCxt(pStmt->pTableBlockHashObj, &pStmt->pVgDataBlocks, true); - if (code != TSDB_CODE_SUCCESS) { - uError("insMergeTableDataCxt failed"); - return code; - } + code = insMergeTableDataCxt(pStmt->pTableBlockHashObj, &pStmt->pVgDataBlocks, true); + TSDB_CHECK_CODE(code, lino, end); code = insBuildVgDataBlocks(pVgHash, pStmt->pVgDataBlocks, &pStmt->pDataBlocks, false); - if (code != TSDB_CODE_SUCCESS) { - uError("insBuildVgDataBlocks failed"); - return code; + TSDB_CHECK_CODE(code, lino, end); + +end: + if (code != 0) { + uError("%s failed at %d since %s", __func__, lino, tstrerror(code)); } return code; } diff --git a/source/os/src/osString.c b/source/os/src/osString.c index 380e9f84d3..1d07b64c70 100644 --- a/source/os/src/osString.c +++ b/source/os/src/osString.c @@ -405,8 +405,6 @@ bool taosMbsToUcs4(const char *mbs, size_t mbsLength, TdUcs4 *ucs4, int32_t ucs4 size_t ucs4_input_len = mbsLength; size_t outLeft = ucs4_max_len; if (iconv(conv, (char **)&mbs, &ucs4_input_len, (char **)&ucs4, &outLeft) == -1) { - char buf[512] = {0}; - snprintf(buf, tListLen(buf), " taosMbsToUcs4 error:%s %d %d", strerror(terrno), errno, EILSEQ); terrno = TAOS_SYSTEM_ERROR(errno); taosReleaseConv(idx, conv, M2C, charsetCxt); return false; From a360b71f7c7919ef39debf6e962cb2ac1616c12c Mon Sep 17 00:00:00 2001 From: factosea <285808407@qq.com> Date: Fri, 20 Dec 2024 16:57:17 +0800 Subject: [PATCH 07/45] enh: udf exception test case --- source/libs/function/CMakeLists.txt | 44 ++++++++++ source/libs/function/test/change_udf.c | 108 +++++++++++++++++++++++++ tests/pytest/util/tserror.py | 10 +++ tests/system-test/0-others/udfTest.py | 97 ++++++++++++++++++++++ 4 files changed, 259 insertions(+) create mode 100644 source/libs/function/test/change_udf.c create mode 100644 tests/pytest/util/tserror.py diff --git a/source/libs/function/CMakeLists.txt b/source/libs/function/CMakeLists.txt index 6891653981..fad0a749d5 100644 --- a/source/libs/function/CMakeLists.txt +++ b/source/libs/function/CMakeLists.txt @@ -142,6 +142,50 @@ target_link_libraries( udf2_dup PUBLIC os ${LINK_JEMALLOC} ) +set(TARGET_NAMES + change_udf_normal + change_udf_no_init + change_udf_no_process + change_udf_no_destroy + change_udf_init_failed + change_udf_process_failed + 
change_udf_destory_failed +) + +set(COMPILE_DEFINITIONS + CHANGE_UDF_NORMAL + CHANGE_UDF_NO_INIT + CHANGE_UDF_NO_PROCESS + CHANGE_UDF_NO_DESTROY + CHANGE_UDF_INIT_FAILED + CHANGE_UDF_PROCESS_FAILED + CHANGE_UDF_DESTORY_FAILED +) + +foreach(index RANGE 0 6) + list(GET TARGET_NAMES ${index} target_name) + list(GET COMPILE_DEFINITIONS ${index} compile_def) + + add_library(${target_name} STATIC MODULE test/change_udf.c) + target_include_directories( + ${target_name} + PUBLIC + "${TD_SOURCE_DIR}/include/libs/function" + "${TD_SOURCE_DIR}/include/util" + "${TD_SOURCE_DIR}/include/common" + "${TD_SOURCE_DIR}/include/client" + "${TD_SOURCE_DIR}/include/os" + PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc" + ) + target_compile_definitions(${target_name} PRIVATE ${compile_def}) + IF(TD_LINUX_64 AND JEMALLOC_ENABLED) + ADD_DEPENDENCIES(${target_name} jemalloc) + ENDIF() + target_link_libraries( + ${target_name} PUBLIC os ${LINK_JEMALLOC} + ) +endforeach() + # SET(EXECUTABLE_OUTPUT_PATH ${CMAKE_BINARY_DIR}/build/bin) add_executable(udfd src/udfd.c) diff --git a/source/libs/function/test/change_udf.c b/source/libs/function/test/change_udf.c new file mode 100644 index 0000000000..822c4da892 --- /dev/null +++ b/source/libs/function/test/change_udf.c @@ -0,0 +1,108 @@ +#include +#include +#include +#ifdef LINUX +#include +#endif +#ifdef WINDOWS +#include +#endif +#include "taosudf.h" + +// rename function name +#ifdef CHANGE_UDF_NORMAL +#define UDFNAME change_udf_normal +#define UDFNAMEINIT change_udf_normal_init +#define UDFNAMEDESTROY change_udf_normal_destroy +#elif defined(CHANGE_UDF_NO_INIT) +#define UDFNAME change_udf_no_init +#define UDFNAMEINIT change_udf_no_init_init +#define UDFNAMEDESTROY change_udf_no_init_destroy +#elif defined(CHANGE_UDF_NO_PROCESS) +#define UDFNAME change_udf_no_process +#define UDFNAMEINIT change_udf_no_process_init +#define UDFNAMEDESTROY change_udf_no_process_destroy +#elif defined(CHANGE_UDF_NO_DESTROY) +#define UDFNAME change_udf_no_destroy +#define UDFNAMEINIT change_udf_no_destroy_init +#define UDFNAMEDESTROY change_udf_no_destroy_destroy +#elif defined(CHANGE_UDF_INIT_FAILED) +#define UDFNAME change_udf_init_failed +#define UDFNAMEINIT change_udf_init_failed_init +#define UDFNAMEDESTROY change_udf_init_failed_destroy +#elif defined(CHANGE_UDF_PROCESS_FAILED) +#define UDFNAME change_udf_process_failed +#define UDFNAMEINIT change_udf_process_failed_init +#define UDFNAMEDESTROY change_udf_process_failed_destroy +#elif defined(CHANGE_UDF_DESTORY_FAILED) +#define UDFNAME change_udf_destory_failed +#define UDFNAMEINIT change_udf_destory_failed_init +#define UDFNAMEDESTROY change_udf_destory_failed_destroy +#else +#define UDFNAME change_udf_normal +#define UDFNAMEINIT change_udf_normal_init +#define UDFNAMEDESTROY change_udf_normal_destroy +#endif + + +#ifdef CHANGE_UDF_NO_INIT +#else +DLL_EXPORT int32_t UDFNAMEINIT() { + #ifdef CHANGE_UDF_INIT_FAILED + return -1; + #else + return 0; + #endif // ifdef CHANGE_UDF_INIT_FAILED +} +#endif // ifdef CHANGE_UDF_NO_INIT + +#ifdef CHANGE_UDF_NO_DESTROY +#else +DLL_EXPORT int32_t UDFNAMEDESTROY() { + #ifdef CHANGE_UDF_DESTORY_FAILED + return -1; + #else + return 0; + #endif // ifdef CHANGE_UDF_DESTORY_FAILED + } +#endif // ifdef CHANGE_UDF_NO_DESTROY + +#ifdef CHANGE_UDF_NO_PROCESS +#else +DLL_EXPORT int32_t UDFNAME(SUdfDataBlock *block, SUdfColumn *resultCol) { + #ifdef CHANGE_UDF_PROCESS_FAILED + return -1; + #else + int32_t code = 0; + SUdfColumnData *resultData = &resultCol->colData; + for (int32_t i = 0; i < block->numOfRows; ++i) { 
+ int j = 0; + for (; j < block->numOfCols; ++j) { + if (udfColDataIsNull(block->udfCols[j], i)) { + code = udfColDataSetNull(resultCol, i); + if (code != 0) { + return code; + } + break; + } + } + if (j == block->numOfCols) { + int32_t luckyNum = 1; + code = udfColDataSet(resultCol, i, (char *)&luckyNum, false); + if (code != 0) { + return code; + } + } + } + // to simulate actual processing delay by udf +#ifdef LINUX + usleep(1 * 1000); // usleep takes sleep time in us (1 millionth of a second) +#endif // ifdef LINUX +#ifdef WINDOWS + Sleep(1); +#endif // ifdef WINDOWS + resultData->numOfRows = block->numOfRows; + return 0; + #endif // ifdef CHANGE_UDF_PROCESS_FAILED +} +#endif // ifdef CHANGE_UDF_NO_PROCESS diff --git a/tests/pytest/util/tserror.py b/tests/pytest/util/tserror.py new file mode 100644 index 0000000000..c0f658209c --- /dev/null +++ b/tests/pytest/util/tserror.py @@ -0,0 +1,10 @@ +import ctypes + +TAOS_SYSTEM_ERROR = ctypes.c_int32(0x80ff0000).value +TAOS_DEF_ERROR_CODE = ctypes.c_int32(0x80000000).value + + +TSDB_CODE_MND_FUNC_NOT_EXIST = (TAOS_DEF_ERROR_CODE | 0x0374) + + +TSDB_CODE_UDF_FUNC_EXEC_FAILURE = (TAOS_DEF_ERROR_CODE | 0x290A) diff --git a/tests/system-test/0-others/udfTest.py b/tests/system-test/0-others/udfTest.py index d3efa61e04..9bc7d4360c 100644 --- a/tests/system-test/0-others/udfTest.py +++ b/tests/system-test/0-others/udfTest.py @@ -11,6 +11,7 @@ from util.log import * from util.sql import * from util.cases import * from util.dnodes import * +from util.tserror import * import subprocess class TDTestCase: @@ -56,8 +57,32 @@ class TDTestCase: else: self.libudf1 = subprocess.Popen('find %s -name "libudf1.so"|grep lib|head -n1'%projPath , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8") self.libudf2 = subprocess.Popen('find %s -name "libudf2.so"|grep lib|head -n1'%projPath , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8") + self.libchange_udf_no_init = subprocess.Popen('find %s -name "libchange_udf_no_init.so"|grep lib|head -n1'%projPath , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8") + self.libchange_udf_no_process = subprocess.Popen('find %s -name "libchange_udf_no_process.so"|grep lib|head -n1'%projPath , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8") + self.libchange_udf_no_destroy = subprocess.Popen('find %s -name "libchange_udf_no_destroy.so"|grep lib|head -n1'%projPath , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8") + self.libchange_udf_init_failed = subprocess.Popen('find %s -name "libchange_udf_init_failed.so"|grep lib|head -n1'%projPath , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8") + self.libchange_udf_process_failed = subprocess.Popen('find %s -name "libchange_udf_process_failed.so"|grep lib|head -n1'%projPath , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8") + self.libchange_udf_destroy_failed = subprocess.Popen('find %s -name "libchange_udf_destory_failed.so"|grep lib|head -n1'%projPath , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8") + self.libchange_udf_normal = subprocess.Popen('find %s -name "libchange_udf_normal.so"|grep lib|head -n1'%projPath , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8") + self.libudf1 = 
self.libudf1.replace('\r','').replace('\n','') self.libudf2 = self.libudf2.replace('\r','').replace('\n','') + self.libchange_udf_no_init = self.libchange_udf_no_init.replace('\r','').replace('\n','') + self.libchange_udf_no_process = self.libchange_udf_no_process.replace('\r','').replace('\n','') + self.libchange_udf_normal = self.libchange_udf_normal.replace('\r','').replace('\n','') + self.libchange_udf_no_destroy = self.libchange_udf_no_destroy.replace('\r','').replace('\n','') + self.libchange_udf_init_failed = self.libchange_udf_init_failed.replace('\r','').replace('\n','') + self.libchange_udf_process_failed = self.libchange_udf_process_failed.replace('\r','').replace('\n','') + self.libchange_udf_destroy_failed = self.libchange_udf_destroy_failed.replace('\r','').replace('\n','') + tdLog.info(f"udf1 so path is {self.libudf1}") + tdLog.info(f"udf2 so path is {self.libudf2}") + tdLog.info(f"change_udf_no_init so path is {self.libchange_udf_no_init}") + tdLog.info(f"change_udf_no_process so path is {self.libchange_udf_no_process}") + tdLog.info(f"change_udf_no_destroy so path is {self.libchange_udf_no_destroy}") + tdLog.info(f"change_udf_init_failed so path is {self.libchange_udf_init_failed}") + tdLog.info(f"change_udf_process_failed so path is {self.libchange_udf_process_failed}") + tdLog.info(f"change_udf_destroy_failed so path is {self.libchange_udf_destroy_failed}") + tdLog.info(f"change_udf_normal so path is {self.libchange_udf_normal}") def prepare_data(self): @@ -664,12 +689,84 @@ class TDTestCase: path = ''.join(random.choice(letters) for i in range(5000)) os.system(f"udfd -c {path}") + + def test_change_udf_normal(self, func_name): + # create function with normal file + tdSql.execute(f"create function {func_name} as '%s' outputtype int"%self.libchange_udf_normal) + functions = tdSql.getResult("show functions") + for function in functions: + if f"{func_name}" in function[0]: + tdLog.info(f"create {func_name} functions success, using {self.libchange_udf_normal}") + break + tdSql.query(f"select num1 , {func_name}(num1) ,num2 ,{func_name}(num2),num3 ,{func_name}(num3),num4 ,{func_name}(num4) from db.tb", TSDB_CODE_UDF_FUNC_EXEC_FAILURE) + tdSql.checkData(0,0,None) + tdSql.checkData(0,1,None) + tdSql.checkData(0,2,1) + tdSql.checkData(0,3,1) + tdSql.checkData(0,4,1.000000000) + tdSql.checkData(0,5,1) + tdSql.checkData(0,6,"binary1") + tdSql.checkData(0,7,1) + tdSql.query(f"select {func_name}(num1) from db.tb", TSDB_CODE_UDF_FUNC_EXEC_FAILURE) + tdSql.execute(f"drop function {func_name}") + tdSql.error(f"select {func_name}(num1) from db.tb", TSDB_CODE_MND_FUNC_NOT_EXIST) + tdLog.info(f"change udf test finished, using {self.libchange_udf_normal}") + + def test_change_udf_failed(self, func_name, lib_name): + tdLog.info(f"test change udf start: using {lib_name}") + tdSql.error(f"select num1 , {func_name}(num1) ,num2 ,{func_name}(num2),num3 ,{func_name}(num3),num4 ,{func_name}(num4) from db.tb", TSDB_CODE_MND_FUNC_NOT_EXIST) + tdSql.execute(f"create function {func_name} as '{lib_name}' outputtype int") + functions = tdSql.getResult("show functions") + for function in functions: + if f"{func_name}" in function[0]: + tdLog.info(f"create {func_name} functions success, using {lib_name}") + break + + tdSql.error(f"select num1 , {func_name}(num1) ,num2 ,{func_name}(num2),num3 ,{func_name}(num3),num4 ,{func_name}(num4) from db.tb", TSDB_CODE_UDF_FUNC_EXEC_FAILURE) + tdSql.error(f"select {func_name}(num1) from db.tb", TSDB_CODE_UDF_FUNC_EXEC_FAILURE) + tdSql.execute(f"drop function 
{func_name}") + tdSql.error(f"select {func_name}(num1) from db.tb", TSDB_CODE_MND_FUNC_NOT_EXIST) + tdLog.info(f"change udf test finished, using {lib_name}") + + def unexpected_using_test(self): + tdSql.execute("use db ") + + # create function without wrong file path + tdSql.error("create function udf1 as '%s_wrongpath' outputtype int;"%self.libudf1, TAOS_SYSTEM_ERROR|2) + tdSql.error("create aggregate function udf2 as '%s_wrongpath' outputtype double;"%self.libudf2, TAOS_SYSTEM_ERROR|2) + + tdSql.execute("create function udf1 as '%s' outputtype int;"%self.libudf1) + tdSql.query("select num1 , udf1(num1) ,num2 ,udf1(num2),num3 ,udf1(num3),num4 ,udf1(num4) from tb") + + self.test_change_udf_normal("change_udf_normal") + self.test_change_udf_failed("change_udf_no_init", self.libchange_udf_no_init) + + self.test_change_udf_normal("change_udf_normal") + self.test_change_udf_failed("change_udf_no_process", self.libchange_udf_no_process) + + self.test_change_udf_normal("change_udf_normal") + self.test_change_udf_failed("change_udf_no_destroy", self.libchange_udf_no_destroy) + + self.test_change_udf_normal("change_udf_normal") + self.test_change_udf_failed("change_udf_init_failed", self.libchange_udf_init_failed) + + self.test_change_udf_normal("change_udf_normal") + self.test_change_udf_failed("change_udf_process_failed", self.libchange_udf_process_failed) + + self.test_change_udf_normal("change_udf_normal") + self.test_change_udf_failed("libchange_udf_destroy_failed", self.libchange_udf_destroy_failed) + + tdSql.query("select num1 , udf1(num1) ,num2 ,udf1(num2),num3 ,udf1(num3),num4 ,udf1(num4) from tb") + tdSql.execute(f"drop function udf1") + tdSql.error("select num1 , udf1(num1) ,num2 ,udf1(num2),num3 ,udf1(num3),num4 ,udf1(num4) from tb", TSDB_CODE_MND_FUNC_NOT_EXIST) def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring print(" env is ok for all ") self.test_udfd_cmd() self.prepare_udf_so() self.prepare_data() + + self.unexpected_using_test() self.create_udf_function() self.basic_udf_query() self.loop_kill_udfd() From 4d008308b8b176a605fd2611124c25215a26590a Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 20 Dec 2024 18:49:57 +0800 Subject: [PATCH 08/45] refactor(analytics): do some refactor. 
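
Tighten the error check when flushing the analysis buffer: compare the return
value of taosWriteFile() against the requested length instead of only testing
for a negative value, so a short write is also treated as a failure. A minimal
sketch of the intended check, using only the primitives already present in
tanalytics.c (taosWriteFile, TdFilePtr, terrno, TSDB_CODE_SUCCESS);
writeAllOrFail is a hypothetical helper for illustration, not part of this
patch:

```
// Treat a partial write like a failure: a truncated JSON or per-column
// buffer file is unusable, so surface terrno in both cases.
static int32_t writeAllOrFail(TdFilePtr pFile, const char *buf, int32_t bufLen) {
  int32_t ret = taosWriteFile(pFile, buf, bufLen);
  if (ret != bufLen) {  // covers ret < 0 (error) and 0 <= ret < bufLen (short write)
    return terrno;
  }
  return TSDB_CODE_SUCCESS;
}
```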
--- source/util/src/tanalytics.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/util/src/tanalytics.c b/source/util/src/tanalytics.c index 3812295381..306795f5ed 100644 --- a/source/util/src/tanalytics.c +++ b/source/util/src/tanalytics.c @@ -479,12 +479,12 @@ static int32_t taosAnalJsonBufWriteStrUseCol(SAnalyticBuf *pBuf, const char *buf if (pBuf->bufType == ANALYTICS_BUF_TYPE_JSON) { int32_t ret = taosWriteFile(pBuf->filePtr, buf, bufLen); - if (ret < 0) { + if (ret != bufLen) { return terrno; } } else { int32_t ret = taosWriteFile(pBuf->pCols[colIndex].filePtr, buf, bufLen); - if (ret < 0) { + if (ret != bufLen) { return terrno; } } From e98e21553519a48a907a322edc5adbe0e9372e5d Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Sun, 22 Dec 2024 11:16:06 +0800 Subject: [PATCH 09/45] fix: lock debug invalid read issue --- source/libs/qworker/inc/qwInt.h | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/source/libs/qworker/inc/qwInt.h b/source/libs/qworker/inc/qwInt.h index 65f7cd772f..6474a1ab30 100644 --- a/source/libs/qworker/inc/qwInt.h +++ b/source/libs/qworker/inc/qwInt.h @@ -477,6 +477,8 @@ extern SQueryMgmt gQueryMgmt; } \ } while (0) + +#if 0 #define QW_UNLOCK(type, _lock) \ do { \ if (QW_READ == (type)) { \ @@ -505,6 +507,16 @@ extern SQueryMgmt gQueryMgmt; } \ } \ } while (0) +#else +#define QW_UNLOCK(type, _lock) \ + do { \ + if (QW_READ == (type)) { \ + taosRUnLockLatch(_lock); \ + } else { \ + taosWUnLockLatch(_lock); \ + } \ + } while (0) +#endif extern SQWorkerMgmt gQwMgmt; From 868701c01fd9f211253cb05009dfada724f0e2e7 Mon Sep 17 00:00:00 2001 From: wangjiaming0909 <604227650@qq.com> Date: Mon, 23 Dec 2024 09:39:25 +0800 Subject: [PATCH 10/45] test coverage for planner/nodes --- source/libs/nodes/test/nodesTestMain.cpp | 80 ++++++++++++++++++++++++ source/libs/planner/src/planValidator.c | 29 +-------- tests/system-test/2-query/union.py | 9 +++ 3 files changed, 92 insertions(+), 26 deletions(-) diff --git a/source/libs/nodes/test/nodesTestMain.cpp b/source/libs/nodes/test/nodesTestMain.cpp index eb69017b11..e24a979b1e 100644 --- a/source/libs/nodes/test/nodesTestMain.cpp +++ b/source/libs/nodes/test/nodesTestMain.cpp @@ -128,6 +128,86 @@ TEST(NodesTest, sort) { nodesDestroyList(l); } +TEST(NodesTest, match) { + SNode* pOperator = NULL; + int32_t code = nodesMakeNode(QUERY_NODE_OPERATOR, (SNode**)&pOperator); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + SOperatorNode* pOp = (SOperatorNode*)pOperator; + SOperatorNode* pLeft = NULL; + ASSERT_EQ(TSDB_CODE_SUCCESS, nodesMakeNode(QUERY_NODE_OPERATOR, (SNode**)&pLeft)); + ASSERT_EQ(TSDB_CODE_SUCCESS, nodesMakeNode(QUERY_NODE_VALUE, &pLeft->pLeft)); + ((SValueNode*)(pLeft->pLeft))->literal = taosStrdup("10"); + ASSERT_EQ(TSDB_CODE_SUCCESS, nodesMakeNode(QUERY_NODE_VALUE, &pLeft->pRight)); + ((SValueNode*)(pLeft->pRight))->literal = taosStrdup("5"); + pOp->pLeft = (SNode*)pLeft; + ASSERT_EQ(TSDB_CODE_SUCCESS, nodesMakeNode(QUERY_NODE_VALUE, &pOp->pRight)); + ((SValueNode*)(pOp->pRight))->literal = taosStrdup("3"); + pOp->opType = OP_TYPE_GREATER_THAN; + + SNode* pOperatorClone = NULL; + code = nodesCloneNode(pOperator, &pOperatorClone); + ASSERT_TRUE(nodesMatchNode(pOperator, pOperatorClone)); + + SNode* pValue = NULL; + code = nodesMakeNode(QUERY_NODE_VALUE, (SNode**)&pValue); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + ((SValueNode*)pValue)->literal = taosStrdup("10"); + ASSERT_FALSE(nodesMatchNode(pOperator, pValue)); + + SNode* pValueClone = NULL; + code = nodesCloneNode(pValue, &pValueClone); + 
ASSERT_EQ(code, TSDB_CODE_SUCCESS); + ASSERT_TRUE(nodesMatchNode(pValue, pValueClone)); + nodesDestroyNode(pValue); + nodesDestroyNode(pValueClone); + + SNode* pColumn = NULL, *pColumnClone = NULL; + code = nodesMakeNode(QUERY_NODE_COLUMN, &pColumn); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + strcpy(((SColumnNode*)pColumn)->colName, "column"); + strcpy(((SColumnNode*)pColumn)->tableName, "table"); + strcpy(((SColumnNode*)pColumn)->dbName, "db"); + strcpy(((SColumnNode*)pColumn)->node.aliasName, "column"); + ASSERT_FALSE(nodesMatchNode(pOperator, pColumn)); + code = nodesCloneNode(pColumn, &pColumnClone); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + ASSERT_TRUE(nodesMatchNode(pColumn, pColumnClone)); + nodesDestroyNode(pColumn); + nodesDestroyNode(pColumnClone); + + SNode* pFunction = NULL, *pFunctionClone = NULL; + code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)&pFunction); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + ((SFunctionNode*)pFunction)->funcId = 1; + strcpy(((SFunctionNode*)pFunction)->functionName, "now"); + ASSERT_FALSE(nodesMatchNode(pOperator, pFunction)); + code = nodesCloneNode(pFunction, &pFunctionClone); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + ASSERT_TRUE(nodesMatchNode(pFunction, pFunctionClone)); + nodesDestroyNode(pFunctionClone); + nodesDestroyNode(pFunction); + + SNode* pLogicCondition = NULL, *pLogicConditionClone = NULL; + code = nodesMakeNode(QUERY_NODE_LOGIC_CONDITION, (SNode**)&pLogicCondition); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + ((SLogicConditionNode*)pLogicCondition)->condType = LOGIC_COND_TYPE_AND; + ((SLogicConditionNode*)pLogicCondition)->pParameterList = NULL; + code = nodesMakeList(&((SLogicConditionNode*)pLogicCondition)->pParameterList); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + code = nodesListAppend((SNodeList*)((SLogicConditionNode*)pLogicCondition)->pParameterList, pOperator); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + code = nodesListAppend(((SLogicConditionNode*)pLogicCondition)->pParameterList, pOperatorClone); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + + code = nodesCloneNode(pLogicCondition, &pLogicConditionClone); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + ASSERT_TRUE(nodesMatchNode(pLogicCondition, pLogicConditionClone)); + ASSERT_FALSE(nodesMatchNode(pLogicCondition, pFunctionClone)); + + nodesDestroyNode(pLogicCondition); + nodesDestroyNode(pLogicConditionClone); +} + int main(int argc, char* argv[]) { testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); diff --git a/source/libs/planner/src/planValidator.c b/source/libs/planner/src/planValidator.c index 6b7b46cfa7..a3b09dff22 100755 --- a/source/libs/planner/src/planValidator.c +++ b/source/libs/planner/src/planValidator.c @@ -55,16 +55,10 @@ int32_t validateQueryPlanNode(SValidatePlanContext* pCxt, SQueryPlan* pPlan) { SNode* pSubNode = NULL; SNodeListNode* pSubplans = (SNodeListNode*)pNode; FOREACH(pSubNode, pSubplans->pNodeList) { - if (QUERY_NODE_PHYSICAL_SUBPLAN != nodeType(pNode)) { - code = TSDB_CODE_PLAN_INTERNAL_ERROR; - break; - } - code = doValidatePhysiNode(pCxt, pSubNode); - if (code) { - break; - } + if (code) break; } + if (code) break; } return code; @@ -142,24 +136,7 @@ int32_t validateQueryPlan(SPlanContext* pCxt, SQueryPlan* pPlan) { int32_t code = TSDB_CODE_SUCCESS; SNode* pNode = NULL; - FOREACH(pNode, pPlan->pSubplans) { - if (QUERY_NODE_NODE_LIST != nodeType(pNode)) { - code = TSDB_CODE_PLAN_INTERNAL_ERROR; - break; - } - - SNode* pSubNode = NULL; - SNodeListNode* pSubplans = (SNodeListNode*)pNode; - FOREACH(pSubNode, pSubplans->pNodeList) { - code = 
doValidatePhysiNode(&cxt, pSubNode); - if (code) { - break; - } - } - if (code) { - break; - } - } + code = validateQueryPlanNode(&cxt, pPlan); destoryValidatePlanContext(&cxt); return code; diff --git a/tests/system-test/2-query/union.py b/tests/system-test/2-query/union.py index d462873ab9..f87df22948 100644 --- a/tests/system-test/2-query/union.py +++ b/tests/system-test/2-query/union.py @@ -406,6 +406,14 @@ class TDTestCase: tdSql.checkRows(6) ##tdSql.execute("drop database ep_iot") + def test_case_for_nodes_match_node(self): + sql = "create table nt (ts timestamp, c1 int primary key, c2 int)" + tdSql.execute(sql, queryTimes=1) + sql = 'select diff (ts) from (select * from tt union select * from tt order by c1, case when ts < now - 1h then ts + 1h else ts end) partition by c1, case when ts < now - 1h then ts + 1h else ts end' + tdSql.error(sql, -2147473917) + + pass + def run(self): tdSql.prepare() self.test_TS_5630() @@ -427,6 +435,7 @@ class TDTestCase: tdLog.printNoPrefix("==========step4:after wal, all check again ") self.all_test() self.test_TD_33137() + self.test_case_for_nodes_match_node() def test_TD_33137(self): sql = "select 'asd' union all select 'asdasd'" From aa9bab4f3d57cf0a4aec160b999beb88f105262e Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 23 Dec 2024 10:29:05 +0800 Subject: [PATCH 11/45] Update 02-management.md --- docs/zh/06-advanced/06-TDgpt/02-management.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/06-advanced/06-TDgpt/02-management.md b/docs/zh/06-advanced/06-TDgpt/02-management.md index b37c39944f..6803c5d356 100644 --- a/docs/zh/06-advanced/06-TDgpt/02-management.md +++ b/docs/zh/06-advanced/06-TDgpt/02-management.md @@ -110,7 +110,7 @@ SHOW ANODES; taos> show anodes; id | url | status | create_time | update_time | ================================================================================================================== - 1 | 192.168.0.1:6090 | ready | 2024-11-28 18:44:27.089 | 2024-11-28 18:44:27.089 | + 1 | 192.168.0.1:6090 | ready | 2024-11-28 18:44:27.089 | 2024-11-28 18:44:27.089 | Query OK, 1 row(s) in set (0.037205s) ``` From 0ca9054e60d09ca7b485e4269509a1139307bf54 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 23 Dec 2024 10:34:21 +0800 Subject: [PATCH 12/45] Update 02-management.md --- docs/zh/06-advanced/06-TDgpt/02-management.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/zh/06-advanced/06-TDgpt/02-management.md b/docs/zh/06-advanced/06-TDgpt/02-management.md index 6803c5d356..d977e25dc3 100644 --- a/docs/zh/06-advanced/06-TDgpt/02-management.md +++ b/docs/zh/06-advanced/06-TDgpt/02-management.md @@ -66,7 +66,7 @@ pidfile = /usr/local/taos/taosanode/taosanode.pid # uWSGI log files logto = /var/log/taos/taosanode/taosanode.log -# wWSGI monitor port +# uWSGI monitor port stats = 127.0.0.1:8387 # python virtual environment directory, used by Anode @@ -86,7 +86,7 @@ log-level = DEBUG **提示** 请勿设置 `daemonize` 参数,该参数会导致 uWSGI 与 systemctl 冲突,从而导致 Anode 无法正常启动。 -上面的示例配置文件 `taosanode.ini` 只包含了使用 Anode 提供服务的基础配置参数,对于 uWSGI 的其他配置参数的设置及其说明请参考 [uWSGIS官方文档](https://uwsgi-docs-zh.readthedocs.io/zh-cn/latest/Options.html)。 +上面的示例配置文件 `taosanode.ini` 只包含了使用 Anode 提供服务的基础配置参数,对于 uWSGI 的其他配置参数的设置及其说明请参考 [uWSGI 官方文档](https://uwsgi-docs-zh.readthedocs.io/zh-cn/latest/Options.html)。 Anode 运行配置主要是以下: - app-log: Anode 服务运行产生的日志,用户可以调整其到需要的位置 From 589b36d7fc44de76631a3d909516f4beb1e73556 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 23 Dec 2024 10:38:06 +0800 Subject: [PATCH 13/45] 
Update index.md --- docs/zh/06-advanced/06-TDgpt/04-forecast/index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/06-advanced/06-TDgpt/04-forecast/index.md b/docs/zh/06-advanced/06-TDgpt/04-forecast/index.md index 3981fff8c6..71b97aa996 100644 --- a/docs/zh/06-advanced/06-TDgpt/04-forecast/index.md +++ b/docs/zh/06-advanced/06-TDgpt/04-forecast/index.md @@ -41,7 +41,7 @@ algo=expr1 "} ``` -1. `column_expr`:预测的时序数据列。与异常检测相同,只支持数值类型列输入。 +1. `column_expr`:预测的时序数据列,只支持数值类型列输入。 2. `options`:预测函数的参数。字符串类型,其中使用 K=V 方式调用算法及相关参数。采用逗号分隔的 K=V 字符串表示,其中的字符串不需要使用单引号、双引号、或转义号等符号,不能使用中文及其他宽字符。预测支持 `conf`, `every`, `rows`, `start`, `rows` 几个控制参数,其含义如下: ### 参数说明 From 3ac04949da4b73bc63ee1f45fb36a284bfaa856b Mon Sep 17 00:00:00 2001 From: dmchen Date: Mon, 23 Dec 2024 02:50:51 +0000 Subject: [PATCH 14/45] fix/TS-5751-delete-audit-case --- tests/parallel_test/cases.task | 1 + tests/system-test/0-others/taosd_audit.py | 144 ++++++++++++++++++++++ 2 files changed, 145 insertions(+) create mode 100644 tests/system-test/0-others/taosd_audit.py diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index c9d28e0623..45009fe5f8 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -363,6 +363,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/telemetry.py ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/backquote_check.py ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/taosdMonitor.py +,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/taosd_audit.py ,,n,system-test,python3 ./test.py -f 0-others/taosdlog.py ,,n,system-test,python3 ./test.py -f 0-others/taosdShell.py -N 5 -M 3 -Q 3 ,,n,system-test,python3 ./test.py -f 0-others/udfTest.py diff --git a/tests/system-test/0-others/taosd_audit.py b/tests/system-test/0-others/taosd_audit.py new file mode 100644 index 0000000000..65a25cbdec --- /dev/null +++ b/tests/system-test/0-others/taosd_audit.py @@ -0,0 +1,144 @@ +import taos +import sys +import time +import socket +# import pexpect +import os +import http.server +import gzip +import threading +import json +import pickle + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * + +telemetryPort = '6043' +serverPort = '7080' +hostname = socket.gethostname() + +class RequestHandlerImpl(http.server.BaseHTTPRequestHandler): + hostPort = hostname + ":" + serverPort + + def telemetryInfoCheck(self, infoDict=''): + if "records" not in infoDict or len(infoDict["records"]) == 0: + tdLog.exit("records is null!") + + if "operation" not in infoDict["records"][0] or infoDict["records"][0]["operation"] != "delete": + tdLog.exit("operation is null!") + + if "details" not in infoDict["records"][0] or infoDict["records"][0]["details"] != "delete from db3.tb": + tdLog.exit("details is null!") + + def do_GET(self): + """ + process GET request + """ + + def do_POST(self): + """ + process POST request + """ + contentEncoding = self.headers["Content-Encoding"] + + if contentEncoding == 'gzip': + req_body = self.rfile.read(int(self.headers["Content-Length"])) + plainText = gzip.decompress(req_body).decode() + else: + plainText = self.rfile.read(int(self.headers["Content-Length"])).decode() + + print(plainText) + # 1. send response code and header + self.send_response(200) + self.send_header("Content-Type", "text/html; charset=utf-8") + self.end_headers() + + # 2. send response content + #self.wfile.write(("Hello World: " + req_body + "\n").encode("utf-8")) + + # 3. 
check request body info + infoDict = json.loads(plainText) + #print("================") + # print(infoDict) + + self.telemetryInfoCheck(infoDict) + + # 4. shutdown the server and exit case + assassin = threading.Thread(target=self.server.shutdown) + assassin.daemon = True + assassin.start() + print ("==== shutdown http server ====") + +class TDTestCase: + global hostname + global serverPort + if (platform.system().lower() == 'windows' and not tdDnodes.dnodes[0].remoteIP == ""): + try: + config = eval(tdDnodes.dnodes[0].remoteIP ) + hostname = config["host"] + except Exception: + hostname = tdDnodes.dnodes[0].remoteIP + rpcDebugFlagVal = '143' + clientCfgDict = {'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''} + clientCfgDict["serverPort"] = serverPort + clientCfgDict["firstEp"] = hostname + ':' + serverPort + clientCfgDict["secondEp"] = hostname + ':' + serverPort + clientCfgDict["rpcDebugFlag"] = rpcDebugFlagVal + clientCfgDict["fqdn"] = hostname + + updatecfgDict = {'clientCfg': {}, 'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''} + updatecfgDict["clientCfg"] = clientCfgDict + updatecfgDict["serverPort"] = serverPort + updatecfgDict["firstEp"] = hostname + ':' + serverPort + updatecfgDict["secondEp"] = hostname + ':' + serverPort + updatecfgDict["fqdn"] = hostname + + updatecfgDict["monitorFqdn"] = hostname + updatecfgDict["monitorPort"] = '6043' + updatecfgDict["monitor"] = '0' + updatecfgDict["monitorInterval"] = "5" + updatecfgDict["monitorMaxLogs"] = "10" + updatecfgDict["monitorComp"] = "1" + updatecfgDict["monitorForceV2"] = "0" + + updatecfgDict["audit"] = '1' + + print ("===================: ", updatecfgDict) + + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + + def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring + tdSql.prepare() + # time.sleep(2) + vgroups = "4" + sql = "create database db3 vgroups " + vgroups + tdSql.query(sql) + sql = "create table db3.stb (ts timestamp, f int) tags (t int)" + tdSql.query(sql) + sql = "create table db3.tb using db3.stb tags (1)" + tdSql.query(sql) + + sql = "delete from db3.tb" + tdSql.query(sql) + + # create http server: bing ip/port , and request processor + if (platform.system().lower() == 'windows' and not tdDnodes.dnodes[0].remoteIP == ""): + RequestHandlerImplStr = base64.b64encode(pickle.dumps(RequestHandlerImpl)).decode() + cmdStr = "import pickle\nimport http\nRequestHandlerImpl=pickle.loads(base64.b64decode(\"%s\".encode()))\nclass NewRequestHandlerImpl(RequestHandlerImpl):\n hostPort = \'%s\'\nhttp.server.HTTPServer((\"\", %s), NewRequestHandlerImpl).serve_forever()"%(RequestHandlerImplStr,hostname+":"+serverPort,telemetryPort) + tdDnodes.dnodes[0].remoteExec({}, cmdStr) + else: + serverAddress = ("", int(telemetryPort)) + http.server.HTTPServer(serverAddress, RequestHandlerImpl).serve_forever() + + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) From 6a0206d3171daa3990407e649b3763f05e8d21cb Mon Sep 17 00:00:00 2001 From: "pengrongkun94@qq.com" Date: Mon, 23 Dec 2024 10:57:35 +0800 Subject: [PATCH 15/45] fix some case error --- source/libs/parser/src/parInsertStmt.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/source/libs/parser/src/parInsertStmt.c b/source/libs/parser/src/parInsertStmt.c index 
37d7b2f431..74fac463f1 100644 --- a/source/libs/parser/src/parInsertStmt.c +++ b/source/libs/parser/src/parInsertStmt.c @@ -509,6 +509,9 @@ int32_t qBindStmtTagsValue2(void* pBlock, void* boundTags, int64_t suid, const c STag* pTag = NULL; for (int c = 0; c < tags->numOfBound; ++c) { + if (bind == NULL) { + break; + } if (bind[c].is_null && bind[c].is_null[0]) { continue; } From 6523f977b42bc9810f96577ecea3b4cd304e3318 Mon Sep 17 00:00:00 2001 From: wangjiaming0909 <604227650@qq.com> Date: Mon, 23 Dec 2024 12:12:32 +0800 Subject: [PATCH 16/45] fix test case union.py --- tests/system-test/2-query/union.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/tests/system-test/2-query/union.py b/tests/system-test/2-query/union.py index f87df22948..380b7879c4 100644 --- a/tests/system-test/2-query/union.py +++ b/tests/system-test/2-query/union.py @@ -407,13 +407,11 @@ class TDTestCase: ##tdSql.execute("drop database ep_iot") def test_case_for_nodes_match_node(self): - sql = "create table nt (ts timestamp, c1 int primary key, c2 int)" + sql = "create table db.nt (ts timestamp, c1 int primary key, c2 int)" tdSql.execute(sql, queryTimes=1) - sql = 'select diff (ts) from (select * from tt union select * from tt order by c1, case when ts < now - 1h then ts + 1h else ts end) partition by c1, case when ts < now - 1h then ts + 1h else ts end' + sql = 'select diff (ts) from (select * from db.tt union select * from db.tt order by c1, case when ts < now - 1h then ts + 1h else ts end) partition by c1, case when ts < now - 1h then ts + 1h else ts end' tdSql.error(sql, -2147473917) - pass - def run(self): tdSql.prepare() self.test_TS_5630() From 59003022d8788c09608f3bd39e8b811fa579ae9e Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 23 Dec 2024 14:32:04 +0800 Subject: [PATCH 17/45] fix(analytics):copy the correct post rsp. --- source/util/src/tanalytics.c | 22 +++++++++++++++++++--- 1 file changed, 19 insertions(+), 3 deletions(-) diff --git a/source/util/src/tanalytics.c b/source/util/src/tanalytics.c index 306795f5ed..5a3ceef422 100644 --- a/source/util/src/tanalytics.c +++ b/source/util/src/tanalytics.c @@ -216,12 +216,28 @@ static size_t taosCurlWriteData(char *pCont, size_t contLen, size_t nmemb, void return 0; } - pRsp->dataLen = (int64_t)contLen * (int64_t)nmemb; - pRsp->data = taosMemoryMalloc(pRsp->dataLen + 1); + int64_t size = pRsp->dataLen + (int64_t)contLen * nmemb; + if (pRsp->data == NULL) { + pRsp->data = taosMemoryMalloc(size + 1); + if (pRsp->data == NULL) { + uError("failed to prepare recv buffer for post rsp, len:%d, code:%s", (int32_t) size + 1, tstrerror(terrno)); + return terrno; + } + } else { + char* p = taosMemoryRealloc(pRsp->data, size + 1); + if (p == NULL) { + uError("failed to prepare recv buffer for post rsp, len:%d, code:%s", (int32_t) size + 1, tstrerror(terrno)); + return terrno; + } + + pRsp->data = p; + } if (pRsp->data != NULL) { - (void)memcpy(pRsp->data, pCont, pRsp->dataLen); + (void)memcpy(pRsp->data + pRsp->dataLen, pCont, pRsp->dataLen); pRsp->data[pRsp->dataLen] = 0; + pRsp->dataLen = size; + uDebugL("curl response is received, len:%" PRId64 ", content:%s", pRsp->dataLen, pRsp->data); return pRsp->dataLen; } else { From 5ba7de5844867bd2e978506bfba5a56991d20b84 Mon Sep 17 00:00:00 2001 From: xiao-77 Date: Mon, 23 Dec 2024 15:37:26 +0800 Subject: [PATCH 18/45] Doc(cfg):support dyn alter disable create file. 
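
Document the fourth dataDir field, disable_create_new_file, and how it can be
toggled at runtime with an ALTER DNODE statement. A usage sketch drawn from the
examples added in this change (the dnode id and mount path are illustrative,
not required values): a tier-0 mount point configured as

```
dataDir /mnt/disk2/taos 0 0 0
```

can later have new file-set creation disabled dynamically:

```
alter dnode 1 "/mnt/disk2/taos 1";
```

Tier-0 storage must keep at least one mount point with disable_create_new_file
set to 0; tier-1 and tier-2 storage have no such restriction.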
--- docs/en/26-tdinternal/01-arch.md | 33 +++++++++++++++++++-- docs/zh/26-tdinternal/01-arch.md | 50 +++++++++++++++++++++++++++++--- 2 files changed, 76 insertions(+), 7 deletions(-) diff --git a/docs/en/26-tdinternal/01-arch.md b/docs/en/26-tdinternal/01-arch.md index 55c56a7681..ef689e0b74 100644 --- a/docs/en/26-tdinternal/01-arch.md +++ b/docs/en/26-tdinternal/01-arch.md @@ -328,8 +328,35 @@ In addition to precomputation, TDengine also supports various downsampling stora ### Multi-Level Storage and Object Storage -By default, TDengine stores all data in the /var/lib/taos directory. To expand storage capacity, reduce potential bottlenecks caused by file reading, and enhance data throughput, TDengine allows the use of the configuration parameter `dataDir` to enable the cluster to utilize multiple mounted hard drives simultaneously. +By default, TDengine saves all data in /var/lib/taos directory, and the data files of each vnode are saved in a different directory under this directory. In order to expand the storage space, minimize the bottleneck of file reading and improve the data throughput rate, TDengine can configure the system parameter "dataDir" to allow multiple mounted hard disks to be used by system at the same time. In addition, TDengine also provides the function of tiered data storage, i.e. storage on different storage media according to the time stamps of data files. For example, the latest data is stored on SSD, the data older than a week is stored on local hard disk, and data older than four weeks is stored on network storage device. This reduces storage costs and ensures efficient data access. The movement of data on different storage media is automatically done by the system and is completely transparent to applications. Tiered storage of data is also configured through the system parameter "dataDir". + +dataDir format is as follows: + +``` +dataDir data_path [tier_level] [primary] [disable_create_new_file] +``` + +Where `data_path` is the folder path of mount point, and `tier_level` is the media storage-tier. The higher the media storage-tier, means the older the data file. Multiple hard disks can be mounted at the same storage-tier, and data files on the same storage-tier are distributed on all hard disks within the tier. TDengine supports up to 3 tiers of storage, so tier_level values are 0, 1, and 2. When configuring dataDir, there must be only one mount path without specifying tier_level, which is called special mount disk (path). The mount path defaults to level 0 storage media and contains special file links, which cannot be removed, otherwise it will have a devastating impact on the written data. And `primary` means whether the data dir is the primary mount point. Enter 0 for false or 1 for true. The default value is 1. A TDengine cluster can have only one `primary` mount point, which must be on tier 0. And `disable_create_new_file` means whether to prohibit the creation of new file sets on the specified mount point. Enter 0 for false and 1 for true. The default value is 0. Tier 0 storage must have at least one mount point with disable_create_new_file set to 0. Tier 1 and tier 2 storage do not have this restriction. + +Suppose there is a physical node with six mountable hard disks/mnt/disk1,/mnt/disk2, ..., /mnt/disk6, where disk1 and disk2 need to be designated as level 0 storage media, disk3 and disk4 are level 1 storage media, and disk5 and disk6 are level 2 storage media. 
Disk1 is a special mount disk, you can configure it in/etc/taos/taos.cfg as follows: + +``` +dataDir /mnt/disk1/taos 0 1 0 +dataDir /mnt/disk2/taos 0 0 0 +dataDir /mnt/disk3/taos 1 0 0 +dataDir /mnt/disk4/taos 1 0 1 +dataDir /mnt/disk5/taos 2 0 0 +dataDir /mnt/disk6/taos 2 0 0 +``` + +Mounted disks can also be a non-local network disk, as long as the system can access it. + +You can use the following command to dynamically modify dataDir to control whether disable_create_new_file is enabled for the current directory. + +``` +alter dnode 1 "/mnt/disk2/taos 1"; +``` + +Note: Tiered Storage is only supported in Enterprise Edition -Additionally, TDengine offers tiered data storage functionality, allowing users to store data from different time periods in directories on different storage devices. This facilitates the separation of "hot" data (frequently accessed) and "cold" data (less frequently accessed), making full use of various storage resources while saving costs. For example, data that is recently collected and requires frequent access can be stored on high-performance solid-state drives due to their high read performance requirements. Data that exceeds a certain age and has lower query demands can be stored on mechanically driven hard disks, which are relatively cheaper. -To further reduce storage costs, TDengine also supports storing time-series data in object storage systems. Through its innovative design, in most cases, the performance of querying time-series data from object storage systems is close to half that of local disks, and in some scenarios, the performance can even be comparable to local disks. Additionally, TDengine allows users to perform delete and update operations on time-series data stored in object storage. diff --git a/docs/zh/26-tdinternal/01-arch.md b/docs/zh/26-tdinternal/01-arch.md index 7091ca9661..242adb11b0 100644 --- a/docs/zh/26-tdinternal/01-arch.md +++ b/docs/zh/26-tdinternal/01-arch.md @@ -323,10 +323,52 @@ TDengine 采用了一种数据驱动的策略来实现缓存数据的持久化 除了预计算功能以外,TDengine 还支持对原始数据进行多种降采样存储。一种降采样存储方式是 Rollup SMA,它能够自动对原始数据进行降采样存储,并支持 3 个不同的数据保存层级,用户可以指定每层数据的聚合周期和保存时长。这对于那些关注数据趋势的场景尤为适用,其核心目的是减少存储开销并提高查询速度。另一种降采样存储方式是 Time-Range-Wise SMA,它可以根据聚合结果进行降采样存储,非常适合于高频的 interval 查询场景。该功能采用与普通流计算相同的逻辑,并允许用户通过设置watermark 来处理延时数据,相应地,实际的查询结果也会有一定的时间延迟。 -### 多级存储与对象存储 +### 多级存储 -在默认情况下,TDengine 将所有数据存储在 /var/lib/taos 目录中。为了扩展存储容量,减少文件读取可能导致的瓶颈,并提升数据吞吐量,TDengine 允许通过配置参数dataDir,使得集群能够同时利用挂载的多块硬盘。 +说明:多级存储功能仅企业版支持,从 2.0.16.0 版本开始提供。 -此外,TDengine 还提供了数据分级存储的功能,允许用户将不同时间段的数据存储在不同存储设备的目录中,以此实现将“热”数据和“冷”数据分开存储。这样做可以充分利用各种存储资源,同时节约成本。例如,对于最新采集且需要频繁访问的数据,由于其读取性能要求较高,用户可以配置将这些数据存储在高性能的固态硬盘上。而对于超过一定期限、查询需求较低的数据,则可以将其存储在成本相对较低的机械硬盘上。 +在默认配置下,TDengine 会将所有数据保存在 /var/lib/taos 目录下,而且每个 vnode 的数据文件保存在该目录下的不同目录。为扩大存储空间,尽量减少文件读取的瓶颈,提高数据吞吐率 TDengine 可通过配置系统参数 dataDir 让多个挂载的硬盘被系统同时使用。 -为了进一步降低存储成本,TDengine 还支持将时序数据存储在对象存储系统中。通过其创新性的设计,在大多数情况下,从对象存储系统中查询时序数据的性能接近本地硬盘的一半,而在某些场景下,性能甚至可以与本地硬盘相媲美。同时,TDengine 还允许用户对存储在对象存储中的时序数据执行删除和更新操作。 +除此之外,TDengine 也提供了数据分级存储的功能,将不同时间段的数据存储在挂载的不同介质上的目录里,从而实现不同“热度”的数据存储在不同的存储介质上,充分利用存储,节约成本。比如,最新采集的数据需要经常访问,对硬盘的读取性能要求高,那么用户可以配置将这些数据存储在 SSD 盘上。超过一定期限的数据,查询需求量没有那么高,那么可以存储在相对便宜的 HDD 盘上。 + +多级存储支持 3 级,每级最多可配置 128 个挂载点。 + +TDengine 多级存储配置方式如下(在配置文件/etc/taos/taos.cfg 中): + +``` +dataDir [path] +``` + +- path: 挂载点的文件夹路径 +- level: 介质存储等级,取值为 0,1,2。 + 0 级存储最新的数据,1 级存储次新的数据,2 级存储最老的数据,省略默认为 0。 + 各级存储之间的数据流向:0 级存储 -> 1 级存储 -> 2 级存储。 + 同一存储等级可挂载多个硬盘,同一存储等级上的数据文件分布在该存储等级的所有硬盘上。 + 需要说明的是,数据在不同级别的存储介质上的移动,是由系统自动完成的,用户无需干预。 +- primary: 是否为主挂载点,0(否)或 1(是),省略默认为 1。 +- 
disable_create_new_file: 是否禁止创建新文件组,0(否)或 1(是),省略默认为 0。 + +在配置中,只允许一个主挂载点的存在(level=0,primary=1),例如采用如下的配置方式: + +``` +dataDir /mnt/data1 0 1 0 +dataDir /mnt/data2 0 0 0 +dataDir /mnt/data3 1 0 0 +dataDir /mnt/data4 1 0 1 +dataDir /mnt/data5 2 0 0 +dataDir /mnt/data6 2 0 0 +``` + +您可以使用以下命令动态修改 dataDir 的 disable 来控制当前目录是否开启 disable_create_new_file 。 +``` +alter dnode 1 "/mnt/disk2/taos 1"; +``` + +:::note + +1. 多级存储不允许跨级配置,合法的配置方案有:仅 0 级,仅 0 级+ 1 级,以及 0 级+ 1 级+ 2 级。而不允许只配置 level=0 和 level=2,而不配置 level=1。 +2. 禁止手动移除使用中的挂载盘,挂载盘目前不支持非本地的网络盘。 +3. 多级存储目前不支持删除已经挂载的硬盘的功能。 +4. 0 级存储至少存在一个 disable_create_new_file 为 0 的挂载点,1 级 和 2 级存储没有该限制。 + +::: \ No newline at end of file From 112754d2c5a20cd540339fb49675a6ff55fabd69 Mon Sep 17 00:00:00 2001 From: xiao-77 Date: Mon, 23 Dec 2024 15:39:16 +0800 Subject: [PATCH 19/45] Fix merge errors. --- docs/zh/26-tdinternal/01-arch.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/26-tdinternal/01-arch.md b/docs/zh/26-tdinternal/01-arch.md index 242adb11b0..9cc1ef6f02 100644 --- a/docs/zh/26-tdinternal/01-arch.md +++ b/docs/zh/26-tdinternal/01-arch.md @@ -323,7 +323,7 @@ TDengine 采用了一种数据驱动的策略来实现缓存数据的持久化 除了预计算功能以外,TDengine 还支持对原始数据进行多种降采样存储。一种降采样存储方式是 Rollup SMA,它能够自动对原始数据进行降采样存储,并支持 3 个不同的数据保存层级,用户可以指定每层数据的聚合周期和保存时长。这对于那些关注数据趋势的场景尤为适用,其核心目的是减少存储开销并提高查询速度。另一种降采样存储方式是 Time-Range-Wise SMA,它可以根据聚合结果进行降采样存储,非常适合于高频的 interval 查询场景。该功能采用与普通流计算相同的逻辑,并允许用户通过设置watermark 来处理延时数据,相应地,实际的查询结果也会有一定的时间延迟。 -### 多级存储 +### 多级存储与对象存储 说明:多级存储功能仅企业版支持,从 2.0.16.0 版本开始提供。 From a1e20680f78a8bcf85975843f8052fbc329002ae Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Mon, 23 Dec 2024 15:57:49 +0800 Subject: [PATCH 20/45] fix:[TD-33272]add test case --- source/client/test/smlTest.cpp | 1 + source/common/src/msg/tmsg.c | 2 + source/dnode/vnode/src/tq/tq.c | 3 +- source/dnode/vnode/src/tq/tqOffset.c | 72 ++++++++++++----------- source/dnode/vnode/test/CMakeLists.txt | 38 ++++++------- source/dnode/vnode/test/tqTest.cpp | 79 ++++++++++++++++++++++++++ source/libs/parser/src/parInsertSml.c | 3 +- utils/test/c/sml_test.c | 25 ++++++++ 8 files changed, 166 insertions(+), 57 deletions(-) create mode 100644 source/dnode/vnode/test/tqTest.cpp diff --git a/source/client/test/smlTest.cpp b/source/client/test/smlTest.cpp index 338457bec4..968a4e7c75 100644 --- a/source/client/test/smlTest.cpp +++ b/source/client/test/smlTest.cpp @@ -276,6 +276,7 @@ TEST(testCase, smlParseCols_Test) { info->dataFormat = false; SSmlLineInfo elements = {0}; info->msgBuf = msgBuf; + ASSERT_EQ(smlInitHandle(NULL), const char *data = "st,t=1 cb\\=in=\"pass\\,it " diff --git a/source/common/src/msg/tmsg.c b/source/common/src/msg/tmsg.c index 2193c7983f..166c889947 100644 --- a/source/common/src/msg/tmsg.c +++ b/source/common/src/msg/tmsg.c @@ -11141,6 +11141,7 @@ void tOffsetCopy(STqOffsetVal *pLeft, const STqOffsetVal *pRight) { } void tOffsetDestroy(void *param) { + if (param == NULL) return; STqOffsetVal *pVal = (STqOffsetVal *)param; if (IS_VAR_DATA_TYPE(pVal->primaryKey.type)) { taosMemoryFreeClear(pVal->primaryKey.pData); @@ -11148,6 +11149,7 @@ void tOffsetDestroy(void *param) { } void tDeleteSTqOffset(void *param) { + if (param == NULL) return; STqOffset *pVal = (STqOffset *)param; tOffsetDestroy(&pVal->val); } diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index 03037e529b..37f3572f65 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -152,10 +152,9 @@ void tqClose(STQ* pTq) { taosMemoryFree(pTq->path); tqMetaClose(pTq); - 
int32_t vgId = pTq->pStreamMeta->vgId; streamMetaClose(pTq->pStreamMeta); - qDebug("vgId:%d end to close tq", vgId); + qDebug("vgId:%d end to close tq", pTq->pStreamMeta != NULL ? pTq->pStreamMeta->vgId : -1); taosMemoryFree(pTq); } diff --git a/source/dnode/vnode/src/tq/tqOffset.c b/source/dnode/vnode/src/tq/tqOffset.c index 4d90091701..57a901a2e1 100644 --- a/source/dnode/vnode/src/tq/tqOffset.c +++ b/source/dnode/vnode/src/tq/tqOffset.c @@ -17,33 +17,41 @@ #include "tq.h" int32_t tqBuildFName(char** data, const char* path, char* name) { - if (data == NULL || path == NULL || name == NULL) { - return TSDB_CODE_INVALID_MSG; - } + int32_t code = 0; + int32_t lino = 0; + char* fname = NULL; + TSDB_CHECK_NULL(data, code, lino, END, TSDB_CODE_INVALID_MSG); + TSDB_CHECK_NULL(path, code, lino, END, TSDB_CODE_INVALID_MSG); + TSDB_CHECK_NULL(name, code, lino, END, TSDB_CODE_INVALID_MSG); int32_t len = strlen(path) + strlen(name) + 2; - char* fname = taosMemoryCalloc(1, len); - if(fname == NULL) { - return terrno; - } - int32_t code = tsnprintf(fname, len, "%s%s%s", path, TD_DIRSEP, name); - if (code < 0){ - code = TAOS_SYSTEM_ERROR(errno); - taosMemoryFree(fname); - return code; - } + fname = taosMemoryCalloc(1, len); + TSDB_CHECK_NULL(fname, code, lino, END, terrno); + code = tsnprintf(fname, len, "%s%s%s", path, TD_DIRSEP, name); + TSDB_CHECK_CODE(code, lino, END); + *data = fname; - return TDB_CODE_SUCCESS; + fname = NULL; + +END: + if (code != 0){ + tqError("%s failed at %d since %s", __func__, lino, tstrerror(code)); + } + taosMemoryFree(fname); + return code; } int32_t tqOffsetRestoreFromFile(STQ* pTq, char* name) { - if (pTq == NULL || name == NULL) { - return TSDB_CODE_INVALID_MSG; - } - int32_t code = TDB_CODE_SUCCESS; - int32_t lino = 0; - void* pMemBuf = NULL; + int32_t code = TDB_CODE_SUCCESS; + int32_t lino = 0; + void* pMemBuf = NULL; + TdFilePtr pFile = NULL; + STqOffset *pOffset = NULL; + void *pIter = NULL; - TdFilePtr pFile = taosOpenFile(name, TD_FILE_READ); + TSDB_CHECK_NULL(pTq, code, lino, END, TSDB_CODE_INVALID_MSG); + TSDB_CHECK_NULL(name, code, lino, END, TSDB_CODE_INVALID_MSG); + + pFile = taosOpenFile(name, TD_FILE_READ); TSDB_CHECK_NULL(pFile, code, lino, END, TDB_CODE_SUCCESS); int64_t ret = 0; @@ -68,25 +76,20 @@ int32_t tqOffsetRestoreFromFile(STQ* pTq, char* name) { STqOffset offset = {0}; code = tqMetaDecodeOffsetInfo(&offset, pMemBuf, size); TSDB_CHECK_CODE(code, lino, END); - code = taosHashPut(pTq->pOffset, offset.subKey, strlen(offset.subKey), &offset, sizeof(STqOffset)); - if (code != TDB_CODE_SUCCESS) { - tDeleteSTqOffset(&offset); - goto END; - } + pOffset = &offset; + code = taosHashPut(pTq->pOffset, pOffset->subKey, strlen(pOffset->subKey), pOffset, sizeof(STqOffset)); + TSDB_CHECK_CODE(code, lino, END); + pOffset = NULL; tqInfo("tq: offset restore from file to tdb, size:%d, hash size:%d subkey:%s", total, taosHashGetSize(pTq->pOffset), offset.subKey); taosMemoryFree(pMemBuf); pMemBuf = NULL; } - void *pIter = NULL; while ((pIter = taosHashIterate(pTq->pOffset, pIter))) { - STqOffset* pOffset = (STqOffset*)pIter; - code = tqMetaSaveOffset(pTq, pOffset); - if(code != 0){ - taosHashCancelIterate(pTq->pOffset, pIter); - goto END; - } + STqOffset* offset = (STqOffset*)pIter; + code = tqMetaSaveOffset(pTq, offset); + TSDB_CHECK_CODE(code, lino, END); } END: @@ -96,5 +99,8 @@ END: taosCloseFile(&pFile); taosMemoryFree(pMemBuf); + tDeleteSTqOffset(pOffset); + taosHashCancelIterate(pTq->pOffset, pIter); + return code; } diff --git 
a/source/dnode/vnode/test/CMakeLists.txt b/source/dnode/vnode/test/CMakeLists.txt index 724eabc751..826296e99f 100644 --- a/source/dnode/vnode/test/CMakeLists.txt +++ b/source/dnode/vnode/test/CMakeLists.txt @@ -1,29 +1,25 @@ -MESSAGE(STATUS "vnode unit test") +MESSAGE(STATUS "tq unit test") # GoogleTest requires at least C++11 SET(CMAKE_CXX_STANDARD 11) -AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST) -# add_executable(tqTest "") -# target_sources(tqTest -# PRIVATE -# "tqMetaTest.cpp" -# ) -# target_include_directories(tqTest -# PUBLIC -# "${TD_SOURCE_DIR}/include/server/vnode/tq" -# "${CMAKE_CURRENT_SOURCE_DIR}/../inc" -# ) +add_executable(tqTest tqTest.cpp) +target_include_directories(tqTest + PUBLIC + "${CMAKE_CURRENT_SOURCE_DIR}/../inc" +) -# target_link_libraries(tqTest -# tq -# gtest_main -# ) -# enable_testing() -# add_test( -# NAME tq_test -# COMMAND tqTest -# ) +TARGET_LINK_LIBRARIES( + tqTest + PUBLIC os util common vnode gtest_main +) + +enable_testing() + +add_test( + NAME tq_test + COMMAND tqTest +) # ADD_EXECUTABLE(tsdbSmaTest tsdbSmaTest.cpp) # TARGET_LINK_LIBRARIES( diff --git a/source/dnode/vnode/test/tqTest.cpp b/source/dnode/vnode/test/tqTest.cpp new file mode 100644 index 0000000000..1b5fe4fdcd --- /dev/null +++ b/source/dnode/vnode/test/tqTest.cpp @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include +#include + +#include +#include +#include + +#include +#include + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wwrite-strings" +#pragma GCC diagnostic ignored "-Wunused-function" +#pragma GCC diagnostic ignored "-Wunused-variable" +#pragma GCC diagnostic ignored "-Wsign-compare" + +SDmNotifyHandle dmNotifyHdl = {.state = 0}; + +#include "tq.h" +int main(int argc, char **argv) { + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} + +STqOffset offset = {.subKey = "testtest", .val = {.type = TMQ_OFFSET__LOG, .version = 8923}}; + +void tqWriteOffset() { + TdFilePtr pFile = taosOpenFile(TQ_OFFSET_NAME, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_APPEND); + + int32_t bodyLen; + int32_t code; + tEncodeSize(tEncodeSTqOffset, &offset, bodyLen, code); + int32_t totLen = INT_BYTES + bodyLen; + void* buf = taosMemoryCalloc(1, totLen); + void* abuf = POINTER_SHIFT(buf, INT_BYTES); + + *(int32_t*)buf = htonl(bodyLen); + SEncoder encoder; + tEncoderInit(&encoder, (uint8_t*)abuf, bodyLen); + tEncodeSTqOffset(&encoder, &offset); + taosWriteFile(pFile, buf, totLen); + + taosMemoryFree(buf); + + taosCloseFile(&pFile); +} + +TEST(testCase, tqOffsetTest) { + STQ* pTq = (STQ*)taosMemoryCalloc(1, sizeof(STQ)); + pTq->path = taosStrdup("./"); + + pTq->pOffset = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_VARCHAR), true, HASH_ENTRY_LOCK); + taosHashSetFreeFp(pTq->pOffset, (FDelete)tDeleteSTqOffset); + + tdbOpen(pTq->path, 16 * 1024, 1, &pTq->pMetaDB, 0, 0, NULL); + tdbTbOpen("tq.offset.db", -1, -1, NULL, pTq->pMetaDB, &pTq->pOffsetStore, 0); + + tqWriteOffset(); + tqOffsetRestoreFromFile(pTq, TQ_OFFSET_NAME); + taosRemoveFile(TQ_OFFSET_NAME); + tqClose(pTq); +} + +#pragma GCC diagnostic pop diff --git a/source/libs/parser/src/parInsertSml.c b/source/libs/parser/src/parInsertSml.c index d56cf7916f..bf86ef718e 100644 --- a/source/libs/parser/src/parInsertSml.c +++ b/source/libs/parser/src/parInsertSml.c @@ -357,10 +357,11 @@ end: int32_t smlInitHandle(SQuery** query) { int32_t lino = 0; int32_t code = 0; - *query = NULL; SQuery* pQuery = NULL; SVnodeModifyOpStmt* stmt = NULL; + TSDB_CHECK_NULL(query, code, lino, end, TSDB_CODE_INVALID_PARA); + *query = NULL; code = nodesMakeNode(QUERY_NODE_QUERY, (SNode**)&pQuery); TSDB_CHECK_CODE(code, lino, end); pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE; diff --git a/utils/test/c/sml_test.c b/utils/test/c/sml_test.c index d922a6454e..0907c2a641 100644 --- a/utils/test/c/sml_test.c +++ b/utils/test/c/sml_test.c @@ -1939,6 +1939,20 @@ int sml_td24559_Test() { } taos_free_result(pRes); + const char *sql2[] = { + "stb,t1=1 f1=283i32,f2=g\"Point(4.343 89.342)\" 1632299375000", + }; + + pRes = taos_query(taos, "use td24559"); + taos_free_result(pRes); + + pRes = taos_schemaless_insert(taos, (char **)sql2, sizeof(sql2) / sizeof(sql2[0]), TSDB_SML_LINE_PROTOCOL, + TSDB_SML_TIMESTAMP_MILLI_SECONDS); + + code = taos_errno(pRes); + printf("%s result0:%s\n", __FUNCTION__, taos_errstr(pRes)); + taos_free_result(pRes); + pRes = taos_query(taos, "drop database if exists td24559"); taos_free_result(pRes); @@ -2325,6 +2339,17 @@ int sml_td17324_Test() { ASSERT(code == 0); taos_free_result(pRes); + const char *sql1[] = { + "st123456,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"pa3ssit\",c2=false,c4=4f64 1732700000394000000", + }; + + pRes = taos_schemaless_insert(taos, (char **)sql1, sizeof(sql1) / sizeof(sql1[0]), TSDB_SML_LINE_PROTOCOL, + TSDB_SML_TIMESTAMP_NANO_SECONDS); + code = taos_errno(pRes); + printf("%s result0:%s\n", 
__FUNCTION__, taos_errstr(pRes)); + ASSERT(code == 0); + taos_free_result(pRes); + taos_close(taos); return code; From 4f77274af46e84514eeaeda017d3ac79371bde90 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Mon, 23 Dec 2024 16:15:44 +0800 Subject: [PATCH 21/45] fix:[TD-33272]add test case --- source/client/test/smlTest.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/client/test/smlTest.cpp b/source/client/test/smlTest.cpp index 968a4e7c75..cbd13c6749 100644 --- a/source/client/test/smlTest.cpp +++ b/source/client/test/smlTest.cpp @@ -276,7 +276,7 @@ TEST(testCase, smlParseCols_Test) { info->dataFormat = false; SSmlLineInfo elements = {0}; info->msgBuf = msgBuf; - ASSERT_EQ(smlInitHandle(NULL), + ASSERT_EQ(smlInitHandle(NULL), 0), const char *data = "st,t=1 cb\\=in=\"pass\\,it " From 0beaecc5d6dd92740f2f45784424f796362d47b1 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 23 Dec 2024 16:41:50 +0800 Subject: [PATCH 22/45] fix(analytics): fix bugs in recv post results. --- source/util/src/tanalytics.c | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/source/util/src/tanalytics.c b/source/util/src/tanalytics.c index 5a3ceef422..e68edd4b76 100644 --- a/source/util/src/tanalytics.c +++ b/source/util/src/tanalytics.c @@ -216,30 +216,33 @@ static size_t taosCurlWriteData(char *pCont, size_t contLen, size_t nmemb, void return 0; } - int64_t size = pRsp->dataLen + (int64_t)contLen * nmemb; + int64_t newDataSize = (int64_t) contLen * nmemb; + int64_t size = pRsp->dataLen + newDataSize; + if (pRsp->data == NULL) { pRsp->data = taosMemoryMalloc(size + 1); if (pRsp->data == NULL) { uError("failed to prepare recv buffer for post rsp, len:%d, code:%s", (int32_t) size + 1, tstrerror(terrno)); - return terrno; + return 0; // return the recv length, if failed, return 0 } } else { char* p = taosMemoryRealloc(pRsp->data, size + 1); if (p == NULL) { uError("failed to prepare recv buffer for post rsp, len:%d, code:%s", (int32_t) size + 1, tstrerror(terrno)); - return terrno; + return 0; // return the recv length, if failed, return 0 } pRsp->data = p; } if (pRsp->data != NULL) { - (void)memcpy(pRsp->data + pRsp->dataLen, pCont, pRsp->dataLen); - pRsp->data[pRsp->dataLen] = 0; - pRsp->dataLen = size; + (void)memcpy(pRsp->data + pRsp->dataLen, pCont, newDataSize); - uDebugL("curl response is received, len:%" PRId64 ", content:%s", pRsp->dataLen, pRsp->data); - return pRsp->dataLen; + pRsp->dataLen = size; + pRsp->data[size] = 0; + + uDebugL("curl response is received, len:%" PRId64 ", content:%s", size, pRsp->data); + return newDataSize; } else { pRsp->dataLen = 0; uError("failed to malloc curl response"); From 196ef024c534bb50b5b02cfccb5ad071d38e3556 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Mon, 23 Dec 2024 16:52:14 +0800 Subject: [PATCH 23/45] fix:[TD-33272]add test case --- source/client/test/smlTest.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/client/test/smlTest.cpp b/source/client/test/smlTest.cpp index cbd13c6749..87be16e467 100644 --- a/source/client/test/smlTest.cpp +++ b/source/client/test/smlTest.cpp @@ -276,7 +276,7 @@ TEST(testCase, smlParseCols_Test) { info->dataFormat = false; SSmlLineInfo elements = {0}; info->msgBuf = msgBuf; - ASSERT_EQ(smlInitHandle(NULL), 0), + ASSERT_EQ(smlInitHandle(NULL), 0); const char *data = "st,t=1 cb\\=in=\"pass\\,it " From 4bc4894d7c6ce7f793e19b9cf1f751dfbda9b8b9 Mon Sep 17 00:00:00 2001 From: Jing Sima Date: Mon, 23 Dec 2024 14:13:00 +0800 Subject: [PATCH 
24/45] fix:[TS-5798] Fix crash when function's param num more than 127. --- .../14-reference/03-taos-sql/10-function.md | 1 + .../14-reference/03-taos-sql/10-function.md | 1 + source/libs/function/src/builtins.c | 10 ++++- .../army/query/function/test_func_paramnum.py | 42 +++++++++++++++++++ tests/parallel_test/cases.task | 1 + 5 files changed, 53 insertions(+), 2 deletions(-) create mode 100644 tests/army/query/function/test_func_paramnum.py diff --git a/docs/en/14-reference/03-taos-sql/10-function.md b/docs/en/14-reference/03-taos-sql/10-function.md index e6cfa20bd4..ab5c48bce2 100644 --- a/docs/en/14-reference/03-taos-sql/10-function.md +++ b/docs/en/14-reference/03-taos-sql/10-function.md @@ -943,6 +943,7 @@ CHAR(expr1 [, expr2] [, expr3] ...) - NULL values in input parameters will be skipped. - If the input parameters are of string type, they will be converted to numeric type for processing. - If the character corresponding to the input parameter is a non-printable character, the return value will still contain the character corresponding to that parameter, but it may not be displayed. +- This function can have at most 2^31 - 1 input parameters. **Examples**: diff --git a/docs/zh/14-reference/03-taos-sql/10-function.md b/docs/zh/14-reference/03-taos-sql/10-function.md index c075545ff3..eb3a4bb0ed 100644 --- a/docs/zh/14-reference/03-taos-sql/10-function.md +++ b/docs/zh/14-reference/03-taos-sql/10-function.md @@ -902,6 +902,7 @@ CHAR(expr1 [, expr2] [, epxr3] ...) - 输入参数的 NULL 值会被跳过。 - 输入参数若为字符串类型,会将其转换为数值类型处理。 - 若输入的参数对应的字符为不可打印字符,返回值中仍有该参数对应的字符,但是可能无法显示出来。 +- 输入参数的个数上限为 2^31 - 1 个。 **举例**: ```sql diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index 95a332ac05..b42d739b40 100644 --- a/source/libs/function/src/builtins.c +++ b/source/libs/function/src/builtins.c @@ -828,14 +828,20 @@ static int32_t validateParam(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { const SParamInfo* paramPattern = funcMgtBuiltins[pFunc->funcId].parameters.inputParaInfo[i]; while (1) { - for (int8_t j = paramPattern[paramIdx].startParam; - j <= (paramPattern[paramIdx].endParam == -1 ? INT8_MAX : paramPattern[paramIdx].endParam); j++) { + // one table can have at most 4096 columns, int32_t is enough. + for (int32_t j = paramPattern[paramIdx].startParam; + j <= (paramPattern[paramIdx].endParam == -1 ? INT32_MAX - 1 : paramPattern[paramIdx].endParam); j++) { if (j > LIST_LENGTH(paramList)) { code = TSDB_CODE_SUCCESS; isMatch = true; break; } SNode* pNode = nodesListGetNode(paramList, j - 1); + if (NULL == pNode) { + code = TSDB_CODE_FUNC_FUNTION_PARA_NUM; + isMatch = false; + break; + } // check node type if (!paramSupportNodeType(pNode, paramPattern[paramIdx].validNodeType)) { code = TSDB_CODE_FUNC_FUNTION_PARA_TYPE; diff --git a/tests/army/query/function/test_func_paramnum.py b/tests/army/query/function/test_func_paramnum.py new file mode 100644 index 0000000000..37930cd624 --- /dev/null +++ b/tests/army/query/function/test_func_paramnum.py @@ -0,0 +1,42 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +from frame import etool +from frame.etool import * +from frame.log import * +from frame.cases import * +from frame.sql import * +from frame.caseBase import * +from frame.common import * + +class TDTestCase(TBase): + def create_table(self): + etool.benchMark(command = "-l 1000 -n 1 -d ts_5798") + + def run(self): + tdLog.debug(f"start to excute {__file__}") + + self.create_table() + + tdSql.query("select last_row(c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14, c15, c16, c17, c18, c19, c20, c21, c22, c23, c24, c25, c26, c27, c28, c29, c30, c31, c32, c33, c34, c35, c36, c37, c38, c39, c40, c41, c42, c43, c44, c45, c46, c47, c48, c49, c50, c51, c52, c53, c54, c55, c56, c57, c58, c59, c60, c61, c62, c63, c64, c65, c66, c67, c68, c69, c70, c71, c72, c73, c74, c75, c76, c77, c78, c79, c80, c81, c82, c83, c84, c85, c86, c87, c88, c89, c90, c91, c92, c93, c94, c95, c96, c97, c98, c99, c100, c101, c102, c103, c104, c105, c106, c107, c108, c109, c110, c111, c112, c113, c114, c115, c116, c117, c118, c119, c120, c121, c122, c123, c124, c125, c126, c127, c128, c129, c130, c131, c132, c133, c134, c135, c136, c137, c138, c139, c140, c141, c142, c143, c144, c145, c146, c147, c148, c149, c150, c151, c152, c153, c154, c155, c156, c157, c158, c159, c160, c161, c162, c163, c164, c165, c166, c167, c168, c169, c170, c171, c172, c173, c174, c175, c176, c177, c178, c179, c180, c181, c182, c183, c184, c185, c186, c187, c188, c189, c190, c191, c192, c193, c194, c195, c196, c197, c198, c199, c200, c201, c202, c203, c204, c205, c206, c207, c208, c209, c210, c211, c212, c213, c214, c215, c216, c217, c218, c219, c220, c221, c222, c223, c224, c225, c226, c227, c228, c229, c230, c231, c232, c233, c234, c235, c236, c237, c238, c239, c240, c241, c242, c243, c244, c245, c246, c247, c248, c249, c250, c251, c252, c253, c254, c255, c256, c257, c258, c259, c260, c261, c262, c263, c264, c265, c266, c267, c268, c269, c270, c271, c272, c273, c274, c275, c276, c277, c278, c279, c280, c281, c282, c283, c284, c285, c286, c287, c288, c289, c290, c291, c292, c293, c294, c295, c296, c297, c298, c299, c300, c301, c302, c303, c304, c305, c306, c307, c308, c309, c310, c311, c312, c313, c314, c315, c316, c317, c318, c319, c320, c321, c322, c323, c324, c325, c326, c327, c328, c329, c330, c331, c332, c333, c334, c335, c336, c337, c338, c339, c340, c341, c342, c343, c344, c345, c346, c347, c348, c349, c350, c351, c352, c353, c354, c355, c356, c357, c358, c359, c360, c361, c362, c363, c364, c365, c366, c367, c368, c369, c370, c371, c372, c373, c374, c375, c376, c377, c378, c379, c380, c381, c382, c383, c384, c385, c386, c387, c388, c389, c390, c391, c392, c393, c394, c395, c396, c397, c398, c399, c400, c401, c402, c403, c404, c405, c406, c407, c408, c409, c410, c411, c412, c413, c414, c415, c416, c417, c418, c419, c420, c421, c422, c423, c424, c425, c426, c427, c428, c429, c430, c431, c432, c433, c434, c435, c436, c437, c438, c439, c440, c441, c442, c443, c444, c445, c446, c447, c448, c449, c450, c451, c452, c453, c454, c455, c456, c457, c458, c459, c460, c461, c462, c463, c464, c465, c466, c467, c468, c469, c470, c471, c472, c473, c474, c475, c476, c477, c478, c479, c480, c481, c482, c483, c484, c485, c486, c487, c488, c489, c490, c491, c492, 
c493, c494, c495, c496, c497, c498, c499, c500, c501, c502, c503, c504, c505, c506, c507, c508, c509, c510, c511, c512, c513, c514, c515, c516, c517, c518, c519, c520) from ts_5798.meters;") + tdSql.checkRows(1) + tdSql.query("select first(c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14, c15, c16, c17, c18, c19, c20, c21, c22, c23, c24, c25, c26, c27, c28, c29, c30, c31, c32, c33, c34, c35, c36, c37, c38, c39, c40, c41, c42, c43, c44, c45, c46, c47, c48, c49, c50, c51, c52, c53, c54, c55, c56, c57, c58, c59, c60, c61, c62, c63, c64, c65, c66, c67, c68, c69, c70, c71, c72, c73, c74, c75, c76, c77, c78, c79, c80, c81, c82, c83, c84, c85, c86, c87, c88, c89, c90, c91, c92, c93, c94, c95, c96, c97, c98, c99, c100, c101, c102, c103, c104, c105, c106, c107, c108, c109, c110, c111, c112, c113, c114, c115, c116, c117, c118, c119, c120, c121, c122, c123, c124, c125, c126, c127, c128, c129, c130, c131, c132, c133, c134, c135, c136, c137, c138, c139, c140, c141, c142, c143, c144, c145, c146, c147, c148, c149, c150, c151, c152, c153, c154, c155, c156, c157, c158, c159, c160, c161, c162, c163, c164, c165, c166, c167, c168, c169, c170, c171, c172, c173, c174, c175, c176, c177, c178, c179, c180, c181, c182, c183, c184, c185, c186, c187, c188, c189, c190, c191, c192, c193, c194, c195, c196, c197, c198, c199, c200, c201, c202, c203, c204, c205, c206, c207, c208, c209, c210, c211, c212, c213, c214, c215, c216, c217, c218, c219, c220, c221, c222, c223, c224, c225, c226, c227, c228, c229, c230, c231, c232, c233, c234, c235, c236, c237, c238, c239, c240, c241, c242, c243, c244, c245, c246, c247, c248, c249, c250, c251, c252, c253, c254, c255, c256, c257, c258, c259, c260, c261, c262, c263, c264, c265, c266, c267, c268, c269, c270, c271, c272, c273, c274, c275, c276, c277, c278, c279, c280, c281, c282, c283, c284, c285, c286, c287, c288, c289, c290, c291, c292, c293, c294, c295, c296, c297, c298, c299, c300, c301, c302, c303, c304, c305, c306, c307, c308, c309, c310, c311, c312, c313, c314, c315, c316, c317, c318, c319, c320, c321, c322, c323, c324, c325, c326, c327, c328, c329, c330, c331, c332, c333, c334, c335, c336, c337, c338, c339, c340, c341, c342, c343, c344, c345, c346, c347, c348, c349, c350, c351, c352, c353, c354, c355, c356, c357, c358, c359, c360, c361, c362, c363, c364, c365, c366, c367, c368, c369, c370, c371, c372, c373, c374, c375, c376, c377, c378, c379, c380, c381, c382, c383, c384, c385, c386, c387, c388, c389, c390, c391, c392, c393, c394, c395, c396, c397, c398, c399, c400, c401, c402, c403, c404, c405, c406, c407, c408, c409, c410, c411, c412, c413, c414, c415, c416, c417, c418, c419, c420, c421, c422, c423, c424, c425, c426, c427, c428, c429, c430, c431, c432, c433, c434, c435, c436, c437, c438, c439, c440, c441, c442, c443, c444, c445, c446, c447, c448, c449, c450, c451, c452, c453, c454, c455, c456, c457, c458, c459, c460, c461, c462, c463, c464, c465, c466, c467, c468, c469, c470, c471, c472, c473, c474, c475, c476, c477, c478, c479, c480, c481, c482, c483, c484, c485, c486, c487, c488, c489, c490, c491, c492, c493, c494, c495, c496, c497, c498, c499, c500, c501, c502, c503, c504, c505, c506, c507, c508, c509, c510, c511, c512, c513, c514, c515, c516, c517, c518, c519, c520) from ts_5798.meters;") + tdSql.checkRows(1) + tdSql.query("select last(c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14, c15, c16, c17, c18, c19, c20, c21, c22, c23, c24, c25, c26, c27, c28, c29, c30, c31, c32, c33, c34, c35, c36, c37, c38, c39, c40, c41, c42, c43, c44, c45, c46, c47, c48, 
c49, c50, c51, c52, c53, c54, c55, c56, c57, c58, c59, c60, c61, c62, c63, c64, c65, c66, c67, c68, c69, c70, c71, c72, c73, c74, c75, c76, c77, c78, c79, c80, c81, c82, c83, c84, c85, c86, c87, c88, c89, c90, c91, c92, c93, c94, c95, c96, c97, c98, c99, c100, c101, c102, c103, c104, c105, c106, c107, c108, c109, c110, c111, c112, c113, c114, c115, c116, c117, c118, c119, c120, c121, c122, c123, c124, c125, c126, c127, c128, c129, c130, c131, c132, c133, c134, c135, c136, c137, c138, c139, c140, c141, c142, c143, c144, c145, c146, c147, c148, c149, c150, c151, c152, c153, c154, c155, c156, c157, c158, c159, c160, c161, c162, c163, c164, c165, c166, c167, c168, c169, c170, c171, c172, c173, c174, c175, c176, c177, c178, c179, c180, c181, c182, c183, c184, c185, c186, c187, c188, c189, c190, c191, c192, c193, c194, c195, c196, c197, c198, c199, c200, c201, c202, c203, c204, c205, c206, c207, c208, c209, c210, c211, c212, c213, c214, c215, c216, c217, c218, c219, c220, c221, c222, c223, c224, c225, c226, c227, c228, c229, c230, c231, c232, c233, c234, c235, c236, c237, c238, c239, c240, c241, c242, c243, c244, c245, c246, c247, c248, c249, c250, c251, c252, c253, c254, c255, c256, c257, c258, c259, c260, c261, c262, c263, c264, c265, c266, c267, c268, c269, c270, c271, c272, c273, c274, c275, c276, c277, c278, c279, c280, c281, c282, c283, c284, c285, c286, c287, c288, c289, c290, c291, c292, c293, c294, c295, c296, c297, c298, c299, c300, c301, c302, c303, c304, c305, c306, c307, c308, c309, c310, c311, c312, c313, c314, c315, c316, c317, c318, c319, c320, c321, c322, c323, c324, c325, c326, c327, c328, c329, c330, c331, c332, c333, c334, c335, c336, c337, c338, c339, c340, c341, c342, c343, c344, c345, c346, c347, c348, c349, c350, c351, c352, c353, c354, c355, c356, c357, c358, c359, c360, c361, c362, c363, c364, c365, c366, c367, c368, c369, c370, c371, c372, c373, c374, c375, c376, c377, c378, c379, c380, c381, c382, c383, c384, c385, c386, c387, c388, c389, c390, c391, c392, c393, c394, c395, c396, c397, c398, c399, c400, c401, c402, c403, c404, c405, c406, c407, c408, c409, c410, c411, c412, c413, c414, c415, c416, c417, c418, c419, c420, c421, c422, c423, c424, c425, c426, c427, c428, c429, c430, c431, c432, c433, c434, c435, c436, c437, c438, c439, c440, c441, c442, c443, c444, c445, c446, c447, c448, c449, c450, c451, c452, c453, c454, c455, c456, c457, c458, c459, c460, c461, c462, c463, c464, c465, c466, c467, c468, c469, c470, c471, c472, c473, c474, c475, c476, c477, c478, c479, c480, c481, c482, c483, c484, c485, c486, c487, c488, c489, c490, c491, c492, c493, c494, c495, c496, c497, c498, c499, c500, c501, c502, c503, c504, c505, c506, c507, c508, c509, c510, c511, c512, c513, c514, c515, c516, c517, c518, c519, c520) from ts_5798.meters;") + tdSql.checkRows(1) + + tdLog.success(f"{__file__} successfully executed") + + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index c9d28e0623..0377ff1641 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -16,6 +16,7 @@ ,,y,army,./pytest.sh python3 ./test.py -f query/function/test_func_elapsed.py ,,y,army,./pytest.sh python3 ./test.py -f query/function/test_function.py ,,y,army,./pytest.sh python3 ./test.py -f query/function/test_selection_function_with_json.py +,,y,army,./pytest.sh python3 ./test.py -f query/function/test_func_paramnum.py ,,y,army,./pytest.sh python3 ./test.py -f 
query/function/test_percentile.py ,,y,army,./pytest.sh python3 ./test.py -f query/function/test_resinfo.py ,,y,army,./pytest.sh python3 ./test.py -f query/function/test_interp.py From 6244712887e79015240bef5cf21f3398ae88d505 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Mon, 23 Dec 2024 18:12:47 +0800 Subject: [PATCH 25/45] fix:[TD-33272]add test case --- source/dnode/vnode/src/tq/tqOffset.c | 3 +- source/dnode/vnode/test/tqTest.cpp | 4 +- .../7-tmq/tmqVnodeSplit-ntb-select.py | 192 ++++++++++++++++++ 3 files changed, 195 insertions(+), 4 deletions(-) create mode 100644 tests/system-test/7-tmq/tmqVnodeSplit-ntb-select.py diff --git a/source/dnode/vnode/src/tq/tqOffset.c b/source/dnode/vnode/src/tq/tqOffset.c index 57a901a2e1..a131f30a04 100644 --- a/source/dnode/vnode/src/tq/tqOffset.c +++ b/source/dnode/vnode/src/tq/tqOffset.c @@ -26,8 +26,7 @@ int32_t tqBuildFName(char** data, const char* path, char* name) { int32_t len = strlen(path) + strlen(name) + 2; fname = taosMemoryCalloc(1, len); TSDB_CHECK_NULL(fname, code, lino, END, terrno); - code = tsnprintf(fname, len, "%s%s%s", path, TD_DIRSEP, name); - TSDB_CHECK_CODE(code, lino, END); + (void)tsnprintf(fname, len, "%s%s%s", path, TD_DIRSEP, name); *data = fname; fname = NULL; diff --git a/source/dnode/vnode/test/tqTest.cpp b/source/dnode/vnode/test/tqTest.cpp index 1b5fe4fdcd..d757aa6d43 100644 --- a/source/dnode/vnode/test/tqTest.cpp +++ b/source/dnode/vnode/test/tqTest.cpp @@ -37,11 +37,11 @@ int main(int argc, char **argv) { return RUN_ALL_TESTS(); } -STqOffset offset = {.subKey = "testtest", .val = {.type = TMQ_OFFSET__LOG, .version = 8923}}; - void tqWriteOffset() { TdFilePtr pFile = taosOpenFile(TQ_OFFSET_NAME, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_APPEND); + STqOffset offset = {.val = {.type = TMQ_OFFSET__LOG, .version = 8923}}; + strcpy(offset.subKey, "testtest"); int32_t bodyLen; int32_t code; tEncodeSize(tEncodeSTqOffset, &offset, bodyLen, code); diff --git a/tests/system-test/7-tmq/tmqVnodeSplit-ntb-select.py b/tests/system-test/7-tmq/tmqVnodeSplit-ntb-select.py new file mode 100644 index 0000000000..8f8eb7d4ea --- /dev/null +++ b/tests/system-test/7-tmq/tmqVnodeSplit-ntb-select.py @@ -0,0 +1,192 @@ + +import taos +import sys +import time +import socket +import os +import threading +import math + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.common import * +from util.cluster import * +sys.path.append("./7-tmq") +from tmqCommon import * + +from util.cluster import * +sys.path.append("./6-cluster") +from clusterCommonCreate import * +from clusterCommonCheck import clusterComCheck + + + +class TDTestCase: + def __init__(self): + self.vgroups = 1 + self.ctbNum = 10 + self.rowsPerTbl = 1000 + + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor(), True) + + def getDataPath(self): + selfPath = tdCom.getBuildPath() + + return selfPath + '/../sim/dnode%d/data/vnode/vnode%d/wal/*'; + + def prepareTestEnv(self): + tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 10, + 'rowsPerTbl': 1000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 120, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 0} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + tdCom.drop_all_db() + tmqCom.initConsumerTable() + tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], wal_retention_period=36000,vgroups=paraDict["vgroups"],replica=self.replicaVar) + tdLog.info("create stb") + tdSql.query("create table dbt.t(ts timestamp, v int)") + tdSql.query("insert into dbt.t values('2022-01-01 00:00:00.000', 0)") + tdSql.query("insert into dbt.t values('2022-01-01 00:00:02.000', 0)") + tdSql.query("insert into dbt.t values('2022-01-01 00:00:03.000', 0)") + return + + def restartAndRemoveWal(self, deleteWal): + tdDnodes = cluster.dnodes + tdSql.query("select * from information_schema.ins_vnodes") + for result in tdSql.queryResult: + if result[2] == 'dbt': + tdLog.debug("dnode is %d"%(result[0])) + dnodeId = result[0] + vnodeId = result[1] + + tdDnodes[dnodeId - 1].stoptaosd() + time.sleep(1) + dataPath = self.getDataPath() + dataPath = dataPath%(dnodeId,vnodeId) + tdLog.debug("dataPath:%s"%dataPath) + if deleteWal: + if os.system('rm -rf ' + dataPath) != 0: + tdLog.exit("rm error") + + tdDnodes[dnodeId - 1].starttaosd() + time.sleep(1) + break + tdLog.debug("restart dnode ok") + + def splitVgroups(self): + tdSql.query("select * from information_schema.ins_vnodes") + vnodeId = 0 + for result in tdSql.queryResult: + if result[2] == 'dbt': + vnodeId = result[1] + tdLog.debug("vnode is %d"%(vnodeId)) + break + splitSql = "split vgroup %d" %(vnodeId) + tdLog.debug("splitSql:%s"%(splitSql)) + tdSql.query(splitSql) + tdLog.debug("splitSql ok") + + def tmqCase1(self, deleteWal=False): + tdLog.printNoPrefix("======== test case 1: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb1', + 'ctbStartIdx': 0, + 'ctbNum': 10, + 'rowsPerTbl': 1000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 2, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 0} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + topicNameList = ['topic1'] + # expectRowsList = [] + tmqCom.initConsumerTable() + + tdLog.info("create topics from ntb with filter") + queryString = "select * from %s.t"%(paraDict['dbName']) + sqlString = "create topic %s as %s" %(topicNameList[0], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + + # init consume info, and start tmq_sim, then check consume result + tdLog.info("insert consume info to consume processor") + consumerId = 0 + expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2 + topicList = 
topicNameList[0] + ifcheckdata = 1 + ifManualCommit = 1 + keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:200, auto.offset.reset:earliest' + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + + tmqCom.getStartConsumeNotifyFromTmqsim() + tmqCom.getStartCommitNotifyFromTmqsim() + + #restart dnode & remove wal + self.restartAndRemoveWal(deleteWal) + + # split vgroup + self.splitVgroups() + + clusterComCheck.check_vgroups_status(vgroup_numbers=2,db_replica=self.replicaVar,db_name="dbt",count_number=240) + + time.sleep(3) + for i in range(len(topicNameList)): + tdSql.query("drop topic %s"%topicNameList[i]) + tdLog.printNoPrefix("======== test case 1 end ...... ") + + def run(self): + self.prepareTestEnv() + self.tmqCase1(True) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) From 4adf24b466bba5999ced38929f92397eee7df96a Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao@163.com> Date: Mon, 23 Dec 2024 18:25:25 +0800 Subject: [PATCH 26/45] ci(stream):add stream unit test --- source/libs/stream/inc/streamInt.h | 2 + source/libs/stream/src/streamCheckpoint.c | 3 +- .../libs/stream/test/streamCheckPointTest.cpp | 125 ++++++++++++++++++ 3 files changed, 128 insertions(+), 2 deletions(-) diff --git a/source/libs/stream/inc/streamInt.h b/source/libs/stream/inc/streamInt.h index 41ac0117f3..7af64c041d 100644 --- a/source/libs/stream/inc/streamInt.h +++ b/source/libs/stream/inc/streamInt.h @@ -252,6 +252,8 @@ void chkptFailedByRetrieveReqToSource(SStreamTask* pTask, int64_t checkpointId); int32_t uploadCheckpointData(SStreamTask* pTask, int64_t checkpointId, int64_t dbRefId, ECHECKPOINT_BACKUP_TYPE type); int32_t chkptTriggerRecvMonitorHelper(SStreamTask* pTask, void* param, SArray** ppNotSendList); +int32_t downloadCheckpointByNameS3(const char* id, const char* fname, const char* dstName); +int32_t uploadCheckpointToS3(const char* id, const char* path); #ifdef __cplusplus } diff --git a/source/libs/stream/src/streamCheckpoint.c b/source/libs/stream/src/streamCheckpoint.c index 0ec66cd2ce..ebde9fe50e 100644 --- a/source/libs/stream/src/streamCheckpoint.c +++ b/source/libs/stream/src/streamCheckpoint.c @@ -24,7 +24,6 @@ static int32_t streamTaskUploadCheckpoint(const char* id, const char* path, int6 #ifdef BUILD_NO_CALL static int32_t deleteCheckpoint(const char* id); #endif -static int32_t downloadCheckpointByNameS3(const char* id, const char* fname, const char* dstName); static int32_t continueDispatchCheckpointTriggerBlock(SStreamDataBlock* pBlock, SStreamTask* pTask); static int32_t appendCheckpointIntoInputQ(SStreamTask* pTask, int32_t checkpointType, int64_t checkpointId, int32_t transId, int32_t srcTaskId); @@ -1355,7 +1354,7 @@ void streamTaskSetTriggerDispatchConfirmed(SStreamTask* pTask, int32_t vgId) { } } -static int32_t uploadCheckpointToS3(const char* id, const char* path) { +int32_t uploadCheckpointToS3(const char* id, const char* path) { int32_t code = 0; int32_t nBytes = 0; /* diff --git a/source/libs/stream/test/streamCheckPointTest.cpp b/source/libs/stream/test/streamCheckPointTest.cpp index 
80dd3ec142..17d8b55560 100644 --- a/source/libs/stream/test/streamCheckPointTest.cpp +++ b/source/libs/stream/test/streamCheckPointTest.cpp @@ -1,6 +1,7 @@ #include #include "tstream.h" #include "streamInt.h" +#include "tcs.h" #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wwrite-strings" @@ -217,6 +218,10 @@ TEST(StreamTaskAlreadySendTriggerTest, AlreadySendTrigger) { taosArrayDestroy(array); } +int32_t sendReq1111(const SEpSet *pEpSet, SRpcMsg *pMsg) { + return TSDB_CODE_SUCCESS; +} + TEST(ChkptTriggerRecvMonitorHelperTest, chkptTriggerRecvMonitorHelper) { SStreamTask* pTask = NULL; int64_t uid = 2222222222222; @@ -239,6 +244,11 @@ TEST(ChkptTriggerRecvMonitorHelperTest, chkptTriggerRecvMonitorHelper) { pTask->chkInfo.pActiveInfo->transId = 4561111; pTask->chkInfo.startTs = 11111; + SStreamTask upTask; + upTask = *pTask; + streamTaskSetUpstreamInfo(pTask, &upTask); + + streamTaskSetStatusReady(pTask); code = streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_GEN_CHECKPOINT); ASSERT_EQ(code, TSDB_CODE_SUCCESS); @@ -254,7 +264,21 @@ TEST(ChkptTriggerRecvMonitorHelperTest, chkptTriggerRecvMonitorHelper) { taosArrayPush(pTask->chkInfo.pActiveInfo->pDispatchTriggerList, &triggerInfo); + STaskCheckpointReadyInfo readyInfo; + readyInfo.upstreamNodeId = 789111; + void* pBuf = rpcMallocCont(sizeof(SMsgHead) + 1); + + initRpcMsg(&readyInfo.msg, 0, pBuf, sizeof(SMsgHead) + 1); + taosArrayPush(pTask->chkInfo.pActiveInfo->pReadyMsgList, &readyInfo); + + pTask->chkInfo.pActiveInfo->dispatchTrigger = true; + + SMsgCb msgCb = {0}; + msgCb.sendReqFp = sendReq1111; + msgCb.mgmt = (SMgmtWrapper*)(&msgCb); // hack + tmsgSetDefault(&msgCb); + SArray* array1 = NULL; code = chkptTriggerRecvMonitorHelper(pTask, NULL, &array1); EXPECT_EQ(code, TSDB_CODE_SUCCESS); @@ -268,3 +292,104 @@ TEST(ChkptTriggerRecvMonitorHelperTest, chkptTriggerRecvMonitorHelper) { taosArrayDestroy(array); taosArrayDestroy(array1); } + +TEST(StreamTaskSendCheckpointTriggerMsgTest, SendCheckpointTriggerMsgSuccessTest) { + SStreamTask* pTask = NULL; + int64_t uid = 2222222222222; + SArray* array = taosArrayInit(4, POINTER_BYTES); + int32_t code = tNewStreamTask(uid, TASK_LEVEL__SINK, NULL, false, 0, 0, array, + false, 1, &pTask); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + + initTaskLock(pTask); + + const char *path = "/tmp/SendCheckpointTriggerMsgSuccessTest/stream"; + code = streamMetaOpen((path), NULL, NULL, NULL, 0, 0, NULL, &pTask->pMeta); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + + code = streamTaskCreateActiveChkptInfo(&pTask->chkInfo.pActiveInfo); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + + SRpcHandleInfo rpcInfo; + + int32_t ret = streamTaskSendCheckpointTriggerMsg(pTask, 123, 456, &rpcInfo, code); + + EXPECT_EQ(ret, TSDB_CODE_SUCCESS); +} + +TEST(streamTaskBuildCheckpointTest, streamTaskBuildCheckpointFnTest) { + SStreamTask* pTask = NULL; + int64_t uid = 2222222222222; + SArray* array = taosArrayInit(4, POINTER_BYTES); + int32_t code = tNewStreamTask(uid, TASK_LEVEL__SINK, NULL, false, 0, 0, array, + false, 1, &pTask); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + + initTaskLock(pTask); + + const char *path = "/tmp/streamTaskBuildCheckpoinTest/stream"; + code = streamMetaOpen((path), NULL, NULL, NULL, 0, 0, NULL, &pTask->pMeta); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + + SStreamState *pState = streamStateOpen((char *)path, pTask, 0, 0); + ASSERT(pState != NULL); + + pTask->pBackend = pState->pTdbState->pOwner->pBackend; + + code = streamTaskCreateActiveChkptInfo(&pTask->chkInfo.pActiveInfo); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); 
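/*
 * Note on the next two statements (a hedged reading, not spelled out by the patch itself):
 * filling the global tsSnodeAddress (declared in tglobal.h, which this test includes) with
 * "localhost" appears to route the checkpoint upload toward the remote snode/rsync backup
 * path; with no snode actually reachable from a unit test, streamTaskBuildCheckpoint() is
 * expected to fail, which is what the EXPECT_NE(ret, TSDB_CODE_SUCCESS) assertion below
 * checks. Network sends elsewhere in this file stay inert because the tests install a
 * success-only transport through SMsgCb.sendReqFp / tmsgSetDefault() (see sendReq1111() above).
 */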
+ + char a[] = "localhost"; + memcpy(tsSnodeAddress, a, sizeof(a)); + + int32_t ret = streamTaskBuildCheckpoint(pTask); + + EXPECT_NE(ret, TSDB_CODE_SUCCESS); +} + +int32_t s3GetObjectToFileTest(const char *object_name, const char *fileName) { + return TSDB_CODE_SUCCESS; +} + +TEST(sstreamTaskGetTriggerRecvStatusTest, streamTaskGetTriggerRecvStatusFnTest) { + SStreamTask* pTask = NULL; + int64_t uid = 2222222222222; + SArray* array = taosArrayInit(4, POINTER_BYTES); + int32_t code = tNewStreamTask(uid, TASK_LEVEL__SINK, NULL, false, 0, 0, array, + false, 1, &pTask); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + + initTaskLock(pTask); + + SStreamTask upTask; + upTask = *pTask; + code = streamTaskSetUpstreamInfo(pTask, &upTask); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + + code = streamTaskSetUpstreamInfo(pTask, &upTask); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + + code = streamTaskCreateActiveChkptInfo(&pTask->chkInfo.pActiveInfo); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + + int32_t recv = 0; + int32_t total = 0; + pTask->info.taskLevel = TASK_LEVEL__SOURCE; + streamTaskGetTriggerRecvStatus(pTask, &recv, &total); + EXPECT_EQ(total, 1); + + pTask->info.taskLevel = TASK_LEVEL__AGG; + streamTaskGetTriggerRecvStatus(pTask, &recv, &total); + EXPECT_EQ(total, 2); + + code = streamTaskDownloadCheckpointData("123", "/root/download", 123); + EXPECT_NE(code, TSDB_CODE_SUCCESS); + + tcsInit(); + + code = uploadCheckpointToS3("123", "/tmp/backend5/stream"); + EXPECT_EQ(code, TSDB_CODE_SUCCESS); + + code = downloadCheckpointByNameS3("123", "/root/download", "123"); + EXPECT_EQ(code, TSDB_CODE_SUCCESS); +} From 0452c21ff600926e6936861444bffbe25c24ebc3 Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao@163.com> Date: Mon, 23 Dec 2024 19:10:10 +0800 Subject: [PATCH 27/45] ci(stream):add stream unit test --- source/libs/stream/test/streamCheckPointTest.cpp | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/source/libs/stream/test/streamCheckPointTest.cpp b/source/libs/stream/test/streamCheckPointTest.cpp index 17d8b55560..c8297d56b7 100644 --- a/source/libs/stream/test/streamCheckPointTest.cpp +++ b/source/libs/stream/test/streamCheckPointTest.cpp @@ -2,6 +2,7 @@ #include "tstream.h" #include "streamInt.h" #include "tcs.h" +#include "tglobal.h" #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wwrite-strings" @@ -386,10 +387,12 @@ TEST(sstreamTaskGetTriggerRecvStatusTest, streamTaskGetTriggerRecvStatusFnTest) EXPECT_NE(code, TSDB_CODE_SUCCESS); tcsInit(); + extern int8_t tsS3EpNum; + tsS3EpNum = 1; code = uploadCheckpointToS3("123", "/tmp/backend5/stream"); EXPECT_EQ(code, TSDB_CODE_SUCCESS); - code = downloadCheckpointByNameS3("123", "/root/download", "123"); - EXPECT_EQ(code, TSDB_CODE_SUCCESS); + code = downloadCheckpointByNameS3("123", "/root/download", ""); + EXPECT_NE(code, TSDB_CODE_OUT_OF_RANGE); } From 221d1553836517f3e61a18adc96a433bcdb24797 Mon Sep 17 00:00:00 2001 From: haoranchen Date: Mon, 23 Dec 2024 19:17:56 +0800 Subject: [PATCH 28/45] enh: ignore some tanalytics files in Jenkinsfile2 --- Jenkinsfile2 | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/Jenkinsfile2 b/Jenkinsfile2 index 6fa3483099..7987164ec0 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -7,6 +7,8 @@ file_zh_changed = '' file_en_changed = '' file_no_doc_changed = '1' file_only_tdgpt_change_except = '1' +tdgpt_file = "forecastoperator.c\\|anomalywindowoperator.c\\|tanalytics.h\\|tanalytics.c\\|tdgpt_cases.task" + def abortPreviousBuilds() { def currentJobName = env.JOB_NAME def 
currentBuildNumber = env.BUILD_NUMBER.toInteger() @@ -78,7 +80,7 @@ def check_docs(){ file_only_tdgpt_change_except = sh ( script: ''' cd ${WKC} - git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${CHANGE_TARGET}`|grep -v "^docs/en/"|grep -v "^docs/zh/"|grep -v ".md$" | grep -v "forecastoperator.c\\|anomalywindowoperator.c\\|tanalytics.h\\|tanalytics.c" |grep -v "tsim/analytics" |grep -v "tdgpt_cases.task" || : + git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${CHANGE_TARGET}`|grep -v "^docs/en/"|grep -v "^docs/zh/"|grep -v ".md$" | grep -v ${tdgpt_file} |grep -v "tsim/analytics" |grep -v "tdgpt_cases.task" || : ''', returnStdout: true ).trim() @@ -570,7 +572,7 @@ pipeline { cd ${WKC}/tests/parallel_test ./run_scan_container.sh -d ${WKDIR} -b ${BRANCH_NAME}_${BUILD_ID} -f ${WKDIR}/tmp/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt ''' + extra_param + ''' ''' - if ( file_no_doc_changed =~ /forecastoperator.c|anomalywindowoperator.c|tsim\/analytics|tdgpt_cases.task/ ) { + if ( file_no_doc_changed =~ ${tdgpt_file} ) { sh ''' cd ${WKC}/tests/parallel_test export DEFAULT_RETRY_TIME=2 From 4b62a9a5872ab49ed7a5afbf470101d90096bc95 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Mon, 23 Dec 2024 19:30:36 +0800 Subject: [PATCH 29/45] fix:[TD-33272]add test case --- source/dnode/vnode/src/tq/tqSnapshot.c | 2 +- tests/parallel_test/cases.task | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/source/dnode/vnode/src/tq/tqSnapshot.c b/source/dnode/vnode/src/tq/tqSnapshot.c index ddbbba57a0..ce5008e344 100644 --- a/source/dnode/vnode/src/tq/tqSnapshot.c +++ b/source/dnode/vnode/src/tq/tqSnapshot.c @@ -89,7 +89,7 @@ int32_t tqSnapRead(STqSnapReader* pReader, uint8_t** ppData) { TSDB_CHECK_NULL(ppData, code, lino, end, TSDB_CODE_INVALID_MSG); code = tdbTbcNext(pReader->pCur, &pKey, &kLen, &pVal, &vLen); - TSDB_CHECK_CODE(code, lino, end); + TSDB_CHECK_CONDITION(code == 0, code, lino, end, TDB_CODE_SUCCESS); *ppData = taosMemoryMalloc(sizeof(SSnapDataHdr) + vLen); TSDB_CHECK_NULL(*ppData, code, lino, end, terrno); diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index c9d28e0623..1a713bff5f 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -338,6 +338,7 @@ ,,y,system-test,./pytest.sh python3 test.py -f 7-tmq/tmqVnodeSplit-stb-select-duplicatedata.py -N 3 -n 3 ,,y,system-test,./pytest.sh python3 test.py -f 7-tmq/tmqVnodeSplit-stb-select-duplicatedata-false.py -N 3 -n 3 ,,y,system-test,./pytest.sh python3 test.py -f 7-tmq/tmqVnodeSplit-stb-select.py -N 3 -n 3 +,,y,system-test,./pytest.sh python3 test.py -f 7-tmq/tmqVnodeSplit-ntb-select.py -N 3 -n 3 ,,y,system-test,./pytest.sh python3 test.py -f 7-tmq/tmqVnodeSplit-stb-select-false.py -N 3 -n 3 ,,y,system-test,./pytest.sh python3 test.py -f 7-tmq/tmqVnodeSplit-stb.py -N 3 -n 3 ,,y,system-test,./pytest.sh python3 test.py -f 7-tmq/tmqVnodeSplit-stb-false.py -N 3 -n 3 From 0497642545e6b3894e8fc57d8bf340fa6a29bf2f Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao@163.com> Date: Mon, 23 Dec 2024 19:38:13 +0800 Subject: [PATCH 30/45] fix issue --- include/common/tglobal.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/common/tglobal.h b/include/common/tglobal.h index 584c4b5775..501f1cabc1 100644 --- a/include/common/tglobal.h +++ b/include/common/tglobal.h @@ -293,6 +293,7 @@ extern int32_t tsMaxStreamBackendCache; extern int32_t tsPQSortMemThreshold; extern int32_t tsResolveFQDNRetryTime; extern bool tsStreamCoverage; +extern int8_t 
tsS3EpNum; extern bool tsExperimental; // #define NEEDTO_COMPRESSS_MSG(size) (tsCompressMsgSize != -1 && (size) > tsCompressMsgSize) From b0b0c30d393165e41bf84bf2c23955edbc1fb46a Mon Sep 17 00:00:00 2001 From: haoranchen Date: Mon, 23 Dec 2024 20:20:57 +0800 Subject: [PATCH 31/45] Update Jenkinsfile2 --- Jenkinsfile2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile2 b/Jenkinsfile2 index 7987164ec0..8d6851e19c 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -572,7 +572,7 @@ pipeline { cd ${WKC}/tests/parallel_test ./run_scan_container.sh -d ${WKDIR} -b ${BRANCH_NAME}_${BUILD_ID} -f ${WKDIR}/tmp/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt ''' + extra_param + ''' ''' - if ( file_no_doc_changed =~ ${tdgpt_file} ) { + if ( file_no_doc_changed =~ /forecastoperator.c|anomalywindowoperator.c|tsim\/analytics|tdgpt_cases.task|tanalytics/ ) { sh ''' cd ${WKC}/tests/parallel_test export DEFAULT_RETRY_TIME=2 From 1d4eb3b5b9745aa98f178dd6f641083b75e4169a Mon Sep 17 00:00:00 2001 From: haoranchen Date: Mon, 23 Dec 2024 20:40:44 +0800 Subject: [PATCH 32/45] Update Jenkinsfile2 --- Jenkinsfile2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile2 b/Jenkinsfile2 index 8d6851e19c..2763076c5f 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -572,7 +572,7 @@ pipeline { cd ${WKC}/tests/parallel_test ./run_scan_container.sh -d ${WKDIR} -b ${BRANCH_NAME}_${BUILD_ID} -f ${WKDIR}/tmp/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt ''' + extra_param + ''' ''' - if ( file_no_doc_changed =~ /forecastoperator.c|anomalywindowoperator.c|tsim\/analytics|tdgpt_cases.task|tanalytics/ ) { + if ( file_no_doc_changed =~ /forecastoperator.c|anomalywindowoperator.c|tsim\/analytics|tdgpt_cases.task|tanalytics/ ) { sh ''' cd ${WKC}/tests/parallel_test export DEFAULT_RETRY_TIME=2 From bc10b0683a4e624c678cd579036ed850a5a6acfe Mon Sep 17 00:00:00 2001 From: haoranchen Date: Mon, 23 Dec 2024 21:10:59 +0800 Subject: [PATCH 33/45] enh: set file strings as args in Jenkinsfile2 --- Jenkinsfile2 | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Jenkinsfile2 b/Jenkinsfile2 index 2763076c5f..df0892cfa1 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -7,7 +7,7 @@ file_zh_changed = '' file_en_changed = '' file_no_doc_changed = '1' file_only_tdgpt_change_except = '1' -tdgpt_file = "forecastoperator.c\\|anomalywindowoperator.c\\|tanalytics.h\\|tanalytics.c\\|tdgpt_cases.task" +tdgpt_file = "forecastoperator.c\\|anomalywindowoperator.c\\|tanalytics.h\\|tanalytics.c\\|tdgpt_cases.task\\|analytics" def abortPreviousBuilds() { def currentJobName = env.JOB_NAME @@ -80,7 +80,7 @@ def check_docs(){ file_only_tdgpt_change_except = sh ( script: ''' cd ${WKC} - git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${CHANGE_TARGET}`|grep -v "^docs/en/"|grep -v "^docs/zh/"|grep -v ".md$" | grep -v ${tdgpt_file} |grep -v "tsim/analytics" |grep -v "tdgpt_cases.task" || : + git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${CHANGE_TARGET}`|grep -v "^docs/en/"|grep -v "^docs/zh/"|grep -v ".md$" | grep -v ${tdgpt_file} " || : ''', returnStdout: true ).trim() @@ -572,7 +572,7 @@ pipeline { cd ${WKC}/tests/parallel_test ./run_scan_container.sh -d ${WKDIR} -b ${BRANCH_NAME}_${BUILD_ID} -f ${WKDIR}/tmp/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt ''' + extra_param + ''' ''' - if ( file_no_doc_changed =~ /forecastoperator.c|anomalywindowoperator.c|tsim\/analytics|tdgpt_cases.task|tanalytics/ ) { + if ( file_no_doc_changed =~ /${tdgpt_file}/ ) { sh ''' cd 
${WKC}/tests/parallel_test export DEFAULT_RETRY_TIME=2 From f05dc19cff83cd28c1a1de3c94c769f7de513d87 Mon Sep 17 00:00:00 2001 From: haoranchen Date: Mon, 23 Dec 2024 21:14:14 +0800 Subject: [PATCH 34/45] enh: set file strings as args in Jenkinsfile2 --- Jenkinsfile2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile2 b/Jenkinsfile2 index df0892cfa1..dd148dc447 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -80,7 +80,7 @@ def check_docs(){ file_only_tdgpt_change_except = sh ( script: ''' cd ${WKC} - git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${CHANGE_TARGET}`|grep -v "^docs/en/"|grep -v "^docs/zh/"|grep -v ".md$" | grep -v ${tdgpt_file} " || : + git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${CHANGE_TARGET}`|grep -v "^docs/en/"|grep -v "^docs/zh/"|grep -v ".md$" | grep -v ${tdgpt_file} || : ''', returnStdout: true ).trim() From 493a23a4803da53ca7ae5ab61b47ac135ca247eb Mon Sep 17 00:00:00 2001 From: Haolin Wang Date: Tue, 24 Dec 2024 08:09:35 +0800 Subject: [PATCH 35/45] fix: free unallocated memory in tsdbRowClose() --- source/dnode/vnode/src/tsdb/tsdbCache.c | 6 ++---- source/dnode/vnode/src/tsdb/tsdbUtil.c | 4 +++- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbCache.c b/source/dnode/vnode/src/tsdb/tsdbCache.c index 2047b68101..5151ea3958 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCache.c +++ b/source/dnode/vnode/src/tsdb/tsdbCache.c @@ -610,6 +610,7 @@ int32_t tsdbLoadFromImem(SMemTable *imem, int64_t suid, int64_t uid) { int32_t nCol; SArray *ctxArray = pTsdb->rCache.ctxArray; STsdbRowKey tsdbRowKey = {0}; + STSDBRowIter iter = {0}; STbData *pIMem = tsdbGetTbDataFromMemTable(imem, suid, uid); @@ -641,7 +642,6 @@ int32_t tsdbLoadFromImem(SMemTable *imem, int64_t suid, int64_t uid) { tsdbRowGetKey(pMemRow, &tsdbRowKey); - STSDBRowIter iter = {0}; TAOS_CHECK_EXIT(tsdbRowIterOpen(&iter, pMemRow, pTSchema)); int32_t iCol = 0; @@ -685,7 +685,6 @@ int32_t tsdbLoadFromImem(SMemTable *imem, int64_t suid, int64_t uid) { STsdbRowKey tsdbRowKey = {0}; tsdbRowGetKey(pMemRow, &tsdbRowKey); - STSDBRowIter iter = {0}; TAOS_CHECK_EXIT(tsdbRowIterOpen(&iter, pMemRow, pTSchema)); int32_t iCol = 0; @@ -2470,6 +2469,7 @@ static int32_t tsdbCacheGetBatchFromMem(STsdb *pTsdb, tb_uid_t uid, SArray *pLas int numKeys = TARRAY_SIZE(pCidList); MemNextRowIter iter = {0}; SSHashObj *iColHash = NULL; + STSDBRowIter rowIter = {0}; // 1, get from mem, imem filtered with delete info TAOS_CHECK_EXIT(memRowIterOpen(&iter, uid, pTsdb, pTSchema, pr->info.suid, pr->pReadSnap, pr)); @@ -2490,7 +2490,6 @@ static int32_t tsdbCacheGetBatchFromMem(STsdb *pTsdb, tb_uid_t uid, SArray *pLas STsdbRowKey rowKey = {0}; tsdbRowGetKey(pRow, &rowKey); - STSDBRowIter rowIter = {0}; TAOS_CHECK_EXIT(tsdbRowIterOpen(&rowIter, pRow, pTSchema)); int32_t iCol = 0, jCol = 0, jnCol = TARRAY_SIZE(pLastArray); @@ -2564,7 +2563,6 @@ static int32_t tsdbCacheGetBatchFromMem(STsdb *pTsdb, tb_uid_t uid, SArray *pLas STsdbRowKey tsdbRowKey = {0}; tsdbRowGetKey(pRow, &tsdbRowKey); - STSDBRowIter rowIter = {0}; TAOS_CHECK_EXIT(tsdbRowIterOpen(&rowIter, pRow, pTSchema)); iCol = 0; diff --git a/source/dnode/vnode/src/tsdb/tsdbUtil.c b/source/dnode/vnode/src/tsdb/tsdbUtil.c index f807ecf2d6..16f6777765 100644 --- a/source/dnode/vnode/src/tsdb/tsdbUtil.c +++ b/source/dnode/vnode/src/tsdb/tsdbUtil.c @@ -699,9 +699,11 @@ int32_t tsdbRowIterOpen(STSDBRowIter *pIter, TSDBROW *pRow, STSchema *pTSchema) } void tsdbRowClose(STSDBRowIter 
*pIter) { - if (pIter->pRow->type == TSDBROW_ROW_FMT) { + if (pIter->pRow && pIter->pRow->type == TSDBROW_ROW_FMT) { tRowIterClose(&pIter->pIter); } + pIter->pRow = NULL; + pIter->pIter = NULL; } SColVal *tsdbRowIterNext(STSDBRowIter *pIter) { From c1b92bc24fb1e2ca7a73e2b5f89335f0f6a4f330 Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Tue, 24 Dec 2024 08:57:15 +0800 Subject: [PATCH 36/45] fix: stable inner join for single vgroup plan issue --- source/libs/planner/src/planOptimizer.c | 22 ++++++++++++++ tests/script/tsim/join/inner_join.sim | 38 +++++++++++++++++++++++++ tests/script/tsim/join/join.sim | 16 +++++++++++ 3 files changed, 76 insertions(+) diff --git a/source/libs/planner/src/planOptimizer.c b/source/libs/planner/src/planOptimizer.c index 11cf926081..7085c8dc7c 100644 --- a/source/libs/planner/src/planOptimizer.c +++ b/source/libs/planner/src/planOptimizer.c @@ -6060,11 +6060,22 @@ static int32_t stbJoinOptCreateTagScanNode(SLogicNode* pJoin, SNodeList** ppList } SNode* pNode = NULL; + SName* pPrev = NULL; FOREACH(pNode, pList) { code = stbJoinOptRewriteToTagScan(pJoin, pNode); if (code) { break; } + + SScanLogicNode* pScan = (SScanLogicNode*)pNode; + if (pScan->pVgroupList && 1 == pScan->pVgroupList->numOfVgroups) { + if (NULL == pPrev || 0 == strcmp(pPrev->dbname, pScan->tableName.dbname)) { + pPrev = &pScan->tableName; + continue; + } + + pScan->needSplit = true; + } } if (TSDB_CODE_SUCCESS == code) { @@ -6156,6 +6167,7 @@ static int32_t stbJoinOptCreateTableScanNodes(SLogicNode* pJoin, SNodeList** ppL } int32_t i = 0; + SName* pPrev = NULL; SNode* pNode = NULL; FOREACH(pNode, pList) { SScanLogicNode* pScan = (SScanLogicNode*)pNode; @@ -6173,6 +6185,16 @@ static int32_t stbJoinOptCreateTableScanNodes(SLogicNode* pJoin, SNodeList** ppL *(srcScan + i++) = pScan->pVgroupList->numOfVgroups <= 1; pScan->scanType = SCAN_TYPE_TABLE; + + if (pScan->pVgroupList && 1 == pScan->pVgroupList->numOfVgroups) { + if (NULL == pPrev || 0 == strcmp(pPrev->dbname, pScan->tableName.dbname)) { + pPrev = &pScan->tableName; + continue; + } + + pScan->needSplit = true; + *(srcScan + i - 1) = false; + } } *ppList = pList; diff --git a/tests/script/tsim/join/inner_join.sim b/tests/script/tsim/join/inner_join.sim index 7b9209813d..3049865777 100644 --- a/tests/script/tsim/join/inner_join.sim +++ b/tests/script/tsim/join/inner_join.sim @@ -203,3 +203,41 @@ sql select a.ts, b.ts from sta a join sta b on a.ts = b.ts and a.t1 = b.t1; if $rows != 8 then return -1 endi + +sql select * from testb.stb1 a join testb.st2 b where a.ts = b.ts and a.t = b.t; +if $rows != 4 then + return -1 +endi +sql select * from testb.stb1 a join testb1.stb21 b where a.ts = b.ts and a.t = b.t; +if $rows != 4 then + return -1 +endi +sql select * from testb.stb1 b join testb1.stb21 a where a.ts = b.ts and a.t = b.t; +if $rows != 4 then + return -1 +endi +sql select * from testb.stb1 a join testb1.stb21 b where b.ts = a.ts and b.t = a.t; +if $rows != 4 then + return -1 +endi +sql select * from testb.stb1 b join testb1.stb21 a where b.ts = a.ts and b.t = a.t; +if $rows != 4 then + return -1 +endi +sql select * from testb.stb1 b join testb1.stb21 a where a.ts = b.ts and b.t = a.t; +if $rows != 4 then + return -1 +endi +sql select * from testb.stb1 b join testb1.stb21 a where b.ts = a.ts and a.t = b.t; +if $rows != 4 then + return -1 +endi +sql select * from testb.stb1 a, testb1.stb21 b where a.ts = b.ts and a.t = b.t; +if $rows != 4 then + return -1 +endi +sql select * from testb.stb1 a join testb1.stb21 b on a.ts = b.ts and a.t = 
b.t; +if $rows != 4 then + return -1 +endi + diff --git a/tests/script/tsim/join/join.sim b/tests/script/tsim/join/join.sim index e3a04fc9c1..43b5b421ab 100644 --- a/tests/script/tsim/join/join.sim +++ b/tests/script/tsim/join/join.sim @@ -57,6 +57,22 @@ sql insert into ctb22 using st2 tags(2) values('2023-10-16 09:10:12', 110222, 11 sql insert into ctb23 using st2 tags(3) values('2023-10-16 09:10:13', 110223, 1102230); sql insert into ctb24 using st2 tags(4) values('2023-10-16 09:10:14', 110224, 1102240); +sql drop database if exists testb1; +sql create database testb1 vgroups 1 PRECISION 'us'; +sql use testb1; + +sql create table stb21(ts timestamp, f int,g int) tags (t int); +sql insert into ctb11 using stb21 tags(1) values('2023-10-16 09:10:11', 110111, 1101110); +sql insert into ctb12 using stb21 tags(2) values('2023-10-16 09:10:12', 110112, 1101120); +sql insert into ctb13 using stb21 tags(3) values('2023-10-16 09:10:13', 110113, 1101130); +sql insert into ctb14 using stb21 tags(4) values('2023-10-16 09:10:14', 110114, 1101140); + +sql create table st22(ts timestamp, f int, g int) tags (t int); +sql insert into ctb21 using st22 tags(1) values('2023-10-16 09:10:11', 110221, 1102210); +sql insert into ctb22 using st22 tags(2) values('2023-10-16 09:10:12', 110222, 1102220); +sql insert into ctb23 using st22 tags(3) values('2023-10-16 09:10:13', 110223, 1102230); +sql insert into ctb24 using st22 tags(4) values('2023-10-16 09:10:14', 110224, 1102240); + sql drop database if exists testc; sql create database testc vgroups 3; sql use testc; From d3d48d93aa37e8f5484b586afe4d891435db916b Mon Sep 17 00:00:00 2001 From: haoranchen Date: Tue, 24 Dec 2024 09:18:28 +0800 Subject: [PATCH 37/45] revert Jenkinsfile2 --- Jenkinsfile2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile2 b/Jenkinsfile2 index dd148dc447..fc00c5e2dc 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -572,7 +572,7 @@ pipeline { cd ${WKC}/tests/parallel_test ./run_scan_container.sh -d ${WKDIR} -b ${BRANCH_NAME}_${BUILD_ID} -f ${WKDIR}/tmp/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt ''' + extra_param + ''' ''' - if ( file_no_doc_changed =~ /${tdgpt_file}/ ) { + if ( file_no_doc_changed =~ /orecastoperator.c|anomalywindowoperator.c|tanalytics.h|tanalytics.c|tdgpt_cases.task|analytics/ ) { sh ''' cd ${WKC}/tests/parallel_test export DEFAULT_RETRY_TIME=2 From e65a3802a23223b54d252d5dd1a289dded3797d7 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Tue, 24 Dec 2024 09:21:09 +0800 Subject: [PATCH 38/45] fix:[TD-33272]add test case --- source/client/test/smlTest.cpp | 2 +- source/dnode/vnode/src/tq/tqOffset.c | 2 +- source/dnode/vnode/test/CMakeLists.txt | 30 ++++++++++++++------------ source/libs/parser/src/parInsertSml.c | 6 +++--- 4 files changed, 21 insertions(+), 19 deletions(-) diff --git a/source/client/test/smlTest.cpp b/source/client/test/smlTest.cpp index 87be16e467..d64cd9c971 100644 --- a/source/client/test/smlTest.cpp +++ b/source/client/test/smlTest.cpp @@ -276,7 +276,7 @@ TEST(testCase, smlParseCols_Test) { info->dataFormat = false; SSmlLineInfo elements = {0}; info->msgBuf = msgBuf; - ASSERT_EQ(smlInitHandle(NULL), 0); + ASSERT_EQ(smlInitHandle(NULL), TSDB_CODE_INVALID_PARA); const char *data = "st,t=1 cb\\=in=\"pass\\,it " diff --git a/source/dnode/vnode/src/tq/tqOffset.c b/source/dnode/vnode/src/tq/tqOffset.c index a131f30a04..6996f3578c 100644 --- a/source/dnode/vnode/src/tq/tqOffset.c +++ b/source/dnode/vnode/src/tq/tqOffset.c @@ -95,7 +95,7 @@ END: if (code != 0){ tqError("%s failed at 
%d since %s", __func__, lino, tstrerror(code)); } - taosCloseFile(&pFile); + (void)taosCloseFile(&pFile); taosMemoryFree(pMemBuf); tDeleteSTqOffset(pOffset); diff --git a/source/dnode/vnode/test/CMakeLists.txt b/source/dnode/vnode/test/CMakeLists.txt index 826296e99f..ff7e5a2196 100644 --- a/source/dnode/vnode/test/CMakeLists.txt +++ b/source/dnode/vnode/test/CMakeLists.txt @@ -3,23 +3,25 @@ MESSAGE(STATUS "tq unit test") # GoogleTest requires at least C++11 SET(CMAKE_CXX_STANDARD 11) -add_executable(tqTest tqTest.cpp) -target_include_directories(tqTest - PUBLIC - "${CMAKE_CURRENT_SOURCE_DIR}/../inc" -) +IF(NOT TD_WINDOWS) + add_executable(tqTest tqTest.cpp) + target_include_directories(tqTest + PUBLIC + "${CMAKE_CURRENT_SOURCE_DIR}/../inc" + ) -TARGET_LINK_LIBRARIES( - tqTest - PUBLIC os util common vnode gtest_main -) + TARGET_LINK_LIBRARIES( + tqTest + PUBLIC os util common vnode gtest_main + ) -enable_testing() + enable_testing() -add_test( - NAME tq_test - COMMAND tqTest -) + add_test( + NAME tq_test + COMMAND tqTest + ) +ENDIF() # ADD_EXECUTABLE(tsdbSmaTest tsdbSmaTest.cpp) # TARGET_LINK_LIBRARIES( diff --git a/source/libs/parser/src/parInsertSml.c b/source/libs/parser/src/parInsertSml.c index bf86ef718e..676aed4464 100644 --- a/source/libs/parser/src/parInsertSml.c +++ b/source/libs/parser/src/parInsertSml.c @@ -123,7 +123,7 @@ static int32_t smlBuildTagRow(SArray* cols, SBoundColInfo* tags, SSchema* pSchem val.pData = (uint8_t*)kv->value; val.nData = kv->length; } else if (pTagSchema->type == TSDB_DATA_TYPE_NCHAR) { - code = smlMbsToUcs4(kv->value, kv->length, (void**)&val.pData, &val.nData, kv->length * TSDB_NCHAR_SIZE, charsetCxt); + code = smlMbsToUcs4(kv->value, kv->length, (void**)&val.pData, (int32_t*)&val.nData, kv->length * TSDB_NCHAR_SIZE, charsetCxt); TSDB_CHECK_CODE(code, lino, end); } else { (void)memcpy(&val.i64, &(kv->value), kv->length); @@ -316,7 +316,7 @@ int32_t smlBindData(SQuery* query, bool dataFormat, SArray* tags, SArray* colsSc kv->i = convertTimePrecision(kv->i, TSDB_TIME_PRECISION_NANO, pTableMeta->tableInfo.precision); } if (kv->type == TSDB_DATA_TYPE_NCHAR) { - ret = smlMbsToUcs4(kv->value, kv->length, (void**)&pVal->value.pData, &pVal->value.nData, pColSchema->bytes - VARSTR_HEADER_SIZE, charsetCxt); + ret = smlMbsToUcs4(kv->value, kv->length, (void**)&pVal->value.pData, (int32_t*)&pVal->value.nData, pColSchema->bytes - VARSTR_HEADER_SIZE, charsetCxt); TSDB_CHECK_CODE(ret, lino, end); } else if (kv->type == TSDB_DATA_TYPE_BINARY) { pVal->value.nData = kv->length; @@ -345,7 +345,7 @@ int32_t smlBindData(SQuery* query, bool dataFormat, SArray* tags, SArray* colsSc end: if (ret != 0){ uError("%s failed at %d since %s", __func__, lino, tstrerror(ret)); - buildInvalidOperationMsg(&pBuf, tstrerror(ret)); + ret = buildInvalidOperationMsg(&pBuf, tstrerror(ret)); } insDestroyBoundColInfo(&bindTags); tdDestroySVCreateTbReq(pCreateTblReq); From f6bd86272bfb1329754210a7ae35737be866fc47 Mon Sep 17 00:00:00 2001 From: factosea <285808407@qq.com> Date: Tue, 24 Dec 2024 09:42:26 +0800 Subject: [PATCH 39/45] udf case: varchar --- include/libs/function/taosudf.h | 26 ++++++----- source/libs/function/test/change_udf.c | 64 ++++++++++++++++++++++++++ tests/pytest/util/tserror.py | 3 ++ tests/system-test/0-others/udfTest.py | 33 +++++++++++++ 4 files changed, 114 insertions(+), 12 deletions(-) diff --git a/include/libs/function/taosudf.h b/include/libs/function/taosudf.h index 91487e5d1d..fd6b42f61e 100644 --- a/include/libs/function/taosudf.h +++ 
b/include/libs/function/taosudf.h @@ -237,18 +237,20 @@ static FORCE_INLINE int32_t udfColDataSet(SUdfColumn *pColumn, uint32_t currentR (void)memcpy(data->fixLenCol.data + meta->bytes * currentRow, pData, meta->bytes); } else { int32_t dataLen = varDataTLen(pData); - if (meta->type == TSDB_DATA_TYPE_JSON) { - if (*pData == TSDB_DATA_TYPE_NULL) { - dataLen = 0; - } else if (*pData == TSDB_DATA_TYPE_NCHAR) { - dataLen = varDataTLen(pData + sizeof(char)); - } else if (*pData == TSDB_DATA_TYPE_BIGINT || *pData == TSDB_DATA_TYPE_DOUBLE) { - dataLen = sizeof(int64_t); - } else if (*pData == TSDB_DATA_TYPE_BOOL) { - dataLen = sizeof(char); - } - dataLen += sizeof(char); - } + // This is a piece of code to help users implement udf. It is only called during testing. + // Currently, the json type is not supported and will not be called. + // if (meta->type == TSDB_DATA_TYPE_JSON) { + // if (*pData == TSDB_DATA_TYPE_NULL) { + // dataLen = 0; + // } else if (*pData == TSDB_DATA_TYPE_NCHAR) { + // dataLen = varDataTLen(pData + sizeof(char)); + // } else if (*pData == TSDB_DATA_TYPE_BIGINT || *pData == TSDB_DATA_TYPE_DOUBLE) { + // dataLen = sizeof(int64_t); + // } else if (*pData == TSDB_DATA_TYPE_BOOL) { + // dataLen = sizeof(char); + // } + // dataLen += sizeof(char); + // } if (data->varLenCol.payloadAllocLen < data->varLenCol.payloadLen + dataLen) { uint32_t newSize = data->varLenCol.payloadAllocLen; diff --git a/source/libs/function/test/change_udf.c b/source/libs/function/test/change_udf.c index 822c4da892..f623a3194d 100644 --- a/source/libs/function/test/change_udf.c +++ b/source/libs/function/test/change_udf.c @@ -106,3 +106,67 @@ DLL_EXPORT int32_t UDFNAME(SUdfDataBlock *block, SUdfColumn *resultCol) { #endif // ifdef CHANGE_UDF_PROCESS_FAILED } #endif // ifdef CHANGE_UDF_NO_PROCESS + + + + +/********************************************************************************************************************/ +// udf revert functions +/********************************************************************************************************************/ +DLL_EXPORT int32_t udf_reverse_init() { return 0; } + +DLL_EXPORT int32_t udf_reverse_destroy() { return 0; } + +static void reverse_data(char* data, size_t len) { + size_t i, j; + char temp; + for (i = 0, j = len - 1; i < j; i++, j--) { + temp = data[i]; + data[i] = data[j]; + data[j] = temp; + } +} + +DLL_EXPORT int32_t udf_reverse(SUdfDataBlock *block, SUdfColumn *resultCol) { + int32_t code = 0; + SUdfColumnData *resultData = &resultCol->colData; + for (int32_t i = 0; i < block->numOfRows; ++i) { + int j = 0; + for (; j < block->numOfCols; ++j) { + if (udfColDataIsNull(block->udfCols[j], i)) { + code = udfColDataSetNull(resultCol, i); + if (code != 0) { + return code; + } + break; + } else { + int32_t oldLen = udfColDataGetDataLen(block->udfCols[j], i); + char *pOldData = udfColDataGetData(block->udfCols[j], i); + + + char *buff = malloc(sizeof(VarDataLenT) + oldLen); + if (buff == NULL) { + return -1; + } + ((VarDataLenT *)buff)[0] = (VarDataLenT)oldLen; + memcpy(buff, pOldData, oldLen + sizeof(VarDataLenT)); + reverse_data(buff + sizeof(VarDataLenT), oldLen); + code = udfColDataSet(resultCol, i, buff, false); + if (code != 0) { + free(buff); + return code; + } + } + } + } + // to simulate actual processing delay by udf +#ifdef LINUX + usleep(1 * 1000); // usleep takes sleep time in us (1 millionth of a second) +#endif +#ifdef WINDOWS + Sleep(1); +#endif + resultData->numOfRows = block->numOfRows; + return 0; +} + diff --git 
a/tests/pytest/util/tserror.py b/tests/pytest/util/tserror.py index c0f658209c..35d74153c3 100644 --- a/tests/pytest/util/tserror.py +++ b/tests/pytest/util/tserror.py @@ -8,3 +8,6 @@ TSDB_CODE_MND_FUNC_NOT_EXIST = (TAOS_DEF_ERROR_CODE | 0x0374) TSDB_CODE_UDF_FUNC_EXEC_FAILURE = (TAOS_DEF_ERROR_CODE | 0x290A) + + +TSDB_CODE_TSC_INTERNAL_ERROR = (TAOS_DEF_ERROR_CODE | 0x02FF) diff --git a/tests/system-test/0-others/udfTest.py b/tests/system-test/0-others/udfTest.py index 9bc7d4360c..165ac5bc7d 100644 --- a/tests/system-test/0-others/udfTest.py +++ b/tests/system-test/0-others/udfTest.py @@ -728,6 +728,38 @@ class TDTestCase: tdSql.error(f"select {func_name}(num1) from db.tb", TSDB_CODE_MND_FUNC_NOT_EXIST) tdLog.info(f"change udf test finished, using {lib_name}") + def test_change_udf_reverse(self): + tdSql.execute("create database if not exists db duration 100") + tdSql.execute("use db") + + func_name = "udf_reverse" + tdSql.execute(f"create function {func_name} as '%s' outputtype nchar(256)"%self.libchange_udf_normal) + functions = tdSql.getResult("show functions") + for function in functions: + if f"{func_name}" in function[0]: + tdLog.info(f"create {func_name} functions success, using {self.libchange_udf_normal}") + break + + tdSql.query(f"select {func_name}(c8) from db.t1") + tdSql.execute(f"drop function {func_name}") + tdSql.error(f"select {func_name}(num1) from db.tb", TSDB_CODE_MND_FUNC_NOT_EXIST) + + self.test_change_udf_normal("change_udf_normal") + tdSql.execute(f"create function {func_name} as '%s' outputtype varchar(256)"%self.libchange_udf_normal) + functions = tdSql.getResult("show functions") + for function in functions: + if f"{func_name}" in function[0]: + tdLog.info(f"create {func_name} functions success, using {self.libchange_udf_normal}") + break + + tdSql.query(f"select {func_name}(c8) from db.t1 order by ts") + tdSql.checkData(0,0, None) + tdSql.checkData(1,0, "1yranib") + tdSql.checkData(2,0, "2yranib") + tdSql.checkData(3,0, "3yranib") + + + def unexpected_using_test(self): tdSql.execute("use db ") @@ -768,6 +800,7 @@ class TDTestCase: self.unexpected_using_test() self.create_udf_function() + self.test_change_udf_reverse() self.basic_udf_query() self.loop_kill_udfd() tdSql.execute(" drop function udf1 ") From f9976c75ef4cbf3e5ed8fcb136646d56d6abe1de Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Tue, 24 Dec 2024 09:54:05 +0800 Subject: [PATCH 40/45] fix:error rewrite if terrno is not 0 --- source/dnode/vnode/src/vnd/vnodeSync.c | 1 - 1 file changed, 1 deletion(-) diff --git a/source/dnode/vnode/src/vnd/vnodeSync.c b/source/dnode/vnode/src/vnd/vnodeSync.c index 50bedba75d..cea82c13ff 100644 --- a/source/dnode/vnode/src/vnd/vnodeSync.c +++ b/source/dnode/vnode/src/vnd/vnodeSync.c @@ -313,7 +313,6 @@ void vnodeProposeWriteMsg(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) if (code != 0) { if (code != TSDB_CODE_MSG_PREPROCESSED) { vGError("vgId:%d, msg:%p failed to pre-process since %s", vgId, pMsg, tstrerror(code)); - if (terrno != 0) code = terrno; } vnodeHandleProposeError(pVnode, pMsg, code); rpcFreeCont(pMsg->pCont); From 20c1e28d0c56707972f667c1c1bc21f83bc61023 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Tue, 24 Dec 2024 10:08:02 +0800 Subject: [PATCH 41/45] fix:[TD-33272]add test case --- source/dnode/vnode/src/tq/tq.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index 37f3572f65..73052c1e5e 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ 
b/source/dnode/vnode/src/tq/tq.c @@ -151,10 +151,8 @@ void tqClose(STQ* pTq) { taosHashCleanup(pTq->pOffset); taosMemoryFree(pTq->path); tqMetaClose(pTq); - - streamMetaClose(pTq->pStreamMeta); - qDebug("vgId:%d end to close tq", pTq->pStreamMeta != NULL ? pTq->pStreamMeta->vgId : -1); + streamMetaClose(pTq->pStreamMeta); taosMemoryFree(pTq); } From 7e21747c4ff962fd1e4864fe3678514ba5596491 Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao@163.com> Date: Tue, 24 Dec 2024 14:17:37 +0800 Subject: [PATCH 42/45] ci(test):add slice state ut --- source/libs/stream/src/streamSliceState.c | 2 + .../libs/stream/test/streamSliceStateTest.cpp | 75 +++++++++++++++++++ 2 files changed, 77 insertions(+) create mode 100644 source/libs/stream/test/streamSliceStateTest.cpp diff --git a/source/libs/stream/src/streamSliceState.c b/source/libs/stream/src/streamSliceState.c index eefe046878..976f806032 100644 --- a/source/libs/stream/src/streamSliceState.c +++ b/source/libs/stream/src/streamSliceState.c @@ -112,6 +112,7 @@ void clearSearchBuff(SStreamFileState* pFileState) { } } +#ifdef BUILD_NO_CALL int32_t getStateFromRocksdbByCur(SStreamFileState* pFileState, SStreamStateCur* pCur, SWinKey* pResKey, SRowBuffPos** ppPos, int32_t* pVLen, int32_t* pWinCode) { int32_t code = TSDB_CODE_SUCCESS; int32_t lino = 0; @@ -135,6 +136,7 @@ _end: } return code; } +#endif int32_t getHashSortNextRow(SStreamFileState* pFileState, const SWinKey* pKey, SWinKey* pResKey, void** ppVal, int32_t* pVLen, int32_t* pWinCode) { diff --git a/source/libs/stream/test/streamSliceStateTest.cpp b/source/libs/stream/test/streamSliceStateTest.cpp new file mode 100644 index 0000000000..1d62b8ca7b --- /dev/null +++ b/source/libs/stream/test/streamSliceStateTest.cpp @@ -0,0 +1,75 @@ +#include +#include "tstream.h" +#include "streamInt.h" +#include "tcs.h" +#include "tglobal.h" +#include "streamState.h" + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wwrite-strings" +#pragma GCC diagnostic ignored "-Wunused-function" +#pragma GCC diagnostic ignored "-Wunused-variable" +#pragma GCC diagnostic ignored "-Wsign-compare" +#pragma GCC diagnostic ignored "-Wsign-compare" +#pragma GCC diagnostic ignored "-Wformat" +#pragma GCC diagnostic ignored "-Wint-to-pointer-cast" +#pragma GCC diagnostic ignored "-Wpointer-arith" + + +SStreamState *stateCreate2(const char *path) { + SStreamTask *pTask = (SStreamTask *)taosMemoryCalloc(1, sizeof(SStreamTask)); + pTask->ver = 1024; + pTask->id.streamId = 1023; + pTask->id.taskId = 1111111; + SStreamMeta *pMeta = NULL; + + int32_t code = streamMetaOpen((path), NULL, NULL, NULL, 0, 0, NULL, &pMeta); + pTask->pMeta = pMeta; + + SStreamState *p = streamStateOpen((char *)path, pTask, 0, 0); + ASSERT(p != NULL); + return p; +} +void *backendOpen2() { + streamMetaInit(); + const char *path = "/tmp/streamslicestate/"; + SStreamState *p = stateCreate2(path); + ASSERT(p != NULL); + return p; +} + +TSKEY compareTs1(void* pKey) { + SWinKey* pWinKey = (SWinKey*)pKey; + return pWinKey->ts; +} + +TEST(getHashSortNextRowFn, getHashSortNextRowTest) { + void *pState = backendOpen2(); + SStreamFileState *pFileState = NULL; + int32_t code = streamFileStateInit(1024, sizeof(SWinKey), 10, 0, compareTs1, pState, INT64_MAX, "aaa", 123, + STREAM_STATE_BUFF_HASH, &pFileState); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + + SWinKey key1; + key1.groupId = 123; + key1.ts = 456; + char str[] = "abc"; + code = streamStateFillPut_rocksdb((SStreamState*)pState, &key1, str, sizeof(str)); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + + SWinKey 
key2; + key2.groupId = 123; + key2.ts = 460; + code = streamStateFillPut_rocksdb((SStreamState*)pState, &key2, str, sizeof(str)); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + + SWinKey key3; + key3.groupId = 123; + void* pVal = NULL; + int32_t len = 0; + int32_t wincode = 0; + code = getHashSortNextRow(pFileState, &key1, &key3, &pVal, &len, &wincode); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + + ASSERT_EQ(key3.ts, key2.ts); +} From 78b5396f861393e4ecc00876689a88d81b333da4 Mon Sep 17 00:00:00 2001 From: factosea <285808407@qq.com> Date: Tue, 24 Dec 2024 14:45:18 +0800 Subject: [PATCH 43/45] fix: test error --- tests/pytest/util/sql.py | 7 +++---- tests/system-test/0-others/udfTest.py | 2 +- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/tests/pytest/util/sql.py b/tests/pytest/util/sql.py index 46b7e1f795..bb7b8411f9 100644 --- a/tests/pytest/util/sql.py +++ b/tests/pytest/util/sql.py @@ -155,6 +155,9 @@ class TDSql: try: self.cursor.execute(sql) + self.queryResult = self.cursor.fetchall() + self.queryRows = len(self.queryResult) + self.queryCols = len(self.cursor.description) except BaseException as e: tdLog.info("err:%s" % (e)) expectErrNotOccured = False @@ -165,10 +168,6 @@ class TDSql: if expectErrNotOccured: tdLog.exit("%s(%d) failed: sql:%s, expect error not occured" % (caller.filename, caller.lineno, sql)) else: - self.queryRows = 0 - self.queryCols = 0 - self.queryResult = None - if fullMatched: if expectedErrno != None: expectedErrno_rest = expectedErrno & 0x0000ffff diff --git a/tests/system-test/0-others/udfTest.py b/tests/system-test/0-others/udfTest.py index 165ac5bc7d..8134327b41 100644 --- a/tests/system-test/0-others/udfTest.py +++ b/tests/system-test/0-others/udfTest.py @@ -740,7 +740,7 @@ class TDTestCase: tdLog.info(f"create {func_name} functions success, using {self.libchange_udf_normal}") break - tdSql.query(f"select {func_name}(c8) from db.t1") + tdSql.error(f"select {func_name}(c8) from db.t1", TSDB_CODE_TSC_INTERNAL_ERROR) tdSql.execute(f"drop function {func_name}") tdSql.error(f"select {func_name}(num1) from db.tb", TSDB_CODE_MND_FUNC_NOT_EXIST) From 27af7478719fe0627748e8725a2c3498edabf086 Mon Sep 17 00:00:00 2001 From: yingzhao Date: Sun, 22 Dec 2024 21:22:29 +0800 Subject: [PATCH 44/45] docs(grafana): integrate in explorer --- docs/zh/08-operation/05-monitor.md | 44 ++++++++++++++++++ docs/zh/08-operation/pic/grafana-apikey.png | Bin 0 -> 62579 bytes .../zh/08-operation/pic/grafana-dashboard.png | Bin 0 -> 111836 bytes 3 files changed, 44 insertions(+) create mode 100644 docs/zh/08-operation/pic/grafana-apikey.png create mode 100644 docs/zh/08-operation/pic/grafana-dashboard.png diff --git a/docs/zh/08-operation/05-monitor.md b/docs/zh/08-operation/05-monitor.md index abbd54736b..897f813872 100644 --- a/docs/zh/08-operation/05-monitor.md +++ b/docs/zh/08-operation/05-monitor.md @@ -145,3 +145,47 @@ Monitor-related settings in the taosX configuration file (default /etc/taos/taosx.toml) #### Limitations The monitoring-related configuration takes effect only when taosX is running in server mode. + +## Integrating monitoring dashboards into explorer + +explorer supports embedding existing grafana dashboards. + +### Configure grafana + +Edit grafana.ini and modify the following options. Setting root_url may affect how you currently access grafana, but it is required for the explorer integration so that explorer can act as a reverse proxy for the grafana service. + +``` toml +[server] +# If you use reverse proxy and sub path specify full url (with sub path) +root_url = http://ip:3000/grafana +# Serve Grafana from subpath specified in `root_url` setting. By default it is set to `false` for compatibility reasons. +serve_from_sub_path = true + +[security] +# set to true if you want to allow browsers to render Grafana in a <frame>,
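# A hedged completion (an assumption based on stock grafana.ini rather than on this patch):
# explorer renders the grafana dashboard inside its own page, so the embedding switch that the
# comment above introduces typically needs to be enabled as well.
allow_embedding = true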