From bfc279835cd628cac45f1dd014ff30aca9d6c75a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9Chappyguoxy=E2=80=9D?= <“happy_guoxy@163.com”> Date: Mon, 10 Apr 2023 16:52:41 +0800 Subject: [PATCH 01/42] add cancel in fetch cluster --- source/dnode/mnode/impl/src/mndCluster.c | 27 +++++++++++++++--------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/source/dnode/mnode/impl/src/mndCluster.c b/source/dnode/mnode/impl/src/mndCluster.c index 94584dfe58..ea2a5857c6 100644 --- a/source/dnode/mnode/impl/src/mndCluster.c +++ b/source/dnode/mnode/impl/src/mndCluster.c @@ -67,7 +67,7 @@ int32_t mndGetClusterName(SMnode *pMnode, char *clusterName, int32_t len) { return 0; } -static SClusterObj *mndAcquireCluster(SMnode *pMnode) { +static SClusterObj *mndAcquireCluster(SMnode *pMnode, void **ppIter) { SSdb *pSdb = pMnode->pSdb; void *pIter = NULL; @@ -76,23 +76,27 @@ static SClusterObj *mndAcquireCluster(SMnode *pMnode) { pIter = sdbFetch(pSdb, SDB_CLUSTER, pIter, (void **)&pCluster); if (pIter == NULL) break; + *ppIter = pIter; + return pCluster; } return NULL; } -static void mndReleaseCluster(SMnode *pMnode, SClusterObj *pCluster) { +static void mndReleaseCluster(SMnode *pMnode, SClusterObj *pCluster, void *pIter) { SSdb *pSdb = pMnode->pSdb; + sdbCancelFetch(pSdb, pIter); sdbRelease(pSdb, pCluster); } int64_t mndGetClusterId(SMnode *pMnode) { int64_t clusterId = 0; - SClusterObj *pCluster = mndAcquireCluster(pMnode); + void *pIter = NULL; + SClusterObj *pCluster = mndAcquireCluster(pMnode, &pIter); if (pCluster != NULL) { clusterId = pCluster->id; - mndReleaseCluster(pMnode, pCluster); + mndReleaseCluster(pMnode, pCluster, pIter); } return clusterId; @@ -100,10 +104,11 @@ int64_t mndGetClusterId(SMnode *pMnode) { int64_t mndGetClusterCreateTime(SMnode *pMnode) { int64_t createTime = 0; - SClusterObj *pCluster = mndAcquireCluster(pMnode); + void **ppIter = NULL; + SClusterObj *pCluster = mndAcquireCluster(pMnode, ppIter); if (pCluster != NULL) { createTime = 
pCluster->createdTime; - mndReleaseCluster(pMnode, pCluster); + mndReleaseCluster(pMnode, pCluster, *ppIter); } return createTime; @@ -121,10 +126,11 @@ static int32_t mndGetClusterUpTimeImp(SClusterObj *pCluster) { float mndGetClusterUpTime(SMnode *pMnode) { int64_t upTime = 0; - SClusterObj *pCluster = mndAcquireCluster(pMnode); + void *pIter = NULL; + SClusterObj *pCluster = mndAcquireCluster(pMnode, &pIter); if (pCluster != NULL) { upTime = mndGetClusterUpTimeImp(pCluster); - mndReleaseCluster(pMnode, pCluster); + mndReleaseCluster(pMnode, pCluster, pIter); } return upTime / 86400.0f; @@ -321,11 +327,12 @@ static void mndCancelGetNextCluster(SMnode *pMnode, void *pIter) { static int32_t mndProcessUptimeTimer(SRpcMsg *pReq) { SMnode *pMnode = pReq->info.node; SClusterObj clusterObj = {0}; - SClusterObj *pCluster = mndAcquireCluster(pMnode); + void *pIter = NULL; + SClusterObj *pCluster = mndAcquireCluster(pMnode, &pIter); if (pCluster != NULL) { memcpy(&clusterObj, pCluster, sizeof(SClusterObj)); clusterObj.upTime += tsUptimeInterval; - mndReleaseCluster(pMnode, pCluster); + mndReleaseCluster(pMnode, pCluster, pIter); } if (clusterObj.id <= 0) { From adf25d3300547bc31907d4418aeb2ba0f5e2bd0d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9Chappyguoxy=E2=80=9D?= <“happy_guoxy@163.com”> Date: Mon, 10 Apr 2023 19:12:13 +0800 Subject: [PATCH 02/42] null pointer --- source/dnode/mnode/impl/src/mndCluster.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/source/dnode/mnode/impl/src/mndCluster.c b/source/dnode/mnode/impl/src/mndCluster.c index ea2a5857c6..4d05637a2b 100644 --- a/source/dnode/mnode/impl/src/mndCluster.c +++ b/source/dnode/mnode/impl/src/mndCluster.c @@ -104,11 +104,11 @@ int64_t mndGetClusterId(SMnode *pMnode) { int64_t mndGetClusterCreateTime(SMnode *pMnode) { int64_t createTime = 0; - void **ppIter = NULL; - SClusterObj *pCluster = mndAcquireCluster(pMnode, ppIter); + void *pIter = NULL; + SClusterObj *pCluster = 
mndAcquireCluster(pMnode, &pIter); if (pCluster != NULL) { createTime = pCluster->createdTime; - mndReleaseCluster(pMnode, pCluster, *ppIter); + mndReleaseCluster(pMnode, pCluster, pIter); } return createTime; From d163a16f3a1e02128250b1b2327bf7014a952030 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Tue, 11 Apr 2023 15:46:32 +0800 Subject: [PATCH 03/42] fix: fix max/min(tag) random result --- source/libs/executor/src/scanoperator.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 29990f2d06..ea834e90ca 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -484,7 +484,7 @@ int32_t addTagPseudoColumnData(SReadHandle* pHandle, const SExprInfo* pExpr, int int32_t code = 0; // backup the rows - int32_t backupRows = pBlock->info.rows; + int32_t backupRows = (rows == 1) ? rows : pBlock->info.rows; pBlock->info.rows = rows; bool freeReader = false; From bbae7259cf3a8dffca2cfbf8c874eb66afcc95df Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Tue, 11 Apr 2023 15:47:27 +0800 Subject: [PATCH 04/42] add test cases --- tests/system-test/2-query/max.py | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/tests/system-test/2-query/max.py b/tests/system-test/2-query/max.py index b8da02b9a6..ba6ab53fc7 100644 --- a/tests/system-test/2-query/max.py +++ b/tests/system-test/2-query/max.py @@ -20,8 +20,8 @@ class TDTestCase: intData = [] floatData = [] tdSql.execute(f'''create table {dbname}.stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 tinyint unsigned, col6 smallint unsigned, - col7 int unsigned, col8 bigint unsigned, col9 float, col10 double, col11 bool, col12 binary(20), col13 nchar(20)) tags(loc nchar(20))''') - tdSql.execute(f"create table {dbname}.stb_1 using {dbname}.stb tags('beijing')") + col7 int unsigned, col8 bigint unsigned, col9 float, col10 double, col11 
bool, col12 binary(20), col13 nchar(20)) tags(t0 tinyint, t1 float, loc nchar(20))''') + tdSql.execute(f"create table {dbname}.stb_1 using {dbname}.stb tags(5, 5.5, 'beijing')") for i in range(self.rowNum): tdSql.execute(f"insert into {dbname}.stb_1 values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')" % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1)) @@ -55,13 +55,20 @@ class TDTestCase: tdSql.checkData(0, 1, np.max(intData)) tdSql.query(f"select ts, min(col9) from {dbname}.stb") - tdSql.checkRows(1) + tdSql.checkRows(1) tdSql.checkData(0, 1, np.min(floatData)) tdSql.query(f"select ts, min(col9) from {dbname}.stb_1") - tdSql.checkRows(1) + tdSql.checkRows(1) tdSql.checkData(0, 1, np.min(floatData)) + # check tags + tdSql.query(f"select max(t0) from {dbname}.stb") + tdSql.checkData(0,0,5) + + tdSql.query(f"select max(t1) from {dbname}.stb") + tdSql.checkData(0,0,5.5) + def max_check_ntb_base(self, dbname="db"): tdSql.prepare() intData = [] From 9961f824797b926fb7b6e5831e1fc15216274470 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Tue, 11 Apr 2023 16:52:56 +0800 Subject: [PATCH 05/42] fix(udf1): use 1 as luck number to make new gcc happy --- source/libs/function/test/udf1.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/libs/function/test/udf1.c b/source/libs/function/test/udf1.c index 71d30b6755..0dc53f006f 100644 --- a/source/libs/function/test/udf1.c +++ b/source/libs/function/test/udf1.c @@ -31,7 +31,7 @@ DLL_EXPORT int32_t udf1(SUdfDataBlock *block, SUdfColumn *resultCol) { } } if (j == block->numOfCols) { - int32_t luckyNum = 88; + int32_t luckyNum = 1; udfColDataSet(resultCol, i, (char *)&luckyNum, false); } } @@ -43,4 +43,4 @@ DLL_EXPORT int32_t udf1(SUdfDataBlock *block, SUdfColumn *resultCol) { Sleep(1); #endif return 0; -} \ No newline at end of file +} From 0201348bbe1089710c3af53e66029996d11495ac Mon Sep 17 
00:00:00 2001 From: Ganlin Zhao Date: Tue, 11 Apr 2023 15:47:27 +0800 Subject: [PATCH 06/42] add test cases --- source/libs/executor/src/scanoperator.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index ea834e90ca..f0827886a0 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -484,8 +484,8 @@ int32_t addTagPseudoColumnData(SReadHandle* pHandle, const SExprInfo* pExpr, int int32_t code = 0; // backup the rows - int32_t backupRows = (rows == 1) ? rows : pBlock->info.rows; - pBlock->info.rows = rows; + int32_t backupRows = pBlock->info.rows; + pBlock->info.rows = (rows < pBlock->info.rows) ? pBlock->info.rows : rows; bool freeReader = false; STableCachedVal val = {0}; From cb690ee2eaf90d9b2f87fb3604b7581939ca6601 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Tue, 11 Apr 2023 18:46:16 +0800 Subject: [PATCH 07/42] fix(test/udf): use 1 as lucky number instead of 88 --- tests/script/tsim/valgrind/checkUdf.sim | 16 +++--- tests/system-test/0-others/udfTest.py | 50 ++++++++--------- tests/system-test/0-others/udf_cfg2.py | 50 ++++++++--------- tests/system-test/0-others/udf_create.py | 50 ++++++++--------- .../system-test/0-others/udf_restart_taosd.py | 54 +++++++++---------- 5 files changed, 110 insertions(+), 110 deletions(-) diff --git a/tests/script/tsim/valgrind/checkUdf.sim b/tests/script/tsim/valgrind/checkUdf.sim index caf316bd86..b811834211 100644 --- a/tests/script/tsim/valgrind/checkUdf.sim +++ b/tests/script/tsim/valgrind/checkUdf.sim @@ -29,10 +29,10 @@ sql select udf1(f) from t; if $rows != 2 then return -1 endi -if $data00 != 88 then +if $data00 != 1 then return -1 endi -if $data10 != 88 then +if $data10 != 1 then return -1 endi @@ -51,10 +51,10 @@ sql select udf1(f1, f2) from t2; if $rows != 2 then return -1 endi -if $data00 != 88 then +if $data00 != 1 then return -1 endi -if $data10 != 88 then +if 
$data10 != 1 then return -1 endi @@ -72,10 +72,10 @@ print $rows , $data00 , $data10 , $data20 , $data30 if $rows != 4 then return -1 endi -if $data00 != 88 then +if $data00 != 1 then return -1 endi -if $data10 != 88 then +if $data10 != 1 then return -1 endi @@ -114,10 +114,10 @@ print $rows , $data00 , $data01 if $rows != 1 then return -1 endi -if $data00 != 176.000000000 then +if $data00 != 2.000000000 then return -1 endi -if $data01 != 152.420471066 then +if $data01 != 1.732050808 then return -1 endi diff --git a/tests/system-test/0-others/udfTest.py b/tests/system-test/0-others/udfTest.py index 15253df0c4..30451e75b8 100644 --- a/tests/system-test/0-others/udfTest.py +++ b/tests/system-test/0-others/udfTest.py @@ -191,20 +191,20 @@ class TDTestCase: tdSql.checkData(0,0,None) tdSql.checkData(0,1,None) tdSql.checkData(0,2,1) - tdSql.checkData(0,3,88) + tdSql.checkData(0,3,1) tdSql.checkData(0,4,1.000000000) - tdSql.checkData(0,5,88) + tdSql.checkData(0,5,1) tdSql.checkData(0,6,"binary1") - tdSql.checkData(0,7,88) + tdSql.checkData(0,7,1) tdSql.checkData(3,0,3) - tdSql.checkData(3,1,88) + tdSql.checkData(3,1,1) tdSql.checkData(3,2,33333) - tdSql.checkData(3,3,88) + tdSql.checkData(3,3,1) tdSql.checkData(3,4,33.000000000) - tdSql.checkData(3,5,88) + tdSql.checkData(3,5,1) tdSql.checkData(3,6,"binary1") - tdSql.checkData(3,7,88) + tdSql.checkData(3,7,1) tdSql.checkData(11,0,None) tdSql.checkData(11,1,None) @@ -213,7 +213,7 @@ class TDTestCase: tdSql.checkData(11,4,None) tdSql.checkData(11,5,None) tdSql.checkData(11,6,"binary1") - tdSql.checkData(11,7,88) + tdSql.checkData(11,7,1) tdSql.query("select c1 , udf1(c1) ,c2 ,udf1(c2), c3 ,udf1(c3), c4 ,udf1(c4) from stb1 order by c1") tdSql.checkData(0,0,None) @@ -226,13 +226,13 @@ class TDTestCase: tdSql.checkData(0,7,None) tdSql.checkData(20,0,8) - tdSql.checkData(20,1,88) + tdSql.checkData(20,1,1) tdSql.checkData(20,2,88888) - tdSql.checkData(20,3,88) + tdSql.checkData(20,3,1) tdSql.checkData(20,4,888) - 
tdSql.checkData(20,5,88) + tdSql.checkData(20,5,1) tdSql.checkData(20,6,88) - tdSql.checkData(20,7,88) + tdSql.checkData(20,7,1) # aggregate functions @@ -375,14 +375,14 @@ class TDTestCase: tdSql.checkRows(25) tdSql.checkData(0,0,None) tdSql.checkData(0,1,None) - tdSql.checkData(1,0,88) + tdSql.checkData(1,0,1) tdSql.checkData(1,1,8) tdSql.query("select abs(udf1(c1)) , abs(ceil(c1)) from ct1 order by ts;") tdSql.checkRows(13) - tdSql.checkData(0,0,88) + tdSql.checkData(0,0,1) tdSql.checkData(0,1,8) - tdSql.checkData(1,0,88) + tdSql.checkData(1,0,1) tdSql.checkData(1,1,7) # bug fix for crash @@ -401,9 +401,9 @@ class TDTestCase: tdSql.query("select c1 ,udf1(c1) , c6 ,udf1(c6) from stb1 where c1 > 8 order by ts") tdSql.checkRows(3) tdSql.checkData(0,0,9) - tdSql.checkData(0,1,88) + tdSql.checkData(0,1,1) tdSql.checkData(0,2,-99.990000000) - tdSql.checkData(0,3,88) + tdSql.checkData(0,3,1) tdSql.query("select sub1.c1, sub2.c2 from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null") tdSql.checkData(0,0,0) @@ -412,20 +412,20 @@ class TDTestCase: tdSql.checkData(1,1,10) tdSql.query("select udf1(sub1.c1), udf1(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null") - tdSql.checkData(0,0,88) - tdSql.checkData(0,1,88) - tdSql.checkData(1,0,88) - tdSql.checkData(1,1,88) + tdSql.checkData(0,0,1) + tdSql.checkData(0,1,1) + tdSql.checkData(1,0,1) + tdSql.checkData(1,1,1) tdSql.query("select sub1.c1 , udf1(sub1.c1), sub2.c2 ,udf1(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null") tdSql.checkData(0,0,0) - tdSql.checkData(0,1,88) + tdSql.checkData(0,1,1) tdSql.checkData(0,2,0) - tdSql.checkData(0,3,88) + tdSql.checkData(0,3,1) tdSql.checkData(1,0,1) - tdSql.checkData(1,1,88) + tdSql.checkData(1,1,1) tdSql.checkData(1,2,10) - tdSql.checkData(1,3,88) + tdSql.checkData(1,3,1) tdSql.query("select udf2(sub1.c1), udf2(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null") tdSql.checkData(0,0,16.881943016) diff --git 
a/tests/system-test/0-others/udf_cfg2.py b/tests/system-test/0-others/udf_cfg2.py index 869cb098e2..f00a0eed69 100644 --- a/tests/system-test/0-others/udf_cfg2.py +++ b/tests/system-test/0-others/udf_cfg2.py @@ -193,20 +193,20 @@ class TDTestCase: tdSql.checkData(0,0,None) tdSql.checkData(0,1,None) tdSql.checkData(0,2,1) - tdSql.checkData(0,3,88) + tdSql.checkData(0,3,1) tdSql.checkData(0,4,1.000000000) - tdSql.checkData(0,5,88) + tdSql.checkData(0,5,1) tdSql.checkData(0,6,"binary1") - tdSql.checkData(0,7,88) + tdSql.checkData(0,7,1) tdSql.checkData(3,0,3) - tdSql.checkData(3,1,88) + tdSql.checkData(3,1,1) tdSql.checkData(3,2,33333) - tdSql.checkData(3,3,88) + tdSql.checkData(3,3,1) tdSql.checkData(3,4,33.000000000) - tdSql.checkData(3,5,88) + tdSql.checkData(3,5,1) tdSql.checkData(3,6,"binary1") - tdSql.checkData(3,7,88) + tdSql.checkData(3,7,1) tdSql.checkData(11,0,None) tdSql.checkData(11,1,None) @@ -215,7 +215,7 @@ class TDTestCase: tdSql.checkData(11,4,None) tdSql.checkData(11,5,None) tdSql.checkData(11,6,"binary1") - tdSql.checkData(11,7,88) + tdSql.checkData(11,7,1) tdSql.query("select c1 , udf1(c1) ,c2 ,udf1(c2), c3 ,udf1(c3), c4 ,udf1(c4) from stb1 order by c1") tdSql.checkData(0,0,None) @@ -228,13 +228,13 @@ class TDTestCase: tdSql.checkData(0,7,None) tdSql.checkData(20,0,8) - tdSql.checkData(20,1,88) + tdSql.checkData(20,1,1) tdSql.checkData(20,2,88888) - tdSql.checkData(20,3,88) + tdSql.checkData(20,3,1) tdSql.checkData(20,4,888) - tdSql.checkData(20,5,88) + tdSql.checkData(20,5,1) tdSql.checkData(20,6,88) - tdSql.checkData(20,7,88) + tdSql.checkData(20,7,1) # aggregate functions @@ -377,14 +377,14 @@ class TDTestCase: tdSql.checkRows(25) tdSql.checkData(0,0,None) tdSql.checkData(0,1,None) - tdSql.checkData(1,0,88) + tdSql.checkData(1,0,1) tdSql.checkData(1,1,8) tdSql.query("select abs(udf1(c1)) , abs(ceil(c1)) from ct1 order by ts;") tdSql.checkRows(13) - tdSql.checkData(0,0,88) + tdSql.checkData(0,0,1) tdSql.checkData(0,1,8) - tdSql.checkData(1,0,88) 
+ tdSql.checkData(1,0,1) tdSql.checkData(1,1,7) # bug fix for crash @@ -403,9 +403,9 @@ class TDTestCase: tdSql.query("select c1 ,udf1(c1) , c6 ,udf1(c6) from stb1 where c1 > 8 order by ts") tdSql.checkRows(3) tdSql.checkData(0,0,9) - tdSql.checkData(0,1,88) + tdSql.checkData(0,1,1) tdSql.checkData(0,2,-99.990000000) - tdSql.checkData(0,3,88) + tdSql.checkData(0,3,1) tdSql.query("select sub1.c1, sub2.c2 from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null") tdSql.checkData(0,0,0) @@ -414,20 +414,20 @@ class TDTestCase: tdSql.checkData(1,1,10) tdSql.query("select udf1(sub1.c1), udf1(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null") - tdSql.checkData(0,0,88) - tdSql.checkData(0,1,88) - tdSql.checkData(1,0,88) - tdSql.checkData(1,1,88) + tdSql.checkData(0,0,1) + tdSql.checkData(0,1,1) + tdSql.checkData(1,0,1) + tdSql.checkData(1,1,1) tdSql.query("select sub1.c1 , udf1(sub1.c1), sub2.c2 ,udf1(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null") tdSql.checkData(0,0,0) - tdSql.checkData(0,1,88) + tdSql.checkData(0,1,1) tdSql.checkData(0,2,0) - tdSql.checkData(0,3,88) + tdSql.checkData(0,3,1) tdSql.checkData(1,0,1) - tdSql.checkData(1,1,88) + tdSql.checkData(1,1,1) tdSql.checkData(1,2,10) - tdSql.checkData(1,3,88) + tdSql.checkData(1,3,1) tdSql.query("select udf2(sub1.c1), udf2(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null") tdSql.checkData(0,0,16.881943016) diff --git a/tests/system-test/0-others/udf_create.py b/tests/system-test/0-others/udf_create.py index 65dad64000..bab685b336 100644 --- a/tests/system-test/0-others/udf_create.py +++ b/tests/system-test/0-others/udf_create.py @@ -193,20 +193,20 @@ class TDTestCase: tdSql.checkData(0,0,None) tdSql.checkData(0,1,None) tdSql.checkData(0,2,1) - tdSql.checkData(0,3,88) + tdSql.checkData(0,3,1) tdSql.checkData(0,4,1.000000000) - tdSql.checkData(0,5,88) + tdSql.checkData(0,5,1) tdSql.checkData(0,6,"binary1") - tdSql.checkData(0,7,88) + 
tdSql.checkData(0,7,1) tdSql.checkData(3,0,3) - tdSql.checkData(3,1,88) + tdSql.checkData(3,1,1) tdSql.checkData(3,2,33333) - tdSql.checkData(3,3,88) + tdSql.checkData(3,3,1) tdSql.checkData(3,4,33.000000000) - tdSql.checkData(3,5,88) + tdSql.checkData(3,5,1) tdSql.checkData(3,6,"binary1") - tdSql.checkData(3,7,88) + tdSql.checkData(3,7,1) tdSql.checkData(11,0,None) tdSql.checkData(11,1,None) @@ -215,7 +215,7 @@ class TDTestCase: tdSql.checkData(11,4,None) tdSql.checkData(11,5,None) tdSql.checkData(11,6,"binary1") - tdSql.checkData(11,7,88) + tdSql.checkData(11,7,1) tdSql.query("select c1 , udf1(c1) ,c2 ,udf1(c2), c3 ,udf1(c3), c4 ,udf1(c4) from stb1 order by c1") tdSql.checkData(0,0,None) @@ -228,13 +228,13 @@ class TDTestCase: tdSql.checkData(0,7,None) tdSql.checkData(20,0,8) - tdSql.checkData(20,1,88) + tdSql.checkData(20,1,1) tdSql.checkData(20,2,88888) - tdSql.checkData(20,3,88) + tdSql.checkData(20,3,1) tdSql.checkData(20,4,888) - tdSql.checkData(20,5,88) + tdSql.checkData(20,5,1) tdSql.checkData(20,6,88) - tdSql.checkData(20,7,88) + tdSql.checkData(20,7,1) # aggregate functions @@ -377,14 +377,14 @@ class TDTestCase: tdSql.checkRows(25) tdSql.checkData(0,0,None) tdSql.checkData(0,1,None) - tdSql.checkData(1,0,88) + tdSql.checkData(1,0,1) tdSql.checkData(1,1,8) tdSql.query("select abs(udf1(c1)) , abs(ceil(c1)) from ct1 order by ts;") tdSql.checkRows(13) - tdSql.checkData(0,0,88) + tdSql.checkData(0,0,1) tdSql.checkData(0,1,8) - tdSql.checkData(1,0,88) + tdSql.checkData(1,0,1) tdSql.checkData(1,1,7) # bug fix for crash @@ -403,9 +403,9 @@ class TDTestCase: tdSql.query("select c1 ,udf1(c1) , c6 ,udf1(c6) from stb1 where c1 > 8 order by ts") tdSql.checkRows(3) tdSql.checkData(0,0,9) - tdSql.checkData(0,1,88) + tdSql.checkData(0,1,1) tdSql.checkData(0,2,-99.990000000) - tdSql.checkData(0,3,88) + tdSql.checkData(0,3,1) tdSql.query("select sub1.c1, sub2.c2 from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null") tdSql.checkData(0,0,0) @@ -414,20 +414,20 @@ 
class TDTestCase: tdSql.checkData(1,1,10) tdSql.query("select udf1(sub1.c1), udf1(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null") - tdSql.checkData(0,0,88) - tdSql.checkData(0,1,88) - tdSql.checkData(1,0,88) - tdSql.checkData(1,1,88) + tdSql.checkData(0,0,1) + tdSql.checkData(0,1,1) + tdSql.checkData(1,0,1) + tdSql.checkData(1,1,1) tdSql.query("select sub1.c1 , udf1(sub1.c1), sub2.c2 ,udf1(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null") tdSql.checkData(0,0,0) - tdSql.checkData(0,1,88) + tdSql.checkData(0,1,1) tdSql.checkData(0,2,0) - tdSql.checkData(0,3,88) + tdSql.checkData(0,3,1) tdSql.checkData(1,0,1) - tdSql.checkData(1,1,88) + tdSql.checkData(1,1,1) tdSql.checkData(1,2,10) - tdSql.checkData(1,3,88) + tdSql.checkData(1,3,1) tdSql.query("select udf2(sub1.c1), udf2(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null") tdSql.checkData(0,0,16.881943016) diff --git a/tests/system-test/0-others/udf_restart_taosd.py b/tests/system-test/0-others/udf_restart_taosd.py index dae707520f..42f0774c71 100644 --- a/tests/system-test/0-others/udf_restart_taosd.py +++ b/tests/system-test/0-others/udf_restart_taosd.py @@ -190,20 +190,20 @@ class TDTestCase: tdSql.checkData(0,0,None) tdSql.checkData(0,1,None) tdSql.checkData(0,2,1) - tdSql.checkData(0,3,88) + tdSql.checkData(0,3,1) tdSql.checkData(0,4,1.000000000) - tdSql.checkData(0,5,88) + tdSql.checkData(0,5,1) tdSql.checkData(0,6,"binary1") - tdSql.checkData(0,7,88) + tdSql.checkData(0,7,1) tdSql.checkData(3,0,3) - tdSql.checkData(3,1,88) + tdSql.checkData(3,1,1) tdSql.checkData(3,2,33333) - tdSql.checkData(3,3,88) + tdSql.checkData(3,3,1) tdSql.checkData(3,4,33.000000000) - tdSql.checkData(3,5,88) + tdSql.checkData(3,5,1) tdSql.checkData(3,6,"binary1") - tdSql.checkData(3,7,88) + tdSql.checkData(3,7,1) tdSql.checkData(11,0,None) tdSql.checkData(11,1,None) @@ -212,7 +212,7 @@ class TDTestCase: tdSql.checkData(11,4,None) tdSql.checkData(11,5,None) 
tdSql.checkData(11,6,"binary1") - tdSql.checkData(11,7,88) + tdSql.checkData(11,7,1) tdSql.query("select c1 , udf1(c1) ,c2 ,udf1(c2), c3 ,udf1(c3), c4 ,udf1(c4) from stb1 order by c1") tdSql.checkData(0,0,None) @@ -225,13 +225,13 @@ class TDTestCase: tdSql.checkData(0,7,None) tdSql.checkData(20,0,8) - tdSql.checkData(20,1,88) + tdSql.checkData(20,1,1) tdSql.checkData(20,2,88888) - tdSql.checkData(20,3,88) + tdSql.checkData(20,3,1) tdSql.checkData(20,4,888) - tdSql.checkData(20,5,88) + tdSql.checkData(20,5,1) tdSql.checkData(20,6,88) - tdSql.checkData(20,7,88) + tdSql.checkData(20,7,1) # aggregate functions @@ -374,14 +374,14 @@ class TDTestCase: tdSql.checkRows(25) tdSql.checkData(0,0,None) tdSql.checkData(0,1,None) - tdSql.checkData(1,0,88) + tdSql.checkData(1,0,1) tdSql.checkData(1,1,8) tdSql.query("select abs(udf1(c1)) , abs(ceil(c1)) from ct1 order by ts;") tdSql.checkRows(13) - tdSql.checkData(0,0,88) + tdSql.checkData(0,0,1) tdSql.checkData(0,1,8) - tdSql.checkData(1,0,88) + tdSql.checkData(1,0,1) tdSql.checkData(1,1,7) # bug fix for crash @@ -400,9 +400,9 @@ class TDTestCase: tdSql.query("select c1 ,udf1(c1) , c6 ,udf1(c6) from stb1 where c1 > 8 order by ts") tdSql.checkRows(3) tdSql.checkData(0,0,9) - tdSql.checkData(0,1,88) + tdSql.checkData(0,1,1) tdSql.checkData(0,2,-99.990000000) - tdSql.checkData(0,3,88) + tdSql.checkData(0,3,1) tdSql.query("select sub1.c1, sub2.c2 from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null") tdSql.checkData(0,0,0) @@ -411,20 +411,20 @@ class TDTestCase: tdSql.checkData(1,1,10) tdSql.query("select udf1(sub1.c1), udf1(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null") - tdSql.checkData(0,0,88) - tdSql.checkData(0,1,88) - tdSql.checkData(1,0,88) - tdSql.checkData(1,1,88) + tdSql.checkData(0,0,1) + tdSql.checkData(0,1,1) + tdSql.checkData(1,0,1) + tdSql.checkData(1,1,1) tdSql.query("select sub1.c1 , udf1(sub1.c1), sub2.c2 ,udf1(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not 
null") tdSql.checkData(0,0,0) - tdSql.checkData(0,1,88) + tdSql.checkData(0,1,1) tdSql.checkData(0,2,0) - tdSql.checkData(0,3,88) + tdSql.checkData(0,3,1) tdSql.checkData(1,0,1) - tdSql.checkData(1,1,88) + tdSql.checkData(1,1,1) tdSql.checkData(1,2,10) - tdSql.checkData(1,3,88) + tdSql.checkData(1,3,1) tdSql.query("select udf2(sub1.c1), udf2(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null") tdSql.checkData(0,0,16.881943016) @@ -468,12 +468,12 @@ class TDTestCase: tdSql.checkData(1,0,1) tdSql.checkData(1,1,1) tdSql.checkData(1,2,1.110000000) - tdSql.checkData(1,3,88) + tdSql.checkData(1,3,1) tdSql.query("select c1,c6,udf1(c1,c6) from stb1 order by ts") tdSql.checkData(1,0,8) tdSql.checkData(1,1,88.880000000) - tdSql.checkData(1,2,88) + tdSql.checkData(1,2,1) tdSql.query("select abs(udf1(c1,c6,c1,c6)) , abs(ceil(c1)) from stb1 where c1 is not null order by ts;") tdSql.checkRows(22) From 3ac5b35f4b2a51c52be2c0d6c046968e4809649b Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Tue, 11 Apr 2023 18:57:11 +0800 Subject: [PATCH 08/42] fix(tsdb/cache): skip schema updating for non ts row --- source/dnode/vnode/src/tsdb/tsdbCache.c | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbCache.c b/source/dnode/vnode/src/tsdb/tsdbCache.c index 6fc8ad8be6..b25e45228f 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCache.c +++ b/source/dnode/vnode/src/tsdb/tsdbCache.c @@ -1468,11 +1468,14 @@ static int32_t mergeLastRow(tb_uid_t uid, STsdb *pTsdb, bool *dup, SArray **ppCo hasRow = true; - code = updateTSchema(TSDBROW_SVERSION(pRow), pr, uid); - if (TSDB_CODE_SUCCESS != code) { - goto _err; + int32_t sversion = TSDBROW_SVERSION(pRow); + if (sversion != -1) { + code = updateTSchema(sversion, pr, uid); + if (TSDB_CODE_SUCCESS != code) { + goto _err; + } + pTSchema = pr->pCurrSchema; } - pTSchema = pr->pCurrSchema; int16_t nCol = pTSchema->numOfCols; TSKEY rowTs = TSDBROW_TS(pRow); @@ -1622,11 
+1625,14 @@ static int32_t mergeLast(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray, SCach hasRow = true; - code = updateTSchema(TSDBROW_SVERSION(pRow), pr, uid); - if (TSDB_CODE_SUCCESS != code) { - goto _err; + int32_t sversion = TSDBROW_SVERSION(pRow); + if (sversion != -1) { + code = updateTSchema(sversion, pr, uid); + if (TSDB_CODE_SUCCESS != code) { + goto _err; + } + pTSchema = pr->pCurrSchema; } - pTSchema = pr->pCurrSchema; int16_t nCol = pTSchema->numOfCols; TSKEY rowTs = TSDBROW_TS(pRow); From edc9fe9705123357da84a6441d5238d13fe2a0b0 Mon Sep 17 00:00:00 2001 From: Benguang Zhao Date: Tue, 11 Apr 2023 14:19:36 +0800 Subject: [PATCH 09/42] enh: change the error msg of INVALID_VGROUP_ID to Vnode is closed or removed --- source/dnode/mgmt/mgmt_vnode/src/vmWorker.c | 4 ++-- source/libs/sync/src/syncMain.c | 2 +- source/util/src/terror.c | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c index e4e0d608de..cd42a214cd 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c @@ -163,8 +163,8 @@ static int32_t vmPutMsgToQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg, EQueueType qtyp SVnodeObj *pVnode = vmAcquireVnode(pMgmt, pHead->vgId); if (pVnode == NULL) { - dGError("vgId:%d, msg:%p failed to put into vnode queue since %s, type:%s qtype:%d contLen:%d", pHead->vgId, pMsg, - terrstr(), TMSG_INFO(pMsg->msgType), qtype, pHead->contLen); + dGWarn("vgId:%d, msg:%p failed to put into vnode queue since %s, type:%s qtype:%d contLen:%d", pHead->vgId, pMsg, + terrstr(), TMSG_INFO(pMsg->msgType), qtype, pHead->contLen); terrno = (terrno != 0) ? 
terrno : -1; return terrno; } diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c index c25ea24249..5e0c75cf2b 100644 --- a/source/libs/sync/src/syncMain.c +++ b/source/libs/sync/src/syncMain.c @@ -599,7 +599,7 @@ int32_t syncNodePropose(SSyncNode* pSyncNode, SRpcMsg* pMsg, bool isWeak, int64_ sNTrace(pSyncNode, "propose msg, type:%s", TMSG_INFO(pMsg->msgType)); code = (*pSyncNode->syncEqMsg)(pSyncNode->msgcb, &rpcMsg); if (code != 0) { - sError("vgId:%d, failed to propose msg while enqueue since %s", pSyncNode->vgId, terrstr()); + sWarn("vgId:%d, failed to propose msg while enqueue since %s", pSyncNode->vgId, terrstr()); (void)syncRespMgrDel(pSyncNode->pSyncRespMgr, seqNum); } diff --git a/source/util/src/terror.c b/source/util/src/terror.c index 31ffbbf177..19ebe4b7aa 100644 --- a/source/util/src/terror.c +++ b/source/util/src/terror.c @@ -319,7 +319,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_SNODE_ALREADY_DEPLOYED, "Snode already deploye TAOS_DEFINE_ERROR(TSDB_CODE_SNODE_NOT_DEPLOYED, "Snode not deployed") // vnode -TAOS_DEFINE_ERROR(TSDB_CODE_VND_INVALID_VGROUP_ID, "Vnode moved to another dnode or was deleted") +TAOS_DEFINE_ERROR(TSDB_CODE_VND_INVALID_VGROUP_ID, "Vnode is closed or removed") TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_WRITE_AUTH, "Database write operation denied") TAOS_DEFINE_ERROR(TSDB_CODE_VND_NOT_EXIST, "Vnode not exist") TAOS_DEFINE_ERROR(TSDB_CODE_VND_ALREADY_EXIST, "Vnode already exist") From f6be5f2c0edb4f0a4d79f825dbcca8baf5c2042d Mon Sep 17 00:00:00 2001 From: Benguang Zhao Date: Tue, 11 Apr 2023 15:15:01 +0800 Subject: [PATCH 10/42] enh: change sync log repl mgr to sync log repl in logging msg --- source/libs/sync/src/syncPipeline.c | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/source/libs/sync/src/syncPipeline.c b/source/libs/sync/src/syncPipeline.c index 04e52b3f49..22c87343ef 100644 --- a/source/libs/sync/src/syncPipeline.c +++ b/source/libs/sync/src/syncPipeline.c @@ -633,7 +633,7 
@@ int32_t syncLogReplRetryOnNeed(SSyncLogReplMgr* pMgr, SSyncNode* pNode) { SRaftId* pDestId = &pNode->replicasId[pMgr->peerId]; if (pMgr->retryBackoff == SYNC_MAX_RETRY_BACKOFF) { syncLogReplReset(pMgr); - sWarn("vgId:%d, reset sync log repl mgr since retry backoff exceeding limit. peer:%" PRIx64, pNode->vgId, + sWarn("vgId:%d, reset sync log repl since retry backoff exceeding limit. peer:%" PRIx64, pNode->vgId, pDestId->addr); return -1; } @@ -658,8 +658,8 @@ int32_t syncLogReplRetryOnNeed(SSyncLogReplMgr* pMgr, SSyncNode* pNode) { if (pMgr->states[pos].acked) { if (pMgr->matchIndex < index && pMgr->states[pos].timeMs + (syncGetRetryMaxWaitMs() << 3) < nowMs) { syncLogReplReset(pMgr); - sWarn("vgId:%d, reset sync log repl mgr since stagnation. index:%" PRId64 ", peer:%" PRIx64, pNode->vgId, - index, pDestId->addr); + sWarn("vgId:%d, reset sync log repl since stagnation. index:%" PRId64 ", peer:%" PRIx64, pNode->vgId, index, + pDestId->addr); goto _out; } continue; @@ -708,7 +708,7 @@ int32_t syncLogReplProcessReplyAsRecovery(SSyncLogReplMgr* pMgr, SSyncNode* pNod ASSERT(pMgr->matchIndex == 0); if (pMsg->matchIndex < 0) { pMgr->restored = true; - sInfo("vgId:%d, sync log repl mgr restored. peer: dnode:%d (%" PRIx64 "), mgr: rs(%d) [%" PRId64 " %" PRId64 + sInfo("vgId:%d, sync log repl restored. peer: dnode:%d (%" PRIx64 "), mgr: rs(%d) [%" PRId64 " %" PRId64 ", %" PRId64 "), buffer: [%" PRId64 " %" PRId64 " %" PRId64 ", %" PRId64 ")", pNode->vgId, DID(&destId), destId.addr, pMgr->restored, pMgr->startIndex, pMgr->matchIndex, pMgr->endIndex, pBuf->startIndex, pBuf->commitIndex, pBuf->matchIndex, pBuf->endIndex); @@ -725,7 +725,7 @@ int32_t syncLogReplProcessReplyAsRecovery(SSyncLogReplMgr* pMgr, SSyncNode* pNod if (pMsg->success && pMsg->matchIndex == pMsg->lastSendIndex) { pMgr->matchIndex = pMsg->matchIndex; pMgr->restored = true; - sInfo("vgId:%d, sync log repl mgr restored. 
peer: dnode:%d (%" PRIx64 "), mgr: rs(%d) [%" PRId64 " %" PRId64 + sInfo("vgId:%d, sync log repl restored. peer: dnode:%d (%" PRIx64 "), mgr: rs(%d) [%" PRId64 " %" PRId64 ", %" PRId64 "), buffer: [%" PRId64 " %" PRId64 " %" PRId64 ", %" PRId64 ")", pNode->vgId, DID(&destId), destId.addr, pMgr->restored, pMgr->startIndex, pMgr->matchIndex, pMgr->endIndex, pBuf->startIndex, pBuf->commitIndex, pBuf->matchIndex, pBuf->endIndex); @@ -781,7 +781,7 @@ int32_t syncLogReplProcessHeartbeatReply(SSyncLogReplMgr* pMgr, SSyncNode* pNode SSyncLogBuffer* pBuf = pNode->pLogBuf; taosThreadMutexLock(&pBuf->mutex); if (pMsg->startTime != 0 && pMsg->startTime != pMgr->peerStartTime) { - sInfo("vgId:%d, reset sync log repl mgr in heartbeat. peer:%" PRIx64 ", start time:%" PRId64 ", old:%" PRId64 "", + sInfo("vgId:%d, reset sync log repl in heartbeat. peer:%" PRIx64 ", start time:%" PRId64 ", old:%" PRId64 "", pNode->vgId, pMsg->srcId.addr, pMsg->startTime, pMgr->peerStartTime); syncLogReplReset(pMgr); pMgr->peerStartTime = pMsg->startTime; @@ -794,8 +794,7 @@ int32_t syncLogReplProcessReply(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncApp SSyncLogBuffer* pBuf = pNode->pLogBuf; taosThreadMutexLock(&pBuf->mutex); if (pMsg->startTime != pMgr->peerStartTime) { - sInfo("vgId:%d, reset sync log repl mgr in appendlog reply. peer:%" PRIx64 ", start time:%" PRId64 - ", old:%" PRId64, + sInfo("vgId:%d, reset sync log repl in appendlog reply. peer:%" PRIx64 ", start time:%" PRId64 ", old:%" PRId64, pNode->vgId, pMsg->srcId.addr, pMsg->startTime, pMgr->peerStartTime); syncLogReplReset(pMgr); pMgr->peerStartTime = pMsg->startTime; @@ -1141,8 +1140,8 @@ int32_t syncLogReplReplicateOneTo(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncI if (terrno == TSDB_CODE_WAL_LOG_NOT_EXIST) { SSyncLogReplMgr* pMgr = syncNodeGetLogReplMgr(pNode, pDestId); if (pMgr) { - sInfo("vgId:%d, reset sync log repl mgr of peer:%" PRIx64 " since %s. 
index:%" PRId64, pNode->vgId, - pDestId->addr, terrstr(), index); + sInfo("vgId:%d, reset sync log repl of peer:%" PRIx64 " since %s. index:%" PRId64, pNode->vgId, pDestId->addr, + terrstr(), index); (void)syncLogReplReset(pMgr); } } From 09786a127d4fd0a3abc960a05b0f43a6651be223 Mon Sep 17 00:00:00 2001 From: Benguang Zhao Date: Tue, 11 Apr 2023 17:57:24 +0800 Subject: [PATCH 11/42] enh: refactor func names doOnce, attempt, probe, and sendTo of syncLogRepl --- source/libs/sync/inc/syncPipeline.h | 14 ++++++++------ source/libs/sync/src/syncPipeline.c | 24 ++++++++++++------------ source/libs/sync/src/syncReplication.c | 2 +- 3 files changed, 21 insertions(+), 19 deletions(-) diff --git a/source/libs/sync/inc/syncPipeline.h b/source/libs/sync/inc/syncPipeline.h index 68db811b12..d709e33cd4 100644 --- a/source/libs/sync/inc/syncPipeline.h +++ b/source/libs/sync/inc/syncPipeline.h @@ -77,18 +77,19 @@ static FORCE_INLINE int32_t syncLogReplGetNextRetryBackoff(SSyncLogReplMgr* pMgr SyncTerm syncLogReplGetPrevLogTerm(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncIndex index); -int32_t syncLogReplReplicateOnce(SSyncLogReplMgr* pMgr, SSyncNode* pNode); -int32_t syncLogReplReplicateOneTo(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncIndex index, SyncTerm* pTerm, - SRaftId* pDestId, bool* pBarrier); -int32_t syncLogReplReplicateAttempt(SSyncLogReplMgr* pMgr, SSyncNode* pNode); -int32_t syncLogReplReplicateProbe(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncIndex index); +int32_t syncLogReplDoOnce(SSyncLogReplMgr* pMgr, SSyncNode* pNode); +int32_t syncLogReplAttempt(SSyncLogReplMgr* pMgr, SSyncNode* pNode); +int32_t syncLogReplProbe(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncIndex index); + +int32_t syncLogReplRetryOnNeed(SSyncLogReplMgr* pMgr, SSyncNode* pNode); +int32_t syncLogReplSendTo(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncIndex index, SyncTerm* pTerm, SRaftId* pDestId, + bool* pBarrier); int32_t syncLogReplProcessReply(SSyncLogReplMgr* pMgr, SSyncNode* pNode, 
SyncAppendEntriesReply* pMsg); int32_t syncLogReplProcessReplyAsRecovery(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncAppendEntriesReply* pMsg); int32_t syncLogReplProcessReplyAsNormal(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncAppendEntriesReply* pMsg); int32_t syncLogReplProcessHeartbeatReply(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncHeartbeatReply* pMsg); -int32_t syncLogReplRetryOnNeed(SSyncLogReplMgr* pMgr, SSyncNode* pNode); // SSyncLogBuffer SSyncLogBuffer* syncLogBufferCreate(); @@ -100,6 +101,7 @@ int32_t syncLogBufferReInit(SSyncLogBuffer* pBuf, SSyncNode* pNode); int64_t syncLogBufferGetEndIndex(SSyncLogBuffer* pBuf); SyncTerm syncLogBufferGetLastMatchTerm(SSyncLogBuffer* pBuf); bool syncLogBufferIsEmpty(SSyncLogBuffer* pBuf); + int32_t syncLogBufferAppend(SSyncLogBuffer* pBuf, SSyncNode* pNode, SSyncRaftEntry* pEntry); int32_t syncLogBufferAccept(SSyncLogBuffer* pBuf, SSyncNode* pNode, SSyncRaftEntry* pEntry, SyncTerm prevTerm); int64_t syncLogBufferProceed(SSyncLogBuffer* pBuf, SSyncNode* pNode, SyncTerm* pMatchTerm); diff --git a/source/libs/sync/src/syncPipeline.c b/source/libs/sync/src/syncPipeline.c index 22c87343ef..519a19e8c7 100644 --- a/source/libs/sync/src/syncPipeline.c +++ b/source/libs/sync/src/syncPipeline.c @@ -666,7 +666,7 @@ int32_t syncLogReplRetryOnNeed(SSyncLogReplMgr* pMgr, SSyncNode* pNode) { } bool barrier = false; - if (syncLogReplReplicateOneTo(pMgr, pNode, index, &term, pDestId, &barrier) < 0) { + if (syncLogReplSendTo(pMgr, pNode, index, &term, pDestId, &barrier) < 0) { sError("vgId:%d, failed to replicate sync log entry since %s. 
index:%" PRId64 ", dest:%" PRIx64 "", pNode->vgId, terrstr(), index, pDestId->addr); goto _out; @@ -774,7 +774,7 @@ int32_t syncLogReplProcessReplyAsRecovery(SSyncLogReplMgr* pMgr, SSyncNode* pNod // attempt to replicate the raft log at index (void)syncLogReplReset(pMgr); - return syncLogReplReplicateProbe(pMgr, pNode, index); + return syncLogReplProbe(pMgr, pNode, index); } int32_t syncLogReplProcessHeartbeatReply(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncHeartbeatReply* pMsg) { @@ -809,16 +809,16 @@ int32_t syncLogReplProcessReply(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncApp return 0; } -int32_t syncLogReplReplicateOnce(SSyncLogReplMgr* pMgr, SSyncNode* pNode) { +int32_t syncLogReplDoOnce(SSyncLogReplMgr* pMgr, SSyncNode* pNode) { if (pMgr->restored) { - (void)syncLogReplReplicateAttempt(pMgr, pNode); + (void)syncLogReplAttempt(pMgr, pNode); } else { - (void)syncLogReplReplicateProbe(pMgr, pNode, pNode->pLogBuf->matchIndex); + (void)syncLogReplProbe(pMgr, pNode, pNode->pLogBuf->matchIndex); } return 0; } -int32_t syncLogReplReplicateProbe(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncIndex index) { +int32_t syncLogReplProbe(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncIndex index) { ASSERT(!pMgr->restored); ASSERT(pMgr->startIndex >= 0); int64_t retryMaxWaitMs = syncGetRetryMaxWaitMs(); @@ -833,7 +833,7 @@ int32_t syncLogReplReplicateProbe(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncI SRaftId* pDestId = &pNode->replicasId[pMgr->peerId]; bool barrier = false; SyncTerm term = -1; - if (syncLogReplReplicateOneTo(pMgr, pNode, index, &term, pDestId, &barrier) < 0) { + if (syncLogReplSendTo(pMgr, pNode, index, &term, pDestId, &barrier) < 0) { sError("vgId:%d, failed to replicate log entry since %s. 
index:%" PRId64 ", dest: 0x%016" PRIx64 "", pNode->vgId, terrstr(), index, pDestId->addr); return -1; @@ -856,7 +856,7 @@ int32_t syncLogReplReplicateProbe(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncI return 0; } -int32_t syncLogReplReplicateAttempt(SSyncLogReplMgr* pMgr, SSyncNode* pNode) { +int32_t syncLogReplAttempt(SSyncLogReplMgr* pMgr, SSyncNode* pNode) { ASSERT(pMgr->restored); SRaftId* pDestId = &pNode->replicasId[pMgr->peerId]; @@ -878,7 +878,7 @@ int32_t syncLogReplReplicateAttempt(SSyncLogReplMgr* pMgr, SSyncNode* pNode) { SRaftId* pDestId = &pNode->replicasId[pMgr->peerId]; bool barrier = false; SyncTerm term = -1; - if (syncLogReplReplicateOneTo(pMgr, pNode, index, &term, pDestId, &barrier) < 0) { + if (syncLogReplSendTo(pMgr, pNode, index, &term, pDestId, &barrier) < 0) { sError("vgId:%d, failed to replicate log entry since %s. index:%" PRId64 ", dest: 0x%016" PRIx64 "", pNode->vgId, terrstr(), index, pDestId->addr); return -1; @@ -931,7 +931,7 @@ int32_t syncLogReplProcessReplyAsNormal(SSyncLogReplMgr* pMgr, SSyncNode* pNode, pMgr->startIndex = pMgr->matchIndex; } - return syncLogReplReplicateAttempt(pMgr, pNode); + return syncLogReplAttempt(pMgr, pNode); } SSyncLogReplMgr* syncLogReplCreate() { @@ -1126,8 +1126,8 @@ SSyncRaftEntry* syncLogBufferGetOneEntry(SSyncLogBuffer* pBuf, SSyncNode* pNode, return pEntry; } -int32_t syncLogReplReplicateOneTo(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncIndex index, SyncTerm* pTerm, - SRaftId* pDestId, bool* pBarrier) { +int32_t syncLogReplSendTo(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncIndex index, SyncTerm* pTerm, SRaftId* pDestId, + bool* pBarrier) { SSyncRaftEntry* pEntry = NULL; SRpcMsg msgOut = {0}; bool inBuf = false; diff --git a/source/libs/sync/src/syncReplication.c b/source/libs/sync/src/syncReplication.c index 43d2bc839b..8ac9a860e3 100644 --- a/source/libs/sync/src/syncReplication.c +++ b/source/libs/sync/src/syncReplication.c @@ -74,7 +74,7 @@ int32_t syncNodeReplicateWithoutLock(SSyncNode* 
pNode) { continue; } SSyncLogReplMgr* pMgr = pNode->logReplMgrs[i]; - (void)syncLogReplReplicateOnce(pMgr, pNode); + (void)syncLogReplDoOnce(pMgr, pNode); } return 0; } From 3efa36ca51f3ecdc64b3b1156906ec4eb52c905d Mon Sep 17 00:00:00 2001 From: Benguang Zhao Date: Tue, 11 Apr 2023 18:22:59 +0800 Subject: [PATCH 12/42] enh: refactor func name syncLogIsReplicationBarrier to syncLogReplBarrier --- source/libs/sync/inc/syncRaftEntry.h | 2 +- source/libs/sync/src/syncPipeline.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/source/libs/sync/inc/syncRaftEntry.h b/source/libs/sync/inc/syncRaftEntry.h index a39e043c52..f9447e0168 100644 --- a/source/libs/sync/inc/syncRaftEntry.h +++ b/source/libs/sync/inc/syncRaftEntry.h @@ -45,7 +45,7 @@ SSyncRaftEntry* syncEntryBuildNoop(SyncTerm term, SyncIndex index, int32_t vgId) void syncEntryDestroy(SSyncRaftEntry* pEntry); void syncEntry2OriginalRpc(const SSyncRaftEntry* pEntry, SRpcMsg* pRpcMsg); // step 7 -static FORCE_INLINE bool syncLogIsReplicationBarrier(SSyncRaftEntry* pEntry) { +static FORCE_INLINE bool syncLogReplBarrier(SSyncRaftEntry* pEntry) { return pEntry->originalRpcType == TDMT_SYNC_NOOP; } diff --git a/source/libs/sync/src/syncPipeline.c b/source/libs/sync/src/syncPipeline.c index 519a19e8c7..69888ed8ea 100644 --- a/source/libs/sync/src/syncPipeline.c +++ b/source/libs/sync/src/syncPipeline.c @@ -1147,7 +1147,7 @@ int32_t syncLogReplSendTo(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncIndex ind } goto _err; } - *pBarrier = syncLogIsReplicationBarrier(pEntry); + *pBarrier = syncLogReplBarrier(pEntry); prevLogTerm = syncLogReplGetPrevLogTerm(pMgr, pNode, index); if (prevLogTerm < 0) { From 19f09a10e64734702013810f7820a4709dd3acad Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Wed, 12 Apr 2023 10:14:45 +0800 Subject: [PATCH 13/42] fix: show user privileges invalid write --- source/dnode/mnode/impl/src/mndUser.c | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git 
a/source/dnode/mnode/impl/src/mndUser.c b/source/dnode/mnode/impl/src/mndUser.c index 3a1c4ce58f..d08227927a 100644 --- a/source/dnode/mnode/impl/src/mndUser.c +++ b/source/dnode/mnode/impl/src/mndUser.c @@ -1168,15 +1168,13 @@ static void mndLoopHash(SHashObj *hash, char *priType, SSDataBlock *pBlock, int3 sprintf(sql, "error"); } - // char *obj = taosMemoryMalloc(sqlLen + VARSTR_HEADER_SIZE + 1); char obj[TSDB_PRIVILEDGE_CONDITION_LEN + VARSTR_HEADER_SIZE] = {0}; STR_WITH_MAXSIZE_TO_VARSTR(obj, sql, pShow->pMeta->pSchemas[cols].bytes); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataSetVal(pColInfo, *numOfRows, (const char *)obj, false); - // taosMemoryFree(obj); } else { - char condition[20] = {0}; + char condition[TSDB_PRIVILEDGE_CONDITION_LEN + VARSTR_HEADER_SIZE] = {0}; STR_WITH_MAXSIZE_TO_VARSTR(condition, "", pShow->pMeta->pSchemas[cols].bytes); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataSetVal(pColInfo, *numOfRows, (const char *)condition, false); @@ -1257,12 +1255,12 @@ static int32_t mndRetrievePrivileges(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataSetVal(pColInfo, numOfRows, (const char *)objName, false); - char tableName[20] = {0}; + char tableName[TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE] = {0}; STR_WITH_MAXSIZE_TO_VARSTR(tableName, "", pShow->pMeta->pSchemas[cols].bytes); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataSetVal(pColInfo, numOfRows, (const char *)tableName, false); - char condition[20] = {0}; + char condition[TSDB_PRIVILEDGE_CONDITION_LEN + VARSTR_HEADER_SIZE] = {0}; STR_WITH_MAXSIZE_TO_VARSTR(condition, "", pShow->pMeta->pSchemas[cols].bytes); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataSetVal(pColInfo, numOfRows, (const char *)condition, false); @@ -1292,12 +1290,12 @@ static int32_t mndRetrievePrivileges(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); 
colDataSetVal(pColInfo, numOfRows, (const char *)objName, false); - char tableName[20] = {0}; + char tableName[TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE] = {0}; STR_WITH_MAXSIZE_TO_VARSTR(tableName, "", pShow->pMeta->pSchemas[cols].bytes); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataSetVal(pColInfo, numOfRows, (const char *)tableName, false); - char condition[20] = {0}; + char condition[TSDB_PRIVILEDGE_CONDITION_LEN + VARSTR_HEADER_SIZE] = {0}; STR_WITH_MAXSIZE_TO_VARSTR(condition, "", pShow->pMeta->pSchemas[cols].bytes); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataSetVal(pColInfo, numOfRows, (const char *)condition, false); @@ -1329,12 +1327,12 @@ static int32_t mndRetrievePrivileges(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataSetVal(pColInfo, numOfRows, (const char *)topicName, false); - char tableName[20] = {0}; + char tableName[TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE] = {0}; STR_WITH_MAXSIZE_TO_VARSTR(tableName, "", pShow->pMeta->pSchemas[cols].bytes); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataSetVal(pColInfo, numOfRows, (const char *)tableName, false); - char condition[20] = {0}; + char condition[TSDB_PRIVILEDGE_CONDITION_LEN + VARSTR_HEADER_SIZE] = {0}; STR_WITH_MAXSIZE_TO_VARSTR(condition, "", pShow->pMeta->pSchemas[cols].bytes); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataSetVal(pColInfo, numOfRows, (const char *)condition, false); From d551075bbada11f4bafafdb7ecfeeb8f4f1ea4de Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Wed, 12 Apr 2023 16:22:25 +0800 Subject: [PATCH 14/42] fix: taosdump continue if ts out of range for main (#20887) --- cmake/taostools_CMakeLists.txt.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in index 0110b27b32..d8bf3a09b4 100644 --- a/cmake/taostools_CMakeLists.txt.in +++ b/cmake/taostools_CMakeLists.txt.in @@ -2,7 
+2,7 @@ # taos-tools ExternalProject_Add(taos-tools GIT_REPOSITORY https://github.com/taosdata/taos-tools.git - GIT_TAG 149ac34 + GIT_TAG 0681d8b SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools" BINARY_DIR "" #BUILD_IN_SOURCE TRUE From 5ea2ba0a387d2685b57cc03bc07dfafd8d930cf0 Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Wed, 12 Apr 2023 17:39:23 +0800 Subject: [PATCH 15/42] fix: error querying after setting permissions for varchar type tags --- source/libs/parser/src/parTranslater.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 1d6d123cb4..80f334019b 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -1235,6 +1235,10 @@ static int32_t calcTypeBytes(SDataType dt) { } static EDealRes translateValue(STranslateContext* pCxt, SValueNode* pVal) { + if (pVal->translate) { + return TSDB_CODE_SUCCESS; + } + SDataType dt = pVal->node.resType; dt.bytes = calcTypeBytes(dt); return translateValueImpl(pCxt, pVal, dt, false); @@ -1692,7 +1696,8 @@ static void setFuncClassification(SNode* pCurrStmt, SFunctionNode* pFunc) { pSelect->hasUniqueFunc = pSelect->hasUniqueFunc ? true : (FUNCTION_TYPE_UNIQUE == pFunc->funcType); pSelect->hasTailFunc = pSelect->hasTailFunc ? true : (FUNCTION_TYPE_TAIL == pFunc->funcType); pSelect->hasInterpFunc = pSelect->hasInterpFunc ? true : (FUNCTION_TYPE_INTERP == pFunc->funcType); - pSelect->hasInterpPseudoColFunc = pSelect->hasInterpPseudoColFunc ? true : fmIsInterpPseudoColumnFunc(pFunc->funcId); + pSelect->hasInterpPseudoColFunc = + pSelect->hasInterpPseudoColFunc ? true : fmIsInterpPseudoColumnFunc(pFunc->funcId); pSelect->hasLastRowFunc = pSelect->hasLastRowFunc ? true : (FUNCTION_TYPE_LAST_ROW == pFunc->funcType); pSelect->hasLastFunc = pSelect->hasLastFunc ? true : (FUNCTION_TYPE_LAST == pFunc->funcType); pSelect->hasTimeLineFunc = pSelect->hasTimeLineFunc ? 
true : fmIsTimelineFunc(pFunc->funcId); @@ -3372,7 +3377,8 @@ static int32_t translateInterp(STranslateContext* pCxt, SSelectStmt* pSelect) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_INTERP_CLAUSE); } if (pSelect->hasInterpPseudoColFunc) { - return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC, "Has Interp pseudo column(s) but missing interp function"); + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC, + "Has Interp pseudo column(s) but missing interp function"); } return TSDB_CODE_SUCCESS; } From 1ed3149c393a77f5386457397d20ad03261f1e8d Mon Sep 17 00:00:00 2001 From: kailixu Date: Wed, 12 Apr 2023 17:41:43 +0800 Subject: [PATCH 16/42] fix: the precision of delete statement --- source/libs/parser/src/parTranslater.c | 8 ++ source/libs/scalar/src/scalar.c | 2 +- tests/script/tsim/insert/delete0.sim | 161 +++++++++++++++++++++++++ tests/script/tsim/testsuit.sim | 1 + 4 files changed, 171 insertions(+), 1 deletion(-) create mode 100644 tests/script/tsim/insert/delete0.sim diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 1d6d123cb4..fdaeba2f0f 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -645,6 +645,10 @@ static bool isSelectStmt(SNode* pCurrStmt) { return NULL != pCurrStmt && QUERY_NODE_SELECT_STMT == nodeType(pCurrStmt); } +static bool isDeleteStmt(SNode* pCurrStmt) { + return NULL != pCurrStmt && QUERY_NODE_DELETE_STMT == nodeType(pCurrStmt); +} + static bool isSetOperator(SNode* pCurrStmt) { return NULL != pCurrStmt && QUERY_NODE_SET_OPERATOR == nodeType(pCurrStmt); } @@ -669,6 +673,9 @@ static uint8_t getPrecisionFromCurrStmt(SNode* pCurrStmt, uint8_t defaultVal) { if (NULL != pCurrStmt && QUERY_NODE_CREATE_STREAM_STMT == nodeType(pCurrStmt)) { return getPrecisionFromCurrStmt(((SCreateStreamStmt*)pCurrStmt)->pQuery, defaultVal); } + if (isDeleteStmt(pCurrStmt)) { + return 
((SDeleteStmt*)pCurrStmt)->precision; + } return defaultVal; } @@ -3741,6 +3748,7 @@ static int32_t translateDelete(STranslateContext* pCxt, SDeleteStmt* pDelete) { pCxt->pCurrStmt = (SNode*)pDelete; int32_t code = translateFrom(pCxt, pDelete->pFromTable); if (TSDB_CODE_SUCCESS == code) { + pDelete->precision = ((STableNode*)pDelete->pFromTable)->precision; code = translateDeleteWhere(pCxt, pDelete); } pCxt->currClause = SQL_CLAUSE_SELECT; diff --git a/source/libs/scalar/src/scalar.c b/source/libs/scalar/src/scalar.c index fe01977b2e..47bea920ef 100644 --- a/source/libs/scalar/src/scalar.c +++ b/source/libs/scalar/src/scalar.c @@ -463,7 +463,7 @@ int32_t sclInitParamList(SScalarParam **pParams, SNodeList *pParamList, SScalarC sclError("calloc %d failed", (int32_t)((*paramNum) * sizeof(SScalarParam))); SCL_ERR_RET(TSDB_CODE_OUT_OF_MEMORY); } - + if (pParamList) { SNode *tnode = NULL; int32_t i = 0; diff --git a/tests/script/tsim/insert/delete0.sim b/tests/script/tsim/insert/delete0.sim new file mode 100644 index 0000000000..c8a3ba7092 --- /dev/null +++ b/tests/script/tsim/insert/delete0.sim @@ -0,0 +1,161 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sql connect + +print =============== create database with different precision +sql create database d0 keep 365000 +sql create database d1 keep 365000 precision 'ms' +sql create database d2 keep 365000 precision 'us' +sql create database d3 keep 50000 precision 'ns' + +sql select * from information_schema.ins_databases +if $rows != 6 then + return -1 +endi + +print $data00 $data01 $data02 + + +sql create table if not exists d0.stb (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int unsigned) +sql create table if not exists d1.stb (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int unsigned) +sql create table if not exists d2.stb (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int unsigned) +sql create table if not exists d3.stb (ts timestamp, c1 
int, c2 float, c3 double) tags (t1 int unsigned) +sql create table if not exists d0.ntb (ts timestamp, c1 int, c2 float, c3 double) +sql create table if not exists d1.ntb (ts timestamp, c1 int, c2 float, c3 double) +sql create table if not exists d2.ntb (ts timestamp, c1 int, c2 float, c3 double) +sql create table if not exists d3.ntb (ts timestamp, c1 int, c2 float, c3 double) + +sql create table d0.ct1 using d0.stb tags(1000) +sql create table d1.ct1 using d1.stb tags(1000) +sql create table d2.ct1 using d2.stb tags(1000) +sql create table d3.ct1 using d3.stb tags(1000) +sql create table d0.ct2 using d0.stb tags(1000) +sql create table d1.ct2 using d1.stb tags(1000) +sql create table d2.ct2 using d2.stb tags(1000) +sql create table d3.ct2 using d3.stb tags(1000) + + +sql insert into d0.ct1 values(now+0s, 10, 2.0, 3.0) +sql insert into d1.ct1 values(now+0s, 10, 2.0, 3.0) +sql insert into d2.ct1 values(now+0s, 10, 2.0, 3.0) +sql insert into d3.ct1 values(now+0s, 10, 2.0, 3.0) +sql insert into d0.ct2 values(now+0s, 10, 2.0, 3.0) +sql insert into d1.ct2 values(now+0s, 10, 2.0, 3.0) +sql insert into d2.ct2 values(now+0s, 10, 2.0, 3.0) +sql insert into d3.ct2 values(now+0s, 10, 2.0, 3.0) +sql insert into d0.ntb values(now+0s, 10, 2.0, 3.0) +sql insert into d1.ntb values(now+0s, 10, 2.0, 3.0) +sql insert into d2.ntb values(now+0s, 10, 2.0, 3.0) +sql insert into d3.ntb values(now+0s, 10, 2.0, 3.0) + + +print =============== query data from super table +sql select count(*) from d0.stb +if $data00 != 2 then + return -1 +endi +sql select count(*) from d1.stb +if $data00 != 2 then + return -1 +endi +sql select count(*) from d2.stb +if $data00 != 2 then + return -1 +endi +sql select count(*) from d3.stb +if $data00 != 2 then + return -1 +endi + +print =============== delete from child table +sql delete from d0.ct1 where ts < now() +sql delete from d1.ct1 where ts < now() +sql delete from d2.ct1 where ts < now() +sql delete from d3.ct1 where ts < now() + + +print 
=============== query data from super table +sql select count(*) from d0.stb +if $data00 != 1 then + return -1 +endi +sql select count(*) from d1.stb +if $data00 != 1 then + return -1 +endi +sql select count(*) from d2.stb +if $data00 != 1 then + return -1 +endi +sql select count(*) from d3.stb +if $data00 != 1 then + return -1 +endi +print =============== query data from normal table +sql select count(*) from d0.ntb +if $data00 != 1 then + return -1 +endi +sql select count(*) from d1.ntb +if $data00 != 1 then + return -1 +endi +sql select count(*) from d2.ntb +if $data00 != 1 then + return -1 +endi +sql select count(*) from d3.ntb +if $data00 != 1 then + return -1 +endi + +print =============== delete from super table +sql delete from d0.stb where ts < now() +sql delete from d1.stb where ts < now() +sql delete from d2.stb where ts < now() +sql delete from d3.stb where ts < now() + +print =============== query data from super table +sql select count(*) from d0.stb +if $data00 != 0 then + return -1 +endi +sql select count(*) from d1.stb +if $data00 != 0 then + return -1 +endi +sql select count(*) from d2.stb +if $data00 != 0 then + return -1 +endi +sql select count(*) from d3.stb +if $data00 != 0 then + return -1 +endi + +print =============== delete from normal table +sql delete from d0.ntb where ts < now() +sql delete from d1.ntb where ts < now() +sql delete from d2.ntb where ts < now() +sql delete from d3.ntb where ts < now() + +print =============== query data from normal table +sql select count(*) from d0.ntb +if $data00 != 0 then + return -1 +endi +sql select count(*) from d1.ntb +if $data00 != 0 then + return -1 +endi +sql select count(*) from d2.ntb +if $data00 != 0 then + return -1 +endi +sql select count(*) from d3.ntb +if $data00 != 0 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/testsuit.sim b/tests/script/tsim/testsuit.sim index c5fbf41b66..0abe56ab3c 100644 --- a/tests/script/tsim/testsuit.sim 
+++ b/tests/script/tsim/testsuit.sim @@ -114,6 +114,7 @@ run tsim/insert/basic1.sim run tsim/insert/commit-merge0.sim run tsim/insert/basic0.sim run tsim/insert/update0.sim +run tsim/insert/delete0.sim run tsim/insert/backquote.sim run tsim/insert/null.sim run tsim/catalog/alterInCurrent.sim From 3bd721518d7a6f203f3552c8667ceb2d7bd35479 Mon Sep 17 00:00:00 2001 From: kailixu Date: Wed, 12 Apr 2023 17:43:44 +0800 Subject: [PATCH 17/42] chore: revert the extra line --- source/libs/scalar/src/scalar.c | 1 - 1 file changed, 1 deletion(-) diff --git a/source/libs/scalar/src/scalar.c b/source/libs/scalar/src/scalar.c index 47bea920ef..47cabe1e6c 100644 --- a/source/libs/scalar/src/scalar.c +++ b/source/libs/scalar/src/scalar.c @@ -463,7 +463,6 @@ int32_t sclInitParamList(SScalarParam **pParams, SNodeList *pParamList, SScalarC sclError("calloc %d failed", (int32_t)((*paramNum) * sizeof(SScalarParam))); SCL_ERR_RET(TSDB_CODE_OUT_OF_MEMORY); } - if (pParamList) { SNode *tnode = NULL; int32_t i = 0; From 4ac4b4f0c892d1d117fda80e81c977dfc45ea95f Mon Sep 17 00:00:00 2001 From: kailixu Date: Wed, 12 Apr 2023 17:45:12 +0800 Subject: [PATCH 18/42] chore: revert the extra line --- source/libs/scalar/src/scalar.c | 1 + 1 file changed, 1 insertion(+) diff --git a/source/libs/scalar/src/scalar.c b/source/libs/scalar/src/scalar.c index 47cabe1e6c..fe01977b2e 100644 --- a/source/libs/scalar/src/scalar.c +++ b/source/libs/scalar/src/scalar.c @@ -463,6 +463,7 @@ int32_t sclInitParamList(SScalarParam **pParams, SNodeList *pParamList, SScalarC sclError("calloc %d failed", (int32_t)((*paramNum) * sizeof(SScalarParam))); SCL_ERR_RET(TSDB_CODE_OUT_OF_MEMORY); } + if (pParamList) { SNode *tnode = NULL; int32_t i = 0; From bd24dcf3748e1c5f3577b90cdfc729e4bae87ef1 Mon Sep 17 00:00:00 2001 From: kailixu Date: Wed, 12 Apr 2023 17:59:21 +0800 Subject: [PATCH 19/42] chore: more code --- tests/script/tsim/insert/delete0.sim | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git 
a/tests/script/tsim/insert/delete0.sim b/tests/script/tsim/insert/delete0.sim index c8a3ba7092..5653853643 100644 --- a/tests/script/tsim/insert/delete0.sim +++ b/tests/script/tsim/insert/delete0.sim @@ -4,10 +4,10 @@ system sh/exec.sh -n dnode1 -s start sql connect print =============== create database with different precision -sql create database d0 keep 365000 -sql create database d1 keep 365000 precision 'ms' -sql create database d2 keep 365000 precision 'us' -sql create database d3 keep 50000 precision 'ns' +sql create database d0 keep 365 +sql create database d1 keep 365 precision 'ms' +sql create database d2 keep 365 precision 'us' +sql create database d3 keep 365 precision 'ns' sql select * from information_schema.ins_databases if $rows != 6 then From 0145cbe100921844e8f54988f90084fb02367afe Mon Sep 17 00:00:00 2001 From: kailixu Date: Wed, 12 Apr 2023 18:39:06 +0800 Subject: [PATCH 20/42] chore: add test case to CI --- tests/parallel_test/cases.task | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index e6aa2f1fea..b37f6e7929 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -783,6 +783,7 @@ ,,y,script,./test.sh -f tsim/insert/query_multi_file.sim ,,y,script,./test.sh -f tsim/insert/tcp.sim ,,y,script,./test.sh -f tsim/insert/update0.sim +,,y,script,./test.sh -f tsim/insert/delete0.sim ,,y,script,./test.sh -f tsim/insert/update1_sort_merge.sim ,,y,script,./test.sh -f tsim/insert/update2.sim ,,y,script,./test.sh -f tsim/parser/alter__for_community_version.sim From 81610773c512a18d232698c4ec1828ecdd497766 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Tue, 11 Apr 2023 15:47:27 +0800 Subject: [PATCH 21/42] add test cases --- source/libs/executor/src/scanoperator.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index f0827886a0..852223378c 100644 --- 
a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -329,7 +329,7 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanBase* pTableSca } else if (*status == FUNC_DATA_REQUIRED_NOT_LOAD) { qDebug("%s data block skipped, brange:%" PRId64 "-%" PRId64 ", rows:%" PRId64 ", uid:%" PRIu64, GET_TASKID(pTaskInfo), pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows, pBlockInfo->id.uid); - doSetTagColumnData(pTableScanInfo, pBlock, pTaskInfo, 1); + doSetTagColumnData(pTableScanInfo, pBlock, pTaskInfo, pBlock->info.rows); pCost->skipBlocks += 1; tsdbReleaseDataBlock(pTableScanInfo->dataReader); return TSDB_CODE_SUCCESS; @@ -340,7 +340,7 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanBase* pTableSca if (success) { // failed to load the block sma data, data block statistics does not exist, load data block instead qDebug("%s data block SMA loaded, brange:%" PRId64 "-%" PRId64 ", rows:%" PRId64 , GET_TASKID(pTaskInfo), pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows); - doSetTagColumnData(pTableScanInfo, pBlock, pTaskInfo, 1); + doSetTagColumnData(pTableScanInfo, pBlock, pTaskInfo, pBlock->info.rows); tsdbReleaseDataBlock(pTableScanInfo->dataReader); return TSDB_CODE_SUCCESS; } else { @@ -485,7 +485,7 @@ int32_t addTagPseudoColumnData(SReadHandle* pHandle, const SExprInfo* pExpr, int // backup the rows int32_t backupRows = pBlock->info.rows; - pBlock->info.rows = (rows < pBlock->info.rows) ? 
pBlock->info.rows : rows; + pBlock->info.rows = rows; bool freeReader = false; STableCachedVal val = {0}; From fd88602572103a93c7be85b85f5172ccd9b1cb27 Mon Sep 17 00:00:00 2001 From: plum-lihui Date: Wed, 12 Apr 2023 20:04:19 +0800 Subject: [PATCH 22/42] test: reopen tmqDelete-1ctb.py --- tests/parallel_test/cases.task | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index b37f6e7929..65cddc4248 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -89,7 +89,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqUpdateWithConsume.py -N 3 -n 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqUpdate-multiCtb-snapshot0.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqUpdate-multiCtb-snapshot1.py -# ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqDelete-1ctb.py +,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqDelete-1ctb.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqDelete-multiCtb.py -N 3 -n 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqDropStbCtb.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqDropNtb-snapshot0.py From ef219095cf524e58aa2673b0b698b7d311a96077 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Thu, 13 Apr 2023 10:06:49 +0800 Subject: [PATCH 23/42] fix(tsdb/read): remove duplicat schema fetching --- source/dnode/vnode/src/tsdb/tsdbRead.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c index b80c952ee0..d868c54fd1 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead.c @@ -2320,7 +2320,7 @@ static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo* tsdbRowMergerAdd(&merge, pRow, pSchema); } else { - STSchema* pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, pBlockScanInfo->uid); + 
// STSchema* pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, pBlockScanInfo->uid); code = tsdbRowMergerInit(&merge, NULL, pRow, pSchema); if (code != TSDB_CODE_SUCCESS) { return code; @@ -2352,7 +2352,7 @@ static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo* tsdbRowMergerAdd(&merge, piRow, piSchema); } else { init = true; - STSchema* pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(piRow), pReader, pBlockScanInfo->uid); + // STSchema* pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(piRow), pReader, pBlockScanInfo->uid); code = tsdbRowMergerInit(&merge, pSchema, piRow, piSchema); if (code != TSDB_CODE_SUCCESS) { return code; From 589ed9d0cee9f77ac132ed46ef7a529afd928231 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Thu, 13 Apr 2023 12:09:44 +0800 Subject: [PATCH 24/42] fix(tsdb/cache): fix block index ref releasing --- source/dnode/vnode/src/tsdb/tsdbCache.c | 13 ++++++++++++- tests/script/tsim/parser/last_cache.sim | 2 +- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbCache.c b/source/dnode/vnode/src/tsdb/tsdbCache.c index b25e45228f..c09a286c6b 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCache.c +++ b/source/dnode/vnode/src/tsdb/tsdbCache.c @@ -35,7 +35,11 @@ _err: static void tsdbCloseBICache(STsdb *pTsdb) { SLRUCache *pCache = pTsdb->biCache; if (pCache) { + int32_t elems = taosLRUCacheGetElems(pCache); + tsdbTrace("vgId:%d, elems: %d", TD_VID(pTsdb->pVnode), elems); taosLRUCacheEraseUnrefEntries(pCache); + elems = taosLRUCacheGetElems(pCache); + tsdbTrace("vgId:%d, elems: %d", TD_VID(pTsdb->pVnode), elems); taosLRUCacheCleanup(pCache); @@ -819,7 +823,12 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow, bool *pIgnoreEarlie * &state->blockIdx); */ state->pBlockIdx = taosArraySearch(state->aBlockIdx, state->pBlockIdxExp, tCmprBlockIdx, TD_EQ); - if (!state->pBlockIdx) { /* + if (!state->pBlockIdx) { + tsdbBICacheRelease(state->pTsdb->biCache, 
state->aBlockIdxHandle); + + state->aBlockIdxHandle = NULL; + state->aBlockIdx = NULL; + /* tsdbDataFReaderClose(state->pDataFReader); *state->pDataFReader = NULL; resetLastBlockLoadInfo(state->pLoadInfo);*/ @@ -1936,6 +1945,7 @@ int32_t tsdbCacheGetBlockIdx(SLRUCache *pCache, SDataFReader *pFileReader, LRUHa taosThreadMutexUnlock(&pTsdb->biMutex); } + tsdbTrace("bi cache:%p, ref", pCache); *handle = h; return code; @@ -1945,6 +1955,7 @@ int32_t tsdbBICacheRelease(SLRUCache *pCache, LRUHandle *h) { int32_t code = 0; taosLRUCacheRelease(pCache, h, false); + tsdbTrace("bi cache:%p, release", pCache); return code; } diff --git a/tests/script/tsim/parser/last_cache.sim b/tests/script/tsim/parser/last_cache.sim index 9a41a9f5aa..ef7215d6e9 100644 --- a/tests/script/tsim/parser/last_cache.sim +++ b/tests/script/tsim/parser/last_cache.sim @@ -53,7 +53,7 @@ sql insert into tbc values ("2021-05-11 10:12:29",36, 37, NULL, -4005) sql insert into tbd values ("2021-05-11 10:12:29",NULL,NULL,NULL,NULL ) run tsim/parser/last_cache_query.sim - +sql flush database $db system sh/exec.sh -n dnode1 -s stop -x SIGINT system sh/exec.sh -n dnode1 -s start From 1b198bc40fa79e287dbeea2d45491b50484d4dcd Mon Sep 17 00:00:00 2001 From: Xuefeng Tan <1172915550@qq.com> Date: Thu, 13 Apr 2023 13:25:23 +0800 Subject: [PATCH 25/42] enh(taosAdapter): make the schemaless automatic database creation configurable (#20902) --- cmake/taosadapter_CMakeLists.txt.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/taosadapter_CMakeLists.txt.in b/cmake/taosadapter_CMakeLists.txt.in index b2f335e1f7..ba937b40c1 100644 --- a/cmake/taosadapter_CMakeLists.txt.in +++ b/cmake/taosadapter_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taosadapter ExternalProject_Add(taosadapter GIT_REPOSITORY https://github.com/taosdata/taosadapter.git - GIT_TAG cb1e89c + GIT_TAG e02ddb2 SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter" BINARY_DIR "" #BUILD_IN_SOURCE TRUE From b405855ba804818327b02760d1d058f219491380 Mon 
Sep 17 00:00:00 2001 From: cadem Date: Thu, 13 Apr 2023 13:40:42 +0800 Subject: [PATCH 26/42] balance leader to enterprise --- source/dnode/mnode/impl/CMakeLists.txt | 1 + source/dnode/mnode/impl/src/mndVgroup.c | 57 ++++--------------------- 2 files changed, 10 insertions(+), 48 deletions(-) diff --git a/source/dnode/mnode/impl/CMakeLists.txt b/source/dnode/mnode/impl/CMakeLists.txt index 493ba48601..b9aa8eb674 100644 --- a/source/dnode/mnode/impl/CMakeLists.txt +++ b/source/dnode/mnode/impl/CMakeLists.txt @@ -5,6 +5,7 @@ ENDIF () IF (TD_ENTERPRISE) LIST(APPEND MNODE_SRC ${TD_ENTERPRISE_DIR}/src/plugins/privilege/src/privilege.c) LIST(APPEND MNODE_SRC ${TD_ENTERPRISE_DIR}/src/plugins/mnode/src/mndDb.c) + LIST(APPEND MNODE_SRC ${TD_ENTERPRISE_DIR}/src/plugins/mnode/src/mndVgroup.c) ENDIF () add_library(mnode STATIC ${MNODE_SRC}) diff --git a/source/dnode/mnode/impl/src/mndVgroup.c b/source/dnode/mnode/impl/src/mndVgroup.c index 31924e0471..ed1fddb63f 100644 --- a/source/dnode/mnode/impl/src/mndVgroup.c +++ b/source/dnode/mnode/impl/src/mndVgroup.c @@ -1891,57 +1891,18 @@ int32_t mndAddVgroupBalanceToTrans(SMnode *pMnode, SVgObj *pVgroup, STrans *pTra return 0; } +extern int32_t mndProcessVgroupBalanceLeaderMsgImp(SRpcMsg *pReq); + int32_t mndProcessVgroupBalanceLeaderMsg(SRpcMsg *pReq) { - int32_t code = -1; - - SBalanceVgroupLeaderReq req = {0}; - if (tDeserializeSBalanceVgroupLeaderReq(pReq->pCont, pReq->contLen, &req) != 0) { - terrno = TSDB_CODE_INVALID_MSG; - return code; - } - - SMnode *pMnode = pReq->info.node; - SSdb *pSdb = pMnode->pSdb; - - int32_t total = sdbGetSize(pSdb, SDB_VGROUP); - if(total <= 0) { - terrno = TSDB_CODE_TSC_INVALID_OPERATION; - return code; - } - - STrans *pTrans = NULL; - pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, pReq, "bal-vg-leader"); - if (pTrans == NULL) goto _OVER; - mndTransSetSerial(pTrans); - mInfo("trans:%d, used to balance vgroup leader", pTrans->id); - - void *pIter = NULL; - int32_t count = 
0; - while (1) { - SVgObj *pVgroup = NULL; - pIter = sdbFetch(pSdb, SDB_VGROUP, pIter, (void **)&pVgroup); - if (pIter == NULL) break; - - if(mndAddVgroupBalanceToTrans(pMnode, pVgroup, pTrans) == 0){ - count++; - } - - sdbRelease(pSdb, pVgroup); - } - - if(count == 0) { - terrno = TSDB_CODE_TSC_INVALID_OPERATION; - goto _OVER; - } - - if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER; - code = 0; - -_OVER: - mndTransDrop(pTrans); - return code; + return mndProcessVgroupBalanceLeaderMsgImp(pReq); } +#ifndef TD_ENTERPRISE +int32_t mndProcessVgroupBalanceLeaderMsgImp(SRpcMsg *pReq) { + return 0; +} +#endif + static int32_t mndCheckDnodeMemory(SMnode *pMnode, SDbObj *pOldDb, SDbObj *pNewDb, SVgObj *pOldVgroup, SVgObj *pNewVgroup, SArray *pArray) { for (int32_t i = 0; i < (int32_t)taosArrayGetSize(pArray); ++i) { From d1544e8859c9764293f973fce12eab0e6c48949c Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Thu, 13 Apr 2023 14:05:04 +0800 Subject: [PATCH 27/42] fix: fix illegal usage of _isfilled/_irowts --- source/libs/function/src/builtins.c | 2 +- source/libs/parser/src/parTranslater.c | 20 +++++++++++++++++++- 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index 69951f680e..a293f45238 100644 --- a/source/libs/function/src/builtins.c +++ b/source/libs/function/src/builtins.c @@ -3279,7 +3279,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = "_irowts", .type = FUNCTION_TYPE_IROWTS, - .classification = FUNC_MGT_PSEUDO_COLUMN_FUNC | FUNC_MGT_INTERP_PC_FUNC|FUNC_MGT_KEEP_ORDER_FUNC, + .classification = FUNC_MGT_PSEUDO_COLUMN_FUNC | FUNC_MGT_INTERP_PC_FUNC | FUNC_MGT_KEEP_ORDER_FUNC, .translateFunc = translateTimePseudoColumn, .getEnvFunc = getTimePseudoFuncEnv, .initFunc = NULL, diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index fd656a5ab7..2c2cd09da0 100644 --- a/source/libs/parser/src/parTranslater.c +++ 
b/source/libs/parser/src/parTranslater.c @@ -688,6 +688,10 @@ static bool isWindowPseudoColumnFunc(const SNode* pNode) { return (QUERY_NODE_FUNCTION == nodeType(pNode) && fmIsWindowPseudoColumnFunc(((SFunctionNode*)pNode)->funcId)); } +static bool isInterpPseudoColumnFunc(const SNode* pNode) { + return (QUERY_NODE_FUNCTION == nodeType(pNode) && fmIsInterpPseudoColumnFunc(((SFunctionNode*)pNode)->funcId)); +} + static bool isTimelineFunc(const SNode* pNode) { return (QUERY_NODE_FUNCTION == nodeType(pNode) && fmIsTimelineFunc(((SFunctionNode*)pNode)->funcId)); } @@ -1295,7 +1299,7 @@ static EDealRes translateOperator(STranslateContext* pCxt, SOperatorNode* pOp) { } static EDealRes haveVectorFunction(SNode* pNode, void* pContext) { - if (isAggFunc(pNode) || isIndefiniteRowsFunc(pNode) || isWindowPseudoColumnFunc(pNode)) { + if (isAggFunc(pNode) || isIndefiniteRowsFunc(pNode) || isWindowPseudoColumnFunc(pNode) || isInterpPseudoColumnFunc(pNode)) { *((bool*)pContext) = true; return DEAL_RES_END; } @@ -1522,6 +1526,20 @@ static int32_t translateInterpFunc(STranslateContext* pCxt, SFunctionNode* pFunc return TSDB_CODE_SUCCESS; } +static int32_t translateInterpPseudoColumnFunc(STranslateContext* pCxt, SFunctionNode* pFunc) { + if (!fmIsInterpPseudoColumnFunc(pFunc->funcId)) { + return TSDB_CODE_SUCCESS; + } + if (!isSelectStmt(pCxt->pCurrStmt) || NULL == ((SSelectStmt*)pCxt->pCurrStmt)->pWindow) { + return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_WINDOW_PC); + } + if (beforeWindow(pCxt->currClause)) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_WINDOW_PC, "There mustn't be %s", + pFunc->functionName); + } + return TSDB_CODE_SUCCESS; +} + static int32_t translateTimelineFunc(STranslateContext* pCxt, SFunctionNode* pFunc) { if (!fmIsTimelineFunc(pFunc->funcId)) { return TSDB_CODE_SUCCESS; From fbbd3e4bddd0e4f863f75595577b022c33158c5f Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Thu, 13 Apr 2023 14:05:04 +0800 Subject: [PATCH 
28/42] fix: fix illegal usage of _isfilled/_irowts --- source/libs/parser/src/parTranslater.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 2c2cd09da0..b1610cac53 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -1530,12 +1530,13 @@ static int32_t translateInterpPseudoColumnFunc(STranslateContext* pCxt, SFunctio if (!fmIsInterpPseudoColumnFunc(pFunc->funcId)) { return TSDB_CODE_SUCCESS; } - if (!isSelectStmt(pCxt->pCurrStmt) || NULL == ((SSelectStmt*)pCxt->pCurrStmt)->pWindow) { - return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_WINDOW_PC); + if (!isSelectStmt(pCxt->pCurrStmt)) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC, + "%s must be used in select statements", pFunc->functionName); } - if (beforeWindow(pCxt->currClause)) { - return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_WINDOW_PC, "There mustn't be %s", - pFunc->functionName); + if (pCxt->currClause == SQL_CLAUSE_WHERE) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_INTERP_CLAUSE, + "%s is not allowed in where clause", pFunc->functionName); } return TSDB_CODE_SUCCESS; } @@ -1837,6 +1838,9 @@ static int32_t translateNormalFunction(STranslateContext* pCxt, SFunctionNode* p if (TSDB_CODE_SUCCESS == code) { code = translateInterpFunc(pCxt, pFunc); } + if (TSDB_CODE_SUCCESS == code) { + code = translateInterpPseudoColumnFunc(pCxt, pFunc); + } if (TSDB_CODE_SUCCESS == code) { code = translateTimelineFunc(pCxt, pFunc); } From be3082c85e3255e2b2cd100ec912f503145eaf14 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Thu, 13 Apr 2023 14:36:16 +0800 Subject: [PATCH 29/42] add test cases --- tests/system-test/2-query/interp.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tests/system-test/2-query/interp.py b/tests/system-test/2-query/interp.py 
index 74b552dcc8..ddf3f2534d 100644 --- a/tests/system-test/2-query/interp.py +++ b/tests/system-test/2-query/interp.py @@ -2382,6 +2382,14 @@ class TDTestCase: tdSql.error(f"select interp('abcd') from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(null)") tdSql.error(f"select interp('中文字符') from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(null)") + # invalid pseudo column usage + tdSql.error(f"select interp(_irowts) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(null)") + tdSql.error(f"select interp(_isfilled) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(null)") + tdSql.error(f"select interp(c0) from {dbname}.{tbname} where _isfilled = true range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(null)") + tdSql.error(f"select interp(c0) from {dbname}.{tbname} where _irowts > 0 range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(null)") + + + tdLog.printNoPrefix("==========step13:stable cases") From 1f87ea5e45f5800369d63f24c6efdb443b2f237b Mon Sep 17 00:00:00 2001 From: Huo Linhe Date: Thu, 13 Apr 2023 15:08:49 +0800 Subject: [PATCH 30/42] chore: fix install.sh for explorer --- packaging/tools/install.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh index a1a95d9f30..d998223615 100755 --- a/packaging/tools/install.sh +++ b/packaging/tools/install.sh @@ -575,11 +575,11 @@ function install_config() { function install_share_etc() { [ ! 
-d ${script_dir}/share/etc ] && return for c in `ls ${script_dir}/share/etc/`; do - if [ -e /etc/$c ]; then - out=/etc/$c.new.`date +%F` + if [ -e /etc/${clientName2}/$c ]; then + out=/etc/${clientName2}/$c.new.`date +%F` ${csudo}cp -f ${script_dir}/share/etc/$c $out ||: else - ${csudo}cp -f ${script_dir}/share/etc/$c /etc/$c ||: + ${csudo}cp -f ${script_dir}/share/etc/$c /etc/${clientName2}/$c ||: fi done From b856131ad7e4cacdfc4b9b686e343facc24b8e50 Mon Sep 17 00:00:00 2001 From: Xuefeng Tan <1172915550@qq.com> Date: Thu, 13 Apr 2023 15:22:27 +0800 Subject: [PATCH 31/42] enh(docker): add debugging tools in TDengine image (#20908) --- packaging/docker/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/docker/Dockerfile b/packaging/docker/Dockerfile index 35bea0e65c..4b61a0cc0a 100644 --- a/packaging/docker/Dockerfile +++ b/packaging/docker/Dockerfile @@ -12,7 +12,7 @@ ENV TINI_VERSION v0.19.0 ADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini-${cpuType} /tini ENV DEBIAN_FRONTEND=noninteractive WORKDIR /root/ -RUN tar -zxf ${pkgFile} && cd /root/${dirName}/ && /bin/bash install.sh -e no && cd /root && rm /root/${pkgFile} && rm -rf /root/${dirName} && apt-get update && apt-get install -y locales tzdata netcat && locale-gen en_US.UTF-8 && apt-get clean && rm -rf /var/lib/apt/lists/ && chmod +x /tini +RUN tar -zxf ${pkgFile} && cd /root/${dirName}/ && /bin/bash install.sh -e no && cd /root && rm /root/${pkgFile} && rm -rf /root/${dirName} && apt-get update && apt-get install -y locales tzdata netcat curl gdb vim tmux less net-tools valgrind && locale-gen en_US.UTF-8 && apt-get clean && rm -rf /var/lib/apt/lists/ && chmod +x /tini ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/lib" \ LC_CTYPE=en_US.UTF-8 \ From 4ae792384b0375db2b5c62e76ac2aeb611bc7f8a Mon Sep 17 00:00:00 2001 From: xinsheng Ren <285808407@qq.com> Date: Thu, 13 Apr 2023 15:22:39 +0800 Subject: [PATCH 32/42] Doc/xsren/install des on mac base main 
(#20910) * install desc on mac * echo > exception --------- Co-authored-by: facetosea <25808407@qq.com> --- packaging/tools/mac_before_install.txt | 8 ++++---- packaging/tools/mac_before_install_client.txt | 12 ++++++------ 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/packaging/tools/mac_before_install.txt b/packaging/tools/mac_before_install.txt index a428c612b2..4ce2374b7f 100644 --- a/packaging/tools/mac_before_install.txt +++ b/packaging/tools/mac_before_install.txt @@ -1,9 +1,9 @@ TDengine is an open-source, cloud-native time-series database optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. With its built-in caching, stream processing, and data subscription capabilities, TDengine offers a simplified solution for time-series data processing. -To configure TDengine : edit /etc/taos/taos.cfg -To start service : launchctl start com.tdengine.taosd -To start Taos Adapter : launchctl start com.tdengine.taosadapter -To access TDengine : use taos in shell +• To configure TDengine, edit /etc/taos/taos.cfg +• To start service, run launchctl start com.tdengine.taosd +• To start Taos Adapter, run launchctl start com.tdengine.taosadapter +• To access TDengine from your local machine, run taos If you're experiencing problems installing TDengine, check the file /var/log/taos/tdengine_install.log to help troubleshoot the installation. diff --git a/packaging/tools/mac_before_install_client.txt b/packaging/tools/mac_before_install_client.txt index 0457d73c49..cce8191667 100644 --- a/packaging/tools/mac_before_install_client.txt +++ b/packaging/tools/mac_before_install_client.txt @@ -1,9 +1,9 @@ TDengine is an open-source, cloud-native time-series database optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. With its built-in caching, stream processing, and data subscription capabilities, TDengine offers a simplified solution for time-series data processing. 
-Once it's installed, please take the steps below: -1: open a terminal/shell in Mac -2: if connecting to Cloud Service, follow the instructions on your cloud service account and configure the environment variable -3: if connecting to another TDengine Service, you can also view help information via "taos --help" -4: execute command taos +After the installation process is complete, perform the following steps to start using TDengine: +1: Open Terminal on your Mac. +2: To connect to a TDengine server using the default settings and credentials, run the taos command. +3: To connect to a TDengine server using custom settings or credentials, run taos --help for more information. +4: To connect to TDengine Cloud, follow the instructions on the Tools - TDengine CLI page in your TDengine Cloud account. -If you're experiencing problems installing TDengine, check the file /var/log/taos/tdengine_install.log to help troubleshoot the installation. +If any issues occur during installation, check the /var/log/taos/tdengine_install.log file to troubleshoot. 
\ No newline at end of file From a8c4cedb2c17f5cd5d285b48dc423f1f67618872 Mon Sep 17 00:00:00 2001 From: Huo Linhe Date: Thu, 13 Apr 2023 15:54:50 +0800 Subject: [PATCH 33/42] chore: fix packaging install.sh for explorer OEM --- packaging/tools/install.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh index d998223615..2eaebc2490 100755 --- a/packaging/tools/install.sh +++ b/packaging/tools/install.sh @@ -579,6 +579,7 @@ function install_share_etc() { out=/etc/${clientName2}/$c.new.`date +%F` ${csudo}cp -f ${script_dir}/share/etc/$c $out ||: else + mkdir -p /etc/${clientName2} >/dev/null 2>/dev/null ||: ${csudo}cp -f ${script_dir}/share/etc/$c /etc/${clientName2}/$c ||: fi done From e6ce8adbd932e3ec095421d196fdd7ed580fb6b9 Mon Sep 17 00:00:00 2001 From: Xuefeng Tan <1172915550@qq.com> Date: Thu, 13 Apr 2023 15:56:01 +0800 Subject: [PATCH 34/42] enh(docker): add debugging tools in TDengine image (#20909) --- packaging/docker/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/docker/Dockerfile b/packaging/docker/Dockerfile index 35bea0e65c..7d90beac1c 100644 --- a/packaging/docker/Dockerfile +++ b/packaging/docker/Dockerfile @@ -12,7 +12,7 @@ ENV TINI_VERSION v0.19.0 ADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini-${cpuType} /tini ENV DEBIAN_FRONTEND=noninteractive WORKDIR /root/ -RUN tar -zxf ${pkgFile} && cd /root/${dirName}/ && /bin/bash install.sh -e no && cd /root && rm /root/${pkgFile} && rm -rf /root/${dirName} && apt-get update && apt-get install -y locales tzdata netcat && locale-gen en_US.UTF-8 && apt-get clean && rm -rf /var/lib/apt/lists/ && chmod +x /tini +RUN tar -zxf ${pkgFile} && cd /root/${dirName}/ && /bin/bash install.sh -e no && cd /root && rm /root/${pkgFile} && rm -rf /root/${dirName} && apt-get update && apt-get install -y locales tzdata netcat curl gdb vim tmux less net-tools valgrind && locale-gen en_US.UTF-8 && apt-get 
clean && rm -rf /var/lib/apt/lists/ && chmod +x /tini ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/lib" \ LC_CTYPE=en_US.UTF-8 \ From 1f8df16048b0bd90832b04e142d4aaa6d2f3b021 Mon Sep 17 00:00:00 2001 From: Huo Linhe Date: Thu, 13 Apr 2023 16:28:07 +0800 Subject: [PATCH 35/42] chore: fix install.sh when not root --- packaging/tools/install.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh index 2eaebc2490..1b47b10520 100755 --- a/packaging/tools/install.sh +++ b/packaging/tools/install.sh @@ -579,7 +579,7 @@ function install_share_etc() { out=/etc/${clientName2}/$c.new.`date +%F` ${csudo}cp -f ${script_dir}/share/etc/$c $out ||: else - mkdir -p /etc/${clientName2} >/dev/null 2>/dev/null ||: + ${csudo}mkdir -p /etc/${clientName2} >/dev/null 2>/dev/null ||: ${csudo}cp -f ${script_dir}/share/etc/$c /etc/${clientName2}/$c ||: fi done From 9001b3950ea67f99dac40941ed1f5293887ab70d Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Thu, 13 Apr 2023 21:05:25 +0800 Subject: [PATCH 36/42] fix: improve taos shell user experience for connecting cloud (#20912) --- compile_flags.txt | 9 +++++++++ tools/shell/src/shellArguments.c | 18 +++++++++++------- tools/shell/src/shellWebsocket.c | 2 +- 3 files changed, 21 insertions(+), 8 deletions(-) create mode 100644 compile_flags.txt diff --git a/compile_flags.txt b/compile_flags.txt new file mode 100644 index 0000000000..c61f9701ab --- /dev/null +++ b/compile_flags.txt @@ -0,0 +1,9 @@ +-DLINUX +-DWEBSOCKET +-I/usr/include +-Iinclude +-Iinclude/os +-Iinclude/common +-Iinclude/util +-Iinclude/libs/transport +-Itools/shell/inc diff --git a/tools/shell/src/shellArguments.c b/tools/shell/src/shellArguments.c index 8300e2e1e3..52cb524e3b 100644 --- a/tools/shell/src/shellArguments.c +++ b/tools/shell/src/shellArguments.c @@ -243,8 +243,8 @@ int32_t shellParseArgsWithoutArgp(int argc, char *argv[]) { SShellArgs *pArgs = &shell.args; for (int i = 1; i < argc; i++) { - if 
(strcmp(argv[i], "--help") == 0 || strcmp(argv[i], "--usage") == 0 || strcmp(argv[i], "-?") == 0 || - strcmp(argv[i], "/?") == 0) { + if (strcmp(argv[i], "--help") == 0 || strcmp(argv[i], "--usage") == 0 + || strcmp(argv[i], "-?") == 0 || strcmp(argv[i], "/?") == 0) { shellParseSingleOpt('?', NULL); return 0; } @@ -260,8 +260,10 @@ int32_t shellParseArgsWithoutArgp(int argc, char *argv[]) { return -1; } - if (key[1] == 'h' || key[1] == 'P' || key[1] == 'u' || key[1] == 'a' || key[1] == 'c' || key[1] == 's' || - key[1] == 'f' || key[1] == 'd' || key[1] == 'w' || key[1] == 'n' || key[1] == 'l' || key[1] == 'N' + if (key[1] == 'h' || key[1] == 'P' || key[1] == 'u' + || key[1] == 'a' || key[1] == 'c' || key[1] == 's' + || key[1] == 'f' || key[1] == 'd' || key[1] == 'w' + || key[1] == 'n' || key[1] == 'l' || key[1] == 'N' #ifdef WEBSOCKET || key[1] == 'E' || key[1] == 'T' #endif @@ -277,10 +279,12 @@ int32_t shellParseArgsWithoutArgp(int argc, char *argv[]) { } shellParseSingleOpt(key[1], val); i++; - } else if (key[1] == 'p' || key[1] == 'A' || key[1] == 'C' || key[1] == 'r' || key[1] == 'k' || key[1] == 't' || - key[1] == 'V' || key[1] == '?' || key[1] == 1 + } else if (key[1] == 'p' || key[1] == 'A' || key[1] == 'C' + || key[1] == 'r' || key[1] == 'k' + || key[1] == 't' || key[1] == 'V' + || key[1] == '?' || key[1] == 1 #ifdef WEBSOCKET - || key[1] == 'R' + ||key[1] == 'R' #endif ) { shellParseSingleOpt(key[1], NULL); diff --git a/tools/shell/src/shellWebsocket.c b/tools/shell/src/shellWebsocket.c index 1d81ce4b2f..d8920cb4c3 100644 --- a/tools/shell/src/shellWebsocket.c +++ b/tools/shell/src/shellWebsocket.c @@ -24,7 +24,7 @@ int shell_conn_ws_server(bool first) { ((dsnLen-SHELL_WS_DSN_MASK) > SHELL_WS_DSN_BUFF)? 
SHELL_WS_DSN_BUFF:(dsnLen-SHELL_WS_DSN_MASK), "%s", shell.args.dsn); - fprintf(stdout, "trying to connect %s*** ", cuttedDsn); + fprintf(stdout, "trying to connect %s****** ", cuttedDsn); fflush(stdout); for (int i = 0; i < shell.args.timeout; i++) { shell.ws_conn = ws_connect_with_dsn(shell.args.dsn); From 63e49706f3a3ff7d1ad9889204730afed59156e3 Mon Sep 17 00:00:00 2001 From: Xuefeng Tan <1172915550@qq.com> Date: Fri, 14 Apr 2023 09:36:07 +0800 Subject: [PATCH 37/42] docs(taosAdapter): the automatic database creation behavior for the schemaless protocol can be configured (#20929) --- docs/en/14-reference/04-taosadapter.md | 177 +++++++++++++------------ docs/zh/14-reference/04-taosadapter.md | 177 +++++++++++++------------ 2 files changed, 178 insertions(+), 176 deletions(-) diff --git a/docs/en/14-reference/04-taosadapter.md b/docs/en/14-reference/04-taosadapter.md index 7ab894a1c7..6bc49768c6 100644 --- a/docs/en/14-reference/04-taosadapter.md +++ b/docs/en/14-reference/04-taosadapter.md @@ -54,94 +54,91 @@ Command-line arguments take precedence over environment variables over configura ```shell Usage of taosAdapter: - --collectd.db string collectd db name. Env "TAOS_ADAPTER_COLLECTD_DB" (default "collectd") - --collectd.enable enable collectd. Env "TAOS_ADAPTER_COLLECTD_ENABLE" (default true) - --collectd.password string collectd password. Env "TAOS_ADAPTER_COLLECTD_PASSWORD" (default "taosdata") - --collectd.port int collectd server port. Env "TAOS_ADAPTER_COLLECTD_PORT" (default 6045) - --collectd.ttl int collectd data ttl. Env "TAOS_ADAPTER_COLLECTD_TTL" - --collectd.user string collectd user. Env "TAOS_ADAPTER_COLLECTD_USER" (default "root") - --collectd.worker int collectd write worker. Env "TAOS_ADAPTER_COLLECTD_WORKER" (default 10) - -c, --config string config path default /etc/taos/taosadapter.toml - --cors.allowAllOrigins cors allow all origins. Env "TAOS_ADAPTER_CORS_ALLOW_ALL_ORIGINS" (default true) - --cors.allowCredentials cors allow credentials. 
Env "TAOS_ADAPTER_CORS_ALLOW_Credentials" - --cors.allowHeaders stringArray cors allow HEADERS. Env "TAOS_ADAPTER_ALLOW_HEADERS" - --cors.allowOrigins stringArray cors allow origins. Env "TAOS_ADAPTER_ALLOW_ORIGINS" - --cors.allowWebSockets cors allow WebSockets. Env "TAOS_ADAPTER_CORS_ALLOW_WebSockets" - --cors.exposeHeaders stringArray cors expose headers. Env "TAOS_ADAPTER_Expose_Headers" - --debug enable debug mode. Env "TAOS_ADAPTER_DEBUG" (default true) - --help Print this help message and exit - --httpCodeServerError Use a non-200 http status code when taosd returns an error. Env "TAOS_ADAPTER_HTTP_CODE_SERVER_ERROR" - --influxdb.enable enable influxdb. Env "TAOS_ADAPTER_INFLUXDB_ENABLE" (default true) - --log.enableRecordHttpSql whether to record http sql. Env "TAOS_ADAPTER_LOG_ENABLE_RECORD_HTTP_SQL" - --log.path string log path. Env "TAOS_ADAPTER_LOG_PATH" (default "/var/log/taos") - --log.rotationCount uint log rotation count. Env "TAOS_ADAPTER_LOG_ROTATION_COUNT" (default 30) - --log.rotationSize string log rotation size(KB MB GB), must be a positive integer. Env "TAOS_ADAPTER_LOG_ROTATION_SIZE" (default "1GB") - --log.rotationTime duration log rotation time. Env "TAOS_ADAPTER_LOG_ROTATION_TIME" (default 24h0m0s) - --log.sqlRotationCount uint record sql log rotation count. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_COUNT" (default 2) - --log.sqlRotationSize string record sql log rotation size(KB MB GB), must be a positive integer. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_SIZE" (default "1GB") - --log.sqlRotationTime duration record sql log rotation time. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_TIME" (default 24h0m0s) - --logLevel string log level (panic fatal error warn warning info debug trace). Env "TAOS_ADAPTER_LOG_LEVEL" (default "info") - --monitor.collectDuration duration Set monitor duration. Env "TAOS_ADAPTER_MONITOR_COLLECT_DURATION" (default 3s) - --monitor.disable Whether to disable monitoring. 
Env "TAOS_ADAPTER_MONITOR_DISABLE" - --monitor.disableCollectClientIP Whether to disable collecting clientIP. Env "TAOS_ADAPTER_MONITOR_DISABLE_COLLECT_CLIENT_IP" - --monitor.identity string The identity of the current instance, or 'hostname:port' if it is empty. Env "TAOS_ADAPTER_MONITOR_IDENTITY" - --monitor.incgroup Whether running in cgroup. Env "TAOS_ADAPTER_MONITOR_INCGROUP" - --monitor.password string TDengine password. Env "TAOS_ADAPTER_MONITOR_PASSWORD" (default "taosdata") - --monitor.pauseAllMemoryThreshold float Memory percentage threshold for pause all. Env "TAOS_ADAPTER_MONITOR_PAUSE_ALL_MEMORY_THRESHOLD" (default 80) - --monitor.pauseQueryMemoryThreshold float Memory percentage threshold for pause query. Env "TAOS_ADAPTER_MONITOR_PAUSE_QUERY_MEMORY_THRESHOLD" (default 70) - --monitor.user string TDengine user. Env "TAOS_ADAPTER_MONITOR_USER" (default "root") - --monitor.writeInterval duration Set write to TDengine interval. Env "TAOS_ADAPTER_MONITOR_WRITE_INTERVAL" (default 30s) - --monitor.writeToTD Whether write metrics to TDengine. Env "TAOS_ADAPTER_MONITOR_WRITE_TO_TD" - --node_exporter.caCertFile string node_exporter ca cert file path. Env "TAOS_ADAPTER_NODE_EXPORTER_CA_CERT_FILE" - --node_exporter.certFile string node_exporter cert file path. Env "TAOS_ADAPTER_NODE_EXPORTER_CERT_FILE" - --node_exporter.db string node_exporter db name. Env "TAOS_ADAPTER_NODE_EXPORTER_DB" (default "node_exporter") - --node_exporter.enable enable node_exporter. Env "TAOS_ADAPTER_NODE_EXPORTER_ENABLE" - --node_exporter.gatherDuration duration node_exporter gather duration. Env "TAOS_ADAPTER_NODE_EXPORTER_GATHER_DURATION" (default 5s) - --node_exporter.httpBearerTokenString string node_exporter http bearer token. Env "TAOS_ADAPTER_NODE_EXPORTER_HTTP_BEARER_TOKEN_STRING" - --node_exporter.httpPassword string node_exporter http password. Env "TAOS_ADAPTER_NODE_EXPORTER_HTTP_PASSWORD" - --node_exporter.httpUsername string node_exporter http username. 
Env "TAOS_ADAPTER_NODE_EXPORTER_HTTP_USERNAME" - --node_exporter.insecureSkipVerify node_exporter skip ssl check. Env "TAOS_ADAPTER_NODE_EXPORTER_INSECURE_SKIP_VERIFY" (default true) - --node_exporter.keyFile string node_exporter cert key file path. Env "TAOS_ADAPTER_NODE_EXPORTER_KEY_FILE" - --node_exporter.password string node_exporter password. Env "TAOS_ADAPTER_NODE_EXPORTER_PASSWORD" (default "taosdata") - --node_exporter.responseTimeout duration node_exporter response timeout. Env "TAOS_ADAPTER_NODE_EXPORTER_RESPONSE_TIMEOUT" (default 5s) - --node_exporter.ttl int node_exporter data ttl. Env "TAOS_ADAPTER_NODE_EXPORTER_TTL" - --node_exporter.urls strings node_exporter urls. Env "TAOS_ADAPTER_NODE_EXPORTER_URLS" (default [http://localhost:9100]) - --node_exporter.user string node_exporter user. Env "TAOS_ADAPTER_NODE_EXPORTER_USER" (default "root") - --opentsdb.enable enable opentsdb. Env "TAOS_ADAPTER_OPENTSDB_ENABLE" (default true) - --opentsdb_telnet.batchSize int opentsdb_telnet batch size. Env "TAOS_ADAPTER_OPENTSDB_TELNET_BATCH_SIZE" (default 1) - --opentsdb_telnet.dbs strings opentsdb_telnet db names. Env "TAOS_ADAPTER_OPENTSDB_TELNET_DBS" (default [opentsdb_telnet,collectd_tsdb,icinga2_tsdb,tcollector_tsdb]) - --opentsdb_telnet.enable enable opentsdb telnet,warning: without auth info(default false). Env "TAOS_ADAPTER_OPENTSDB_TELNET_ENABLE" - --opentsdb_telnet.flushInterval duration opentsdb_telnet flush interval (0s means not valid) . Env "TAOS_ADAPTER_OPENTSDB_TELNET_FLUSH_INTERVAL" - --opentsdb_telnet.maxTCPConnections int max tcp connections. Env "TAOS_ADAPTER_OPENTSDB_TELNET_MAX_TCP_CONNECTIONS" (default 250) - --opentsdb_telnet.password string opentsdb_telnet password. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PASSWORD" (default "taosdata") - --opentsdb_telnet.ports ints opentsdb telnet tcp port. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PORTS" (default [6046,6047,6048,6049]) - --opentsdb_telnet.tcpKeepAlive enable tcp keep alive. 
Env "TAOS_ADAPTER_OPENTSDB_TELNET_TCP_KEEP_ALIVE" - --opentsdb_telnet.ttl int opentsdb_telnet data ttl. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TTL" - --opentsdb_telnet.user string opentsdb_telnet user. Env "TAOS_ADAPTER_OPENTSDB_TELNET_USER" (default "root") - --pool.idleTimeout duration Set idle connection timeout. Env "TAOS_ADAPTER_POOL_IDLE_TIMEOUT" - --pool.maxConnect int max connections to taosd. Env "TAOS_ADAPTER_POOL_MAX_CONNECT" - --pool.maxIdle int max idle connections to taosd. Env "TAOS_ADAPTER_POOL_MAX_IDLE" - -P, --port int http port. Env "TAOS_ADAPTER_PORT" (default 6041) - --prometheus.enable enable prometheus. Env "TAOS_ADAPTER_PROMETHEUS_ENABLE" (default true) - --restfulRowLimit int restful returns the maximum number of rows (-1 means no limit). Env "TAOS_ADAPTER_RESTFUL_ROW_LIMIT" (default -1) - --statsd.allowPendingMessages int statsd allow pending messages. Env "TAOS_ADAPTER_STATSD_ALLOW_PENDING_MESSAGES" (default 50000) - --statsd.db string statsd db name. Env "TAOS_ADAPTER_STATSD_DB" (default "statsd") - --statsd.deleteCounters statsd delete counter cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_COUNTERS" (default true) - --statsd.deleteGauges statsd delete gauge cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_GAUGES" (default true) - --statsd.deleteSets statsd delete set cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_SETS" (default true) - --statsd.deleteTimings statsd delete timing cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_TIMINGS" (default true) - --statsd.enable enable statsd. Env "TAOS_ADAPTER_STATSD_ENABLE" (default true) - --statsd.gatherInterval duration statsd gather interval. Env "TAOS_ADAPTER_STATSD_GATHER_INTERVAL" (default 5s) - --statsd.maxTCPConnections int statsd max tcp connections. Env "TAOS_ADAPTER_STATSD_MAX_TCP_CONNECTIONS" (default 250) - --statsd.password string statsd password. Env "TAOS_ADAPTER_STATSD_PASSWORD" (default "taosdata") - --statsd.port int statsd server port. 
Env "TAOS_ADAPTER_STATSD_PORT" (default 6044) - --statsd.protocol string statsd protocol [tcp or udp]. Env "TAOS_ADAPTER_STATSD_PROTOCOL" (default "udp") - --statsd.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_STATSD_TCP_KEEP_ALIVE" - --statsd.ttl int statsd data ttl. Env "TAOS_ADAPTER_STATSD_TTL" - --statsd.user string statsd user. Env "TAOS_ADAPTER_STATSD_USER" (default "root") - --statsd.worker int statsd write worker. Env "TAOS_ADAPTER_STATSD_WORKER" (default 10) - --taosConfigDir string load taos client config path. Env "TAOS_ADAPTER_TAOS_CONFIG_FILE" - --version Print the version and exit + --collectd.db string collectd db name. Env "TAOS_ADAPTER_COLLECTD_DB" (default "collectd") + --collectd.enable enable collectd. Env "TAOS_ADAPTER_COLLECTD_ENABLE" (default true) + --collectd.password string collectd password. Env "TAOS_ADAPTER_COLLECTD_PASSWORD" (default "taosdata") + --collectd.port int collectd server port. Env "TAOS_ADAPTER_COLLECTD_PORT" (default 6045) + --collectd.ttl int collectd data ttl. Env "TAOS_ADAPTER_COLLECTD_TTL" + --collectd.user string collectd user. Env "TAOS_ADAPTER_COLLECTD_USER" (default "root") + --collectd.worker int collectd write worker. Env "TAOS_ADAPTER_COLLECTD_WORKER" (default 10) + -c, --config string config path default /etc/taos/taosadapter.toml + --cors.allowAllOrigins cors allow all origins. Env "TAOS_ADAPTER_CORS_ALLOW_ALL_ORIGINS" (default true) + --cors.allowCredentials cors allow credentials. Env "TAOS_ADAPTER_CORS_ALLOW_Credentials" + --cors.allowHeaders stringArray cors allow HEADERS. Env "TAOS_ADAPTER_ALLOW_HEADERS" + --cors.allowOrigins stringArray cors allow origins. Env "TAOS_ADAPTER_ALLOW_ORIGINS" + --cors.allowWebSockets cors allow WebSockets. Env "TAOS_ADAPTER_CORS_ALLOW_WebSockets" --cors.exposeHeaders stringArray cors expose headers. Env "TAOS_ADAPTER_Expose_Headers" + --debug enable debug mode. 
Env "TAOS_ADAPTER_DEBUG" (default true) + --help Print this help message and exit + --httpCodeServerError Use a non-200 http status code when server returns an error. Env "TAOS_ADAPTER_HTTP_CODE_SERVER_ERROR" + --influxdb.enable enable influxdb. Env "TAOS_ADAPTER_INFLUXDB_ENABLE" (default true) + --log.enableRecordHttpSql whether to record http sql. Env "TAOS_ADAPTER_LOG_ENABLE_RECORD_HTTP_SQL" + --log.path string log path. Env "TAOS_ADAPTER_LOG_PATH" (default "/var/log/taos") --log.rotationCount uint log rotation count. Env "TAOS_ADAPTER_LOG_ROTATION_COUNT" (default 30) + --log.rotationSize string log rotation size(KB MB GB), must be a positive integer. Env "TAOS_ADAPTER_LOG_ROTATION_SIZE" (default "1GB") + --log.rotationTime duration log rotation time. Env "TAOS_ADAPTER_LOG_ROTATION_TIME" (default 24h0m0s) + --log.sqlRotationCount uint record sql log rotation count. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_COUNT" (default 2) + --log.sqlRotationSize string record sql log rotation size(KB MB GB), must be a positive integer. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_SIZE" (default "1GB") + --log.sqlRotationTime duration record sql log rotation time. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_TIME" (default 24h0m0s) + --logLevel string log level (panic fatal error warn warning info debug trace). Env "TAOS_ADAPTER_LOG_LEVEL" (default "info") + --monitor.collectDuration duration Set monitor duration. Env "TAOS_ADAPTER_MONITOR_COLLECT_DURATION" (default 3s) + --monitor.disable Whether to disable monitoring. Env "TAOS_ADAPTER_MONITOR_DISABLE" + --monitor.disableCollectClientIP Whether to disable collecting clientIP. Env "TAOS_ADAPTER_MONITOR_DISABLE_COLLECT_CLIENT_IP" + --monitor.identity string The identity of the current instance, or 'hostname:port' if it is empty. Env "TAOS_ADAPTER_MONITOR_IDENTITY" + --monitor.incgroup Whether running in cgroup. Env "TAOS_ADAPTER_MONITOR_INCGROUP" + --monitor.password string TDengine password. 
Env "TAOS_ADAPTER_MONITOR_PASSWORD" (default "taosdata") + --monitor.pauseAllMemoryThreshold float Memory percentage threshold for pause all. Env "TAOS_ADAPTER_MONITOR_PAUSE_ALL_MEMORY_THRESHOLD" (default 80) + --monitor.pauseQueryMemoryThreshold float Memory percentage threshold for pause query. Env "TAOS_ADAPTER_MONITOR_PAUSE_QUERY_MEMORY_THRESHOLD" (default 70) + --monitor.user string TDengine user. Env "TAOS_ADAPTER_MONITOR_USER" (default "root") --monitor.writeInterval duration Set write to TDengine interval. Env "TAOS_ADAPTER_MONITOR_WRITE_INTERVAL" (default 30s) + --monitor.writeToTD Whether write metrics to TDengine. Env "TAOS_ADAPTER_MONITOR_WRITE_TO_TD" + --node_exporter.caCertFile string node_exporter ca cert file path. Env "TAOS_ADAPTER_NODE_EXPORTER_CA_CERT_FILE" + --node_exporter.certFile string node_exporter cert file path. Env "TAOS_ADAPTER_NODE_EXPORTER_CERT_FILE" + --node_exporter.db string node_exporter db name. Env "TAOS_ADAPTER_NODE_EXPORTER_DB" (default "node_exporter") + --node_exporter.enable enable node_exporter. Env "TAOS_ADAPTER_NODE_EXPORTER_ENABLE" + --node_exporter.gatherDuration duration node_exporter gather duration. Env "TAOS_ADAPTER_NODE_EXPORTER_GATHER_DURATION" (default 5s) + --node_exporter.httpBearerTokenString string node_exporter http bearer token. Env "TAOS_ADAPTER_NODE_EXPORTER_HTTP_BEARER_TOKEN_STRING" + --node_exporter.httpPassword string node_exporter http password. Env "TAOS_ADAPTER_NODE_EXPORTER_HTTP_PASSWORD" + --node_exporter.httpUsername string node_exporter http username. Env "TAOS_ADAPTER_NODE_EXPORTER_HTTP_USERNAME" + --node_exporter.insecureSkipVerify node_exporter skip ssl check. Env "TAOS_ADAPTER_NODE_EXPORTER_INSECURE_SKIP_VERIFY" (default true) + --node_exporter.keyFile string node_exporter cert key file path. Env "TAOS_ADAPTER_NODE_EXPORTER_KEY_FILE" + --node_exporter.password string node_exporter password. 
Env "TAOS_ADAPTER_NODE_EXPORTER_PASSWORD" (default "taosdata") + --node_exporter.responseTimeout duration node_exporter response timeout. Env "TAOS_ADAPTER_NODE_EXPORTER_RESPONSE_TIMEOUT" (default 5s) + --node_exporter.ttl int node_exporter data ttl. Env "TAOS_ADAPTER_NODE_EXPORTER_TTL" + --node_exporter.urls strings node_exporter urls. Env "TAOS_ADAPTER_NODE_EXPORTER_URLS" (default [http://localhost:9100]) + --node_exporter.user string node_exporter user. Env "TAOS_ADAPTER_NODE_EXPORTER_USER" (default "root") + --opentsdb.enable enable opentsdb. Env "TAOS_ADAPTER_OPENTSDB_ENABLE" (default true) + --opentsdb_telnet.batchSize int opentsdb_telnet batch size. Env "TAOS_ADAPTER_OPENTSDB_TELNET_BATCH_SIZE" (default 1) + --opentsdb_telnet.dbs strings opentsdb_telnet db names. Env "TAOS_ADAPTER_OPENTSDB_TELNET_DBS" (default [opentsdb_telnet,collectd_tsdb,icinga2_tsdb,tcollector_tsdb]) + --opentsdb_telnet.enable enable opentsdb telnet,warning: without auth info(default false). Env "TAOS_ADAPTER_OPENTSDB_TELNET_ENABLE" + --opentsdb_telnet.flushInterval duration opentsdb_telnet flush interval (0s means not valid) . Env "TAOS_ADAPTER_OPENTSDB_TELNET_FLUSH_INTERVAL" + --opentsdb_telnet.maxTCPConnections int max tcp connections. Env "TAOS_ADAPTER_OPENTSDB_TELNET_MAX_TCP_CONNECTIONS" (default 250) + --opentsdb_telnet.password string opentsdb_telnet password. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PASSWORD" (default "taosdata") + --opentsdb_telnet.ports ints opentsdb telnet tcp port. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PORTS" (default [6046,6047,6048,6049]) + --opentsdb_telnet.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TCP_KEEP_ALIVE" + --opentsdb_telnet.ttl int opentsdb_telnet data ttl. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TTL" + --opentsdb_telnet.user string opentsdb_telnet user. Env "TAOS_ADAPTER_OPENTSDB_TELNET_USER" (default "root") + --pool.idleTimeout duration Set idle connection timeout. 
Env "TAOS_ADAPTER_POOL_IDLE_TIMEOUT" + --pool.maxConnect int max connections to server. Env "TAOS_ADAPTER_POOL_MAX_CONNECT" + --pool.maxIdle int max idle connections to server. Env "TAOS_ADAPTER_POOL_MAX_IDLE" + -P, --port int http port. Env "TAOS_ADAPTER_PORT" (default 6041) + --prometheus.enable enable prometheus. Env "TAOS_ADAPTER_PROMETHEUS_ENABLE" (default true) + --restfulRowLimit int restful returns the maximum number of rows (-1 means no limit). Env "TAOS_ADAPTER_RESTFUL_ROW_LIMIT" (default -1) + --smlAutoCreateDB Whether to automatically create db when writing with schemaless. Env "TAOS_ADAPTER_SML_AUTO_CREATE_DB" + --statsd.allowPendingMessages int statsd allow pending messages. Env "TAOS_ADAPTER_STATSD_ALLOW_PENDING_MESSAGES" (default 50000) + --statsd.db string statsd db name. Env "TAOS_ADAPTER_STATSD_DB" (default "statsd") --statsd.deleteCounters statsd delete counter cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_COUNTERS" (default true) + --statsd.deleteGauges statsd delete gauge cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_GAUGES" (default true) + --statsd.deleteSets statsd delete set cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_SETS" (default true) + --statsd.deleteTimings statsd delete timing cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_TIMINGS" (default true) + --statsd.enable enable statsd. Env "TAOS_ADAPTER_STATSD_ENABLE" (default true) + --statsd.gatherInterval duration statsd gather interval. Env "TAOS_ADAPTER_STATSD_GATHER_INTERVAL" (default 5s) + --statsd.maxTCPConnections int statsd max tcp connections. Env "TAOS_ADAPTER_STATSD_MAX_TCP_CONNECTIONS" (default 250) + --statsd.password string statsd password. Env "TAOS_ADAPTER_STATSD_PASSWORD" (default "taosdata") + --statsd.port int statsd server port. Env "TAOS_ADAPTER_STATSD_PORT" (default 6044) + --statsd.protocol string statsd protocol [tcp or udp]. Env "TAOS_ADAPTER_STATSD_PROTOCOL" (default "udp") + --statsd.tcpKeepAlive enable tcp keep alive. 
Env "TAOS_ADAPTER_STATSD_TCP_KEEP_ALIVE" --statsd.ttl int statsd data ttl. Env "TAOS_ADAPTER_STATSD_TTL" + --statsd.user string statsd user. Env "TAOS_ADAPTER_STATSD_USER" (default "root") + --statsd.worker int statsd write worker. Env "TAOS_ADAPTER_STATSD_WORKER" (default 10) + --taosConfigDir string load taos client config path. Env "TAOS_ADAPTER_TAOS_CONFIG_FILE" + --tmq.releaseIntervalMultiplierForAutocommit int When set to autocommit, the interval for message release is a multiple of the autocommit interval, with a default value of 2 and a minimum value of 1 and a maximum value of 10. Env "TAOS_ADAPTER_TMQ_RELEASE_INTERVAL_MULTIPLIER_FOR_AUTOCOMMIT" (default 2) + --version Print the version and exit ``` Note: @@ -332,6 +329,10 @@ This parameter controls the number of results returned by the following interfac taosAdapter uses the parameter `httpCodeServerError` to set whether to return a non-200 http status code when the C interface returns an error. When set to true, different http status codes will be returned according to the error code returned by C. For details, see [RESTful API](https://docs.tdengine.com/reference/rest-api/) HTTP Response Code chapter. +## Configure whether schemaless writes automatically create DBs + +Starting from version 3.0.4.0, taosAdapter provides the parameter "smlAutoCreateDB" to control whether to automatically create DBs when writing with the schemaless protocol. The default value is false, which means that the DB will not be automatically created and the user needs to manually create the DB before performing schemaless writing. + ## Troubleshooting You can check the taosAdapter running status with the `systemctl status taosadapter` command. 
diff --git a/docs/zh/14-reference/04-taosadapter.md b/docs/zh/14-reference/04-taosadapter.md index b8c5f9d647..a10b5b55bc 100644 --- a/docs/zh/14-reference/04-taosadapter.md +++ b/docs/zh/14-reference/04-taosadapter.md @@ -54,94 +54,91 @@ taosAdapter 支持通过命令行参数、环境变量和配置文件来进行 ```shell Usage of taosAdapter: - --collectd.db string collectd db name. Env "TAOS_ADAPTER_COLLECTD_DB" (default "collectd") - --collectd.enable enable collectd. Env "TAOS_ADAPTER_COLLECTD_ENABLE" (default true) - --collectd.password string collectd password. Env "TAOS_ADAPTER_COLLECTD_PASSWORD" (default "taosdata") - --collectd.port int collectd server port. Env "TAOS_ADAPTER_COLLECTD_PORT" (default 6045) - --collectd.ttl int collectd data ttl. Env "TAOS_ADAPTER_COLLECTD_TTL" - --collectd.user string collectd user. Env "TAOS_ADAPTER_COLLECTD_USER" (default "root") - --collectd.worker int collectd write worker. Env "TAOS_ADAPTER_COLLECTD_WORKER" (default 10) - -c, --config string config path default /etc/taos/taosadapter.toml - --cors.allowAllOrigins cors allow all origins. Env "TAOS_ADAPTER_CORS_ALLOW_ALL_ORIGINS" (default true) - --cors.allowCredentials cors allow credentials. Env "TAOS_ADAPTER_CORS_ALLOW_Credentials" - --cors.allowHeaders stringArray cors allow HEADERS. Env "TAOS_ADAPTER_ALLOW_HEADERS" - --cors.allowOrigins stringArray cors allow origins. Env "TAOS_ADAPTER_ALLOW_ORIGINS" - --cors.allowWebSockets cors allow WebSockets. Env "TAOS_ADAPTER_CORS_ALLOW_WebSockets" - --cors.exposeHeaders stringArray cors expose headers. Env "TAOS_ADAPTER_Expose_Headers" - --debug enable debug mode. Env "TAOS_ADAPTER_DEBUG" (default true) - --help Print this help message and exit - --httpCodeServerError Use a non-200 http status code when taosd returns an error. Env "TAOS_ADAPTER_HTTP_CODE_SERVER_ERROR" - --influxdb.enable enable influxdb. Env "TAOS_ADAPTER_INFLUXDB_ENABLE" (default true) - --log.enableRecordHttpSql whether to record http sql. 
Env "TAOS_ADAPTER_LOG_ENABLE_RECORD_HTTP_SQL" - --log.path string log path. Env "TAOS_ADAPTER_LOG_PATH" (default "/var/log/taos") - --log.rotationCount uint log rotation count. Env "TAOS_ADAPTER_LOG_ROTATION_COUNT" (default 30) - --log.rotationSize string log rotation size(KB MB GB), must be a positive integer. Env "TAOS_ADAPTER_LOG_ROTATION_SIZE" (default "1GB") - --log.rotationTime duration log rotation time. Env "TAOS_ADAPTER_LOG_ROTATION_TIME" (default 24h0m0s) - --log.sqlRotationCount uint record sql log rotation count. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_COUNT" (default 2) - --log.sqlRotationSize string record sql log rotation size(KB MB GB), must be a positive integer. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_SIZE" (default "1GB") - --log.sqlRotationTime duration record sql log rotation time. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_TIME" (default 24h0m0s) - --logLevel string log level (panic fatal error warn warning info debug trace). Env "TAOS_ADAPTER_LOG_LEVEL" (default "info") - --monitor.collectDuration duration Set monitor duration. Env "TAOS_ADAPTER_MONITOR_COLLECT_DURATION" (default 3s) - --monitor.disable Whether to disable monitoring. Env "TAOS_ADAPTER_MONITOR_DISABLE" - --monitor.disableCollectClientIP Whether to disable collecting clientIP. Env "TAOS_ADAPTER_MONITOR_DISABLE_COLLECT_CLIENT_IP" - --monitor.identity string The identity of the current instance, or 'hostname:port' if it is empty. Env "TAOS_ADAPTER_MONITOR_IDENTITY" - --monitor.incgroup Whether running in cgroup. Env "TAOS_ADAPTER_MONITOR_INCGROUP" - --monitor.password string TDengine password. Env "TAOS_ADAPTER_MONITOR_PASSWORD" (default "taosdata") - --monitor.pauseAllMemoryThreshold float Memory percentage threshold for pause all. Env "TAOS_ADAPTER_MONITOR_PAUSE_ALL_MEMORY_THRESHOLD" (default 80) - --monitor.pauseQueryMemoryThreshold float Memory percentage threshold for pause query. Env "TAOS_ADAPTER_MONITOR_PAUSE_QUERY_MEMORY_THRESHOLD" (default 70) - --monitor.user string TDengine user. 
Env "TAOS_ADAPTER_MONITOR_USER" (default "root") - --monitor.writeInterval duration Set write to TDengine interval. Env "TAOS_ADAPTER_MONITOR_WRITE_INTERVAL" (default 30s) - --monitor.writeToTD Whether write metrics to TDengine. Env "TAOS_ADAPTER_MONITOR_WRITE_TO_TD" - --node_exporter.caCertFile string node_exporter ca cert file path. Env "TAOS_ADAPTER_NODE_EXPORTER_CA_CERT_FILE" - --node_exporter.certFile string node_exporter cert file path. Env "TAOS_ADAPTER_NODE_EXPORTER_CERT_FILE" - --node_exporter.db string node_exporter db name. Env "TAOS_ADAPTER_NODE_EXPORTER_DB" (default "node_exporter") - --node_exporter.enable enable node_exporter. Env "TAOS_ADAPTER_NODE_EXPORTER_ENABLE" - --node_exporter.gatherDuration duration node_exporter gather duration. Env "TAOS_ADAPTER_NODE_EXPORTER_GATHER_DURATION" (default 5s) - --node_exporter.httpBearerTokenString string node_exporter http bearer token. Env "TAOS_ADAPTER_NODE_EXPORTER_HTTP_BEARER_TOKEN_STRING" - --node_exporter.httpPassword string node_exporter http password. Env "TAOS_ADAPTER_NODE_EXPORTER_HTTP_PASSWORD" - --node_exporter.httpUsername string node_exporter http username. Env "TAOS_ADAPTER_NODE_EXPORTER_HTTP_USERNAME" - --node_exporter.insecureSkipVerify node_exporter skip ssl check. Env "TAOS_ADAPTER_NODE_EXPORTER_INSECURE_SKIP_VERIFY" (default true) - --node_exporter.keyFile string node_exporter cert key file path. Env "TAOS_ADAPTER_NODE_EXPORTER_KEY_FILE" - --node_exporter.password string node_exporter password. Env "TAOS_ADAPTER_NODE_EXPORTER_PASSWORD" (default "taosdata") - --node_exporter.responseTimeout duration node_exporter response timeout. Env "TAOS_ADAPTER_NODE_EXPORTER_RESPONSE_TIMEOUT" (default 5s) - --node_exporter.ttl int node_exporter data ttl. Env "TAOS_ADAPTER_NODE_EXPORTER_TTL" - --node_exporter.urls strings node_exporter urls. Env "TAOS_ADAPTER_NODE_EXPORTER_URLS" (default [http://localhost:9100]) - --node_exporter.user string node_exporter user. 
Env "TAOS_ADAPTER_NODE_EXPORTER_USER" (default "root") - --opentsdb.enable enable opentsdb. Env "TAOS_ADAPTER_OPENTSDB_ENABLE" (default true) - --opentsdb_telnet.batchSize int opentsdb_telnet batch size. Env "TAOS_ADAPTER_OPENTSDB_TELNET_BATCH_SIZE" (default 1) - --opentsdb_telnet.dbs strings opentsdb_telnet db names. Env "TAOS_ADAPTER_OPENTSDB_TELNET_DBS" (default [opentsdb_telnet,collectd_tsdb,icinga2_tsdb,tcollector_tsdb]) - --opentsdb_telnet.enable enable opentsdb telnet,warning: without auth info(default false). Env "TAOS_ADAPTER_OPENTSDB_TELNET_ENABLE" - --opentsdb_telnet.flushInterval duration opentsdb_telnet flush interval (0s means not valid) . Env "TAOS_ADAPTER_OPENTSDB_TELNET_FLUSH_INTERVAL" - --opentsdb_telnet.maxTCPConnections int max tcp connections. Env "TAOS_ADAPTER_OPENTSDB_TELNET_MAX_TCP_CONNECTIONS" (default 250) - --opentsdb_telnet.password string opentsdb_telnet password. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PASSWORD" (default "taosdata") - --opentsdb_telnet.ports ints opentsdb telnet tcp port. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PORTS" (default [6046,6047,6048,6049]) - --opentsdb_telnet.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TCP_KEEP_ALIVE" - --opentsdb_telnet.ttl int opentsdb_telnet data ttl. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TTL" - --opentsdb_telnet.user string opentsdb_telnet user. Env "TAOS_ADAPTER_OPENTSDB_TELNET_USER" (default "root") - --pool.idleTimeout duration Set idle connection timeout. Env "TAOS_ADAPTER_POOL_IDLE_TIMEOUT" - --pool.maxConnect int max connections to taosd. Env "TAOS_ADAPTER_POOL_MAX_CONNECT" - --pool.maxIdle int max idle connections to taosd. Env "TAOS_ADAPTER_POOL_MAX_IDLE" - -P, --port int http port. Env "TAOS_ADAPTER_PORT" (default 6041) - --prometheus.enable enable prometheus. Env "TAOS_ADAPTER_PROMETHEUS_ENABLE" (default true) - --restfulRowLimit int restful returns the maximum number of rows (-1 means no limit). 
Env "TAOS_ADAPTER_RESTFUL_ROW_LIMIT" (default -1) - --statsd.allowPendingMessages int statsd allow pending messages. Env "TAOS_ADAPTER_STATSD_ALLOW_PENDING_MESSAGES" (default 50000) - --statsd.db string statsd db name. Env "TAOS_ADAPTER_STATSD_DB" (default "statsd") - --statsd.deleteCounters statsd delete counter cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_COUNTERS" (default true) - --statsd.deleteGauges statsd delete gauge cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_GAUGES" (default true) - --statsd.deleteSets statsd delete set cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_SETS" (default true) - --statsd.deleteTimings statsd delete timing cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_TIMINGS" (default true) - --statsd.enable enable statsd. Env "TAOS_ADAPTER_STATSD_ENABLE" (default true) - --statsd.gatherInterval duration statsd gather interval. Env "TAOS_ADAPTER_STATSD_GATHER_INTERVAL" (default 5s) - --statsd.maxTCPConnections int statsd max tcp connections. Env "TAOS_ADAPTER_STATSD_MAX_TCP_CONNECTIONS" (default 250) - --statsd.password string statsd password. Env "TAOS_ADAPTER_STATSD_PASSWORD" (default "taosdata") - --statsd.port int statsd server port. Env "TAOS_ADAPTER_STATSD_PORT" (default 6044) - --statsd.protocol string statsd protocol [tcp or udp]. Env "TAOS_ADAPTER_STATSD_PROTOCOL" (default "udp") - --statsd.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_STATSD_TCP_KEEP_ALIVE" - --statsd.ttl int statsd data ttl. Env "TAOS_ADAPTER_STATSD_TTL" - --statsd.user string statsd user. Env "TAOS_ADAPTER_STATSD_USER" (default "root") - --statsd.worker int statsd write worker. Env "TAOS_ADAPTER_STATSD_WORKER" (default 10) - --taosConfigDir string load taos client config path. Env "TAOS_ADAPTER_TAOS_CONFIG_FILE" - --version Print the version and exit + --collectd.db string collectd db name. Env "TAOS_ADAPTER_COLLECTD_DB" (default "collectd") + --collectd.enable enable collectd. 
Env "TAOS_ADAPTER_COLLECTD_ENABLE" (default true) + --collectd.password string collectd password. Env "TAOS_ADAPTER_COLLECTD_PASSWORD" (default "taosdata") + --collectd.port int collectd server port. Env "TAOS_ADAPTER_COLLECTD_PORT" (default 6045) + --collectd.ttl int collectd data ttl. Env "TAOS_ADAPTER_COLLECTD_TTL" + --collectd.user string collectd user. Env "TAOS_ADAPTER_COLLECTD_USER" (default "root") + --collectd.worker int collectd write worker. Env "TAOS_ADAPTER_COLLECTD_WORKER" (default 10) + -c, --config string config path default /etc/taos/taosadapter.toml + --cors.allowAllOrigins cors allow all origins. Env "TAOS_ADAPTER_CORS_ALLOW_ALL_ORIGINS" (default true) + --cors.allowCredentials cors allow credentials. Env "TAOS_ADAPTER_CORS_ALLOW_Credentials" + --cors.allowHeaders stringArray cors allow HEADERS. Env "TAOS_ADAPTER_ALLOW_HEADERS" + --cors.allowOrigins stringArray cors allow origins. Env "TAOS_ADAPTER_ALLOW_ORIGINS" + --cors.allowWebSockets cors allow WebSockets. Env "TAOS_ADAPTER_CORS_ALLOW_WebSockets" --cors.exposeHeaders stringArray cors expose headers. Env "TAOS_ADAPTER_Expose_Headers" + --debug enable debug mode. Env "TAOS_ADAPTER_DEBUG" (default true) + --help Print this help message and exit + --httpCodeServerError Use a non-200 http status code when server returns an error. Env "TAOS_ADAPTER_HTTP_CODE_SERVER_ERROR" + --influxdb.enable enable influxdb. Env "TAOS_ADAPTER_INFLUXDB_ENABLE" (default true) + --log.enableRecordHttpSql whether to record http sql. Env "TAOS_ADAPTER_LOG_ENABLE_RECORD_HTTP_SQL" + --log.path string log path. Env "TAOS_ADAPTER_LOG_PATH" (default "/var/log/taos") --log.rotationCount uint log rotation count. Env "TAOS_ADAPTER_LOG_ROTATION_COUNT" (default 30) + --log.rotationSize string log rotation size(KB MB GB), must be a positive integer. Env "TAOS_ADAPTER_LOG_ROTATION_SIZE" (default "1GB") + --log.rotationTime duration log rotation time. 
Env "TAOS_ADAPTER_LOG_ROTATION_TIME" (default 24h0m0s) + --log.sqlRotationCount uint record sql log rotation count. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_COUNT" (default 2) + --log.sqlRotationSize string record sql log rotation size(KB MB GB), must be a positive integer. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_SIZE" (default "1GB") + --log.sqlRotationTime duration record sql log rotation time. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_TIME" (default 24h0m0s) + --logLevel string log level (panic fatal error warn warning info debug trace). Env "TAOS_ADAPTER_LOG_LEVEL" (default "info") + --monitor.collectDuration duration Set monitor duration. Env "TAOS_ADAPTER_MONITOR_COLLECT_DURATION" (default 3s) + --monitor.disable Whether to disable monitoring. Env "TAOS_ADAPTER_MONITOR_DISABLE" + --monitor.disableCollectClientIP Whether to disable collecting clientIP. Env "TAOS_ADAPTER_MONITOR_DISABLE_COLLECT_CLIENT_IP" + --monitor.identity string The identity of the current instance, or 'hostname:port' if it is empty. Env "TAOS_ADAPTER_MONITOR_IDENTITY" + --monitor.incgroup Whether running in cgroup. Env "TAOS_ADAPTER_MONITOR_INCGROUP" + --monitor.password string TDengine password. Env "TAOS_ADAPTER_MONITOR_PASSWORD" (default "taosdata") + --monitor.pauseAllMemoryThreshold float Memory percentage threshold for pause all. Env "TAOS_ADAPTER_MONITOR_PAUSE_ALL_MEMORY_THRESHOLD" (default 80) + --monitor.pauseQueryMemoryThreshold float Memory percentage threshold for pause query. Env "TAOS_ADAPTER_MONITOR_PAUSE_QUERY_MEMORY_THRESHOLD" (default 70) + --monitor.user string TDengine user. Env "TAOS_ADAPTER_MONITOR_USER" (default "root") --monitor.writeInterval duration Set write to TDengine interval. Env "TAOS_ADAPTER_MONITOR_WRITE_INTERVAL" (default 30s) + --monitor.writeToTD Whether write metrics to TDengine. Env "TAOS_ADAPTER_MONITOR_WRITE_TO_TD" + --node_exporter.caCertFile string node_exporter ca cert file path. 
Env "TAOS_ADAPTER_NODE_EXPORTER_CA_CERT_FILE" + --node_exporter.certFile string node_exporter cert file path. Env "TAOS_ADAPTER_NODE_EXPORTER_CERT_FILE" + --node_exporter.db string node_exporter db name. Env "TAOS_ADAPTER_NODE_EXPORTER_DB" (default "node_exporter") + --node_exporter.enable enable node_exporter. Env "TAOS_ADAPTER_NODE_EXPORTER_ENABLE" + --node_exporter.gatherDuration duration node_exporter gather duration. Env "TAOS_ADAPTER_NODE_EXPORTER_GATHER_DURATION" (default 5s) + --node_exporter.httpBearerTokenString string node_exporter http bearer token. Env "TAOS_ADAPTER_NODE_EXPORTER_HTTP_BEARER_TOKEN_STRING" + --node_exporter.httpPassword string node_exporter http password. Env "TAOS_ADAPTER_NODE_EXPORTER_HTTP_PASSWORD" + --node_exporter.httpUsername string node_exporter http username. Env "TAOS_ADAPTER_NODE_EXPORTER_HTTP_USERNAME" + --node_exporter.insecureSkipVerify node_exporter skip ssl check. Env "TAOS_ADAPTER_NODE_EXPORTER_INSECURE_SKIP_VERIFY" (default true) + --node_exporter.keyFile string node_exporter cert key file path. Env "TAOS_ADAPTER_NODE_EXPORTER_KEY_FILE" + --node_exporter.password string node_exporter password. Env "TAOS_ADAPTER_NODE_EXPORTER_PASSWORD" (default "taosdata") + --node_exporter.responseTimeout duration node_exporter response timeout. Env "TAOS_ADAPTER_NODE_EXPORTER_RESPONSE_TIMEOUT" (default 5s) + --node_exporter.ttl int node_exporter data ttl. Env "TAOS_ADAPTER_NODE_EXPORTER_TTL" + --node_exporter.urls strings node_exporter urls. Env "TAOS_ADAPTER_NODE_EXPORTER_URLS" (default [http://localhost:9100]) + --node_exporter.user string node_exporter user. Env "TAOS_ADAPTER_NODE_EXPORTER_USER" (default "root") + --opentsdb.enable enable opentsdb. Env "TAOS_ADAPTER_OPENTSDB_ENABLE" (default true) + --opentsdb_telnet.batchSize int opentsdb_telnet batch size. Env "TAOS_ADAPTER_OPENTSDB_TELNET_BATCH_SIZE" (default 1) + --opentsdb_telnet.dbs strings opentsdb_telnet db names. 
Env "TAOS_ADAPTER_OPENTSDB_TELNET_DBS" (default [opentsdb_telnet,collectd_tsdb,icinga2_tsdb,tcollector_tsdb]) + --opentsdb_telnet.enable enable opentsdb telnet,warning: without auth info(default false). Env "TAOS_ADAPTER_OPENTSDB_TELNET_ENABLE" + --opentsdb_telnet.flushInterval duration opentsdb_telnet flush interval (0s means not valid) . Env "TAOS_ADAPTER_OPENTSDB_TELNET_FLUSH_INTERVAL" + --opentsdb_telnet.maxTCPConnections int max tcp connections. Env "TAOS_ADAPTER_OPENTSDB_TELNET_MAX_TCP_CONNECTIONS" (default 250) + --opentsdb_telnet.password string opentsdb_telnet password. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PASSWORD" (default "taosdata") + --opentsdb_telnet.ports ints opentsdb telnet tcp port. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PORTS" (default [6046,6047,6048,6049]) + --opentsdb_telnet.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TCP_KEEP_ALIVE" + --opentsdb_telnet.ttl int opentsdb_telnet data ttl. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TTL" + --opentsdb_telnet.user string opentsdb_telnet user. Env "TAOS_ADAPTER_OPENTSDB_TELNET_USER" (default "root") + --pool.idleTimeout duration Set idle connection timeout. Env "TAOS_ADAPTER_POOL_IDLE_TIMEOUT" + --pool.maxConnect int max connections to server. Env "TAOS_ADAPTER_POOL_MAX_CONNECT" + --pool.maxIdle int max idle connections to server. Env "TAOS_ADAPTER_POOL_MAX_IDLE" + -P, --port int http port. Env "TAOS_ADAPTER_PORT" (default 6041) + --prometheus.enable enable prometheus. Env "TAOS_ADAPTER_PROMETHEUS_ENABLE" (default true) + --restfulRowLimit int restful returns the maximum number of rows (-1 means no limit). Env "TAOS_ADAPTER_RESTFUL_ROW_LIMIT" (default -1) + --smlAutoCreateDB Whether to automatically create db when writing with schemaless. Env "TAOS_ADAPTER_SML_AUTO_CREATE_DB" + --statsd.allowPendingMessages int statsd allow pending messages. Env "TAOS_ADAPTER_STATSD_ALLOW_PENDING_MESSAGES" (default 50000) + --statsd.db string statsd db name. 
Env "TAOS_ADAPTER_STATSD_DB" (default "statsd") --statsd.deleteCounters statsd delete counter cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_COUNTERS" (default true) + --statsd.deleteGauges statsd delete gauge cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_GAUGES" (default true) + --statsd.deleteSets statsd delete set cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_SETS" (default true) + --statsd.deleteTimings statsd delete timing cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_TIMINGS" (default true) + --statsd.enable enable statsd. Env "TAOS_ADAPTER_STATSD_ENABLE" (default true) + --statsd.gatherInterval duration statsd gather interval. Env "TAOS_ADAPTER_STATSD_GATHER_INTERVAL" (default 5s) + --statsd.maxTCPConnections int statsd max tcp connections. Env "TAOS_ADAPTER_STATSD_MAX_TCP_CONNECTIONS" (default 250) + --statsd.password string statsd password. Env "TAOS_ADAPTER_STATSD_PASSWORD" (default "taosdata") + --statsd.port int statsd server port. Env "TAOS_ADAPTER_STATSD_PORT" (default 6044) + --statsd.protocol string statsd protocol [tcp or udp]. Env "TAOS_ADAPTER_STATSD_PROTOCOL" (default "udp") + --statsd.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_STATSD_TCP_KEEP_ALIVE" --statsd.ttl int statsd data ttl. Env "TAOS_ADAPTER_STATSD_TTL" + --statsd.user string statsd user. Env "TAOS_ADAPTER_STATSD_USER" (default "root") + --statsd.worker int statsd write worker. Env "TAOS_ADAPTER_STATSD_WORKER" (default 10) + --taosConfigDir string load taos client config path. Env "TAOS_ADAPTER_TAOS_CONFIG_FILE" + --tmq.releaseIntervalMultiplierForAutocommit int When set to autocommit, the interval for message release is a multiple of the autocommit interval, with a default value of 2 and a minimum value of 1 and a maximum value of 10. 
Env "TAOS_ADAPTER_TMQ_RELEASE_INTERVAL_MULTIPLIER_FOR_AUTOCOMMIT" (default 2) + --version Print the version and exit ``` 备注: @@ -331,6 +328,10 @@ taosAdapter 通过参数 `restfulRowLimit` 来控制结果的返回条数,-1 taosAdapter 通过参数 `httpCodeServerError` 来设置当 C 接口返回错误时是否返回非 200 的 http 状态码。当设置为 true 时将根据 C 返回的错误码返回不同 http 状态码。具体见 [HTTP 响应码](../../connector/rest-api/#http-响应码)。 +## 配置 schemaless 写入是否自动创建 DB + +taosAdapter 从 3.0.4.0 版本开始,提供参数 `smlAutoCreateDB` 来控制在 schemaless 协议写入时是否自动创建 DB。默认值为 false 不自动创建 DB,需要用户手动创建 DB 后进行 schemaless 写入。 + ## 故障解决 您可以通过命令 `systemctl status taosadapter` 来检查 taosAdapter 运行状态。 From 40c8f340133fa5acd82b535bd65e0a4acbd52a03 Mon Sep 17 00:00:00 2001 From: Alex Duan <51781608+DuanKuanJun@users.noreply.github.com> Date: Fri, 14 Apr 2023 11:27:25 +0800 Subject: [PATCH 38/42] Update index.md --- docs/zh/14-reference/12-config/index.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/docs/zh/14-reference/12-config/index.md b/docs/zh/14-reference/12-config/index.md index e5efd77f80..fe23684fde 100644 --- a/docs/zh/14-reference/12-config/index.md +++ b/docs/zh/14-reference/12-config/index.md @@ -626,6 +626,15 @@ charset 的有效值是 UTF-8。 | 缺省值 | 1 | | 补充说明 | 不同的启动方式,生成 core 文件的目录如下:1、systemctl start taosd 启动:生成的 core 在根目录下
2、手动启动,就在 taosd 执行目录下。 | +### enableScience + +| 属性 | 说明 | +| -------- | ------------------------------------------------------------------------------------------------------------------------------------------ | +| 适用范围 | 仅客户端 TAOS-CLI 适用 | +| 含义 | 是否开启科学计数法显示浮点数 | +| 取值范围 | 0:否,1:是 | +| 缺省值 | 0 | + ### udf | 属性 | 说明 | From 0e73061440318bb925f4ecfcfd25885aac7c8815 Mon Sep 17 00:00:00 2001 From: Alex Duan <51781608+DuanKuanJun@users.noreply.github.com> Date: Fri, 14 Apr 2023 11:40:12 +0800 Subject: [PATCH 39/42] Update index.md --- docs/en/14-reference/12-config/index.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/docs/en/14-reference/12-config/index.md b/docs/en/14-reference/12-config/index.md index 430487a3af..2e6f5ec1e2 100644 --- a/docs/en/14-reference/12-config/index.md +++ b/docs/en/14-reference/12-config/index.md @@ -628,6 +628,16 @@ The charset that takes effect is UTF-8. | Default Value | 1 | | Note | The core file is generated under root directory `systemctl start taosd`/`launchctl start com.tdengine.taosd` is used to start, or under the working directory if `taosd` is started directly on Linux/macOS Shell. 
| +### enableScience + +| Attribute | Description | +| -------- | ------------------------------------------------------------------------------------------------------------------------------------------ | +| Applicable | Only taos-CLI client | +| Meaning | Whether to show float and double with the scientific notation | +| Value Range | 0: false, 1: true | +| Default Value | 0 | + + ### udf | Attribute | Description | From bafff42323126705870c81b4ae495823e720e136 Mon Sep 17 00:00:00 2001 From: xleili Date: Fri, 14 Apr 2023 17:50:54 +0800 Subject: [PATCH 40/42] docs: release ver-3.0.4.0 --- cmake/cmake.version | 2 +- docs/en/28-releases/01-tdengine.md | 4 ++++ docs/en/28-releases/02-tools.md | 4 ++++ docs/zh/28-releases/01-tdengine.md | 4 ++++ docs/zh/28-releases/02-tools.md | 4 ++++ 5 files changed, 17 insertions(+), 1 deletion(-) diff --git a/cmake/cmake.version b/cmake/cmake.version index 5150ee3b75..232e86d891 100644 --- a/cmake/cmake.version +++ b/cmake/cmake.version @@ -2,7 +2,7 @@ IF (DEFINED VERNUMBER) SET(TD_VER_NUMBER ${VERNUMBER}) ELSE () - SET(TD_VER_NUMBER "3.0.3.2") + SET(TD_VER_NUMBER "3.0.4.0") ENDIF () IF (DEFINED VERCOMPATIBLE) diff --git a/docs/en/28-releases/01-tdengine.md b/docs/en/28-releases/01-tdengine.md index b160058d02..acfbf6a0ba 100644 --- a/docs/en/28-releases/01-tdengine.md +++ b/docs/en/28-releases/01-tdengine.md @@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://w import Release from "/components/ReleaseV3"; +## 3.0.4.0 + + + ## 3.0.3.2 diff --git a/docs/en/28-releases/02-tools.md b/docs/en/28-releases/02-tools.md index 17581b780a..e8f7a54566 100644 --- a/docs/en/28-releases/02-tools.md +++ b/docs/en/28-releases/02-tools.md @@ -10,6 +10,10 @@ For other historical version installers, please visit [here](https://www.taosdat import Release from "/components/ReleaseV3"; +## 2.4.12 + + + ## 2.4.11 diff --git a/docs/zh/28-releases/01-tdengine.md b/docs/zh/28-releases/01-tdengine.md index 
b4441ad078..0974289c1f 100644 --- a/docs/zh/28-releases/01-tdengine.md +++ b/docs/zh/28-releases/01-tdengine.md @@ -10,6 +10,10 @@ TDengine 2.x 各版本安装包请访问[这里](https://www.taosdata.com/all-do import Release from "/components/ReleaseV3"; +## 3.0.4.0 + + + ## 3.0.3.2 diff --git a/docs/zh/28-releases/02-tools.md b/docs/zh/28-releases/02-tools.md index cce6834f12..78926555f1 100644 --- a/docs/zh/28-releases/02-tools.md +++ b/docs/zh/28-releases/02-tools.md @@ -10,6 +10,10 @@ taosTools 各版本安装包下载链接如下: import Release from "/components/ReleaseV3"; +## 2.4.12 + + + ## 2.4.11 From bf35ce76bddfc6612f6eea99433f2b57c2dab8b1 Mon Sep 17 00:00:00 2001 From: xleili Date: Fri, 14 Apr 2023 18:04:52 +0800 Subject: [PATCH 41/42] fix: disable create share dir in edge community dir --- packaging/tools/makepkg.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh index 0dce526db6..e4df233d67 100755 --- a/packaging/tools/makepkg.sh +++ b/packaging/tools/makepkg.sh @@ -150,7 +150,7 @@ fi mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || : mkdir -p ${install_dir}/init.d && cp ${init_file_deb} ${install_dir}/init.d/${serverName}.deb mkdir -p ${install_dir}/init.d && cp ${init_file_rpm} ${install_dir}/init.d/${serverName}.rpm -mkdir -p ${install_dir}/share && cp -rf ${build_dir}/share/{etc,srv} ${install_dir}/share ||: +# mkdir -p ${install_dir}/share && cp -rf ${build_dir}/share/{etc,srv} ${install_dir}/share ||: if [ $adapterName != "taosadapter" ]; then mv ${install_dir}/cfg/${clientName2}adapter.toml ${install_dir}/cfg/$adapterName.toml @@ -322,6 +322,7 @@ if [[ $dbName == "taos" ]]; then mkdir -p ${install_dir}/share/ cp -Rfap ${web_dir}/admin ${install_dir}/share/ cp ${web_dir}/png/taos.png ${install_dir}/share/admin/images/taos.png + cp -rf ${build_dir}/share/{etc,srv} ${install_dir}/share ||: else echo "directory not found for enterprise release: ${web_dir}/admin" 
fi From 6c3a2e2c5b3823ffd697eb5f686ea22715adac2c Mon Sep 17 00:00:00 2001 From: Adam Ji Date: Sat, 15 Apr 2023 22:38:28 +0800 Subject: [PATCH 42/42] docs: support schemaless insert (#20944) * docs: support schemaless insert * docs: code reformat --- .../14-reference/03-connector/07-python.mdx | 50 ++++++++++++ docs/examples/python/schemaless_insert.py | 21 +++++ docs/examples/python/schemaless_insert_raw.py | 74 ++++++++++++++++++ .../python/schemaless_insert_raw_req_id.py | 76 +++++++++++++++++++ .../python/schemaless_insert_raw_ttl.py | 73 ++++++++++++++++++ .../python/schemaless_insert_req_id.py | 22 ++++++ docs/examples/python/schemaless_insert_ttl.py | 22 ++++++ docs/zh/08-connector/30-python.mdx | 50 ++++++++++++ 8 files changed, 388 insertions(+) create mode 100644 docs/examples/python/schemaless_insert.py create mode 100644 docs/examples/python/schemaless_insert_raw.py create mode 100644 docs/examples/python/schemaless_insert_raw_req_id.py create mode 100644 docs/examples/python/schemaless_insert_raw_ttl.py create mode 100644 docs/examples/python/schemaless_insert_req_id.py create mode 100644 docs/examples/python/schemaless_insert_ttl.py diff --git a/docs/en/14-reference/03-connector/07-python.mdx b/docs/en/14-reference/03-connector/07-python.mdx index bfbdd929c2..cc5c8f4e69 100644 --- a/docs/en/14-reference/03-connector/07-python.mdx +++ b/docs/en/14-reference/03-connector/07-python.mdx @@ -459,6 +459,56 @@ For a more detailed description of the `sql()` method, please refer to [RestClie +### Schemaless Insert + +Connector support schemaless insert. 
+ + + + +Simple insert + +```python +{{#include docs/examples/python/schemaless_insert.py}} +``` + +Insert with ttl argument + +```python +{{#include docs/examples/python/schemaless_insert_ttl.py}} +``` + +Insert with req_id argument + +```python +{{#include docs/examples/python/schemaless_insert_req_id.py}} +``` + + + + + +Simple insert + +```python +{{#include docs/examples/python/schemaless_insert_raw.py}} +``` + +Insert with ttl argument + +```python +{{#include docs/examples/python/schemaless_insert_raw_ttl.py}} +``` + +Insert with req_id argument + +```python +{{#include docs/examples/python/schemaless_insert_raw_req_id.py}} +``` + + + + ### Other sample programs | Example program links | Example program content | diff --git a/docs/examples/python/schemaless_insert.py b/docs/examples/python/schemaless_insert.py new file mode 100644 index 0000000000..334a4b728f --- /dev/null +++ b/docs/examples/python/schemaless_insert.py @@ -0,0 +1,21 @@ +import taos + +conn = taos.connect() +dbname = "pytest_line" +conn.execute("drop database if exists %s" % dbname) +conn.execute("create database if not exists %s precision 'us'" % dbname) +conn.select_db(dbname) + +lines = [ + 'st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"pass",c2=false,c4=4f64 1626006833639000000', +] +conn.schemaless_insert(lines, taos.SmlProtocol.LINE_PROTOCOL, taos.SmlPrecision.NOT_CONFIGURED) +print("inserted") + +conn.schemaless_insert(lines, taos.SmlProtocol.LINE_PROTOCOL, taos.SmlPrecision.NOT_CONFIGURED) + +result = conn.query("show tables") +for row in result: + print(row) + +conn.execute("drop database if exists %s" % dbname) diff --git a/docs/examples/python/schemaless_insert_raw.py b/docs/examples/python/schemaless_insert_raw.py new file mode 100644 index 0000000000..0fda7dc505 --- /dev/null +++ b/docs/examples/python/schemaless_insert_raw.py @@ -0,0 +1,74 @@ +import taos +from taos import utils +from taos import TaosConnection +from taos.cinterface import * +from taos.error import OperationalError, 
SchemalessError + +conn = taos.connect() +dbname = "taos_schemaless_insert" +try: + conn.execute("drop database if exists %s" % dbname) + + if taos.IS_V3: + conn.execute("create database if not exists %s schemaless 1 precision 'ns'" % dbname) + else: + conn.execute("create database if not exists %s update 2 precision 'ns'" % dbname) + + conn.select_db(dbname) + + lines = '''st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000 + st,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin, abc",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000 + stf,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000''' + + res = conn.schemaless_insert_raw(lines, 1, 0) + print("affected rows: ", res) + assert (res == 3) + + lines = '''stf,t1=5i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000''' + res = conn.schemaless_insert_raw(lines, 1, 0) + print("affected rows: ", res) + assert (res == 1) + + result = conn.query("select * from st") + dict2 = result.fetch_all_into_dict() + print(dict2) + print(result.row_count) + + all = result.rows_iter() + for row in all: + print(row) + result.close() + assert (result.row_count == 2) + + # error test + lines = ''',t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000''' + try: + res = conn.schemaless_insert_raw(lines, 1, 0) + print(res) + # assert(False) + except SchemalessError as err: + print('**** error: ', err) + # assert (err.msg == 'Invalid data format') + + result = conn.query("select * from st") + print(result.row_count) + all = result.rows_iter() + for row in all: + print(row) + result.close() + + conn.execute("drop database if exists %s" % dbname) + conn.close() +except InterfaceError as err: + conn.execute("drop database if exists %s" % dbname) + conn.close() + print(err) +except SchemalessError as err: + conn.execute("drop database if exists %s" % dbname) + conn.close() 
+ print(err) +except Exception as err: + conn.execute("drop database if exists %s" % dbname) + conn.close() + print(err) + raise err diff --git a/docs/examples/python/schemaless_insert_raw_req_id.py b/docs/examples/python/schemaless_insert_raw_req_id.py new file mode 100644 index 0000000000..606e510986 --- /dev/null +++ b/docs/examples/python/schemaless_insert_raw_req_id.py @@ -0,0 +1,76 @@ +import taos +from taos import utils +from taos import TaosConnection +from taos.cinterface import * +from taos.error import OperationalError, SchemalessError + +conn = taos.connect() +dbname = "taos_schemaless_insert" +try: + conn.execute("drop database if exists %s" % dbname) + + if taos.IS_V3: + conn.execute("create database if not exists %s schemaless 1 precision 'ns'" % dbname) + else: + conn.execute("create database if not exists %s update 2 precision 'ns'" % dbname) + + conn.select_db(dbname) + + lines = '''st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000 + st,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin, abc",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000 + stf,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000''' + + ttl = 1000 + req_id = utils.gen_req_id() + res = conn.schemaless_insert_raw(lines, 1, 0, ttl=ttl, req_id=req_id) + print("affected rows: ", res) + assert (res == 3) + + lines = '''stf,t1=5i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000''' + ttl = 1000 + req_id = utils.gen_req_id() + res = conn.schemaless_insert_raw(lines, 1, 0, ttl=ttl, req_id=req_id) + print("affected rows: ", res) + assert (res == 1) + + result = conn.query("select * from st") + dict2 = result.fetch_all_into_dict() + print(dict2) + print(result.row_count) + + all = result.rows_iter() + for row in all: + print(row) + result.close() + assert (result.row_count == 2) + + # error test + lines = ''',t1=3i64,t2=4f64,t3="t3" 
c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000''' + try: + ttl = 1000 + req_id = utils.gen_req_id() + res = conn.schemaless_insert_raw(lines, 1, 0, ttl=ttl, req_id=req_id) + print(res) + # assert(False) + except SchemalessError as err: + print('**** error: ', err) + # assert (err.msg == 'Invalid data format') + + result = conn.query("select * from st") + print(result.row_count) + all = result.rows_iter() + for row in all: + print(row) + result.close() + + conn.execute("drop database if exists %s" % dbname) + conn.close() +except InterfaceError as err: + conn.execute("drop database if exists %s" % dbname) + conn.close() + print(err) +except Exception as err: + conn.execute("drop database if exists %s" % dbname) + conn.close() + print(err) + raise err diff --git a/docs/examples/python/schemaless_insert_raw_ttl.py b/docs/examples/python/schemaless_insert_raw_ttl.py new file mode 100644 index 0000000000..cf57792534 --- /dev/null +++ b/docs/examples/python/schemaless_insert_raw_ttl.py @@ -0,0 +1,73 @@ +import taos +from taos import utils +from taos import TaosConnection +from taos.cinterface import * +from taos.error import OperationalError, SchemalessError + +conn = taos.connect() +dbname = "taos_schemaless_insert" +try: + conn.execute("drop database if exists %s" % dbname) + + if taos.IS_V3: + conn.execute("create database if not exists %s schemaless 1 precision 'ns'" % dbname) + else: + conn.execute("create database if not exists %s update 2 precision 'ns'" % dbname) + + conn.select_db(dbname) + + lines = '''st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000 + st,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin, abc",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000 + stf,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000''' + + ttl = 1000 + res = conn.schemaless_insert_raw(lines, 1, 0, ttl=ttl) + print("affected rows: ", res) + assert (res == 3) + + lines = 
'''stf,t1=5i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000''' + ttl = 1000 + res = conn.schemaless_insert_raw(lines, 1, 0, ttl=ttl) + print("affected rows: ", res) + assert (res == 1) + + result = conn.query("select * from st") + dict2 = result.fetch_all_into_dict() + print(dict2) + print(result.row_count) + + all = result.rows_iter() + for row in all: + print(row) + result.close() + assert (result.row_count == 2) + + # error test + lines = ''',t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000''' + try: + ttl = 1000 + res = conn.schemaless_insert_raw(lines, 1, 0, ttl=ttl) + print(res) + # assert(False) + except SchemalessError as err: + print('**** error: ', err) + # assert (err.msg == 'Invalid data format') + + result = conn.query("select * from st") + print(result.row_count) + all = result.rows_iter() + for row in all: + print(row) + result.close() + + conn.execute("drop database if exists %s" % dbname) + conn.close() +except InterfaceError as err: + conn.execute("drop database if exists %s" % dbname) + conn.close() + print(err) +except Exception as err: + conn.execute("drop database if exists %s" % dbname) + conn.close() + print(err) + raise err diff --git a/docs/examples/python/schemaless_insert_req_id.py b/docs/examples/python/schemaless_insert_req_id.py new file mode 100644 index 0000000000..ee1472db69 --- /dev/null +++ b/docs/examples/python/schemaless_insert_req_id.py @@ -0,0 +1,22 @@ +import taos +from taos import SmlProtocol, SmlPrecision + +conn = taos.connect() +dbname = "pytest_line" +conn.execute("drop database if exists %s" % dbname) +conn.execute("create database if not exists %s precision 'us'" % dbname) +conn.select_db(dbname) + +lines = [ + 'st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"pass",c2=false,c4=4f64 1626006833639000000', +] +conn.schemaless_insert(lines, taos.SmlProtocol.LINE_PROTOCOL, taos.SmlPrecision.NOT_CONFIGURED, req_id=1) +print("inserted") + 
+conn.schemaless_insert(lines, taos.SmlProtocol.LINE_PROTOCOL, taos.SmlPrecision.NOT_CONFIGURED, req_id=2) + +result = conn.query("show tables") +for row in result: + print(row) + +conn.execute("drop database if exists %s" % dbname) diff --git a/docs/examples/python/schemaless_insert_ttl.py b/docs/examples/python/schemaless_insert_ttl.py new file mode 100644 index 0000000000..85050439f2 --- /dev/null +++ b/docs/examples/python/schemaless_insert_ttl.py @@ -0,0 +1,22 @@ +import taos +from taos import SmlProtocol, SmlPrecision + +conn = taos.connect() +dbname = "pytest_line" +conn.execute("drop database if exists %s" % dbname) +conn.execute("create database if not exists %s precision 'us'" % dbname) +conn.select_db(dbname) + +lines = [ + 'st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"pass",c2=false,c4=4f64 1626006833639000000', +] +conn.schemaless_insert(lines, taos.SmlProtocol.LINE_PROTOCOL, taos.SmlPrecision.NOT_CONFIGURED, ttl=1000) +print("inserted") + +conn.schemaless_insert(lines, taos.SmlProtocol.LINE_PROTOCOL, taos.SmlPrecision.NOT_CONFIGURED, ttl=1000) + +result = conn.query("show tables") +for row in result: + print(row) + +conn.execute("drop database if exists %s" % dbname) diff --git a/docs/zh/08-connector/30-python.mdx b/docs/zh/08-connector/30-python.mdx index 5395610df3..1cff142e11 100644 --- a/docs/zh/08-connector/30-python.mdx +++ b/docs/zh/08-connector/30-python.mdx @@ -484,6 +484,56 @@ TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线 +### 无模式写入 + +连接器支持无模式写入功能。 + + + + +简单写入 + +```python +{{#include docs/examples/python/schemaless_insert.py}} +``` + +带有 ttl 参数的写入 + +```python +{{#include docs/examples/python/schemaless_insert_ttl.py}} +``` + +带有 req_id 参数的写入 + +```python +{{#include docs/examples/python/schemaless_insert_req_id.py}} +``` + + + + + +简单写入 + +```python +{{#include docs/examples/python/schemaless_insert_raw.py}} +``` + +带有 ttl 参数的写入 + +```python +{{#include docs/examples/python/schemaless_insert_raw_ttl.py}} +``` + +带有 req_id 参数的写入 + +```python +{{#include 
docs/examples/python/schemaless_insert_raw_req_id.py}} +``` + + + + ### 其它示例程序 | 示例程序链接 | 示例程序内容 |