From 0c85bd1e989a8ef6e8f5a24c96552fc5cc4957e4 Mon Sep 17 00:00:00 2001 From: tomchon Date: Tue, 28 Jun 2022 21:41:32 +0800 Subject: [PATCH 01/14] test: modify testcase of 2 mnodes --- tests/system-test/6-cluster/5dnode2mnode.py | 134 ++++++-------------- 1 file changed, 38 insertions(+), 96 deletions(-) diff --git a/tests/system-test/6-cluster/5dnode2mnode.py b/tests/system-test/6-cluster/5dnode2mnode.py index e08e738be6..509e535f01 100644 --- a/tests/system-test/6-cluster/5dnode2mnode.py +++ b/tests/system-test/6-cluster/5dnode2mnode.py @@ -9,28 +9,22 @@ from util.sql import * from util.cases import * from util.dnodes import TDDnodes from util.dnodes import TDDnode +from util.cluster import * + import time import socket import subprocess +sys.path.append("./6-cluster") -class MyDnodes(TDDnodes): - def __init__(self ,dnodes_lists): - super(MyDnodes,self).__init__() - self.dnodes = dnodes_lists # dnode must be TDDnode instance - self.simDeployed = False - +from clusterCommonCreate import * +from clusterCommonCheck import * + class TDTestCase: - noConn = True - def init(self,conn ,logSql): tdLog.debug(f"start to excute {__file__}") - self.TDDnodes = None - self.depoly_cluster(5) - self.master_dnode = self.TDDnodes.dnodes[0] - self.host=self.master_dnode.cfgDict["fqdn"] - conn1 = taos.connect(self.master_dnode.cfgDict["fqdn"] , config=self.master_dnode.cfgDir) - tdSql.init(conn1.cursor()) - + tdSql.init(conn.cursor()) + self.host = socket.gethostname() + def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -41,90 +35,12 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files or "taosd.exe" in files): + if ("taosd" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] break return buildPath - - - def depoly_cluster(self ,dnodes_nums): - - testCluster = False - valgrind = 0 - hostname = socket.gethostname() - dnodes = [] - start_port = 6030 - start_port_sec = 6130 - - for num in range(1, dnodes_nums+1): - dnode = TDDnode(num) - dnode.addExtraCfg("firstEp", f"{hostname}:{start_port}") - dnode.addExtraCfg("fqdn", f"{hostname}") - dnode.addExtraCfg("serverPort", f"{start_port + (num-1)*100}") - dnode.addExtraCfg("monitorFqdn", hostname) - dnode.addExtraCfg("monitorPort", 7043) - dnode.addExtraCfg("secondEp", f"{hostname}:{start_port_sec}") - - dnodes.append(dnode) - - self.TDDnodes = MyDnodes(dnodes) - self.TDDnodes.init("") - self.TDDnodes.setTestCluster(testCluster) - self.TDDnodes.setValgrind(valgrind) - self.TDDnodes.stopAll() - for dnode in self.TDDnodes.dnodes: - self.TDDnodes.deploy(dnode.index,{}) - - for dnode in self.TDDnodes.dnodes: - self.TDDnodes.starttaosd(dnode.index) - - # create cluster - for dnode in self.TDDnodes.dnodes[1:]: - # print(dnode.cfgDict) - dnode_id = dnode.cfgDict["fqdn"] + ":" +dnode.cfgDict["serverPort"] - dnode_first_host = dnode.cfgDict["firstEp"].split(":")[0] - dnode_first_port = dnode.cfgDict["firstEp"].split(":")[-1] - cmd = f"{self.getBuildPath()}/build/bin/taos -h {dnode_first_host} -P {dnode_first_port} -s \"create dnode \\\"{dnode_id}\\\"\"" - print(cmd) - os.system(cmd) - - time.sleep(2) - tdLog.info(" create cluster done! 
") - - def five_dnode_one_mnode(self): - tdSql.query("show dnodes;") - tdSql.checkData(0,1,'%s:6030'%self.host) - tdSql.checkData(4,1,'%s:6430'%self.host) - tdSql.checkData(0,4,'ready') - tdSql.checkData(4,4,'ready') - tdSql.query("show mnodes;") - tdSql.checkData(0,1,'%s:6030'%self.host) - tdSql.checkData(0,2,'leader') - tdSql.checkData(0,3,'ready') - - - tdSql.error("create mnode on dnode 1;") - tdSql.error("drop mnode on dnode 1;") - - tdSql.execute("drop database if exists db") - tdSql.execute("create database if not exists db replica 1 duration 300") - tdSql.execute("use db") - tdSql.execute( - '''create table stb1 - (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) - tags (t1 int) - ''' - ) - tdSql.execute( - ''' - create table t1 - (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) - ''' - ) - for i in range(4): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') def five_dnode_two_mnode(self): tdSql.query("show dnodes;") @@ -187,6 +103,34 @@ class TDTestCase: tdSql.error("create mnode on dnode 2") tdSql.query("show dnodes;") print(tdSql.queryResult) + clusterComCheck.checkDnodes(5) + # restart all taosd + tdDnodes=cluster.dnodes + + # stop follower + tdLog.info("stop follower") + tdDnodes[1].stoptaosd() + if cluster.checkConnectStatus(0) : + print("cluster also work") + + # start follower + tdLog.info("start follower") + tdDnodes[1].starttaosd() + if clusterComCheck.checkMnodeStatus(2) : + print("both mnodes are ready") + + # stop leader + tdLog.info("stop leader") + tdDnodes[0].stoptaosd() + try: + cluster.checkConnectStatus(2) + tdLog.exit(" The election still succeeds when leader of both mnodes are killed ") + except Exception: + pass + tdLog.info("start leader") + tdDnodes[0].starttaosd() + if clusterComCheck.checkMnodeStatus(2) : + print("both mnodes are ready") # # fisrt drop follower of mnode # BUG @@ -229,8 +173,6 @@ class TDTestCase: def run(self): - print(self.master_dnode.cfgDict) - self.five_dnode_one_mnode() self.five_dnode_two_mnode() From 5ddcb862758d86a9b44637145e3d5b7d3754365a Mon Sep 17 00:00:00 2001 From: tomchon Date: Tue, 28 Jun 2022 21:43:24 +0800 Subject: [PATCH 02/14] test:modify testcase of insertWithMoreVgroups.py --- tests/system-test/1-insert/insertWithMoreVgroup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/system-test/1-insert/insertWithMoreVgroup.py b/tests/system-test/1-insert/insertWithMoreVgroup.py index 1ebbc1fc57..aad334f72a 100644 --- a/tests/system-test/1-insert/insertWithMoreVgroup.py +++ b/tests/system-test/1-insert/insertWithMoreVgroup.py @@ -81,7 +81,7 @@ class TDTestCase: user = "root" password = "taosdata" port =6030 - con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port,config="/home/chr/TDengine/sim/psim/cfg/") + con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port) cur=con.cursor() print(cur) return cur From c5b3183261c5c2635e8d8ae20b4fb4e5f9347403 Mon Sep 17 00:00:00 2001 From: tomchon Date: Wed, 29 Jun 2022 10:05:05 +0800 Subject: [PATCH 03/14] test:modify ways to start 5dnode2mnode.py --- tests/system-test/fulltest.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh index b0b8aa7108..b76d73b54c 100755 --- a/tests/system-test/fulltest.sh +++ b/tests/system-test/fulltest.sh @@ -116,7 +116,7 @@ python3 
./test.py -f 2-query/function_null.py python3 ./test.py -f 2-query/queryQnode.py python3 ./test.py -f 6-cluster/5dnode1mnode.py -python3 ./test.py -f 6-cluster/5dnode2mnode.py +python3 ./test.py -f 6-cluster/5dnode2mnode.py -N 5 -M 3 python3 ./test.py -f 6-cluster/5dnode3mnodeStop.py -N 5 -M 3 # BUG python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateDb.py -N 5 -M 3 # BUG python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDb.py -N 5 -M 3 From 5e31c9e8fe664af8be2c4292a65dad9380802e8a Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Wed, 29 Jun 2022 17:50:33 +0800 Subject: [PATCH 04/14] refactor(query): function adoption to us/ns db precision TD-16436 --- source/libs/function/src/builtins.c | 4 ++++ source/libs/scalar/src/sclfunc.c | 3 ++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index 6fa7d10ca3..440e12c3d7 100644 --- a/source/libs/function/src/builtins.c +++ b/source/libs/function/src/builtins.c @@ -1460,6 +1460,10 @@ static int32_t translateToUnixtimestamp(SFunctionNode* pFunc, char* pErrBuf, int return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } + //add database precision as param + uint8_t dbPrec = pFunc->node.resType.precision; + addDbPrecisonParam(pFunc->pParameterList, dbPrec); + pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes, .type = TSDB_DATA_TYPE_BIGINT}; return TSDB_CODE_SUCCESS; } diff --git a/source/libs/scalar/src/sclfunc.c b/source/libs/scalar/src/sclfunc.c index b7c171db2e..dc4bf33a2b 100644 --- a/source/libs/scalar/src/sclfunc.c +++ b/source/libs/scalar/src/sclfunc.c @@ -1116,7 +1116,8 @@ int32_t toISO8601Function(SScalarParam *pInput, int32_t inputNum, SScalarParam * int32_t toUnixtimestampFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput) { int32_t type = GET_PARAM_TYPE(pInput); - int32_t timePrec = GET_PARAM_PRECISON(pInput); + int64_t timePrec; + GET_TYPED_DATA(timePrec, int64_t, GET_PARAM_TYPE(&pInput[1]), pInput[1].columnData->pData); for (int32_t i = 0; i < pInput[0].numOfRows; ++i) { if (colDataIsNull_s(pInput[0].columnData, i)) { From 0eed30b0f028a68bd08ad23d26e21d5a81509137 Mon Sep 17 00:00:00 2001 From: tomchon Date: Wed, 29 Jun 2022 20:46:42 +0800 Subject: [PATCH 05/14] test:modify testcase of muti-mondes --- .../system-test/6-cluster/5dnode3mnodeStop.py | 14 +-- .../6-cluster/5dnode3mnodeStopLoop.py | 118 ++++++++++++++++++ tests/system-test/fulltest.sh | 5 +- 3 files changed, 122 insertions(+), 15 deletions(-) create mode 100644 tests/system-test/6-cluster/5dnode3mnodeStopLoop.py diff --git a/tests/system-test/6-cluster/5dnode3mnodeStop.py b/tests/system-test/6-cluster/5dnode3mnodeStop.py index 1b55176e1d..f932e5537e 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeStop.py +++ b/tests/system-test/6-cluster/5dnode3mnodeStop.py @@ -88,7 +88,7 @@ class TDTestCase: tdLog.info("Confirm the status of the dnode again") tdSql.error("create mnode on dnode 2") tdSql.query("show dnodes;") - print(tdSql.queryResult) + # print(tdSql.queryResult) clusterComCheck.checkDnodes(dnodenumbers) # restart all taosd tdDnodes=cluster.dnodes @@ -108,18 +108,6 @@ class TDTestCase: tdDnodes[0].starttaosd() clusterComCheck.checkMnodeStatus(3) - tdLog.info("Take turns stopping all dnodes ") - # seperate vnode and mnode in different dnodes. 
- # create database and stable - stopcount =0 - while stopcount <= 2: - tdLog.info("first restart loop") - for i in range(dnodenumbers): - tdDnodes[i].stoptaosd() - tdDnodes[i].starttaosd() - stopcount+=1 - clusterComCheck.checkDnodes(dnodenumbers) - clusterComCheck.checkMnodeStatus(3) def run(self): # print(self.master_dnode.cfgDict) diff --git a/tests/system-test/6-cluster/5dnode3mnodeStopLoop.py b/tests/system-test/6-cluster/5dnode3mnodeStopLoop.py new file mode 100644 index 0000000000..e0c91e5ac4 --- /dev/null +++ b/tests/system-test/6-cluster/5dnode3mnodeStopLoop.py @@ -0,0 +1,118 @@ +from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE +import taos +import sys +import time +import os + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.dnodes import TDDnodes +from util.dnodes import TDDnode +from util.cluster import * +from test import tdDnodes +sys.path.append("./6-cluster") + +from clusterCommonCreate import * +from clusterCommonCheck import * +import time +import socket +import subprocess +from multiprocessing import Process + + +class TDTestCase: + + def init(self,conn ,logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + self.host = socket.gethostname() + + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def fiveDnodeThreeMnode(self,dnodenumbers,mnodeNums,restartNumber): + tdLog.printNoPrefix("======== test case 1: ") + paraDict = {'dbName': 'db', + 'dropFlag': 1, + 'event': '', + 'vgroups': 4, + 'replica': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbNum': 1, + 'rowsPerTbl': 10000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 10, + 'showMsg': 1, + 'showRow': 1} + dnodenumbers=int(dnodenumbers) + mnodeNums=int(mnodeNums) + dbNumbers = int(dnodenumbers * restartNumber) + + tdLog.info("first check dnode and mnode") + tdSql.query("show dnodes;") + tdSql.checkData(0,1,'%s:6030'%self.host) + tdSql.checkData(4,1,'%s:6430'%self.host) + clusterComCheck.checkDnodes(dnodenumbers) + clusterComCheck.checkMnodeStatus(1) + + # fisr add three mnodes; + tdLog.info("fisr add three mnodes and check mnode status") + tdSql.execute("create mnode on dnode 2") + clusterComCheck.checkMnodeStatus(2) + tdSql.execute("create mnode on dnode 3") + clusterComCheck.checkMnodeStatus(3) + + # add some error operations and + tdLog.info("Confirm the status of the dnode again") + tdSql.error("create mnode on dnode 2") + tdSql.query("show dnodes;") + # print(tdSql.queryResult) + clusterComCheck.checkDnodes(dnodenumbers) + # restart all taosd + tdDnodes=cluster.dnodes + + tdLog.info("Take turns stopping all dnodes ") + # seperate vnode and mnode in different dnodes. 
+ # create database and stable + stopcount =0 + while stopcount <= 2: + tdLog.info(" restart loop: %d"%stopcount ) + for i in range(dnodenumbers): + tdDnodes[i].stoptaosd() + tdDnodes[i].starttaosd() + stopcount+=1 + clusterComCheck.checkDnodes(dnodenumbers) + clusterComCheck.checkMnodeStatus(3) + + def run(self): + # print(self.master_dnode.cfgDict) + self.fiveDnodeThreeMnode(5,3,1) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh index b76d73b54c..436c2ed349 100755 --- a/tests/system-test/fulltest.sh +++ b/tests/system-test/fulltest.sh @@ -118,15 +118,16 @@ python3 ./test.py -f 2-query/queryQnode.py python3 ./test.py -f 6-cluster/5dnode1mnode.py python3 ./test.py -f 6-cluster/5dnode2mnode.py -N 5 -M 3 python3 ./test.py -f 6-cluster/5dnode3mnodeStop.py -N 5 -M 3 +python3 ./test.py -f 6-cluster/5dnode3mnodeStopLoop.py -N 5 -M 3 # BUG python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateDb.py -N 5 -M 3 # BUG python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDb.py -N 5 -M 3 -python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateDb.py -N 5 -M 3 +# python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateDb.py -N 5 -M 3 # BUG python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateStb.py -N 5 -M 3 # BUG python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateStb.py -N 5 -M 3 # python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateStb.py -N 5 -M 3 # BUG python3 ./test.py -f 6-cluster/5dnode3mnodeStopInsert.py # python3 ./test.py -f 6-cluster/5dnode3mnodeDrop.py -N 5 -python3 test.py -f 6-cluster/5dnode3mnodeStopConnect.py -N 5 -M 3 +# python3 test.py -f 6-cluster/5dnode3mnodeStopConnect.py -N 5 -M 3 python3 ./test.py -f 7-tmq/basic5.py From 4c23223447f7c8d4771939e1b0a740e3f5e3be26 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Wed, 29 Jun 2022 17:50:33 +0800 Subject: [PATCH 06/14] refactor(query): function adoption to us/ns db precision TD-16436 --- source/libs/function/src/builtins.c | 39 ++++++++++++++++------------- source/libs/scalar/src/sclfunc.c | 10 ++++++-- 2 files changed, 30 insertions(+), 19 deletions(-) diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index 440e12c3d7..bfb638360d 100644 --- a/source/libs/function/src/builtins.c +++ b/source/libs/function/src/builtins.c @@ -39,6 +39,20 @@ static int32_t invaildFuncParaValueErrMsg(char* pErrBuf, int32_t len, const char return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_PARA_VALUE, "Invalid parameter value : %s", pFuncName); } +void static addDbPrecisonParam(SNodeList** pList, uint8_t precision) { + SValueNode* pVal = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE); + pVal->literal = NULL; + pVal->isDuration = false; + pVal->translate = true; + pVal->node.resType.type = TSDB_DATA_TYPE_TINYINT; + pVal->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_TINYINT].bytes; + pVal->node.resType.precision = precision; + pVal->datum.i = (int64_t)precision; + pVal->typeData = (int64_t)precision; + + nodesListMakeAppend(pList, (SNode*)pVal); +} + // There is only one parameter of numeric type, and the return type is parameter type static int32_t translateInOutNum(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { if (1 != LIST_LENGTH(pFunc->pParameterList)) { @@ -222,6 +236,11 @@ static int32_t 
translateWduration(SFunctionNode* pFunc, char* pErrBuf, int32_t l static int32_t translateTimePseudoColumn(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { // pseudo column do not need to check parameters + + //add database precision as param + uint8_t dbPrec = pFunc->node.resType.precision; + addDbPrecisonParam(&pFunc->pParameterList, dbPrec); + pFunc->node.resType = (SDataType){.bytes = sizeof(int64_t), .type = TSDB_DATA_TYPE_TIMESTAMP}; return TSDB_CODE_SUCCESS; } @@ -1379,20 +1398,6 @@ static bool validateTimezoneFormat(const SValueNode* pVal) { return true; } -void static addDbPrecisonParam(SNodeList* pList, uint8_t precision) { - SValueNode* pVal = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE); - pVal->literal = NULL; - pVal->isDuration = false; - pVal->translate = true; - pVal->node.resType.type = TSDB_DATA_TYPE_TINYINT; - pVal->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_TINYINT].bytes; - pVal->node.resType.precision = precision; - pVal->datum.i = (int64_t)precision; - pVal->typeData = (int64_t)precision; - - nodesListAppend(pList, (SNode*)pVal); -} - void static addTimezoneParam(SNodeList* pList) { char buf[6] = {0}; time_t t = taosTime(NULL); @@ -1462,7 +1467,7 @@ static int32_t translateToUnixtimestamp(SFunctionNode* pFunc, char* pErrBuf, int //add database precision as param uint8_t dbPrec = pFunc->node.resType.precision; - addDbPrecisonParam(pFunc->pParameterList, dbPrec); + addDbPrecisonParam(&pFunc->pParameterList, dbPrec); pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes, .type = TSDB_DATA_TYPE_BIGINT}; return TSDB_CODE_SUCCESS; @@ -1482,7 +1487,7 @@ static int32_t translateTimeTruncate(SFunctionNode* pFunc, char* pErrBuf, int32_ //add database precision as param uint8_t dbPrec = pFunc->node.resType.precision; - addDbPrecisonParam(pFunc->pParameterList, dbPrec); + addDbPrecisonParam(&pFunc->pParameterList, dbPrec); pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_TIMESTAMP].bytes, .type = TSDB_DATA_TYPE_TIMESTAMP}; @@ -1510,7 +1515,7 @@ static int32_t translateTimeDiff(SFunctionNode* pFunc, char* pErrBuf, int32_t le //add database precision as param uint8_t dbPrec = pFunc->node.resType.precision; - addDbPrecisonParam(pFunc->pParameterList, dbPrec); + addDbPrecisonParam(&pFunc->pParameterList, dbPrec); pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes, .type = TSDB_DATA_TYPE_BIGINT}; return TSDB_CODE_SUCCESS; diff --git a/source/libs/scalar/src/sclfunc.c b/source/libs/scalar/src/sclfunc.c index dc4bf33a2b..d350a380e0 100644 --- a/source/libs/scalar/src/sclfunc.c +++ b/source/libs/scalar/src/sclfunc.c @@ -1515,7 +1515,10 @@ int32_t timeDiffFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *p } int32_t nowFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput) { - int64_t ts = taosGetTimestamp(TSDB_TIME_PRECISION_MILLI); + int64_t timePrec; + GET_TYPED_DATA(timePrec, int64_t, GET_PARAM_TYPE(&pInput[0]), pInput[0].columnData->pData); + + int64_t ts = taosGetTimestamp(timePrec); for (int32_t i = 0; i < pInput->numOfRows; ++i) { colDataAppendInt64(pOutput->columnData, i, &ts); } @@ -1524,7 +1527,10 @@ int32_t nowFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutpu } int32_t todayFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput) { - int64_t ts = taosGetTimestampToday(TSDB_TIME_PRECISION_MILLI); + int64_t timePrec; + GET_TYPED_DATA(timePrec, int64_t, GET_PARAM_TYPE(&pInput[1]), pInput[1].columnData->pData); + + int64_t ts = 
taosGetTimestampToday(timePrec); for (int32_t i = 0; i < pInput->numOfRows; ++i) { colDataAppendInt64(pOutput->columnData, i, &ts); } From b1670f8b82efad7c24e964e5c8ae35e83c484f0e Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Wed, 29 Jun 2022 17:50:33 +0800 Subject: [PATCH 07/14] refactor(query): function adoption to us/ns db precision TD-16436 --- source/libs/scalar/src/sclfunc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/scalar/src/sclfunc.c b/source/libs/scalar/src/sclfunc.c index d350a380e0..76c0e48740 100644 --- a/source/libs/scalar/src/sclfunc.c +++ b/source/libs/scalar/src/sclfunc.c @@ -1528,7 +1528,7 @@ int32_t nowFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutpu int32_t todayFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput) { int64_t timePrec; - GET_TYPED_DATA(timePrec, int64_t, GET_PARAM_TYPE(&pInput[1]), pInput[1].columnData->pData); + GET_TYPED_DATA(timePrec, int64_t, GET_PARAM_TYPE(&pInput[0]), pInput[0].columnData->pData); int64_t ts = taosGetTimestampToday(timePrec); for (int32_t i = 0; i < pInput->numOfRows; ++i) { From cf35174d539d5b0b59ff6776af9121ce5c5d95c7 Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Thu, 30 Jun 2022 10:23:35 +0800 Subject: [PATCH 08/14] feat: the last_row function supports all scenes --- include/libs/function/functionMgt.h | 1 + include/libs/nodes/plannodes.h | 1 + source/libs/function/src/builtins.c | 13 +++++++ source/libs/planner/src/planLogicCreater.c | 8 ++-- source/libs/planner/src/planOptimizer.c | 43 +++++++++++++++++++++- source/libs/planner/src/planSpliter.c | 2 + source/libs/planner/test/planBasicTest.cpp | 2 + 7 files changed, 66 insertions(+), 4 deletions(-) diff --git a/include/libs/function/functionMgt.h b/include/libs/function/functionMgt.h index 1ed78750d1..7697dd5047 100644 --- a/include/libs/function/functionMgt.h +++ b/include/libs/function/functionMgt.h @@ -125,6 +125,7 @@ typedef enum EFunctionType { FUNCTION_TYPE_BLOCK_DIST_INFO, // block distribution pseudo column function FUNCTION_TYPE_TO_COLUMN, FUNCTION_TYPE_GROUP_KEY, + FUNCTION_TYPE_CACHE_LAST_ROW, // distributed splitting functions FUNCTION_TYPE_APERCENTILE_PARTIAL = 4000, diff --git a/include/libs/nodes/plannodes.h b/include/libs/nodes/plannodes.h index 4671c8b81e..f8d79a863f 100644 --- a/include/libs/nodes/plannodes.h +++ b/include/libs/nodes/plannodes.h @@ -91,6 +91,7 @@ typedef struct SAggLogicNode { SLogicNode node; SNodeList* pGroupKeys; SNodeList* pAggFuncs; + bool hasLastRow; } SAggLogicNode; typedef struct SProjectLogicNode { diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index fbebb12cc3..85a2ab22b6 100644 --- a/source/libs/function/src/builtins.c +++ b/source/libs/function/src/builtins.c @@ -1882,6 +1882,19 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "last_row", .type = FUNCTION_TYPE_LAST_ROW, .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_TIMELINE_FUNC, + .translateFunc = translateFirstLast, + .getEnvFunc = getFirstLastFuncEnv, + .initFunc = functionSetup, + .processFunc = lastFunction, + .finalizeFunc = firstLastFinalize, + .pPartialFunc = "_last_partial", + .pMergeFunc = "_last_merge", + .combineFunc = lastCombine, + }, + { + .name = "_cache_last_row", + .type = FUNCTION_TYPE_CACHE_LAST_ROW, + .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_TIMELINE_FUNC, .translateFunc = translateLastRow, .getEnvFunc = getMinmaxFuncEnv, .initFunc = minmaxFunctionSetup, diff --git 
a/source/libs/planner/src/planLogicCreater.c b/source/libs/planner/src/planLogicCreater.c index ef8b109b62..05256e2696 100644 --- a/source/libs/planner/src/planLogicCreater.c +++ b/source/libs/planner/src/planLogicCreater.c @@ -160,9 +160,9 @@ static EScanType getScanType(SLogicPlanContext* pCxt, SSelectStmt* pSelect, SNod return SCAN_TYPE_STREAM; } - if (pSelect->hasLastRowFunc) { - return SCAN_TYPE_LAST_ROW; - } + // if (pSelect->hasLastRowFunc) { + // return SCAN_TYPE_LAST_ROW; + // } if (NULL == pScanCols) { // select count(*) from t @@ -474,6 +474,8 @@ static int32_t createAggLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect, return TSDB_CODE_OUT_OF_MEMORY; } + pAgg->hasLastRow = pSelect->hasLastRowFunc; + int32_t code = TSDB_CODE_SUCCESS; // set grouyp keys, agg funcs and having conditions diff --git a/source/libs/planner/src/planOptimizer.c b/source/libs/planner/src/planOptimizer.c index 42f7f744c5..b733d75fb2 100644 --- a/source/libs/planner/src/planOptimizer.c +++ b/source/libs/planner/src/planOptimizer.c @@ -1616,6 +1616,46 @@ static int32_t rewriteUniqueOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLog return rewriteUniqueOptimizeImpl(pCxt, pLogicSubplan, pIndef); } +static bool lastRowScanOptMayBeOptimized(SLogicNode* pNode) { + if (QUERY_NODE_LOGIC_PLAN_AGG != nodeType(pNode) || !(((SAggLogicNode*)pNode)->hasLastRow) || + NULL != ((SAggLogicNode*)pNode)->pGroupKeys || 1 != LIST_LENGTH(pNode->pChildren) || + QUERY_NODE_LOGIC_PLAN_SCAN != nodeType(nodesListGetNode(pNode->pChildren, 0)) || + NULL != ((SScanLogicNode*)nodesListGetNode(pNode->pChildren, 0))->node.pConditions) { + return false; + } + + SNode* pFunc = NULL; + FOREACH(pFunc, ((SAggLogicNode*)pNode)->pAggFuncs) { + if (FUNCTION_TYPE_LAST_ROW != ((SFunctionNode*)pFunc)->funcType) { + return false; + } + } + + return true; +} + +static int32_t lastRowScanOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLogicSubplan) { + SAggLogicNode* pAgg = (SAggLogicNode*)optFindPossibleNode(pLogicSubplan->pNode, lastRowScanOptMayBeOptimized); + + if (NULL == pAgg) { + return TSDB_CODE_SUCCESS; + } + + SNode* pNode = NULL; + FOREACH(pNode, pAgg->pAggFuncs) { + SFunctionNode* pFunc = (SFunctionNode*)pNode; + int32_t len = snprintf(pFunc->functionName, sizeof(pFunc->functionName), "_cache_last_row"); + pFunc->functionName[len] = '\0'; + fmGetFuncInfo(pFunc, NULL, 0); + } + pAgg->hasLastRow = false; + + ((SScanLogicNode*)nodesListGetNode(pAgg->node.pChildren, 0))->scanType = SCAN_TYPE_LAST_ROW; + + pCxt->optimized = true; + return TSDB_CODE_SUCCESS; +} + // merge projects static bool mergeProjectsMayBeOptimized(SLogicNode* pNode) { if (QUERY_NODE_LOGIC_PLAN_PROJECT != nodeType(pNode) || 1 != LIST_LENGTH(pNode->pChildren)) { @@ -1704,7 +1744,8 @@ static const SOptimizeRule optimizeRuleSet[] = { {.pName = "EliminateProject", .optimizeFunc = eliminateProjOptimize}, {.pName = "EliminateSetOperator", .optimizeFunc = eliminateSetOpOptimize}, {.pName = "RewriteTail", .optimizeFunc = rewriteTailOptimize}, - {.pName = "RewriteUnique", .optimizeFunc = rewriteUniqueOptimize} + {.pName = "RewriteUnique", .optimizeFunc = rewriteUniqueOptimize}, + {.pName = "LastRowScan", .optimizeFunc = lastRowScanOptimize} }; // clang-format on diff --git a/source/libs/planner/src/planSpliter.c b/source/libs/planner/src/planSpliter.c index 9d23df5bda..2bc226804f 100644 --- a/source/libs/planner/src/planSpliter.c +++ b/source/libs/planner/src/planSpliter.c @@ -197,6 +197,8 @@ static bool stbSplNeedSplit(bool streamQuery, SLogicNode* pNode) { return 
stbSplIsMultiTbScan(streamQuery, (SScanLogicNode*)pNode); case QUERY_NODE_LOGIC_PLAN_JOIN: return !(((SJoinLogicNode*)pNode)->isSingleTableJoin); + // case QUERY_NODE_LOGIC_PLAN_PARTITION: + // return stbSplHasMultiTbScan(streamQuery, pNode); case QUERY_NODE_LOGIC_PLAN_AGG: return !stbSplHasGatherExecFunc(((SAggLogicNode*)pNode)->pAggFuncs) && stbSplHasMultiTbScan(streamQuery, pNode); case QUERY_NODE_LOGIC_PLAN_WINDOW: diff --git a/source/libs/planner/test/planBasicTest.cpp b/source/libs/planner/test/planBasicTest.cpp index ff725c444e..b886fca2af 100644 --- a/source/libs/planner/test/planBasicTest.cpp +++ b/source/libs/planner/test/planBasicTest.cpp @@ -99,6 +99,8 @@ TEST_F(PlanBasicTest, lastRowFunc) { run("SELECT LAST_ROW(c1, c2) FROM t1"); run("SELECT LAST_ROW(c1) FROM st1"); + + run("SELECT LAST_ROW(c1), SUM(c3) FROM t1"); } TEST_F(PlanBasicTest, sampleFunc) { From 2ad65998107b88498917bcb902500e9912c4efef Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Thu, 30 Jun 2022 11:04:24 +0800 Subject: [PATCH 09/14] feat: partition by distributed split --- source/libs/planner/src/planLogicCreater.c | 10 +++------- source/libs/planner/src/planPhysiCreater.c | 2 +- source/libs/planner/src/planSpliter.c | 19 ++++++++++++++++--- 3 files changed, 20 insertions(+), 11 deletions(-) diff --git a/source/libs/planner/src/planLogicCreater.c b/source/libs/planner/src/planLogicCreater.c index 05256e2696..d170482c48 100644 --- a/source/libs/planner/src/planLogicCreater.c +++ b/source/libs/planner/src/planLogicCreater.c @@ -154,16 +154,12 @@ static int32_t createSelectRootLogicNode(SLogicPlanContext* pCxt, SSelectStmt* p return createRootLogicNode(pCxt, pSelect, pSelect->precision, (FCreateLogicNode)func, pRoot); } -static EScanType getScanType(SLogicPlanContext* pCxt, SSelectStmt* pSelect, SNodeList* pScanPseudoCols, - SNodeList* pScanCols, int8_t tableType) { +static EScanType getScanType(SLogicPlanContext* pCxt, SNodeList* pScanPseudoCols, SNodeList* pScanCols, + int8_t tableType) { if (pCxt->pPlanCxt->topicQuery || pCxt->pPlanCxt->streamQuery) { return SCAN_TYPE_STREAM; } - // if (pSelect->hasLastRowFunc) { - // return SCAN_TYPE_LAST_ROW; - // } - if (NULL == pScanCols) { // select count(*) from t return NULL == pScanPseudoCols @@ -279,7 +275,7 @@ static int32_t createScanLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect code = rewriteExprsForSelect(pScan->pScanPseudoCols, pSelect, SQL_CLAUSE_FROM); } - pScan->scanType = getScanType(pCxt, pSelect, pScan->pScanPseudoCols, pScan->pScanCols, pScan->tableType); + pScan->scanType = getScanType(pCxt, pScan->pScanPseudoCols, pScan->pScanCols, pScan->tableType); if (TSDB_CODE_SUCCESS == code) { code = addPrimaryKeyCol(pScan->tableId, &pScan->pScanCols); diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c index 46747af3a9..0f19db26a5 100644 --- a/source/libs/planner/src/planPhysiCreater.c +++ b/source/libs/planner/src/planPhysiCreater.c @@ -1344,7 +1344,7 @@ static int32_t createMergePhysiNode(SPhysiPlanContext* pCxt, SMergeLogicNode* pM } } - if (TSDB_CODE_SUCCESS == code) { + if (TSDB_CODE_SUCCESS == code && NULL != pMergeLogicNode->pMergeKeys) { code = setListSlotId(pCxt, pMerge->node.pOutputDataBlockDesc->dataBlockId, -1, pMergeLogicNode->pMergeKeys, &pMerge->pMergeKeys); } diff --git a/source/libs/planner/src/planSpliter.c b/source/libs/planner/src/planSpliter.c index 2bc226804f..60c04c2c30 100644 --- a/source/libs/planner/src/planSpliter.c +++ b/source/libs/planner/src/planSpliter.c @@ -197,8 +197,8 @@ 
static bool stbSplNeedSplit(bool streamQuery, SLogicNode* pNode) { return stbSplIsMultiTbScan(streamQuery, (SScanLogicNode*)pNode); case QUERY_NODE_LOGIC_PLAN_JOIN: return !(((SJoinLogicNode*)pNode)->isSingleTableJoin); - // case QUERY_NODE_LOGIC_PLAN_PARTITION: - // return stbSplHasMultiTbScan(streamQuery, pNode); + case QUERY_NODE_LOGIC_PLAN_PARTITION: + return stbSplHasMultiTbScan(streamQuery, pNode); case QUERY_NODE_LOGIC_PLAN_AGG: return !stbSplHasGatherExecFunc(((SAggLogicNode*)pNode)->pAggFuncs) && stbSplHasMultiTbScan(streamQuery, pNode); case QUERY_NODE_LOGIC_PLAN_WINDOW: @@ -433,7 +433,7 @@ static int32_t stbSplSplitIntervalForBatch(SSplitContext* pCxt, SStableSplitInfo SNodeList* pMergeKeys = NULL; code = stbSplCreateMergeKeysByPrimaryKey(((SWindowLogicNode*)pInfo->pSplitNode)->pTspk, &pMergeKeys); if (TSDB_CODE_SUCCESS == code) { - code = stbSplCreateMergeNode(pCxt, NULL, pInfo->pSplitNode, pMergeKeys, pPartWindow, false); + code = stbSplCreateMergeNode(pCxt, NULL, pInfo->pSplitNode, pMergeKeys, pPartWindow, true); } if (TSDB_CODE_SUCCESS != code) { nodesDestroyList(pMergeKeys); @@ -889,6 +889,16 @@ static int32_t stbSplSplitJoinNode(SSplitContext* pCxt, SStableSplitInfo* pInfo) return code; } +static int32_t stbSplSplitPartitionNode(SSplitContext* pCxt, SStableSplitInfo* pInfo) { + int32_t code = stbSplCreateMergeNode(pCxt, pInfo->pSubplan, pInfo->pSplitNode, NULL, pInfo->pSplitNode, true); + if (TSDB_CODE_SUCCESS == code) { + code = nodesListMakeStrictAppend(&pInfo->pSubplan->pChildren, + (SNode*)splCreateScanSubplan(pCxt, pInfo->pSplitNode, SPLIT_FLAG_STABLE_SPLIT)); + } + ++(pCxt->groupId); + return code; +} + static int32_t stableSplit(SSplitContext* pCxt, SLogicSubplan* pSubplan) { if (pCxt->pPlanCxt->rSmaQuery) { return TSDB_CODE_SUCCESS; @@ -907,6 +917,9 @@ static int32_t stableSplit(SSplitContext* pCxt, SLogicSubplan* pSubplan) { case QUERY_NODE_LOGIC_PLAN_JOIN: code = stbSplSplitJoinNode(pCxt, &info); break; + case QUERY_NODE_LOGIC_PLAN_PARTITION: + code = stbSplSplitPartitionNode(pCxt, &info); + break; case QUERY_NODE_LOGIC_PLAN_AGG: code = stbSplSplitAggNode(pCxt, &info); break; From d53e1982add2e668b818eab16529003a6324d702 Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Thu, 30 Jun 2022 11:18:21 +0800 Subject: [PATCH 10/14] refactor: tref used for rsma fetch task --- source/dnode/vnode/src/inc/sma.h | 13 ++-- source/dnode/vnode/src/sma/smaEnv.c | 2 +- source/dnode/vnode/src/sma/smaRollup.c | 95 ++++++++++++++------------ 3 files changed, 57 insertions(+), 53 deletions(-) diff --git a/source/dnode/vnode/src/inc/sma.h b/source/dnode/vnode/src/inc/sma.h index 3e15fd9c57..b455d779c1 100644 --- a/source/dnode/vnode/src/inc/sma.h +++ b/source/dnode/vnode/src/inc/sma.h @@ -48,7 +48,6 @@ struct SSmaEnv { typedef struct { int32_t smaRef; - int32_t refId; } SSmaMgmt; #define SMA_ENV_LOCK(env) ((env)->lock) @@ -63,12 +62,12 @@ struct STSmaStat { struct SRSmaStat { SSma *pSma; - int64_t refId; - void *tmrHandle; - tmr_h tmrId; - int32_t tmrSeconds; - int8_t triggerStat; - int8_t runningStat; + int64_t refId; // shared by persistence/fetch tasks + void *tmrHandle; // for persistence task + tmr_h tmrId; // for persistence task + int32_t tmrSeconds; // for persistence task + int8_t triggerStat; // for persistence task + int8_t runningStat; // for persistence task SHashObj *rsmaInfoHash; // key: stbUid, value: SRSmaInfo; }; diff --git a/source/dnode/vnode/src/sma/smaEnv.c b/source/dnode/vnode/src/sma/smaEnv.c index e70e79fb6f..120d6612a2 100644 --- 
a/source/dnode/vnode/src/sma/smaEnv.c +++ b/source/dnode/vnode/src/sma/smaEnv.c @@ -135,7 +135,7 @@ static int32_t tdInitSmaStat(SSmaStat **pSmaStat, int8_t smaType, const SSma *pS // init smaMgmt smaMgmt.smaRef = taosOpenRef(SMA_MGMT_REF_NUM, tdDestroyRSmaStat); - if (smaMgmt.refId < 0) { + if (smaMgmt.smaRef < 0) { smaError("init smaRef failed, num:%d", SMA_MGMT_REF_NUM); terrno = TSDB_CODE_OUT_OF_MEMORY; return TSDB_CODE_FAILED; diff --git a/source/dnode/vnode/src/sma/smaRollup.c b/source/dnode/vnode/src/sma/smaRollup.c index bd74bbd639..bf662ddc05 100644 --- a/source/dnode/vnode/src/sma/smaRollup.c +++ b/source/dnode/vnode/src/sma/smaRollup.c @@ -50,6 +50,7 @@ static int32_t tdRSmaRestoreTSDataReload(SSma *pSma); struct SRSmaInfoItem { SRSmaInfo *pRsmaInfo; + int64_t refId; void *taskInfo; // qTaskInfo_t tmr_h tmrId; int8_t level; @@ -60,11 +61,14 @@ struct SRSmaInfoItem { struct SRSmaInfo { STSchema *pTSchema; - SSma *pSma; + SRSmaStat *pStat; int64_t suid; SRSmaInfoItem items[TSDB_RETENTION_L2]; }; +#define RSMA_INFO_SMA(r) ((r)->pStat->pSma) +#define RSMA_INFO_STAT(r) ((r)->pStat) + struct SRSmaQTaskInfoItem { int32_t len; int8_t type; @@ -107,22 +111,21 @@ static FORCE_INLINE void tdFreeTaskHandle(qTaskInfo_t *taskHandle, int32_t vgId, void *tdFreeRSmaInfo(SRSmaInfo *pInfo) { if (pInfo) { + SSma *pSma = RSMA_INFO_SMA(pInfo); for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) { SRSmaInfoItem *pItem = &pInfo->items[i]; if (pItem->taskInfo) { - smaDebug("vgId:%d, stb %" PRIi64 " stop fetch-timer %p level %d", SMA_VID(pInfo->pSma), pInfo->suid, - pItem->tmrId, i + 1); + smaDebug("vgId:%d, stb %" PRIi64 " stop fetch-timer %p level %d", SMA_VID(pSma), pInfo->suid, pItem->tmrId, + i + 1); taosTmrStopA(&pItem->tmrId); - tdFreeTaskHandle(&pItem->taskInfo, SMA_VID(pInfo->pSma), i + 1); + tdFreeTaskHandle(&pItem->taskInfo, SMA_VID(pSma), i + 1); } else { - smaDebug("vgId:%d, stb %" PRIi64 " no need to destroy rsma info level %d since empty taskInfo", - SMA_VID(pInfo->pSma), pInfo->suid, i + 1); + smaDebug("vgId:%d, stb %" PRIi64 " no need to destroy rsma info level %d since empty taskInfo", SMA_VID(pSma), + pInfo->suid, i + 1); } } taosMemoryFree(pInfo->pTSchema); taosMemoryFree(pInfo); - } else { - smaDebug("vgId:%d, stb %" PRIi64 " no need to destroy rsma info since empty", SMA_VID(pInfo->pSma), pInfo->suid); } return NULL; @@ -255,6 +258,7 @@ static int32_t tdSetRSmaInfoItemParams(SSma *pSma, SRSmaParam *param, SRSmaInfo if (param->qmsg[idx]) { SRSmaInfoItem *pItem = &(pRSmaInfo->items[idx]); + pItem->refId = RSMA_REF_ID(pRSmaInfo->pStat); pItem->pRsmaInfo = pRSmaInfo; pItem->taskInfo = qCreateStreamExecTaskInfo(param->qmsg[idx], pReadHandle); if (!pItem->taskInfo) { @@ -340,7 +344,7 @@ int32_t tdProcessRSmaCreateImpl(SSma *pSma, SRSmaParam *param, int64_t suid, con goto _err; } pRSmaInfo->pTSchema = pTSchema; - pRSmaInfo->pSma = pSma; + pRSmaInfo->pStat = pStat; pRSmaInfo->suid = suid; if (tdSetRSmaInfoItemParams(pSma, param, pRSmaInfo, &handle, 0) < 0) { @@ -522,7 +526,7 @@ static void tdDestroySDataBlockArray(SArray *pArray) { static int32_t tdFetchAndSubmitRSmaResult(SRSmaInfoItem *pItem, int8_t blkType) { SArray *pResult = NULL; SRSmaInfo *pRSmaInfo = pItem->pRsmaInfo; - SSma *pSma = pRSmaInfo->pSma; + SSma *pSma = RSMA_INFO_SMA(pRSmaInfo); while (1) { SSDataBlock *output = NULL; @@ -585,21 +589,29 @@ _err: */ static void tdRSmaFetchTrigger(void *param, void *tmrId) { SRSmaInfoItem *pItem = param; - SSma *pSma = pItem->pRsmaInfo->pSma; - SRSmaStat *pStat = (SRSmaStat 
*)SMA_ENV_STAT((SSmaEnv *)pSma->pRSmaEnv); + SSma *pSma = NULL; + SRSmaStat *pStat = (SRSmaStat *)taosAcquireRef(smaMgmt.smaRef, pItem->refId); + if (!pStat) { + smaDebug("rsma fetch task not start since already destroyed"); + return; + } + pSma = RSMA_INFO_SMA(pItem->pRsmaInfo); + + // if rsma trigger stat in cancelled or finished, not start fetch task anymore int8_t rsmaTriggerStat = atomic_load_8(RSMA_TRIGGER_STAT(pStat)); if (rsmaTriggerStat == TASK_TRIGGER_STAT_CANCELLED || rsmaTriggerStat == TASK_TRIGGER_STAT_FINISHED) { - smaDebug("vgId:%d, level %" PRIi8 " not fetch since stat is cancelled for table suid:%" PRIi64, SMA_VID(pSma), - pItem->level, pItem->pRsmaInfo->suid); + taosReleaseRef(smaMgmt.smaRef, pItem->refId); + smaDebug("vgId:%d, not fetch rsma level %" PRIi8 " data for table:%" PRIi64 " since stat is cancelled", + SMA_VID(pSma), pItem->level, pItem->pRsmaInfo->suid); return; } int8_t fetchTriggerStat = atomic_val_compare_exchange_8(&pItem->triggerStat, TASK_TRIGGER_STAT_ACTIVE, TASK_TRIGGER_STAT_INACTIVE); if (fetchTriggerStat == TASK_TRIGGER_STAT_ACTIVE) { - smaDebug("vgId:%d, level %" PRIi8 " stat is active for table suid:%" PRIi64, SMA_VID(pSma), pItem->level, - pItem->pRsmaInfo->suid); + smaDebug("vgId:%d, fetch rsma level %" PRIi8 " data for table:%" PRIi64 " since stat is active", SMA_VID(pSma), + pItem->level, pItem->pRsmaInfo->suid); tdRefSmaStat(pSma, (SSmaStat *)pStat); @@ -610,9 +622,11 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) { tdUnRefSmaStat(pSma, (SSmaStat *)pStat); } else { - smaDebug("vgId:%d, level %" PRIi8 " stat is inactive for table suid:%" PRIi64, SMA_VID(pSma), pItem->level, - pItem->pRsmaInfo->suid); + smaDebug("vgId:%d, not fetch rsma level %" PRIi8 " data for table:%" PRIi64 " since stat is inactive", + SMA_VID(pSma), pItem->level, pItem->pRsmaInfo->suid); } +_end: + taosReleaseRef(smaMgmt.smaRef, pItem->refId); } static int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int32_t inputType, SRSmaInfoItem *pItem, tb_uid_t suid, @@ -632,7 +646,6 @@ static int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int32_t inputType tdFetchAndSubmitRSmaResult(pItem, STREAM_DATA_TYPE_SUBMIT_BLOCK); atomic_store_8(&pItem->triggerStat, TASK_TRIGGER_STAT_ACTIVE); - smaDebug("vgId:%d, process rsma insert", SMA_VID(pSma)); SSmaEnv *pEnv = SMA_RSMA_ENV(pSma); SRSmaStat *pStat = SMA_RSMA_STAT(pEnv->pStat); @@ -1036,7 +1049,7 @@ static void *tdRSmaPersistExec(void *param) { for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) { qTaskInfo_t taskInfo = pRSmaInfo->items[i].taskInfo; if (!taskInfo) { - smaDebug("vgId:%d, table %" PRIi64 " level %d qTaskInfo is NULL", vid, pRSmaInfo->suid, i + 1); + smaDebug("vgId:%d, rsma, table %" PRIi64 " level %d qTaskInfo is NULL", vid, pRSmaInfo->suid, i + 1); continue; } @@ -1044,27 +1057,20 @@ static void *tdRSmaPersistExec(void *param) { int32_t len = 0; int8_t type = (int8_t)(i + 1); if (qSerializeTaskStatus(taskInfo, &pOutput, &len) < 0) { - smaError("vgId:%d, table %" PRIi64 " level %d serialize rsma task failed since %s", vid, pRSmaInfo->suid, i + 1, - terrstr(terrno)); + smaError("vgId:%d, rsma, table %" PRIi64 " level %d serialize qTaskInfo failed since %s", vid, pRSmaInfo->suid, + i + 1, terrstr(terrno)); goto _err; } if (!pOutput || len <= 0) { - smaDebug("vgId:%d, table %" PRIi64 " level %d serialize rsma task success but no output(len %d), not persist", + smaDebug("vgId:%d, rsma, table %" PRIi64 + " level %d serialize qTaskInfo success but no output(len %d), not persist", vid, pRSmaInfo->suid, i + 
1, len); taosMemoryFreeClear(pOutput); continue; } - smaDebug("vgId:%d, table %" PRIi64 " level %d serialize rsma task success with len %d, need persist", vid, + smaDebug("vgId:%d, rsma, table %" PRIi64 " level %d serialize qTaskInfo success with len %d, need persist", vid, pRSmaInfo->suid, i + 1, len); -#if 0 - if (qDeserializeTaskStatus(taskInfo, pOutput, len) < 0) { - smaError("vgId:%d, table %" PRIi64 "level %d deserialize rsma task failed since %s", vid, pRSmaInfo->suid, - i + 1, terrstr(terrno)); - } else { - smaDebug("vgId:%d, table %" PRIi64 " level %d deserialize rsma task success", vid, pRSmaInfo->suid, i + 1); - } -#endif if (!isFileCreated) { char qTaskInfoFName[TSDB_FILENAME_LEN]; @@ -1084,11 +1090,11 @@ static void *tdRSmaPersistExec(void *param) { ASSERT(headLen <= RSMA_QTASKINFO_HEAD_LEN); tdAppendTFile(&tFile, (void *)&tmpBuf, headLen, &toffset); - smaDebug("vgId:%d, table %" PRIi64 " level %d head part(len:%d) appended to offset:%" PRIi64, vid, + smaDebug("vgId:%d, rsma, table %" PRIi64 " level %d head part(len:%d) appended to offset:%" PRIi64, vid, pRSmaInfo->suid, i + 1, headLen, toffset); tdAppendTFile(&tFile, pOutput, len, &toffset); - smaDebug("vgId:%d, table %" PRIi64 " level %d body part len:%d appended to offset:%" PRIi64, vid, pRSmaInfo->suid, - i + 1, len, toffset); + smaDebug("vgId:%d, rsma, table %" PRIi64 " level %d body part len:%d appended to offset:%" PRIi64, vid, + pRSmaInfo->suid, i + 1, len, toffset); taosMemoryFree(pOutput); } @@ -1098,13 +1104,13 @@ static void *tdRSmaPersistExec(void *param) { _normal: if (isFileCreated) { if (tdUpdateTFileHeader(&tFile) < 0) { - smaError("vgId:%d, failed to update tfile %s header since %s", vid, TD_TFILE_FULL_NAME(&tFile), + smaError("vgId:%d, rsma, failed to update tfile %s header since %s", vid, TD_TFILE_FULL_NAME(&tFile), tstrerror(terrno)); tdCloseTFile(&tFile); tdRemoveTFile(&tFile); goto _err; } else { - smaDebug("vgId:%d, succeed to update tfile %s header", vid, TD_TFILE_FULL_NAME(&tFile)); + smaDebug("vgId:%d, rsma, succeed to update tfile %s header", vid, TD_TFILE_FULL_NAME(&tFile)); } tdCloseTFile(&tFile); @@ -1114,10 +1120,10 @@ _normal: char *pos = strstr(newFName, tdQTaskInfoFname[TD_QTASK_TMP_F]); strncpy(pos, tdQTaskInfoFname[TD_QTASK_TMP_F], TSDB_FILENAME_LEN - POINTER_DISTANCE(pos, newFName)); if (taosRenameFile(TD_TFILE_FULL_NAME(&tFile), newFName) != 0) { - smaError("vgId:%d, failed to rename %s to %s", vid, TD_TFILE_FULL_NAME(&tFile), newFName); + smaError("vgId:%d, rsma, failed to rename %s to %s", vid, TD_TFILE_FULL_NAME(&tFile), newFName); goto _err; } else { - smaDebug("vgId:%d, succeed to rename %s to %s", vid, TD_TFILE_FULL_NAME(&tFile), newFName); + smaDebug("vgId:%d, rsma, succeed to rename %s to %s", vid, TD_TFILE_FULL_NAME(&tFile), newFName); } } goto _end; @@ -1129,13 +1135,13 @@ _end: if (TASK_TRIGGER_STAT_INACTIVE == atomic_val_compare_exchange_8(RSMA_TRIGGER_STAT(pRSmaStat), TASK_TRIGGER_STAT_INACTIVE, TASK_TRIGGER_STAT_ACTIVE)) { - smaDebug("vgId:%d, persist task is active again", vid); + smaDebug("vgId:%d, rsma persist task is active again", vid); } else if (TASK_TRIGGER_STAT_CANCELLED == atomic_val_compare_exchange_8(RSMA_TRIGGER_STAT(pRSmaStat), TASK_TRIGGER_STAT_CANCELLED, TASK_TRIGGER_STAT_FINISHED)) { - smaDebug("vgId:%d, persist task is cancelled", vid); + smaDebug("vgId:%d, rsma persist task is cancelled", vid); } else { - smaWarn("vgId:%d, persist task in abnormal stat %" PRIi8, vid, atomic_load_8(RSMA_TRIGGER_STAT(pRSmaStat))); + smaWarn("vgId:%d, rsma persist task in 
abnormal stat %" PRIi8, vid, atomic_load_8(RSMA_TRIGGER_STAT(pRSmaStat))); ASSERT(0); } atomic_store_8(RSMA_RUNNING_STAT(pRSmaStat), 0); @@ -1179,9 +1185,8 @@ static void tdRSmaPersistTask(SRSmaStat *pRSmaStat) { */ static void tdRSmaPersistTrigger(void *param, void *tmrId) { SRSmaStat *rsmaStat = param; - int64_t refId = rsmaStat->refId; + SRSmaStat *pRSmaStat = (SRSmaStat *)taosAcquireRef(smaMgmt.smaRef, rsmaStat->refId); - SRSmaStat *pRSmaStat = (SRSmaStat *)taosAcquireRef(smaMgmt.smaRef, refId); if (!pRSmaStat) { smaDebug("rsma persistence task not start since already destroyed"); return; @@ -1221,5 +1226,5 @@ static void tdRSmaPersistTrigger(void *param, void *tmrId) { smaWarn("rsma persistence not start since unknown stat %" PRIi8, tmrStat); } break; } - taosReleaseRef(smaMgmt.smaRef, refId); + taosReleaseRef(smaMgmt.smaRef, rsmaStat->refId); } From 63d799feebe277a674818df76291984303d0dc01 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Thu, 30 Jun 2022 11:32:00 +0800 Subject: [PATCH 11/14] refactor(query): set stateduration function default time unit as db precision --- source/libs/function/src/builtinsimpl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index e40ab3784d..a75d5a2ecb 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -4283,7 +4283,7 @@ int32_t stateDurationFunction(SqlFunctionCtx* pCtx) { SColumnInfoData* pOutput = (SColumnInfoData*)pCtx->pOutput; // TODO: process timeUnit for different db precisions - int32_t timeUnit = 1000; + int32_t timeUnit = 1; if (pCtx->numOfParams == 5) { // TODO: param number incorrect timeUnit = pCtx->param[3].param.i; } From b0b5d417af60221264a34c9ea2d233b463414f9d Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Thu, 30 Jun 2022 11:33:06 +0800 Subject: [PATCH 12/14] modify test case --- tests/system-test/2-query/stateduration.py | 72 +++++++++++----------- 1 file changed, 36 insertions(+), 36 deletions(-) diff --git a/tests/system-test/2-query/stateduration.py b/tests/system-test/2-query/stateduration.py index fa71009ef2..23169553dc 100644 --- a/tests/system-test/2-query/stateduration.py +++ b/tests/system-test/2-query/stateduration.py @@ -38,15 +38,15 @@ class TDTestCase: tdSql.query(f"select stateduration(col{i},'{j}',5) from test") tdSql.checkRows(10) if j in ['LT' ,'lt','Lt','lT']: - tdSql.checkEqual(tdSql.queryResult,[(0,), (0,), (0,), (0,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)]) + tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)]) elif j in ['GT','gt', 'Gt','gT']: - tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (0,), (0,), (0,), (0,), (0,)]) + tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,)]) elif j in ['LE','le','Le','lE']: - tdSql.checkEqual(tdSql.queryResult,[(0,), (0,), (0,), (0,), (0,), (-1,), (-1,), (-1,), (-1,), (-1,)]) + tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (4,), (-1,), (-1,), (-1,), (-1,), (-1,)]) elif j in [ 'GE','ge','Ge','gE']: - tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (0,), (0,), (0,), (0,), (0,), (0,)]) + tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,), (5,)]) elif j in ['NE','ne','Ne','nE']: - tdSql.checkEqual(tdSql.queryResult,[(0,), (0,), (0,), (0,), (-1,), (0,), (0,), (0,), (0,), (0,)]) + tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), 
(3,), (-1,), (0,), (1,), (2,), (3,), (4,)]) elif j in ['EQ','eq','Eq','eQ']: tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (0,), (-1,), (-1,), (-1,), (-1,), (-1,)]) for i in float_list: @@ -54,11 +54,11 @@ class TDTestCase: tdSql.query(f"select stateduration(col{i},'{j}',5) from test") tdSql.checkRows(10) if j in ['LT','lt','Lt','lT','LE','le','Le','lE']: - tdSql.checkEqual(tdSql.queryResult,[(0,), (0,), (0,), (0,), (0,), (-1,), (-1,), (-1,), (-1,), (-1,)]) + tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (4,), (-1,), (-1,), (-1,), (-1,), (-1,)]) elif j in ['GE','ge','Ge','gE','GT','gt','Gt','gT']: - tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (0,), (0,), (0,), (0,), (0,)]) + tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,)]) elif j in ['NE','ne','Ne','nE']: - tdSql.checkEqual(tdSql.queryResult,[(0,), (0,), (0,), (0,), (0,), (0,), (0,), (0,), (0,), (0,)]) + tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,)]) elif j in ['EQ','eq','Eq','eQ']: tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)]) @@ -66,34 +66,34 @@ class TDTestCase: for i in error_column_list: for j in self.param_list: tdSql.error(f"select stateduration({i},{j},5) from test") - + error_param_list = ['a',1] for i in error_param_list: tdSql.error(f"select stateduration(col1,{i},5) from test") - + # timestamp = 1s, time_unit =1s tdSql.execute('''create table test1(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned)''') for i in range(self.row_num): tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" % (self.ts + i*1000, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) - + for i in integer_list: for j in self.param_list: tdSql.query(f"select stateduration(col{i},'{j}',5) from test1") tdSql.checkRows(10) # print(tdSql.queryResult) if j in ['LT' ,'lt','Lt','lT']: - tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)]) + tdSql.checkEqual(tdSql.queryResult,[(0,), (1000,), (2000,), (3000,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)]) elif j in ['GT','gt', 'Gt','gT']: - tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,)]) + tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (0,), (1000,), (2000,), (3000,), (4000,)]) elif j in ['LE','le','Le','lE']: - tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (4,), (-1,), (-1,), (-1,), (-1,), (-1,)]) + tdSql.checkEqual(tdSql.queryResult,[(0,), (1000,), (2000,), (3000,), (4000,), (-1,), (-1,), (-1,), (-1,), (-1,)]) elif j in [ 'GE','ge','Ge','gE']: - tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,), (5,)]) + tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (0,), (1000,), (2000,), (3000,), (4000,), (5000,)]) elif j in ['NE','ne','Ne','nE']: - tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (-1,), (0,), (1,), (2,), (3,), (4,)]) - elif j in ['EQ','eq','Eq','eQ']: + tdSql.checkEqual(tdSql.queryResult,[(0,), (1000,), (2000,), (3000,), (-1,), (0,), (1000,), (2000,), (3000,), (4000,)]) + elif j in ['EQ','eq','Eq','eQ']: 
tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (0,), (-1,), (-1,), (-1,), (-1,), (-1,)]) for i in float_list: for j in self.param_list: @@ -101,22 +101,22 @@ class TDTestCase: tdSql.checkRows(10) print(tdSql.queryResult) if j in ['LT','lt','Lt','lT','LE','le','Le','lE']: - tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (4,), (-1,), (-1,), (-1,), (-1,), (-1,)]) + tdSql.checkEqual(tdSql.queryResult,[(0,), (1000,), (2000,), (3000,), (4000,), (-1,), (-1,), (-1,), (-1,), (-1,)]) elif j in ['GE','ge','Ge','gE','GT','gt','Gt','gT']: - tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,)]) + tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (0,), (1000,), (2000,), (3000,), (4000,)]) elif j in ['NE','ne','Ne','nE']: - tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,)]) + tdSql.checkEqual(tdSql.queryResult,[(0,), (1000,), (2000,), (3000,), (4000,), (5000,), (6000,), (7000,), (8000,), (9000,)]) elif j in ['EQ','eq','Eq','eQ']: tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)]) # timestamp = 1m, time_unit =1m - tdSql.execute('''create table test2(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, + tdSql.execute('''create table test2(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned)''') for i in range(self.row_num): - tdSql.execute("insert into test2 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + tdSql.execute("insert into test2 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" % (self.ts + i*1000*60, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) - + for i in integer_list: for j in self.param_list: tdSql.query(f"select stateduration(col{i},'{j}',5,1m) from test2") @@ -132,7 +132,7 @@ class TDTestCase: tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,), (5,)]) elif j in ['NE','ne','Ne','nE']: tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (-1,), (0,), (1,), (2,), (3,), (4,)]) - elif j in ['EQ','eq','Eq','eQ']: + elif j in ['EQ','eq','Eq','eQ']: tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (0,), (-1,), (-1,), (-1,), (-1,), (-1,)]) for i in float_list: for j in self.param_list: @@ -147,14 +147,14 @@ class TDTestCase: tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,)]) elif j in ['EQ','eq','Eq','eQ']: tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)]) - + # timestamp = 1h, time_unit =1h - tdSql.execute('''create table test3(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, + tdSql.execute('''create table test3(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned)''') for i in range(self.row_num): - tdSql.execute("insert into test3 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + tdSql.execute("insert into test3 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', 
'涛思数据%d', %d, %d, %d, %d)" % (self.ts + i*1000*60*60, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) - + for i in integer_list: for j in self.param_list: tdSql.query(f"select stateduration(col{i},'{j}',5,1h) from test3") @@ -170,7 +170,7 @@ class TDTestCase: tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,), (5,)]) elif j in ['NE','ne','Ne','nE']: tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (-1,), (0,), (1,), (2,), (3,), (4,)]) - elif j in ['EQ','eq','Eq','eQ']: + elif j in ['EQ','eq','Eq','eQ']: tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (0,), (-1,), (-1,), (-1,), (-1,), (-1,)]) for i in float_list: for j in self.param_list: @@ -202,7 +202,7 @@ class TDTestCase: tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (0,), (60,), (120,), (180,), (240,), (300,)]) elif j in ['NE','ne','Ne','nE']: tdSql.checkEqual(tdSql.queryResult,[(0,), (60,), (120,), (180,), (-1,), (0,), (60,), (120,), (180,), (240,)]) - elif j in ['EQ','eq','Eq','eQ']: + elif j in ['EQ','eq','Eq','eQ']: tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (0,), (-1,), (-1,), (-1,), (-1,), (-1,)]) for i in float_list: for j in self.param_list: @@ -219,13 +219,13 @@ class TDTestCase: tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)]) # for stb - tdSql.execute('''create table stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, + tdSql.execute('''create table stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(t0 int)''') tdSql.execute('create table stb_1 using stb tags(1)') for i in range(self.row_num): - tdSql.execute("insert into stb_1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + tdSql.execute("insert into stb_1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" % (self.ts + i*1000*60*60, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) - + for i in integer_list: for j in self.param_list: tdSql.query(f"select stateduration(col{i},'{j}',5,1h) from stb") @@ -241,7 +241,7 @@ class TDTestCase: tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,), (5,)]) elif j in ['NE','ne','Ne','nE']: tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (-1,), (0,), (1,), (2,), (3,), (4,)]) - elif j in ['EQ','eq','Eq','eQ']: + elif j in ['EQ','eq','Eq','eQ']: tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (0,), (-1,), (-1,), (-1,), (-1,), (-1,)]) for i in float_list: for j in self.param_list: @@ -256,10 +256,10 @@ class TDTestCase: tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,)]) elif j in ['EQ','eq','Eq','eQ']: tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)]) - + def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) tdCases.addWindows(__file__, TDTestCase()) -tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file +tdCases.addLinux(__file__, TDTestCase()) From 5d694bc5d72544ba428868c6ee3063ace6a6f37a Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Thu, 30 Jun 2022 11:45:11 +0800 Subject: [PATCH 
From 5d694bc5d72544ba428868c6ee3063ace6a6f37a Mon Sep 17 00:00:00 2001
From: Ganlin Zhao
Date: Thu, 30 Jun 2022 11:45:11 +0800
Subject: [PATCH 13/14] fix translate function for now/today

---
 source/libs/function/src/builtins.c | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)

diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c
index 1f89538447..b61f95f9a0 100644
--- a/source/libs/function/src/builtins.c
+++ b/source/libs/function/src/builtins.c
@@ -234,7 +234,7 @@ static int32_t translateWduration(SFunctionNode* pFunc, char* pErrBuf, int32_t l
   return TSDB_CODE_SUCCESS;
 }

-static int32_t translateTimePseudoColumn(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
+static int32_t translateNowToday(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
   // pseudo column do not need to check parameters

   //add database precision as param
@@ -245,6 +245,13 @@ static int32_t translateTimePseudoColumn(SFunctionNode* pFunc, char* pErrBuf, in
   return TSDB_CODE_SUCCESS;
 }
+static int32_t translateTimePseudoColumn(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
+  // pseudo column do not need to check parameters
+
+  pFunc->node.resType = (SDataType){.bytes = sizeof(int64_t), .type = TSDB_DATA_TYPE_TIMESTAMP};
+  return TSDB_CODE_SUCCESS;
+}
+
 static int32_t translateTimezone(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
   pFunc->node.resType = (SDataType){.bytes = TD_TIMEZONE_LEN, .type = TSDB_DATA_TYPE_BINARY};
   return TSDB_CODE_SUCCESS;
 }
@@ -2471,7 +2478,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
     .name = "now",
     .type = FUNCTION_TYPE_NOW,
     .classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_DATETIME_FUNC,
-    .translateFunc = translateTimePseudoColumn,
+    .translateFunc = translateNowToday,
     .getEnvFunc = NULL,
     .initFunc = NULL,
     .sprocessFunc = nowFunction,
@@ -2481,7 +2488,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
     .name = "today",
     .type = FUNCTION_TYPE_TODAY,
     .classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_DATETIME_FUNC,
-    .translateFunc = translateTimePseudoColumn,
+    .translateFunc = translateNowToday,
     .getEnvFunc = NULL,
     .initFunc = NULL,
     .sprocessFunc = todayFunction,
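
For reference, the builtins.c change above splits NOW/TODAY translation out of the generic time pseudo-column translation so that the database precision can be attached as a parameter (per the "add database precision as param" comment), while the remaining time pseudo columns keep the plain TIMESTAMP result type. A rough sketch of the behaviour this is meant to support, seen from the SQL side; the database names and precisions are illustrative assumptions, not taken from the patch:

    from util.sql import tdSql  # same fixture the surrounding test cases rely on

    # Sketch only: assumes now()/today() should follow the precision of the current database.
    tdSql.execute("create database if not exists db_ms precision 'ms'")
    tdSql.execute("use db_ms")
    tdSql.query("select now(), today()")   # expected: millisecond-precision timestamps

    tdSql.execute("create database if not exists db_ns precision 'ns'")
    tdSql.execute("use db_ns")
    tdSql.query("select now(), today()")   # expected: nanosecond-precision timestamps
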
From 45e9b239015bde25882acd1d7b29a8d2a1889ac3 Mon Sep 17 00:00:00 2001
From: Shuduo Sang
Date: Thu, 30 Jun 2022 13:53:11 +0800
Subject: [PATCH 14/14] fix: install script for 3.0 (#14379)

* fix: remove sudo from directory manipulation for 3.0
* fix: remove more sudo from directory manipulation
* fix: packaging/release.sh
---
 packaging/release.sh | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/packaging/release.sh b/packaging/release.sh
index 1d9f6dfc68..2ac52a0df9 100755
--- a/packaging/release.sh
+++ b/packaging/release.sh
@@ -111,9 +111,9 @@ else
 fi

 csudo=""
-if command -v sudo > /dev/null; then
-  csudo="sudo "
-fi
+#if command -v sudo > /dev/null; then
+#  csudo="sudo "
+#fi

 function is_valid_version() {
   [ -z $1 ] && return 1 || :
@@ -181,7 +181,9 @@ cd "${curr_dir}"

 # 2. cmake executable file
 compile_dir="${top_dir}/debug"
-${csudo}rm -rf ${compile_dir}
+if [ -d ${compile_dir} ]; then
+  rm -rf ${compile_dir}
+fi

 mkdir -p ${compile_dir}
 cd ${compile_dir}
@@ -258,9 +260,9 @@ if [ "$osType" != "Darwin" ]; then
   if [[ "$pagMode" == "full" ]]; then
     if [ -d ${top_dir}/tools/taos-tools/packaging/deb ]; then
       cd ${top_dir}/tools/taos-tools/packaging/deb
+      taos_tools_ver=$(git describe --tags | sed -e 's/ver-//g' | awk -F '-' '{print $1}')
       [ -z "$taos_tools_ver" ] && taos_tools_ver="0.1.0"
-      taos_tools_ver=$(git describe --tags | sed -e 's/ver-//g' | awk -F '-' '{print $1}')
       ${csudo}./make-taos-tools-deb.sh ${top_dir} \
         ${compile_dir} ${output_dir} ${taos_tools_ver} ${cpuType} ${osType} ${verMode} ${verType}
     fi
@@ -283,9 +285,9 @@ if [ "$osType" != "Darwin" ]; then
   if [[ "$pagMode" == "full" ]]; then
     if [ -d ${top_dir}/tools/taos-tools/packaging/rpm ]; then
       cd ${top_dir}/tools/taos-tools/packaging/rpm
+      taos_tools_ver=$(git describe --tags | sed -e 's/ver-//g' | awk -F '-' '{print $1}' | sed -e 's/-/_/g')
       [ -z "$taos_tools_ver" ] && taos_tools_ver="0.1.0"
-      taos_tools_ver=$(git describe --tags | sed -e 's/ver-//g' | awk -F '-' '{print $1}' | sed -e 's/-/_/g')
       ${csudo}./make-taos-tools-rpm.sh ${top_dir} \
         ${compile_dir} ${output_dir} ${taos_tools_ver} ${cpuType} ${osType} ${verMode} ${verType}
     fi
@@ -300,7 +302,7 @@ if [ "$osType" != "Darwin" ]; then
   ${csudo}./makepkg.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${verNumberComp} ${dbName}
   ${csudo}./makeclient.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName}
-  # ${csudo}./makearbi.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
+  ${csudo}./makearbi.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
 else # only make client for Darwin