From 53eefc1ba27272c0e87ea63863bc475989248e37 Mon Sep 17 00:00:00 2001 From: "wenzhouwww@live.cn" Date: Thu, 23 Jun 2022 20:27:46 +0800 Subject: [PATCH 01/16] add test case for irate and check value of all --- tests/system-test/2-query/irate.py | 279 +++++++++++++++++++++++++++++ tests/system-test/fulltest.sh | 1 + 2 files changed, 280 insertions(+) create mode 100644 tests/system-test/2-query/irate.py diff --git a/tests/system-test/2-query/irate.py b/tests/system-test/2-query/irate.py new file mode 100644 index 0000000000..3a451d069c --- /dev/null +++ b/tests/system-test/2-query/irate.py @@ -0,0 +1,279 @@ +import taos +import sys +import datetime +import inspect + +from util.log import * +from util.sql import * +from util.cases import * +import random + + +class TDTestCase: + updatecfgDict = {'debugFlag': 143, "cDebugFlag": 143, "uDebugFlag": 143, "rpcDebugFlag": 143, "tmrDebugFlag": 143, + "jniDebugFlag": 143, "simDebugFlag": 143, "dDebugFlag": 143, "dDebugFlag": 143, "vDebugFlag": 143, "mDebugFlag": 143, "qDebugFlag": 143, + "wDebugFlag": 143, "sDebugFlag": 143, "tsdbDebugFlag": 143, "tqDebugFlag": 143, "fsDebugFlag": 143, "fnDebugFlag": 143} + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor(), True) + self.tb_nums = 10 + self.row_nums = 20 + self.ts = 1434938400000 + self.time_step = 1000 + + def insert_datas_and_check_irate(self ,tbnums , rownums , time_step ): + + tdLog.info(" prepare datas for auto check irate function ") + + tdSql.execute(" create database test ") + tdSql.execute(" use test ") + tdSql.execute(" create stable stb (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint,\ + c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) tags (t1 int)") + for tbnum in range(tbnums): + tbname = "sub_tb_%d"%tbnum + tdSql.execute(" create table %s using stb tags(%d) "%(tbname , tbnum)) + + ts = self.ts + for row in range(rownums): + ts = self.ts + time_step*row + c1 = random.randint(0,1000) + c2 = random.randint(0,100000) + c3 = random.randint(0,125) + c4 = random.randint(0,125) + c5 = random.random()/1.0 + c6 = random.random()/1.0 + c7 = "'true'" + c8 = "'binary_val'" + c9 = "'nchar_val'" + c10 = ts + tdSql.execute(f" insert into {tbname} values ({ts},{c1},{c2},{c3},{c4},{c5},{c6},{c7},{c8},{c9},{c10})") + + tdSql.execute("use test") + tbnames = ["stb", "sub_tb_1"] + support_types = ["BIGINT", "SMALLINT", "TINYINT", "FLOAT", "DOUBLE", "INT"] + for tbname in tbnames: + tdSql.query("desc {}".format(tbname)) + coltypes = tdSql.queryResult + for coltype in coltypes: + colname = coltype[0] + if coltype[1] in support_types and coltype[-1] != "TAG" : + irate_sql = "select irate({}) from (select * from {} order by tbname ) ".format(colname, tbname) + origin_sql = "select ts , {} , cast(ts as bigint) from (select ts , {} from {} order by ts desc limit 2 offset 0 ) order by ts".format(colname,colname, tbname) + + tdSql.query(irate_sql) + irate_result = tdSql.queryResult + tdSql.query(origin_sql) + origin_result = tdSql.queryResult + irate_value = irate_result[0][0] + if origin_result[1][-1] - origin_result[0][-1] == 0: + comput_irate_value = 0 + elif (origin_result[1][1] - origin_result[0][1])<0: + comput_irate_value = origin_result[1][1]*1000/( origin_result[1][-1] - origin_result[0][-1]) + else: + comput_irate_value = (origin_result[1][1] - origin_result[0][1])*1000/( origin_result[1][-1] - origin_result[0][-1]) + if comput_irate_value ==irate_value: + tdLog.info(" irate work as expected , sql is %s "% irate_sql) + 
else: + tdLog.exit(" irate work not as expected , sql is %s "% irate_sql) + + def prepare_tag_datas(self): + # prepare datas + tdSql.execute( + "create database if not exists testdb keep 3650 duration 1000") + tdSql.execute(" use testdb ") + tdSql.execute( + '''create table stb1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32)) + ''' + ) + + tdSql.execute( + ''' + create table t1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + ''' + ) + for i in range(4): + tdSql.execute( + f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') + + for i in range(9): + tdSql.execute( + f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + ) + tdSql.execute( + f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + ) + tdSql.execute( + "insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") + tdSql.execute( + "insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute( + "insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute( + "insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + + tdSql.execute( + "insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute( + "insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute( + "insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + + tdSql.execute( + f'''insert into t1 values + ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a ) + ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a ) + ( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a ) + ( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a ) + ( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a ) + ( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a ) + ( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" ) + ( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" ) + ( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" ) + ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ''' + ) + + def test_errors(self): + tdSql.execute("use testdb") + error_sql_lists = [ + "select irate from t1", + "select 
irate(-+--+c1) from t1", + # "select +-irate(c1) from t1", + # "select ++-irate(c1) from t1", + # "select ++--irate(c1) from t1", + # "select - -irate(c1)*0 from t1", + # "select irate(tbname+1) from t1 ", + "select irate(123--123)==1 from t1", + "select irate(c1) as 'd1' from t1", + "select irate(c1 ,c2 ) from t1", + "select irate(c1 ,NULL) from t1", + "select irate(,) from t1;", + "select irate(irate(c1) ab from t1)", + "select irate(c1) as int from t1", + "select irate from stb1", + # "select irate(-+--+c1) from stb1", + # "select +-irate(c1) from stb1", + # "select ++-irate(c1) from stb1", + # "select ++--irate(c1) from stb1", + # "select - -irate(c1)*0 from stb1", + # "select irate(tbname+1) from stb1 ", + "select irate(123--123)==1 from stb1", + "select irate(c1) as 'd1' from stb1", + "select irate(c1 ,c2 ) from stb1", + "select irate(c1 ,NULL) from stb1", + "select irate(,) from stb1;", + "select irate(abs(c1) ab from stb1)", + "select irate(c1) as int from stb1" + ] + for error_sql in error_sql_lists: + tdSql.error(error_sql) + + def support_types(self): + tdSql.execute("use testdb") + tbnames = ["stb1", "t1", "ct1", "ct2"] + support_types = ["BIGINT", "SMALLINT", "TINYINT", "FLOAT", "DOUBLE", "INT"] + for tbname in tbnames: + tdSql.query("desc {}".format(tbname)) + coltypes = tdSql.queryResult + for coltype in coltypes: + colname = coltype[0] + irate_sql = "select irate({}) from {}".format(colname, tbname) + if coltype[1] in support_types: + tdSql.query(irate_sql) + else: + tdSql.error(irate_sql) + + def basic_irate_function(self): + + # used for empty table , ct3 is empty + tdSql.query("select irate(c1) from ct3") + tdSql.checkRows(0) + tdSql.query("select irate(c2) from ct3") + tdSql.checkRows(0) + + # used for regular table + tdSql.query("select irate(c1) from t1") + tdSql.checkData(0, 0, 0.000000386) + + # used for sub table + tdSql.query("select irate(abs(c1+c2)) from ct1") + tdSql.checkData(0, 0, 0.000000000) + + + # mix with common col + tdSql.error("select c1, irate(c1) from ct1") + + # mix with common functions + tdSql.error("select irate(c1), abs(c1) from ct4 ") + + # agg functions mix with agg functions + tdSql.query("select irate(c1), count(c5) from stb1 partition by tbname ") + tdSql.checkData(0, 0, 0.000000000) + tdSql.checkData(1, 0, 0.000000000) + tdSql.checkData(0, 1, 13) + tdSql.checkData(1, 1, 9) + + + def irate_func_filter(self): + tdSql.execute("use testdb") + tdSql.query( + "select irate(c1+2)/2 from ct4 where c1>5 ") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0.000000514) + + tdSql.query( + "select irate(c1+c2)/10 from ct4 where c1=5 ") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0.000000000) + + tdSql.query( + "select irate(c1+c2)/10 from stb1 where c1 = 5 partition by tbname ") + tdSql.checkRows(2) + tdSql.checkData(0, 0, 0.000000000) + + + def irate_Arithmetic(self): + pass + + + def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring + tdSql.prepare() + + tdLog.printNoPrefix("==========step1:create table ==============") + + self.prepare_tag_datas() + + tdLog.printNoPrefix("==========step2:test errors ==============") + + self.test_errors() + + tdLog.printNoPrefix("==========step3:support types ============") + + self.support_types() + + tdLog.printNoPrefix("==========step4: irate basic query ============") + + self.basic_irate_function() + + tdLog.printNoPrefix("==========step5: irate filter query ============") + + self.irate_func_filter() + + + tdLog.printNoPrefix("==========step6: check result of query ============") + + 
self.insert_datas_and_check_irate(self.tb_nums,self.row_nums,self.time_step) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh index 19a67e924c..8d651ec35c 100755 --- a/tests/system-test/fulltest.sh +++ b/tests/system-test/fulltest.sh @@ -109,6 +109,7 @@ python3 ./test.py -f 2-query/distribute_agg_apercentile.py python3 ./test.py -f 2-query/distribute_agg_avg.py python3 ./test.py -f 2-query/distribute_agg_stddev.py python3 ./test.py -f 2-query/twa.py +python3 ./test.py -f 2-query/irate.py python3 ./test.py -f 6-cluster/5dnode1mnode.py python3 ./test.py -f 6-cluster/5dnode2mnode.py From 01f64b4d17db39954f939ea4c8b6afbaba67a438 Mon Sep 17 00:00:00 2001 From: slzhou Date: Fri, 24 Jun 2022 15:28:37 +0800 Subject: [PATCH 02/16] fix: fix bugs of less output when interval overlaps --- source/libs/executor/src/timewindowoperator.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 554fce3968..f0358b6d7b 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -3924,7 +3924,7 @@ static int32_t outputPrevIntervalResult(SOperatorInfo* pOperatorInfo, uint64_t t return 0; } - if (newWin == NULL || (ascScan && newWin->skey > prevWin->ekey || (!ascScan) && newWin->skey < prevWin->ekey)) { + if (newWin == NULL || (ascScan && newWin->skey > prevWin->skey || (!ascScan) && newWin->skey < prevWin->skey)) { SET_RES_WINDOW_KEY(iaInfo->aggSup.keyBuf, &prevWin->skey, TSDB_KEYSIZE, tableGroupId); SResultRowPosition* p1 = (SResultRowPosition*)taosHashGet(iaInfo->aggSup.pResultRowHashTable, iaInfo->aggSup.keyBuf, GET_RES_WINDOW_KEY_LEN(TSDB_KEYSIZE)); From 7fe7b9625e94c49eabb052cbde76780763f9e783 Mon Sep 17 00:00:00 2001 From: plum-lihui Date: Fri, 24 Jun 2022 16:03:57 +0800 Subject: [PATCH 03/16] test:add test case for tmq --- tests/system-test/7-tmq/tmqCommon.py | 55 +++++-- tests/system-test/7-tmq/tmqConsumerGroup.py | 170 ++++++++++++++++++++ tests/system-test/fulltest.sh | 1 + tests/test/c/tmqSim.c | 150 +++++++++-------- 4 files changed, 293 insertions(+), 83 deletions(-) create mode 100644 tests/system-test/7-tmq/tmqConsumerGroup.py diff --git a/tests/system-test/7-tmq/tmqCommon.py b/tests/system-test/7-tmq/tmqCommon.py index d17e36fc97..b8aa78e3ac 100644 --- a/tests/system-test/7-tmq/tmqCommon.py +++ b/tests/system-test/7-tmq/tmqCommon.py @@ -11,7 +11,9 @@ # -*- coding: utf-8 -*- +from asyncore import loop from collections import defaultdict +import subprocess import random import string import threading @@ -75,7 +77,7 @@ class TMQCom: return resultList - def startTmqSimProcess(self,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0): + def startTmqSimProcess(self,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0,alias=0): buildPath = tdCom.getBuildPath() cfgPath = tdCom.getClientCfgPath() if valgrind == 1: @@ -88,30 +90,53 @@ class TMQCom: shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) shellCmd += "> nul 2>&1 &" else: - shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath + processorName = buildPath + '/build/bin/tmq_sim' + if alias != 0: + processorNameNew = buildPath + '/build/bin/tmq_sim_new' + shellCmd = 'cp %s %s'%(processorName, processorNameNew) + 
os.system(shellCmd) + processorName = processorNameNew + shellCmd = 'nohup ' + processorName + ' -c ' + cfgPath shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) shellCmd += "> /dev/null 2>&1 &" tdLog.info(shellCmd) - os.system(shellCmd) + os.system(shellCmd) - def getStartConsumeNotifyFromTmqsim(self,cdbName='cdb'): - while 1: + def stopTmqSimProcess(self, processorName): + psCmd = "ps -ef|grep -w %s|grep -v grep | awk '{print $2}'"%(processorName) + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") + while(processID): + killCmd = "kill -INT %s > /dev/null 2>&1" % processID + os.system(killCmd) + time.sleep(0.2) + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") + tdLog.debug("%s is stopped by kill -INT" % (processorName)) + + def getStartConsumeNotifyFromTmqsim(self,cdbName='cdb',rows=1): + loopFlag = 1 + while loopFlag: tdSql.query("select * from %s.notifyinfo"%cdbName) #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3)) - if (tdSql.getRows() == 1) and (tdSql.getData(0, 1) == 0): - break - else: - time.sleep(0.1) + actRows = tdSql.getRows() + if (actRows >= rows): + for i in range(actRows): + if tdSql.getData(i, 1) == 0: + loopFlag = 0 + break + time.sleep(0.1) return - def getStartCommitNotifyFromTmqsim(self,cdbName='cdb'): - while 1: + def getStartCommitNotifyFromTmqsim(self,cdbName='cdb',rows=2): + loopFlag = 1 + while loopFlag: tdSql.query("select * from %s.notifyinfo"%cdbName) #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3)) - if tdSql.getRows() == 2 : - print(tdSql.getData(0, 1), tdSql.getData(1, 1)) - if tdSql.getData(1, 1) == 1: - break + actRows = tdSql.getRows() + if (actRows >= rows): + for i in range(actRows): + if tdSql.getData(i, 1) == 1: + loopFlag = 0 + break time.sleep(0.1) return diff --git a/tests/system-test/7-tmq/tmqConsumerGroup.py b/tests/system-test/7-tmq/tmqConsumerGroup.py new file mode 100644 index 0000000000..bfd63fd4a2 --- /dev/null +++ b/tests/system-test/7-tmq/tmqConsumerGroup.py @@ -0,0 +1,170 @@ + +import taos +import sys +import time +import socket +import os +import threading + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.common import * +sys.path.append("./7-tmq") +from tmqCommon import * + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + #tdSql.init(conn.cursor(), logSql) # output sql.txt file + + def checkFileContent(self, consumerId, queryString): + buildPath = tdCom.getBuildPath() + cfgPath = tdCom.getClientCfgPath() + dstFile = '%s/../log/dstrows_%d.txt'%(cfgPath, consumerId) + cmdStr = '%s/build/bin/taos -c %s -s "%s >> %s"'%(buildPath, cfgPath, queryString, dstFile) + tdLog.info(cmdStr) + os.system(cmdStr) + + consumeRowsFile = '%s/../log/consumerid_%d.txt'%(cfgPath, consumerId) + tdLog.info("rows file: %s, %s"%(consumeRowsFile, dstFile)) + + consumeFile = open(consumeRowsFile, mode='r') + queryFile = open(dstFile, mode='r') + + # skip first line for it is schema + queryFile.readline() + + while True: + dst = queryFile.readline() + src = consumeFile.readline() + + if dst: + if dst != src: + tdLog.exit("consumerId %d consume rows is not match the rows by direct query"%consumerId) + else: + break + return + + def tmqCase1(self): + tdLog.printNoPrefix("======== test case 1: ") + paraDict = {'dbName': 'db1', + 'dropFlag': 1, + 
'event': '', + 'vgroups': 4, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':2}, {'type': 'binary', 'len':20, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbNum': 10, + 'rowsPerTbl': 1000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 20, + 'showMsg': 1, + 'showRow': 1} + + topicNameList = ['topic1', 'topic2'] + expectRowsList = [] + tmqCom.initConsumerTable() + tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=4,replica=1) + tdLog.info("create stb") + tdCom.create_stable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"], column_elm_list=paraDict['colSchema'], tag_elm_list=paraDict['tagSchema']) + tdLog.info("create ctb") + tdCom.create_ctable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix']) + tdLog.info("insert data") + tmqCom.insert_data_2(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"]) + + tdLog.info("create topics from stb with filter") + # queryString = "select ts, log(c1), ceil(pow(c1,3)) from %s.%s where c1 %% 7 == 0" %(paraDict['dbName'], paraDict['stbName']) + queryString = "select ts, log(c1), ceil(pow(c1,3)) from %s.%s" %(paraDict['dbName'], paraDict['stbName']) + sqlString = "create topic %s as %s" %(topicNameList[0], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + tdSql.query(queryString) + expectRowsList.append(tdSql.getRows()) + + # create one stb2 + paraDict["stbName"] = 'stb2' + paraDict["ctbPrefix"] = 'ctbx' + paraDict["rowsPerTbl"] = 5000 + tdLog.info("create stb2") + tdCom.create_stable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"], column_elm_list=paraDict['colSchema'], tag_elm_list=paraDict['tagSchema']) + tdLog.info("create ctb2") + tdCom.create_ctable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix']) + + # queryString = "select ts, sin(c1), abs(pow(c1,3)) from %s.%s where c1 %% 7 == 0" %(paraDict['dbName'], paraDict['stbName']) + queryString = "select ts, sin(c1), abs(pow(c1,3)) from %s.%s" %(paraDict['dbName'], paraDict['stbName']) + sqlString = "create topic %s as %s" %(topicNameList[1], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + # tdSql.query(queryString) + # expectRowsList.append(tdSql.getRows()) + + # init consume info, and start tmq_sim, then check consume result + tdLog.info("insert consume info to consume processor") + consumerId = 0 + expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2 + topicList = "%s,%s"%(topicNameList[0],topicNameList[1]) + ifcheckdata = 1 + ifManualCommit = 1 + keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:3000, auto.offset.reset:earliest' + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor 1") + tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow']) + + tdLog.info("start consume processor 2") + tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], 
paraDict['showRow'],'cdb',0,1) + + tdLog.info("async insert data") + pThread = tmqCom.asyncInsertData(paraDict) + + tdLog.info("wait consumer commit notify") + tmqCom.getStartCommitNotifyFromTmqsim(rows=4) + + tdLog.info("pkill one consume processor") + tmqCom.stopTmqSimProcess('tmq_sim_new') + + pThread.join() + + tdLog.info("wait the consume result") + expectRows = 2 + resultList = tmqCom.selectConsumeResult(expectRows) + actTotalRows = 0 + for i in range(len(resultList)): + actTotalRows += resultList[i] + + tdSql.query(queryString) + expectRowsList.append(tdSql.getRows()) + expectTotalRows = 0 + for i in range(len(expectRowsList)): + expectTotalRows += expectRowsList[i] + + tdLog.info("act consume rows: %d, expect consume rows: %d"%(actTotalRows, expectTotalRows)) + if expectTotalRows <= resultList[0]: + tdLog.info("act consume rows: %d should >= expect consume rows: %d"%(actTotalRows, expectTotalRows)) + tdLog.exit("0 tmq consume rows error!") + + # time.sleep(10) + # for i in range(len(topicNameList)): + # tdSql.query("drop topic %s"%topicNameList[i]) + + tdLog.printNoPrefix("======== test case 1 end ...... ") + + def run(self): + tdSql.prepare() + self.tmqCase1() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh index 73e6716cad..a136334f70 100755 --- a/tests/system-test/fulltest.sh +++ b/tests/system-test/fulltest.sh @@ -135,3 +135,4 @@ python3 ./test.py -f 7-tmq/stbFilter.py python3 ./test.py -f 7-tmq/tmqCheckData.py python3 ./test.py -f 7-tmq/tmqUdf.py #python3 ./test.py -f 7-tmq/tmq3mnodeSwitch.py -N 5 +python3 ./test.py -f 7-tmq/tmqConsumerGroup.py diff --git a/tests/test/c/tmqSim.c b/tests/test/c/tmqSim.c index e96f5d4793..59005fae94 100644 --- a/tests/test/c/tmqSim.c +++ b/tests/test/c/tmqSim.c @@ -22,9 +22,9 @@ #include #include "taos.h" -#include "taosdef.h" #include "taoserror.h" #include "tlog.h" +#include "taosdef.h" #include "types.h" #define GREEN "\033[1;32m" @@ -36,7 +36,11 @@ #define MAX_CONSUMER_THREAD_CNT (16) #define MAX_VGROUP_CNT (32) -typedef enum { NOTIFY_CMD_START_CONSUM, NOTIFY_CMD_START_COMMIT, NOTIFY_CMD_ID_BUTT } NOTIFY_CMD_ID; +typedef enum { + NOTIFY_CMD_START_CONSUM, + NOTIFY_CMD_START_COMMIT, + NOTIFY_CMD_ID_BUTT +}NOTIFY_CMD_ID; typedef struct { TdThread thread; @@ -48,8 +52,8 @@ typedef struct { // char autoOffsetRest[16]; // none, earliest, latest TdFilePtr pConsumeRowsFile; - int32_t ifCheckData; - int64_t expectMsgCnt; + int32_t ifCheckData; + int64_t expectMsgCnt; int64_t consumeMsgCnt; int64_t consumeRowCnt; @@ -85,7 +89,6 @@ typedef struct { int32_t saveRowFlag; int32_t consumeDelay; // unit s int32_t numOfThread; - int32_t useSnapshot; SThreadInfo stThreads[MAX_CONSUMER_THREAD_CNT]; } SConfInfo; @@ -93,8 +96,6 @@ static SConfInfo g_stConfInfo; TdFilePtr g_fp = NULL; static int running = 1; -int8_t useSnapshot = 0; - // char* g_pRowValue = NULL; // TdFilePtr g_fp = NULL; @@ -126,11 +127,23 @@ char* getCurrentTimeString(char* timeString) { return timeString; } +static void tmqStop(int signum, void *info, void *ctx) { + running = 0; + char tmpString[128]; + taosFprintfFile(g_fp, "%s tmqStop() receive stop signal[%d]\n", getCurrentTimeString(tmpString), signum); +} + +static void tmqSetSignalHandle() { + taosSetSignal(SIGINT, tmqStop); +} + void initLogFile() { char filename[256]; char tmpString[128]; - 
sprintf(filename, "%s/../log/tmqlog_%s.txt", configDir, getCurrentTimeString(tmpString)); + pid_t process_id = getpid(); + + sprintf(filename, "%s/../log/tmqlog-%d-%s.txt", configDir, process_id, getCurrentTimeString(tmpString)); #ifdef WINDOWS for (int i = 2; i < sizeof(filename); i++) { if (filename[i] == ':') filename[i] = '-'; @@ -204,8 +217,6 @@ void parseArgument(int32_t argc, char* argv[]) { g_stConfInfo.saveRowFlag = atol(argv[++i]); } else if (strcmp(argv[i], "-y") == 0) { g_stConfInfo.consumeDelay = atol(argv[++i]); - } else if (strcmp(argv[i], "-e") == 0) { - useSnapshot = (int8_t)atol(argv[++i]); } else { pError("%s unknow para: %s %s", GREEN, argv[++i], NC); exit(-1); @@ -299,11 +310,11 @@ int32_t saveConsumeContentToTbl(SThreadInfo* pInfo, char* buf) { return 0; } -static char* shellFormatTimestamp(char* buf, int64_t val, int32_t precision) { - // if (shell.args.is_raw_time) { - // sprintf(buf, "%" PRId64, val); - // return buf; - // } +static char *shellFormatTimestamp(char *buf, int64_t val, int32_t precision) { + //if (shell.args.is_raw_time) { + // sprintf(buf, "%" PRId64, val); + // return buf; + //} time_t tt; int32_t ms = 0; @@ -341,7 +352,7 @@ static char* shellFormatTimestamp(char* buf, int64_t val, int32_t precision) { } } - struct tm* ptm = taosLocalTime(&tt, NULL); + struct tm *ptm = taosLocalTime(&tt, NULL); size_t pos = strftime(buf, 35, "%Y-%m-%d %H:%M:%S", ptm); if (precision == TSDB_TIME_PRECISION_NANO) { @@ -355,8 +366,7 @@ static char* shellFormatTimestamp(char* buf, int64_t val, int32_t precision) { return buf; } -static void shellDumpFieldToFile(TdFilePtr pFile, const char* val, TAOS_FIELD* field, int32_t length, - int32_t precision) { +static void shellDumpFieldToFile(TdFilePtr pFile, const char *val, TAOS_FIELD *field, int32_t length, int32_t precision) { if (val == NULL) { taosFprintfFile(pFile, "%s", TSDB_DATA_NULL_STR); return; @@ -366,31 +376,31 @@ static void shellDumpFieldToFile(TdFilePtr pFile, const char* val, TAOS_FIELD* f char buf[TSDB_MAX_BYTES_PER_ROW]; switch (field->type) { case TSDB_DATA_TYPE_BOOL: - taosFprintfFile(pFile, "%d", ((((int32_t)(*((char*)val))) == 1) ? 1 : 0)); + taosFprintfFile(pFile, "%d", ((((int32_t)(*((char *)val))) == 1) ? 
1 : 0)); break; case TSDB_DATA_TYPE_TINYINT: - taosFprintfFile(pFile, "%d", *((int8_t*)val)); + taosFprintfFile(pFile, "%d", *((int8_t *)val)); break; case TSDB_DATA_TYPE_UTINYINT: - taosFprintfFile(pFile, "%u", *((uint8_t*)val)); + taosFprintfFile(pFile, "%u", *((uint8_t *)val)); break; case TSDB_DATA_TYPE_SMALLINT: - taosFprintfFile(pFile, "%d", *((int16_t*)val)); + taosFprintfFile(pFile, "%d", *((int16_t *)val)); break; case TSDB_DATA_TYPE_USMALLINT: - taosFprintfFile(pFile, "%u", *((uint16_t*)val)); + taosFprintfFile(pFile, "%u", *((uint16_t *)val)); break; case TSDB_DATA_TYPE_INT: - taosFprintfFile(pFile, "%d", *((int32_t*)val)); + taosFprintfFile(pFile, "%d", *((int32_t *)val)); break; case TSDB_DATA_TYPE_UINT: - taosFprintfFile(pFile, "%u", *((uint32_t*)val)); + taosFprintfFile(pFile, "%u", *((uint32_t *)val)); break; case TSDB_DATA_TYPE_BIGINT: - taosFprintfFile(pFile, "%" PRId64, *((int64_t*)val)); + taosFprintfFile(pFile, "%" PRId64, *((int64_t *)val)); break; case TSDB_DATA_TYPE_UBIGINT: - taosFprintfFile(pFile, "%" PRIu64, *((uint64_t*)val)); + taosFprintfFile(pFile, "%" PRIu64, *((uint64_t *)val)); break; case TSDB_DATA_TYPE_FLOAT: taosFprintfFile(pFile, "%.5f", GET_FLOAT_VAL(val)); @@ -411,7 +421,7 @@ static void shellDumpFieldToFile(TdFilePtr pFile, const char* val, TAOS_FIELD* f taosFprintfFile(pFile, "\'%s\'", buf); break; case TSDB_DATA_TYPE_TIMESTAMP: - shellFormatTimestamp(buf, *(int64_t*)val, precision); + shellFormatTimestamp(buf, *(int64_t *)val, precision); taosFprintfFile(pFile, "'%s'", buf); break; default: @@ -419,13 +429,12 @@ static void shellDumpFieldToFile(TdFilePtr pFile, const char* val, TAOS_FIELD* f } } -static void dumpToFileForCheck(TdFilePtr pFile, TAOS_ROW row, TAOS_FIELD* fields, int32_t* length, int32_t num_fields, - int32_t precision) { +static void dumpToFileForCheck(TdFilePtr pFile, TAOS_ROW row, TAOS_FIELD* fields, int32_t* length, int32_t num_fields, int32_t precision) { for (int32_t i = 0; i < num_fields; i++) { if (i > 0) { taosFprintfFile(pFile, "\n"); } - shellDumpFieldToFile(pFile, (const char*)row[i], fields + i, length[i], precision); + shellDumpFieldToFile(pFile, (const char *)row[i], fields + i, length[i], precision); } taosFprintfFile(pFile, "\n"); } @@ -435,42 +444,40 @@ static int32_t msg_process(TAOS_RES* msg, SThreadInfo* pInfo, int32_t msgIndex) int32_t totalRows = 0; // printf("topic: %s\n", tmq_get_topic_name(msg)); - int32_t vgroupId = tmq_get_vgroup_id(msg); - const char* dbName = tmq_get_db_name(msg); + int32_t vgroupId = tmq_get_vgroup_id(msg); + const char* dbName = tmq_get_db_name(msg); taosFprintfFile(g_fp, "consumerId: %d, msg index:%" PRId64 "\n", pInfo->consumerId, msgIndex); - taosFprintfFile(g_fp, "dbName: %s, topic: %s, vgroupId: %d\n", dbName != NULL ? dbName : "invalid table", - tmq_get_topic_name(msg), vgroupId); + taosFprintfFile(g_fp, "dbName: %s, topic: %s, vgroupId: %d\n", dbName != NULL ? 
dbName : "invalid table", tmq_get_topic_name(msg), vgroupId); while (1) { TAOS_ROW row = taos_fetch_row(msg); if (row == NULL) break; - TAOS_FIELD* fields = taos_fetch_fields(msg); + TAOS_FIELD* fields = taos_fetch_fields(msg); int32_t numOfFields = taos_field_count(msg); - int32_t* length = taos_fetch_lengths(msg); - int32_t precision = taos_result_precision(msg); - const char* tbName = tmq_get_table_name(msg); + int32_t* length = taos_fetch_lengths(msg); + int32_t precision = taos_result_precision(msg); + const char* tbName = tmq_get_table_name(msg); - #if 0 + #if 0 // get schema //============================== stub =================================================// for (int32_t i = 0; i < numOfFields; i++) { taosFprintfFile(g_fp, "%02d: name: %s, type: %d, len: %d\n", i, fields[i].name, fields[i].type, fields[i].bytes); } //============================== stub =================================================// - #endif - - dumpToFileForCheck(pInfo->pConsumeRowsFile, row, fields, length, numOfFields, precision); - + #endif + + dumpToFileForCheck(pInfo->pConsumeRowsFile, row, fields, length, numOfFields, precision); taos_print_row(buf, row, fields, numOfFields); if (0 != g_stConfInfo.showRowFlag) { taosFprintfFile(g_fp, "tbname:%s, rows[%d]: %s\n", (tbName != NULL ? tbName : "null table"), totalRows, buf); - // if (0 != g_stConfInfo.saveRowFlag) { - // saveConsumeContentToTbl(pInfo, buf); - // } + //if (0 != g_stConfInfo.saveRowFlag) { + // saveConsumeContentToTbl(pInfo, buf); + //} } totalRows++; @@ -493,7 +500,8 @@ int queryDB(TAOS* taos, char* command) { return 0; } -static void appNothing(void* param, TAOS_RES* res, int32_t numOfRows) {} +static void appNothing(void* param, TAOS_RES* res, int32_t numOfRows) { +} int32_t notifyMainScript(SThreadInfo* pInfo, int32_t cmdId) { char sqlStr[1024] = {0}; @@ -501,8 +509,11 @@ int32_t notifyMainScript(SThreadInfo* pInfo, int32_t cmdId) { int64_t now = taosGetTimestampMs(); // schema: ts timestamp, consumerid int, consummsgcnt bigint, checkresult int - sprintf(sqlStr, "insert into %s.notifyinfo values (%" PRId64 ", %d, %d)", g_stConfInfo.cdbName, now, cmdId, - pInfo->consumerId); + sprintf(sqlStr, "insert into %s.notifyinfo values (%"PRId64", %d, %d)", + g_stConfInfo.cdbName, + now, + cmdId, + pInfo->consumerId); taos_query_a(pInfo->taos, sqlStr, appNothing, NULL); @@ -512,14 +523,16 @@ int32_t notifyMainScript(SThreadInfo* pInfo, int32_t cmdId) { } static int32_t g_once_commit_flag = 0; -static void tmq_commit_cb_print(tmq_t* tmq, int32_t code, void* param) { - pError("tmq_commit_cb_print() commit %d\n", code); +static void tmq_commit_cb_print(tmq_t* tmq, int32_t code, void* param) { + pError("tmq_commit_cb_print() commit %d\n", code); if (0 == g_once_commit_flag) { g_once_commit_flag = 1; - notifyMainScript((SThreadInfo*)param, (int32_t)NOTIFY_CMD_START_COMMIT); + notifyMainScript((SThreadInfo*)param, (int32_t)NOTIFY_CMD_START_COMMIT); } - taosFprintfFile(g_fp, "tmq_commit_cb_print() be called\n"); + + char tmpString[128]; + taosFprintfFile(g_fp, "%s tmq_commit_cb_print() be called\n", getCurrentTimeString(tmpString)); } void build_consumer(SThreadInfo* pInfo) { @@ -551,10 +564,6 @@ void build_consumer(SThreadInfo* pInfo) { // tmq_conf_set(conf, "auto.offset.reset", "none"); // tmq_conf_set(conf, "auto.offset.reset", "earliest"); // tmq_conf_set(conf, "auto.offset.reset", "latest"); - // - if (useSnapshot) { - tmq_conf_set(conf, "experiment.use.snapshot", "true"); - } pInfo->tmq = tmq_consumer_new(conf, NULL, 0); @@ -609,13 +618,12 @@ void 
loop_consume(SThreadInfo* pInfo) { pInfo->consumerId); pInfo->ts = taosGetTimestampMs(); - + if (pInfo->ifCheckData) { - char filename[256] = {0}; + char filename[256] = {0}; char tmpString[128]; - // sprintf(filename, "%s/../log/consumerid_%d_%s.txt", configDir, pInfo->consumerId, - // getCurrentTimeString(tmpString)); - sprintf(filename, "%s/../log/consumerid_%d.txt", configDir, pInfo->consumerId); + //sprintf(filename, "%s/../log/consumerid_%d_%s.txt", configDir, pInfo->consumerId, getCurrentTimeString(tmpString)); + sprintf(filename, "%s/../log/consumerid_%d.txt", configDir, pInfo->consumerId); pInfo->pConsumeRowsFile = taosOpenFile(filename, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC | TD_FILE_STREAM); if (pInfo->pConsumeRowsFile == NULL) { taosFprintfFile(g_fp, "%s create file fail for save rows content\n", getCurrentTimeString(tmpString)); @@ -634,10 +642,10 @@ void loop_consume(SThreadInfo* pInfo) { totalMsgs++; - if (0 == once_flag) { + if (0 == once_flag) { once_flag = 1; - notifyMainScript(pInfo, NOTIFY_CMD_START_CONSUM); - } + notifyMainScript(pInfo, NOTIFY_CMD_START_CONSUM); + } if (totalRows >= pInfo->expectMsgCnt) { char tmpString[128]; @@ -651,6 +659,10 @@ void loop_consume(SThreadInfo* pInfo) { } } + if (0 == running) { + taosFprintfFile(g_fp, "receive stop signal and not continue consume\n"); + } + pInfo->consumeMsgCnt = totalMsgs; pInfo->consumeRowCnt = totalRows; @@ -666,7 +678,7 @@ void* consumeThreadFunc(void* param) { pInfo->taos = taos_connect(NULL, "root", "taosdata", NULL, 0); if (pInfo->taos == NULL) { taosFprintfFile(g_fp, "taos_connect() fail, can not notify and save consume result to main scripte\n"); - return NULL; + return NULL; } build_consumer(pInfo); @@ -680,7 +692,7 @@ void* consumeThreadFunc(void* param) { int32_t err = tmq_subscribe(pInfo->tmq, pInfo->topicList); if (err != 0) { pError("tmq_subscribe() fail, reason: %s\n", tmq_err2str(err)); - taosFprintfFile(g_fp, "tmq_subscribe()! reason: %s\n", tmq_err2str(err)); + taosFprintfFile(g_fp, "tmq_subscribe() fail! 
reason: %s\n", tmq_err2str(err)); assert(0); return NULL; } @@ -829,6 +841,8 @@ int main(int32_t argc, char* argv[]) { getConsumeInfo(); saveConfigToLogFile(); + tmqSetSignalHandle(); + TdThreadAttr thattr; taosThreadAttrInit(&thattr); taosThreadAttrSetDetachState(&thattr, PTHREAD_CREATE_JOINABLE); From a533f3497c0ec525dca5d6a5c1f31c9ac7a27e6c Mon Sep 17 00:00:00 2001 From: "wenzhouwww@live.cn" Date: Fri, 24 Jun 2022 16:28:56 +0800 Subject: [PATCH 04/16] add case for scalar function for null value --- tests/system-test/2-query/function_null.py | 225 +++++++++++++++++++++ tests/system-test/fulltest.sh | 2 + 2 files changed, 227 insertions(+) create mode 100644 tests/system-test/2-query/function_null.py diff --git a/tests/system-test/2-query/function_null.py b/tests/system-test/2-query/function_null.py new file mode 100644 index 0000000000..d301fb2ca3 --- /dev/null +++ b/tests/system-test/2-query/function_null.py @@ -0,0 +1,225 @@ +import taos +import sys +import datetime +import inspect + +from util.log import * +from util.sql import * +from util.cases import * +import random + + +class TDTestCase: + updatecfgDict = {'debugFlag': 143, "cDebugFlag": 143, "uDebugFlag": 143, "rpcDebugFlag": 143, "tmrDebugFlag": 143, + "jniDebugFlag": 143, "simDebugFlag": 143, "dDebugFlag": 143, "dDebugFlag": 143, "vDebugFlag": 143, "mDebugFlag": 143, "qDebugFlag": 143, + "wDebugFlag": 143, "sDebugFlag": 143, "tsdbDebugFlag": 143, "tqDebugFlag": 143, "fsDebugFlag": 143, "fnDebugFlag": 143} + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor(), True) + self.tb_nums = 10 + self.row_nums = 20 + self.ts = 1434938400000 + self.time_step = 1000 + + def prepare_tag_datas(self): + # prepare datas + tdSql.execute( + "create database if not exists testdb keep 3650 duration 1000") + tdSql.execute(" use testdb ") + tdSql.execute( + '''create table stb1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32)) + ''' + ) + + tdSql.execute( + ''' + create table t1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + ''' + ) + for i in range(4): + tdSql.execute( + f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') + + for i in range(9): + tdSql.execute( + f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + ) + tdSql.execute( + f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + ) + tdSql.execute( + "insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") + tdSql.execute( + "insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute( + "insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute( + "insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + + tdSql.execute( + "insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute( + "insert into ct4 
values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute( + "insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + + tdSql.execute( + f'''insert into t1 values + ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a ) + ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a ) + ( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a ) + ( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a ) + ( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a ) + ( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a ) + ( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" ) + ( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" ) + ( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" ) + ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ''' + ) + + def function_for_null_data(self): + + function_names = ["abs" , "floor" , "ceil" , "round"] + + for function_name in function_names: + + scalar_sql_1 = f"select {function_name}(c1)/0 from t1 group by c1 order by c1" + scalar_sql_2 = f"select {function_name}(c1/0) from t1 group by c1 order by c1" + tdSql.query(scalar_sql_1) + tdSql.checkRows(10) + tdSql.checkData(0,0,None) + tdSql.checkData(9,0,None) + tdSql.query(scalar_sql_2) + tdSql.checkRows(10) + tdSql.checkData(0,0,None) + tdSql.checkData(9,0,None) + + function_names = ["sin" ,"cos" ,"tan" ,"asin" ,"acos" ,"atan"] + + PI = 3.141592654 + + # sin + tdSql.query(" select sin(c1/0) from t1 group by c1 order by c1") + tdSql.checkData(9,0,None) + + tdSql.query(" select sin(0.00) from t1 group by c1 order by c1") + tdSql.checkData(9,0,0.000000000) + + tdSql.query(f" select sin({PI/2}) from t1 group by c1 order by c1") + tdSql.checkData(9,0,1.0) + + tdSql.query(" select sin(1000) from t1 group by c1 order by c1") + tdSql.checkData(9,0,0.826879541) + + # cos + tdSql.query(" select cos(c1/0) from t1 group by c1 order by c1") + tdSql.checkData(9,0,None) + + tdSql.query(" select cos(0.00) from t1 group by c1 order by c1") + tdSql.checkData(9,0,1.000000000) + + tdSql.query(f" select cos({PI}/2) from t1 group by c1 order by c1") + + tdSql.query(" select cos(1000) from t1 group by c1 order by c1") + tdSql.checkData(9,0,0.562379076) + + + # tan + tdSql.query(" select tan(c1/0) from t1 group by c1 order by c1") + tdSql.checkData(9,0,None) + + tdSql.query(" select tan(0.00) from t1 group by c1 order by c1") + tdSql.checkData(9,0,0.000000000) + + tdSql.query(f" select tan({PI}/2) from t1 group by c1 order by c1") + + tdSql.query(" select tan(1000) from t1 group by c1 order by c1") + tdSql.checkData(9,0,1.470324156) + + # atan + tdSql.query(" select atan(c1/0) from t1 group by c1 order by c1") + tdSql.checkData(9,0,None) + + tdSql.query(" select atan(0.00) from t1 group by c1 order by c1") + tdSql.checkData(9,0,0.000000000) + + tdSql.query(f" select atan({PI}/2) from t1 
group by c1 order by c1") + tdSql.checkData(9,0,1.003884822) + + tdSql.query(" select atan(1000) from t1 group by c1 order by c1") + tdSql.checkData(9,0,1.569796327) + + # asin + tdSql.query(" select asin(c1/0) from t1 group by c1 order by c1") + tdSql.checkData(9,0,None) + + tdSql.query(" select asin(0.00) from t1 group by c1 order by c1") + tdSql.checkData(9,0,0.000000000) + + tdSql.query(f" select asin({PI}/2) from t1 group by c1 order by c1") + + tdSql.query(" select asin(1000) from t1 group by c1 order by c1") + tdSql.checkData(9,0,None) + + # acos + + tdSql.query(" select acos(c1/0) from t1 group by c1 order by c1") + tdSql.checkData(9,0,None) + + tdSql.query(" select acos(0.00) from t1 group by c1 order by c1") + tdSql.checkData(9,0,1.570796327) + + tdSql.query(f" select acos({PI}/2) from t1 group by c1 order by c1") + tdSql.checkData(9,0,None) + + tdSql.query(" select acos(1000) from t1 group by c1 order by c1") + tdSql.checkData(9,0,None) + + function_names = ["log" ,"pow"] + + # log + tdSql.query(" select log(-10) from t1 group by c1 order by c1") + tdSql.checkData(9,0,None) + + tdSql.query(" select log(c1)/0 from t1 group by c1 order by c1") + tdSql.checkData(9,0,None) + + tdSql.query(f" select log(0.00) from t1 group by c1 order by c1") + tdSql.checkData(9,0,None) + + # pow + tdSql.query(" select pow(c1,10000) from t1 group by c1 order by c1") + tdSql.checkData(9,0,None) + + tdSql.query(" select pow(c1,2)/0 from t1 group by c1 order by c1") + tdSql.checkData(9,0,None) + + tdSql.query(f" select pow(c1/0 ,1 ) from t1 group by c1 order by c1") + tdSql.checkData(9,0,None) + + def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring + tdSql.prepare() + + tdLog.printNoPrefix("==========step1:create table ==============") + + self.prepare_tag_datas() + + tdLog.printNoPrefix("==========step2:test errors ==============") + + self.function_for_null_data() + + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh index 73e6716cad..3c6f4de5cb 100755 --- a/tests/system-test/fulltest.sh +++ b/tests/system-test/fulltest.sh @@ -110,6 +110,8 @@ python3 ./test.py -f 2-query/distribute_agg_avg.py python3 ./test.py -f 2-query/distribute_agg_stddev.py python3 ./test.py -f 2-query/twa.py +python3 ./test.py -f 2-query/function_null.py + python3 ./test.py -f 6-cluster/5dnode1mnode.py python3 ./test.py -f 6-cluster/5dnode2mnode.py python3 ./test.py -f 6-cluster/5dnode3mnodeStop.py -N 5 -M 3 From 125944ce15893590fdf92803c75597ba9fbe7029 Mon Sep 17 00:00:00 2001 From: "wenzhouwww@live.cn" Date: Fri, 24 Jun 2022 17:05:03 +0800 Subject: [PATCH 05/16] add case for compute null value --- tests/system-test/2-query/function_null.py | 29 ++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/tests/system-test/2-query/function_null.py b/tests/system-test/2-query/function_null.py index d301fb2ca3..4de7a7f113 100644 --- a/tests/system-test/2-query/function_null.py +++ b/tests/system-test/2-query/function_null.py @@ -92,6 +92,7 @@ class TDTestCase: scalar_sql_1 = f"select {function_name}(c1)/0 from t1 group by c1 order by c1" scalar_sql_2 = f"select {function_name}(c1/0) from t1 group by c1 order by c1" + scalar_sql_3 = f"select {function_name}(NULL) from t1 group by c1 order by c1" tdSql.query(scalar_sql_1) tdSql.checkRows(10) tdSql.checkData(0,0,None) 
@@ -100,6 +101,10 @@ class TDTestCase: tdSql.checkRows(10) tdSql.checkData(0,0,None) tdSql.checkData(9,0,None) + tdSql.query(scalar_sql_3) + tdSql.checkRows(10) + tdSql.checkData(0,0,None) + tdSql.checkData(9,0,None) function_names = ["sin" ,"cos" ,"tan" ,"asin" ,"acos" ,"atan"] @@ -109,6 +114,9 @@ class TDTestCase: tdSql.query(" select sin(c1/0) from t1 group by c1 order by c1") tdSql.checkData(9,0,None) + tdSql.query(" select sin(NULL) from t1 group by c1 order by c1") + tdSql.checkData(9,0,None) + tdSql.query(" select sin(0.00) from t1 group by c1 order by c1") tdSql.checkData(9,0,0.000000000) @@ -122,6 +130,9 @@ class TDTestCase: tdSql.query(" select cos(c1/0) from t1 group by c1 order by c1") tdSql.checkData(9,0,None) + tdSql.query(" select cos(NULL) from t1 group by c1 order by c1") + tdSql.checkData(9,0,None) + tdSql.query(" select cos(0.00) from t1 group by c1 order by c1") tdSql.checkData(9,0,1.000000000) @@ -135,6 +146,9 @@ class TDTestCase: tdSql.query(" select tan(c1/0) from t1 group by c1 order by c1") tdSql.checkData(9,0,None) + tdSql.query(" select tan(NULL) from t1 group by c1 order by c1") + tdSql.checkData(9,0,None) + tdSql.query(" select tan(0.00) from t1 group by c1 order by c1") tdSql.checkData(9,0,0.000000000) @@ -147,6 +161,9 @@ class TDTestCase: tdSql.query(" select atan(c1/0) from t1 group by c1 order by c1") tdSql.checkData(9,0,None) + tdSql.query(" select atan(NULL) from t1 group by c1 order by c1") + tdSql.checkData(9,0,None) + tdSql.query(" select atan(0.00) from t1 group by c1 order by c1") tdSql.checkData(9,0,0.000000000) @@ -160,6 +177,9 @@ class TDTestCase: tdSql.query(" select asin(c1/0) from t1 group by c1 order by c1") tdSql.checkData(9,0,None) + tdSql.query(" select asin(NULL) from t1 group by c1 order by c1") + tdSql.checkData(9,0,None) + tdSql.query(" select asin(0.00) from t1 group by c1 order by c1") tdSql.checkData(9,0,0.000000000) @@ -173,6 +193,9 @@ class TDTestCase: tdSql.query(" select acos(c1/0) from t1 group by c1 order by c1") tdSql.checkData(9,0,None) + tdSql.query(" select acos(NULL) from t1 group by c1 order by c1") + tdSql.checkData(9,0,None) + tdSql.query(" select acos(0.00) from t1 group by c1 order by c1") tdSql.checkData(9,0,1.570796327) @@ -188,6 +211,9 @@ class TDTestCase: tdSql.query(" select log(-10) from t1 group by c1 order by c1") tdSql.checkData(9,0,None) + tdSql.query(" select log(NULL ,2) from t1 group by c1 order by c1") + tdSql.checkData(9,0,None) + tdSql.query(" select log(c1)/0 from t1 group by c1 order by c1") tdSql.checkData(9,0,None) @@ -201,6 +227,9 @@ class TDTestCase: tdSql.query(" select pow(c1,2)/0 from t1 group by c1 order by c1") tdSql.checkData(9,0,None) + tdSql.query(" select pow(NULL,2) from t1 group by c1 order by c1") + tdSql.checkData(9,0,None) + tdSql.query(f" select pow(c1/0 ,1 ) from t1 group by c1 order by c1") tdSql.checkData(9,0,None) From 1d7bf59fe8a3dc3d965bbe5ec79a30e270833dc7 Mon Sep 17 00:00:00 2001 From: slzhou Date: Fri, 24 Jun 2022 17:23:13 +0800 Subject: [PATCH 06/16] fix: fix iteration bugs of last window result --- source/libs/executor/src/timewindowoperator.c | 51 ++++++++++++------- 1 file changed, 32 insertions(+), 19 deletions(-) diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 21c265e03d..48b0b1c071 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -3902,6 +3902,7 @@ typedef struct SMergeIntervalAggOperatorInfo { SIntervalAggOperatorInfo 
intervalAggOperatorInfo; SHashObj* groupIntervalHash; + void* groupIntervalIter; bool hasGroupId; uint64_t groupId; SSDataBlock* prefetchedBlock; @@ -3914,6 +3915,23 @@ void destroyMergeIntervalOperatorInfo(void* param, int32_t numOfOutput) { destroyIntervalOperatorInfo(&miaInfo->intervalAggOperatorInfo, numOfOutput); } +static int32_t finalizeWindowResult(SOperatorInfo* pOperatorInfo, uint64_t tableGroupId, STimeWindow* win, SSDataBlock* pResultBlock) { + SMergeIntervalAggOperatorInfo* miaInfo = pOperatorInfo->info; + SIntervalAggOperatorInfo* iaInfo = &miaInfo->intervalAggOperatorInfo; + SExecTaskInfo* pTaskInfo = pOperatorInfo->pTaskInfo; + bool ascScan = (iaInfo->order == TSDB_ORDER_ASC); + SExprSupp* pExprSup = &pOperatorInfo->exprSupp; + + SET_RES_WINDOW_KEY(iaInfo->aggSup.keyBuf, &win->skey, TSDB_KEYSIZE, tableGroupId); + SResultRowPosition* p1 = (SResultRowPosition*)taosHashGet(iaInfo->aggSup.pResultRowHashTable, iaInfo->aggSup.keyBuf, + GET_RES_WINDOW_KEY_LEN(TSDB_KEYSIZE)); + ASSERT(p1 != NULL); + finalizeResultRowIntoResultDataBlock(iaInfo->aggSup.pResultBuf, p1, pExprSup->pCtx, pExprSup->pExprInfo, + pExprSup->numOfExprs, pExprSup->rowEntryInfoOffset, pResultBlock, pTaskInfo); + taosHashRemove(iaInfo->aggSup.pResultRowHashTable, iaInfo->aggSup.keyBuf, GET_RES_WINDOW_KEY_LEN(TSDB_KEYSIZE)); + return TSDB_CODE_SUCCESS; +} + static int32_t outputPrevIntervalResult(SOperatorInfo* pOperatorInfo, uint64_t tableGroupId, SSDataBlock* pResultBlock, STimeWindow* newWin) { SMergeIntervalAggOperatorInfo* miaInfo = pOperatorInfo->info; @@ -3928,20 +3946,9 @@ static int32_t outputPrevIntervalResult(SOperatorInfo* pOperatorInfo, uint64_t t return 0; } - if (newWin == NULL || (ascScan && newWin->skey > prevWin->skey || (!ascScan) && newWin->skey < prevWin->skey)) { - SET_RES_WINDOW_KEY(iaInfo->aggSup.keyBuf, &prevWin->skey, TSDB_KEYSIZE, tableGroupId); - SResultRowPosition* p1 = (SResultRowPosition*)taosHashGet(iaInfo->aggSup.pResultRowHashTable, iaInfo->aggSup.keyBuf, - GET_RES_WINDOW_KEY_LEN(TSDB_KEYSIZE)); - ASSERT(p1 != NULL); - - finalizeResultRowIntoResultDataBlock(iaInfo->aggSup.pResultBuf, p1, pExprSup->pCtx, pExprSup->pExprInfo, - pExprSup->numOfExprs, pExprSup->rowEntryInfoOffset, pResultBlock, pTaskInfo); - taosHashRemove(iaInfo->aggSup.pResultRowHashTable, iaInfo->aggSup.keyBuf, GET_RES_WINDOW_KEY_LEN(TSDB_KEYSIZE)); - if (newWin == NULL) { - taosHashRemove(miaInfo->groupIntervalHash, &tableGroupId, sizeof(tableGroupId)); - } else { - taosHashPut(miaInfo->groupIntervalHash, &tableGroupId, sizeof(tableGroupId), newWin, sizeof(STimeWindow)); - } + if ((ascScan && newWin->skey > prevWin->skey || (!ascScan) && newWin->skey < prevWin->skey)) { + finalizeWindowResult(pOperatorInfo, tableGroupId, prevWin, pResultBlock); + taosHashPut(miaInfo->groupIntervalHash, &tableGroupId, sizeof(tableGroupId), newWin, sizeof(STimeWindow)); } return 0; @@ -4090,12 +4097,17 @@ static SSDataBlock* doMergeIntervalAgg(SOperatorInfo* pOperator) { } pRes->info.groupId = miaInfo->groupId; - } else { - void* p = taosHashIterate(miaInfo->groupIntervalHash, NULL); - if (p != NULL) { + } + + if (miaInfo->inputBlocksFinished) { + void* win = taosHashIterate(miaInfo->groupIntervalHash, miaInfo->groupIntervalIter); + if (win != NULL) { + miaInfo->groupIntervalIter = win; + size_t len = 0; - uint64_t* pKey = taosHashGetKey(p, &len); - outputPrevIntervalResult(pOperator, *pKey, pRes, NULL); + uint64_t* pTableGroupId = taosHashGetKey(win, &len); + finalizeWindowResult(pOperator, *pTableGroupId, win, pRes); + 
pRes->info.groupId = *pTableGroupId; } } @@ -4118,6 +4130,7 @@ SOperatorInfo* createMergeIntervalOperatorInfo(SOperatorInfo* downstream, SExprI } miaInfo->groupIntervalHash = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_UBIGINT), true, HASH_NO_LOCK); + miaInfo->groupIntervalIter = NULL; SIntervalAggOperatorInfo* iaInfo = &miaInfo->intervalAggOperatorInfo; From c41a4bd7db2ffaeb26a810794c21b9df1c35cf5b Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Fri, 24 Jun 2022 17:27:43 +0800 Subject: [PATCH 07/16] feat: subplan adds 'user' field --- include/libs/nodes/plannodes.h | 1 + include/libs/planner/planner.h | 25 ++-- source/client/src/clientImpl.c | 148 ++++++++++----------- source/libs/nodes/src/nodesCodeFuncs.c | 7 + source/libs/planner/src/planPhysiCreater.c | 1 + source/libs/planner/test/planTestUtil.cpp | 16 ++- source/libs/planner/test/planTestUtil.h | 2 +- 7 files changed, 103 insertions(+), 97 deletions(-) diff --git a/include/libs/nodes/plannodes.h b/include/libs/nodes/plannodes.h index f8cf58d8cb..b7583b41b9 100644 --- a/include/libs/nodes/plannodes.h +++ b/include/libs/nodes/plannodes.h @@ -458,6 +458,7 @@ typedef struct SSubplan { int32_t msgType; // message type for subplan, used to denote the send message type to vnode. int32_t level; // the execution level of current subplan, starting from 0 in a top-down manner. char dbFName[TSDB_DB_FNAME_LEN]; + char user[TSDB_USER_LEN]; SQueryNodeAddr execNode; // for the scan/modify subplan, the optional execution node SQueryNodeStat execNodeStat; // only for scan subplan SNodeList* pChildren; // the datasource subplan,from which to fetch the result diff --git a/include/libs/planner/planner.h b/include/libs/planner/planner.h index c4f71e57a8..b350837551 100644 --- a/include/libs/planner/planner.h +++ b/include/libs/planner/planner.h @@ -24,18 +24,19 @@ extern "C" { #include "taos.h" typedef struct SPlanContext { - uint64_t queryId; - int32_t acctId; - SEpSet mgmtEpSet; - SNode* pAstRoot; - bool topicQuery; - bool streamQuery; - bool rSmaQuery; - bool showRewrite; - int8_t triggerType; - int64_t watermark; - char* pMsg; - int32_t msgLen; + uint64_t queryId; + int32_t acctId; + SEpSet mgmtEpSet; + SNode* pAstRoot; + bool topicQuery; + bool streamQuery; + bool rSmaQuery; + bool showRewrite; + int8_t triggerType; + int64_t watermark; + char* pMsg; + int32_t msgLen; + const char* pUser; } SPlanContext; // Create the physical plan for the query, according to the AST. 
diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index b3aaeaea78..73d449412f 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -59,7 +59,7 @@ static STscObj* taosConnectImpl(const char* user, const char* auth, const char* SAppInstInfo* pAppInfo, int connType); STscObj* taos_connect_internal(const char* ip, const char* user, const char* pass, const char* auth, const char* db, - uint16_t port, int connType) { + uint16_t port, int connType) { if (taos_init() != TSDB_CODE_SUCCESS) { return NULL; } @@ -327,8 +327,8 @@ bool qnodeRequired(SRequestObj* pRequest) { } SAppInstInfo* pInfo = pRequest->pTscObj->pAppInfo; - bool required = false; - + bool required = false; + taosThreadMutexLock(&pInfo->qnodeMutex); required = (NULL == pInfo->pQnodeList); taosThreadMutexUnlock(&pInfo->qnodeMutex); @@ -376,7 +376,8 @@ int32_t getPlan(SRequestObj* pRequest, SQuery* pQuery, SQueryPlan** pPlan, SArra .pAstRoot = pQuery->pRoot, .showRewrite = pQuery->showRewrite, .pMsg = pRequest->msgBuf, - .msgLen = ERROR_MSG_BUF_DEFAULT_SIZE}; + .msgLen = ERROR_MSG_BUF_DEFAULT_SIZE, + .pUser = pRequest->pTscObj->user}; return qCreateQueryPlan(&cxt, pPlan, pNodeList); } @@ -433,11 +434,11 @@ int32_t buildVnodePolicyNodeList(SRequestObj* pRequest, SArray** pNodeList, SArr } for (int32_t j = 0; j < vgNum; ++j) { - SVgroupInfo* pInfo = taosArrayGet(pVg, j); + SVgroupInfo* pInfo = taosArrayGet(pVg, j); SQueryNodeLoad load = {0}; load.addr.nodeId = pInfo->vgId; load.addr.epSet = pInfo->epSet; - + taosArrayPush(nodeList, &load); } } @@ -495,17 +496,16 @@ _return: return TSDB_CODE_SUCCESS; } - -int32_t buildAsyncExecNodeList(SRequestObj* pRequest, SArray** pNodeList, SArray* pMnodeList, SMetaData *pResultMeta) { +int32_t buildAsyncExecNodeList(SRequestObj* pRequest, SArray** pNodeList, SArray* pMnodeList, SMetaData* pResultMeta) { SArray* pDbVgList = NULL; SArray* pQnodeList = NULL; int32_t code = 0; - + switch (tsQueryPolicy) { case QUERY_POLICY_VNODE: { if (pResultMeta) { pDbVgList = taosArrayInit(4, POINTER_BYTES); - + int32_t dbNum = taosArrayGetSize(pResultMeta->pDbVgroup); for (int32_t i = 0; i < dbNum; ++i) { SMetaRes* pRes = taosArrayGet(pResultMeta->pDbVgroup, i); @@ -514,9 +514,9 @@ int32_t buildAsyncExecNodeList(SRequestObj* pRequest, SArray** pNodeList, SArray } taosArrayPush(pDbVgList, &pRes->pRes); - } + } } - + code = buildVnodePolicyNodeList(pRequest, pNodeList, pMnodeList, pDbVgList); break; } @@ -537,7 +537,7 @@ int32_t buildAsyncExecNodeList(SRequestObj* pRequest, SArray** pNodeList, SArray } taosThreadMutexUnlock(&pInst->qnodeMutex); } - + code = buildQnodePolicyNodeList(pRequest, pNodeList, pMnodeList, pQnodeList); break; } @@ -548,7 +548,7 @@ int32_t buildAsyncExecNodeList(SRequestObj* pRequest, SArray** pNodeList, SArray taosArrayDestroy(pDbVgList); taosArrayDestroy(pQnodeList); - + return code; } @@ -556,43 +556,43 @@ int32_t buildSyncExecNodeList(SRequestObj* pRequest, SArray** pNodeList, SArray* SArray* pDbVgList = NULL; SArray* pQnodeList = NULL; int32_t code = 0; - + switch (tsQueryPolicy) { case QUERY_POLICY_VNODE: { int32_t dbNum = taosArrayGetSize(pRequest->dbList); if (dbNum > 0) { - SCatalog* pCtg = NULL; + SCatalog* pCtg = NULL; SAppInstInfo* pInst = pRequest->pTscObj->pAppInfo; code = catalogGetHandle(pInst->clusterId, &pCtg); if (code != TSDB_CODE_SUCCESS) { goto _return; } - pDbVgList = taosArrayInit(dbNum, POINTER_BYTES); + pDbVgList = taosArrayInit(dbNum, POINTER_BYTES); SArray* pVgList = NULL; for (int32_t i = 0; i < dbNum; ++i) { 
- char* dbFName = taosArrayGet(pRequest->dbList, i); + char* dbFName = taosArrayGet(pRequest->dbList, i); SRequestConnInfo conn = {.pTrans = pInst->pTransporter, .requestId = pRequest->requestId, .requestObjRefId = pRequest->self, - .mgmtEps = getEpSet_s(&pInst->mgmtEp)}; - + .mgmtEps = getEpSet_s(&pInst->mgmtEp)}; + code = catalogGetDBVgInfo(pCtg, &conn, dbFName, &pVgList); if (code) { goto _return; } - + taosArrayPush(pDbVgList, &pVgList); - } + } } - + code = buildVnodePolicyNodeList(pRequest, pNodeList, pMnodeList, pDbVgList); break; } case QUERY_POLICY_HYBRID: case QUERY_POLICY_QNODE: { getQnodeList(pRequest, &pQnodeList); - + code = buildQnodePolicyNodeList(pRequest, pNodeList, pMnodeList, pQnodeList); break; } @@ -605,11 +605,10 @@ _return: taosArrayDestroy(pDbVgList); taosArrayDestroy(pQnodeList); - + return code; } - int32_t scheduleAsyncQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNodeList) { tsem_init(&schdRspSem, 0, 0); @@ -833,8 +832,8 @@ void schedulerExecCb(SQueryResult* pResult, void* param, int32_t code) { } } - tscDebug("0x%" PRIx64 " enter scheduler exec cb, code:%d - %s, reqId:0x%" PRIx64, - pRequest->self, code, tstrerror(code), pRequest->requestId); + tscDebug("0x%" PRIx64 " enter scheduler exec cb, code:%d - %s, reqId:0x%" PRIx64, pRequest->self, code, + tstrerror(code), pRequest->requestId); STscObj* pTscObj = pRequest->pTscObj; if (code != TSDB_CODE_SUCCESS && NEED_CLIENT_HANDLE_ERROR(code)) { @@ -880,7 +879,7 @@ SRequestObj* launchQueryImpl(SRequestObj* pRequest, SQuery* pQuery, bool keepQue if (TSDB_CODE_SUCCESS == code && !pRequest->validateOnly) { SArray* pNodeList = NULL; buildSyncExecNodeList(pRequest, &pNodeList, pMnodeList); - + code = scheduleQuery(pRequest, pRequest->body.pDag, pNodeList); taosArrayDestroy(pNodeList); } @@ -935,7 +934,7 @@ SRequestObj* launchQuery(STscObj* pTscObj, const char* sql, int sqlLen, bool val return launchQueryImpl(pRequest, pQuery, false, NULL); } -void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData *pResultMeta) { +void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData* pResultMeta) { int32_t code = 0; switch (pQuery->execMode) { @@ -968,7 +967,7 @@ void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData *pResultM if (TSDB_CODE_SUCCESS == code && !pRequest->validateOnly) { SArray* pNodeList = NULL; buildAsyncExecNodeList(pRequest, &pNodeList, pMnodeList, pResultMeta); - + SRequestConnInfo conn = { .pTrans = pAppInfo->pTransporter, .requestId = pRequest->requestId, .requestObjRefId = pRequest->self}; SSchedulerReq req = {.pConn = &conn, @@ -1328,7 +1327,7 @@ TAOS* taos_connect_auth(const char* ip, const char* user, const char* auth, cons if (pObj) { return pObj->id; } - + return NULL; } @@ -1500,10 +1499,10 @@ static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t numOfRows, int return TSDB_CODE_SUCCESS; } -static int32_t estimateJsonLen(SReqResultInfo* pResultInfo, int32_t numOfCols, int32_t numOfRows){ +static int32_t estimateJsonLen(SReqResultInfo* pResultInfo, int32_t numOfCols, int32_t numOfRows) { char* p = (char*)pResultInfo->pData; - int32_t len = sizeof(int32_t) + sizeof(uint64_t) + numOfCols * (sizeof(int16_t) + sizeof(int32_t)); + int32_t len = sizeof(int32_t) + sizeof(uint64_t) + numOfCols * (sizeof(int16_t) + sizeof(int32_t)); int32_t* colLength = (int32_t*)(p + len); len += sizeof(int32_t) * numOfCols; @@ -1513,7 +1512,7 @@ static int32_t estimateJsonLen(SReqResultInfo* pResultInfo, int32_t numOfCols, i if (pResultInfo->fields[i].type == 
TSDB_DATA_TYPE_JSON) { int32_t* offset = (int32_t*)pStart; - int32_t lenTmp = numOfRows * sizeof(int32_t); + int32_t lenTmp = numOfRows * sizeof(int32_t); len += lenTmp; pStart += lenTmp; @@ -1538,7 +1537,6 @@ static int32_t estimateJsonLen(SReqResultInfo* pResultInfo, int32_t numOfCols, i } else { ASSERT(0); } - } } else if (IS_VAR_DATA_TYPE(pResultInfo->fields[i].type)) { int32_t lenTmp = numOfRows * sizeof(int32_t); @@ -1562,13 +1560,13 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int break; } } - if(!needConvert) return TSDB_CODE_SUCCESS; + if (!needConvert) return TSDB_CODE_SUCCESS; - char* p = (char*)pResultInfo->pData; + char* p = (char*)pResultInfo->pData; int32_t dataLen = estimateJsonLen(pResultInfo, numOfCols, numOfRows); pResultInfo->convertJson = taosMemoryCalloc(1, dataLen); - if(pResultInfo->convertJson == NULL) return TSDB_CODE_OUT_OF_MEMORY; + if (pResultInfo->convertJson == NULL) return TSDB_CODE_OUT_OF_MEMORY; char* p1 = pResultInfo->convertJson; int32_t len = sizeof(int32_t) + sizeof(uint64_t) + numOfCols * (sizeof(int16_t) + sizeof(int32_t)); @@ -1637,7 +1635,7 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int ASSERT(0); } - offset1[j]= len; + offset1[j] = len; memcpy(pStart1 + len, dst, varDataTLen(dst)); len += varDataTLen(dst); } @@ -1655,7 +1653,6 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int pStart += len; pStart1 += len; memcpy(pStart1, pStart, colLen); - } pStart += colLen; pStart1 += colLen1; @@ -1723,7 +1720,7 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32 pStart += colLength[i]; } - if(convertUcs4){ + if (convertUcs4) { code = doConvertUCS4(pResultInfo, numOfRows, numOfCols, colLength); } @@ -1840,17 +1837,18 @@ _OVER: return code; } -int32_t appendTbToReq(SArray* pList, int32_t pos1, int32_t len1, int32_t pos2, int32_t len2, const char* str, int32_t acctId, char* db) { +int32_t appendTbToReq(SArray* pList, int32_t pos1, int32_t len1, int32_t pos2, int32_t len2, const char* str, + int32_t acctId, char* db) { SName name; - + if (len1 <= 0) { return -1; } - const char *dbName = db; - const char *tbName = NULL; - int32_t dbLen = 0; - int32_t tbLen = 0; + const char* dbName = db; + const char* tbName = NULL; + int32_t dbLen = 0; + int32_t tbLen = 0; if (len2 > 0) { dbName = str + pos1; dbLen = len1; @@ -1861,7 +1859,7 @@ int32_t appendTbToReq(SArray* pList, int32_t pos1, int32_t len1, int32_t pos2, i tbName = str + pos1; tbLen = len1; } - + if (tNameSetDbName(&name, acctId, dbName, dbLen)) { return -1; } @@ -1881,18 +1879,18 @@ int32_t transferTableNameList(const char* tbList, int32_t acctId, char* dbName, terrno = TSDB_CODE_OUT_OF_MEMORY; return terrno; } - - bool inEscape = false; + + bool inEscape = false; int32_t code = 0; - + int32_t vIdx = 0; int32_t vPos[2]; int32_t vLen[2]; memset(vPos, -1, sizeof(vPos)); memset(vLen, 0, sizeof(vLen)); - - for (int32_t i = 0; ; ++i) { + + for (int32_t i = 0;; ++i) { if (0 == *(tbList + i)) { if (vPos[vIdx] >= 0 && vLen[vIdx] <= 0) { vLen[vIdx] = i - vPos[vIdx]; @@ -1905,7 +1903,7 @@ int32_t transferTableNameList(const char* tbList, int32_t acctId, char* dbName, break; } - + if ('`' == *(tbList + i)) { inEscape = !inEscape; if (!inEscape) { @@ -1952,7 +1950,7 @@ int32_t transferTableNameList(const char* tbList, int32_t acctId, char* dbName, if (code) { goto _return; } - + memset(vPos, -1, sizeof(vPos)); memset(vLen, 0, sizeof(vLen)); vIdx = 0; @@ -1966,8 +1964,7 @@ int32_t 
transferTableNameList(const char* tbList, int32_t acctId, char* dbName, continue; } - if (('a' <= *(tbList + i) && 'z' >= *(tbList + i)) || - ('A' <= *(tbList + i) && 'Z' >= *(tbList + i)) || + if (('a' <= *(tbList + i) && 'z' >= *(tbList + i)) || ('A' <= *(tbList + i) && 'Z' >= *(tbList + i)) || ('0' <= *(tbList + i) && '9' >= *(tbList + i))) { if (vLen[vIdx] > 0) { goto _return; @@ -1989,32 +1986,31 @@ _return: taosArrayDestroy(*pReq); *pReq = NULL; - + return terrno; } void syncCatalogFn(SMetaData* pResult, void* param, int32_t code) { - SSyncQueryParam *pParam = param; + SSyncQueryParam* pParam = param; pParam->pRequest->code = code; tsem_post(&pParam->sem); } - -void syncQueryFn(void *param, void *res, int32_t code) { - SSyncQueryParam *pParam = param; +void syncQueryFn(void* param, void* res, int32_t code) { + SSyncQueryParam* pParam = param; pParam->pRequest = res; pParam->pRequest->code = code; tsem_post(&pParam->sem); } -void taosAsyncQueryImpl(TAOS *taos, const char *sql, __taos_async_fn_t fp, void *param, bool validateOnly) { - STscObj *pTscObj = acquireTscObj(*(int64_t *)taos); +void taosAsyncQueryImpl(TAOS* taos, const char* sql, __taos_async_fn_t fp, void* param, bool validateOnly) { + STscObj* pTscObj = acquireTscObj(*(int64_t*)taos); if (pTscObj == NULL || sql == NULL || NULL == fp) { terrno = TSDB_CODE_INVALID_PARA; if (pTscObj) { - releaseTscObj(*(int64_t *)taos); + releaseTscObj(*(int64_t*)taos); } else { terrno = TSDB_CODE_TSC_DISCONNECTED; } @@ -2031,7 +2027,7 @@ void taosAsyncQueryImpl(TAOS *taos, const char *sql, __taos_async_fn_t fp, void return; } - SRequestObj *pRequest = NULL; + SRequestObj* pRequest = NULL; int32_t code = buildRequest(pTscObj, sql, sqlLen, &pRequest); if (code != TSDB_CODE_SUCCESS) { terrno = code; @@ -2045,45 +2041,41 @@ void taosAsyncQueryImpl(TAOS *taos, const char *sql, __taos_async_fn_t fp, void doAsyncQuery(pRequest, false); } - -TAOS_RES *taosQueryImpl(TAOS *taos, const char *sql, bool validateOnly) { +TAOS_RES* taosQueryImpl(TAOS* taos, const char* sql, bool validateOnly) { if (NULL == taos) { terrno = TSDB_CODE_TSC_DISCONNECTED; return NULL; } - STscObj *pTscObj = acquireTscObj(*(int64_t *)taos); + STscObj* pTscObj = acquireTscObj(*(int64_t*)taos); if (pTscObj == NULL || sql == NULL) { terrno = TSDB_CODE_TSC_DISCONNECTED; return NULL; } #if SYNC_ON_TOP_OF_ASYNC - SSyncQueryParam *param = taosMemoryCalloc(1, sizeof(SSyncQueryParam)); + SSyncQueryParam* param = taosMemoryCalloc(1, sizeof(SSyncQueryParam)); tsem_init(¶m->sem, 0, 0); taosAsyncQueryImpl(taos, sql, syncQueryFn, param, validateOnly); tsem_wait(¶m->sem); - releaseTscObj(*(int64_t *)taos); + releaseTscObj(*(int64_t*)taos); return param->pRequest; #else size_t sqlLen = strlen(sql); if (sqlLen > (size_t)TSDB_MAX_ALLOWED_SQL_LEN) { - releaseTscObj(*(int64_t *)taos); + releaseTscObj(*(int64_t*)taos); tscError("sql string exceeds max length:%d", TSDB_MAX_ALLOWED_SQL_LEN); terrno = TSDB_CODE_TSC_EXCEED_SQL_LIMIT; return NULL; } - TAOS_RES *pRes = execQuery(pTscObj, sql, sqlLen, validateOnly); + TAOS_RES* pRes = execQuery(pTscObj, sql, sqlLen, validateOnly); - releaseTscObj(*(int64_t *)taos); + releaseTscObj(*(int64_t*)taos); return pRes; #endif } - - - diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c index 1858918e4a..514d65d52e 100644 --- a/source/libs/nodes/src/nodesCodeFuncs.c +++ b/source/libs/nodes/src/nodesCodeFuncs.c @@ -2294,6 +2294,7 @@ static const char* jkSubplanType = "SubplanType"; static const char* jkSubplanMsgType = 
"MsgType"; static const char* jkSubplanLevel = "Level"; static const char* jkSubplanDbFName = "DbFName"; +static const char* jkSubplanUser = "User"; static const char* jkSubplanNodeAddr = "NodeAddr"; static const char* jkSubplanRootNode = "RootNode"; static const char* jkSubplanDataSink = "DataSink"; @@ -2316,6 +2317,9 @@ static int32_t subplanToJson(const void* pObj, SJson* pJson) { if (TSDB_CODE_SUCCESS == code) { code = tjsonAddStringToObject(pJson, jkSubplanDbFName, pNode->dbFName); } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddStringToObject(pJson, jkSubplanUser, pNode->user); + } if (TSDB_CODE_SUCCESS == code) { code = tjsonAddObject(pJson, jkSubplanNodeAddr, queryNodeAddrToJson, &pNode->execNode); } @@ -2352,6 +2356,9 @@ static int32_t jsonToSubplan(const SJson* pJson, void* pObj) { if (TSDB_CODE_SUCCESS == code) { code = tjsonGetStringValue(pJson, jkSubplanDbFName, pNode->dbFName); } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetStringValue(pJson, jkSubplanUser, pNode->user); + } if (TSDB_CODE_SUCCESS == code) { code = tjsonToObject(pJson, jkSubplanNodeAddr, jsonToQueryNodeAddr, &pNode->execNode); } diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c index efb0f790e1..a16287cd4a 100644 --- a/source/libs/planner/src/planPhysiCreater.c +++ b/source/libs/planner/src/planPhysiCreater.c @@ -1453,6 +1453,7 @@ static SSubplan* makeSubplan(SPhysiPlanContext* pCxt, SLogicSubplan* pLogicSubpl pSubplan->id = pLogicSubplan->id; pSubplan->subplanType = pLogicSubplan->subplanType; pSubplan->level = pLogicSubplan->level; + strcpy(pSubplan->user, pCxt->pPlanCxt->pUser); return pSubplan; } diff --git a/source/libs/planner/test/planTestUtil.cpp b/source/libs/planner/test/planTestUtil.cpp index f1ab9c8196..c197a02dd1 100644 --- a/source/libs/planner/test/planTestUtil.cpp +++ b/source/libs/planner/test/planTestUtil.cpp @@ -85,8 +85,9 @@ class PlannerTestBaseImpl { public: PlannerTestBaseImpl() : sqlNo_(0) {} - void useDb(const string& acctId, const string& db) { - caseEnv_.acctId_ = acctId; + void useDb(const string& user, const string& db) { + caseEnv_.acctId_ = 0; + caseEnv_.user_ = user; caseEnv_.db_ = db; caseEnv_.nsql_ = g_skipSql; } @@ -193,7 +194,8 @@ class PlannerTestBaseImpl { private: struct caseEnv { - string acctId_; + int32_t acctId_; + string user_; string db_; int32_t nsql_; @@ -295,7 +297,7 @@ class PlannerTestBaseImpl { transform(stmtEnv_.sql_.begin(), stmtEnv_.sql_.end(), stmtEnv_.sql_.begin(), ::tolower); SParseContext cxt = {0}; - cxt.acctId = atoi(caseEnv_.acctId_.c_str()); + cxt.acctId = caseEnv_.acctId_; cxt.db = caseEnv_.db_.c_str(); cxt.pSql = stmtEnv_.sql_.c_str(); cxt.sqlLen = stmtEnv_.sql_.length(); @@ -319,12 +321,13 @@ class PlannerTestBaseImpl { void doParseBoundSql(SQuery* pQuery) { SParseContext cxt = {0}; - cxt.acctId = atoi(caseEnv_.acctId_.c_str()); + cxt.acctId = caseEnv_.acctId_; cxt.db = caseEnv_.db_.c_str(); cxt.pSql = stmtEnv_.sql_.c_str(); cxt.sqlLen = stmtEnv_.sql_.length(); cxt.pMsg = stmtEnv_.msgBuf_.data(); cxt.msgLen = stmtEnv_.msgBuf_.max_size(); + cxt.pUser = caseEnv_.user_.c_str(); DO_WITH_THROW(qStmtParseQuerySql, &cxt, pQuery); res_.ast_ = toString(pQuery->pRoot); @@ -364,6 +367,7 @@ class PlannerTestBaseImpl { void setPlanContext(SQuery* pQuery, SPlanContext* pCxt) { pCxt->queryId = 1; + pCxt->pUser = caseEnv_.user_.c_str(); if (QUERY_NODE_CREATE_TOPIC_STMT == nodeType(pQuery->pRoot)) { pCxt->pAstRoot = ((SCreateTopicStmt*)pQuery->pRoot)->pQuery; pCxt->topicQuery = true; @@ -403,7 +407,7 
@@ PlannerTestBase::PlannerTestBase() : impl_(new PlannerTestBaseImpl()) {} PlannerTestBase::~PlannerTestBase() {} -void PlannerTestBase::useDb(const std::string& acctId, const std::string& db) { impl_->useDb(acctId, db); } +void PlannerTestBase::useDb(const std::string& user, const std::string& db) { impl_->useDb(user, db); } void PlannerTestBase::run(const std::string& sql) { return impl_->run(sql); } diff --git a/source/libs/planner/test/planTestUtil.h b/source/libs/planner/test/planTestUtil.h index b188a7a054..7d2a9e533f 100644 --- a/source/libs/planner/test/planTestUtil.h +++ b/source/libs/planner/test/planTestUtil.h @@ -30,7 +30,7 @@ class PlannerTestBase : public testing::Test { PlannerTestBase(); virtual ~PlannerTestBase(); - void useDb(const std::string& acctId, const std::string& db); + void useDb(const std::string& user, const std::string& db); void run(const std::string& sql); // stmt mode APIs void prepare(const std::string& sql); From 4bc09c22adb0922ef2705869989e35222f0fc41c Mon Sep 17 00:00:00 2001 From: plum-lihui Date: Fri, 24 Jun 2022 18:05:57 +0800 Subject: [PATCH 08/16] test:merge 3.0 --- tests/test/c/tmqSim.c | 122 ++++++++++++++++++++++-------------------- 1 file changed, 63 insertions(+), 59 deletions(-) diff --git a/tests/test/c/tmqSim.c b/tests/test/c/tmqSim.c index 59005fae94..c2fba52e6e 100644 --- a/tests/test/c/tmqSim.c +++ b/tests/test/c/tmqSim.c @@ -22,9 +22,9 @@ #include #include "taos.h" +#include "taosdef.h" #include "taoserror.h" #include "tlog.h" -#include "taosdef.h" #include "types.h" #define GREEN "\033[1;32m" @@ -36,11 +36,7 @@ #define MAX_CONSUMER_THREAD_CNT (16) #define MAX_VGROUP_CNT (32) -typedef enum { - NOTIFY_CMD_START_CONSUM, - NOTIFY_CMD_START_COMMIT, - NOTIFY_CMD_ID_BUTT -}NOTIFY_CMD_ID; +typedef enum { NOTIFY_CMD_START_CONSUM, NOTIFY_CMD_START_COMMIT, NOTIFY_CMD_ID_BUTT } NOTIFY_CMD_ID; typedef struct { TdThread thread; @@ -52,8 +48,8 @@ typedef struct { // char autoOffsetRest[16]; // none, earliest, latest TdFilePtr pConsumeRowsFile; - int32_t ifCheckData; - int64_t expectMsgCnt; + int32_t ifCheckData; + int64_t expectMsgCnt; int64_t consumeMsgCnt; int64_t consumeRowCnt; @@ -89,6 +85,7 @@ typedef struct { int32_t saveRowFlag; int32_t consumeDelay; // unit s int32_t numOfThread; + int32_t useSnapshot; SThreadInfo stThreads[MAX_CONSUMER_THREAD_CNT]; } SConfInfo; @@ -217,6 +214,8 @@ void parseArgument(int32_t argc, char* argv[]) { g_stConfInfo.saveRowFlag = atol(argv[++i]); } else if (strcmp(argv[i], "-y") == 0) { g_stConfInfo.consumeDelay = atol(argv[++i]); + } else if (strcmp(argv[i], "-e") == 0) { + g_stConfInfo.useSnapshot = atol(argv[++i]); } else { pError("%s unknow para: %s %s", GREEN, argv[++i], NC); exit(-1); @@ -310,11 +309,11 @@ int32_t saveConsumeContentToTbl(SThreadInfo* pInfo, char* buf) { return 0; } -static char *shellFormatTimestamp(char *buf, int64_t val, int32_t precision) { - //if (shell.args.is_raw_time) { - // sprintf(buf, "%" PRId64, val); - // return buf; - //} +static char* shellFormatTimestamp(char* buf, int64_t val, int32_t precision) { + // if (shell.args.is_raw_time) { + // sprintf(buf, "%" PRId64, val); + // return buf; + // } time_t tt; int32_t ms = 0; @@ -352,7 +351,7 @@ static char *shellFormatTimestamp(char *buf, int64_t val, int32_t precision) { } } - struct tm *ptm = taosLocalTime(&tt, NULL); + struct tm* ptm = taosLocalTime(&tt, NULL); size_t pos = strftime(buf, 35, "%Y-%m-%d %H:%M:%S", ptm); if (precision == TSDB_TIME_PRECISION_NANO) { @@ -366,7 +365,8 @@ static char *shellFormatTimestamp(char *buf, 
int64_t val, int32_t precision) { return buf; } -static void shellDumpFieldToFile(TdFilePtr pFile, const char *val, TAOS_FIELD *field, int32_t length, int32_t precision) { +static void shellDumpFieldToFile(TdFilePtr pFile, const char* val, TAOS_FIELD* field, int32_t length, + int32_t precision) { if (val == NULL) { taosFprintfFile(pFile, "%s", TSDB_DATA_NULL_STR); return; @@ -376,31 +376,31 @@ static void shellDumpFieldToFile(TdFilePtr pFile, const char *val, TAOS_FIELD *f char buf[TSDB_MAX_BYTES_PER_ROW]; switch (field->type) { case TSDB_DATA_TYPE_BOOL: - taosFprintfFile(pFile, "%d", ((((int32_t)(*((char *)val))) == 1) ? 1 : 0)); + taosFprintfFile(pFile, "%d", ((((int32_t)(*((char*)val))) == 1) ? 1 : 0)); break; case TSDB_DATA_TYPE_TINYINT: - taosFprintfFile(pFile, "%d", *((int8_t *)val)); + taosFprintfFile(pFile, "%d", *((int8_t*)val)); break; case TSDB_DATA_TYPE_UTINYINT: - taosFprintfFile(pFile, "%u", *((uint8_t *)val)); + taosFprintfFile(pFile, "%u", *((uint8_t*)val)); break; case TSDB_DATA_TYPE_SMALLINT: - taosFprintfFile(pFile, "%d", *((int16_t *)val)); + taosFprintfFile(pFile, "%d", *((int16_t*)val)); break; case TSDB_DATA_TYPE_USMALLINT: - taosFprintfFile(pFile, "%u", *((uint16_t *)val)); + taosFprintfFile(pFile, "%u", *((uint16_t*)val)); break; case TSDB_DATA_TYPE_INT: - taosFprintfFile(pFile, "%d", *((int32_t *)val)); + taosFprintfFile(pFile, "%d", *((int32_t*)val)); break; case TSDB_DATA_TYPE_UINT: - taosFprintfFile(pFile, "%u", *((uint32_t *)val)); + taosFprintfFile(pFile, "%u", *((uint32_t*)val)); break; case TSDB_DATA_TYPE_BIGINT: - taosFprintfFile(pFile, "%" PRId64, *((int64_t *)val)); + taosFprintfFile(pFile, "%" PRId64, *((int64_t*)val)); break; case TSDB_DATA_TYPE_UBIGINT: - taosFprintfFile(pFile, "%" PRIu64, *((uint64_t *)val)); + taosFprintfFile(pFile, "%" PRIu64, *((uint64_t*)val)); break; case TSDB_DATA_TYPE_FLOAT: taosFprintfFile(pFile, "%.5f", GET_FLOAT_VAL(val)); @@ -421,7 +421,7 @@ static void shellDumpFieldToFile(TdFilePtr pFile, const char *val, TAOS_FIELD *f taosFprintfFile(pFile, "\'%s\'", buf); break; case TSDB_DATA_TYPE_TIMESTAMP: - shellFormatTimestamp(buf, *(int64_t *)val, precision); + shellFormatTimestamp(buf, *(int64_t*)val, precision); taosFprintfFile(pFile, "'%s'", buf); break; default: @@ -429,12 +429,13 @@ static void shellDumpFieldToFile(TdFilePtr pFile, const char *val, TAOS_FIELD *f } } -static void dumpToFileForCheck(TdFilePtr pFile, TAOS_ROW row, TAOS_FIELD* fields, int32_t* length, int32_t num_fields, int32_t precision) { +static void dumpToFileForCheck(TdFilePtr pFile, TAOS_ROW row, TAOS_FIELD* fields, int32_t* length, int32_t num_fields, + int32_t precision) { for (int32_t i = 0; i < num_fields; i++) { if (i > 0) { taosFprintfFile(pFile, "\n"); } - shellDumpFieldToFile(pFile, (const char *)row[i], fields + i, length[i], precision); + shellDumpFieldToFile(pFile, (const char*)row[i], fields + i, length[i], precision); } taosFprintfFile(pFile, "\n"); } @@ -444,40 +445,42 @@ static int32_t msg_process(TAOS_RES* msg, SThreadInfo* pInfo, int32_t msgIndex) int32_t totalRows = 0; // printf("topic: %s\n", tmq_get_topic_name(msg)); - int32_t vgroupId = tmq_get_vgroup_id(msg); - const char* dbName = tmq_get_db_name(msg); + int32_t vgroupId = tmq_get_vgroup_id(msg); + const char* dbName = tmq_get_db_name(msg); taosFprintfFile(g_fp, "consumerId: %d, msg index:%" PRId64 "\n", pInfo->consumerId, msgIndex); - taosFprintfFile(g_fp, "dbName: %s, topic: %s, vgroupId: %d\n", dbName != NULL ? 
dbName : "invalid table", tmq_get_topic_name(msg), vgroupId); + taosFprintfFile(g_fp, "dbName: %s, topic: %s, vgroupId: %d\n", dbName != NULL ? dbName : "invalid table", + tmq_get_topic_name(msg), vgroupId); while (1) { TAOS_ROW row = taos_fetch_row(msg); if (row == NULL) break; - TAOS_FIELD* fields = taos_fetch_fields(msg); + TAOS_FIELD* fields = taos_fetch_fields(msg); int32_t numOfFields = taos_field_count(msg); - int32_t* length = taos_fetch_lengths(msg); - int32_t precision = taos_result_precision(msg); - const char* tbName = tmq_get_table_name(msg); + int32_t* length = taos_fetch_lengths(msg); + int32_t precision = taos_result_precision(msg); + const char* tbName = tmq_get_table_name(msg); - #if 0 + #if 0 // get schema //============================== stub =================================================// for (int32_t i = 0; i < numOfFields; i++) { taosFprintfFile(g_fp, "%02d: name: %s, type: %d, len: %d\n", i, fields[i].name, fields[i].type, fields[i].bytes); } //============================== stub =================================================// - #endif - - dumpToFileForCheck(pInfo->pConsumeRowsFile, row, fields, length, numOfFields, precision); + #endif + + dumpToFileForCheck(pInfo->pConsumeRowsFile, row, fields, length, numOfFields, precision); + taos_print_row(buf, row, fields, numOfFields); if (0 != g_stConfInfo.showRowFlag) { taosFprintfFile(g_fp, "tbname:%s, rows[%d]: %s\n", (tbName != NULL ? tbName : "null table"), totalRows, buf); - //if (0 != g_stConfInfo.saveRowFlag) { - // saveConsumeContentToTbl(pInfo, buf); - //} + // if (0 != g_stConfInfo.saveRowFlag) { + // saveConsumeContentToTbl(pInfo, buf); + // } } totalRows++; @@ -500,8 +503,7 @@ int queryDB(TAOS* taos, char* command) { return 0; } -static void appNothing(void* param, TAOS_RES* res, int32_t numOfRows) { -} +static void appNothing(void* param, TAOS_RES* res, int32_t numOfRows) {} int32_t notifyMainScript(SThreadInfo* pInfo, int32_t cmdId) { char sqlStr[1024] = {0}; @@ -509,11 +511,8 @@ int32_t notifyMainScript(SThreadInfo* pInfo, int32_t cmdId) { int64_t now = taosGetTimestampMs(); // schema: ts timestamp, consumerid int, consummsgcnt bigint, checkresult int - sprintf(sqlStr, "insert into %s.notifyinfo values (%"PRId64", %d, %d)", - g_stConfInfo.cdbName, - now, - cmdId, - pInfo->consumerId); + sprintf(sqlStr, "insert into %s.notifyinfo values (%" PRId64 ", %d, %d)", g_stConfInfo.cdbName, now, cmdId, + pInfo->consumerId); taos_query_a(pInfo->taos, sqlStr, appNothing, NULL); @@ -523,12 +522,12 @@ int32_t notifyMainScript(SThreadInfo* pInfo, int32_t cmdId) { } static int32_t g_once_commit_flag = 0; -static void tmq_commit_cb_print(tmq_t* tmq, int32_t code, void* param) { - pError("tmq_commit_cb_print() commit %d\n", code); +static void tmq_commit_cb_print(tmq_t* tmq, int32_t code, void* param) { + pError("tmq_commit_cb_print() commit %d\n", code); if (0 == g_once_commit_flag) { g_once_commit_flag = 1; - notifyMainScript((SThreadInfo*)param, (int32_t)NOTIFY_CMD_START_COMMIT); + notifyMainScript((SThreadInfo*)param, (int32_t)NOTIFY_CMD_START_COMMIT); } char tmpString[128]; @@ -564,6 +563,10 @@ void build_consumer(SThreadInfo* pInfo) { // tmq_conf_set(conf, "auto.offset.reset", "none"); // tmq_conf_set(conf, "auto.offset.reset", "earliest"); // tmq_conf_set(conf, "auto.offset.reset", "latest"); + // + if (g_stConfInfo.useSnapshot) { + tmq_conf_set(conf, "experiment.use.snapshot", "true"); + } pInfo->tmq = tmq_consumer_new(conf, NULL, 0); @@ -618,12 +621,13 @@ void loop_consume(SThreadInfo* pInfo) { 
pInfo->consumerId); pInfo->ts = taosGetTimestampMs(); - + if (pInfo->ifCheckData) { - char filename[256] = {0}; + char filename[256] = {0}; char tmpString[128]; - //sprintf(filename, "%s/../log/consumerid_%d_%s.txt", configDir, pInfo->consumerId, getCurrentTimeString(tmpString)); - sprintf(filename, "%s/../log/consumerid_%d.txt", configDir, pInfo->consumerId); + // sprintf(filename, "%s/../log/consumerid_%d_%s.txt", configDir, pInfo->consumerId, + // getCurrentTimeString(tmpString)); + sprintf(filename, "%s/../log/consumerid_%d.txt", configDir, pInfo->consumerId); pInfo->pConsumeRowsFile = taosOpenFile(filename, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC | TD_FILE_STREAM); if (pInfo->pConsumeRowsFile == NULL) { taosFprintfFile(g_fp, "%s create file fail for save rows content\n", getCurrentTimeString(tmpString)); @@ -642,10 +646,10 @@ void loop_consume(SThreadInfo* pInfo) { totalMsgs++; - if (0 == once_flag) { + if (0 == once_flag) { once_flag = 1; - notifyMainScript(pInfo, NOTIFY_CMD_START_CONSUM); - } + notifyMainScript(pInfo, NOTIFY_CMD_START_CONSUM); + } if (totalRows >= pInfo->expectMsgCnt) { char tmpString[128]; @@ -678,7 +682,7 @@ void* consumeThreadFunc(void* param) { pInfo->taos = taos_connect(NULL, "root", "taosdata", NULL, 0); if (pInfo->taos == NULL) { taosFprintfFile(g_fp, "taos_connect() fail, can not notify and save consume result to main scripte\n"); - return NULL; + return NULL; } build_consumer(pInfo); From 0701da48f4fa2a2c51bf1f76a1f957b26ce83874 Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Fri, 24 Jun 2022 18:39:44 +0800 Subject: [PATCH 09/16] feat: subplan adds 'user' field --- source/client/src/clientImpl.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index 73d449412f..489966b636 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -955,7 +955,8 @@ void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData* pResultM .pAstRoot = pQuery->pRoot, .showRewrite = pQuery->showRewrite, .pMsg = pRequest->msgBuf, - .msgLen = ERROR_MSG_BUF_DEFAULT_SIZE}; + .msgLen = ERROR_MSG_BUF_DEFAULT_SIZE, + .pUser = pRequest->pTscObj->user}; SAppInstInfo* pAppInfo = getAppInfo(pRequest); code = qCreateQueryPlan(&cxt, &pRequest->body.pDag, pMnodeList); From f810212d29f649b6e9669cfd67f9a8f3c1742953 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Fri, 24 Jun 2022 19:03:56 +0800 Subject: [PATCH 10/16] fix: no redirect resp --- source/dnode/mnode/impl/src/mndMain.c | 31 +++++++++++++++++---------- 1 file changed, 20 insertions(+), 11 deletions(-) diff --git a/source/dnode/mnode/impl/src/mndMain.c b/source/dnode/mnode/impl/src/mndMain.c index 000e1041d0..320dd4127e 100644 --- a/source/dnode/mnode/impl/src/mndMain.c +++ b/source/dnode/mnode/impl/src/mndMain.c @@ -529,24 +529,33 @@ static int32_t mndCheckMnodeState(SRpcMsg *pMsg) { if (!IsReq(pMsg)) return 0; if (mndAcquireRpcRef(pMsg->info.node) == 0) return 0; if (pMsg->msgType == TDMT_MND_MQ_TIMER || pMsg->msgType == TDMT_MND_TELEM_TIMER || - pMsg->msgType == TDMT_MND_TRANS_TIMER || TDMT_MND_TTL_TIMER) { + pMsg->msgType == TDMT_MND_TRANS_TIMER || pMsg->msgType == TDMT_MND_TTL_TIMER) { return -1; } - const STraceId *trace = &pMsg->info.traceId; - mError("msg:%p, failed to check mnode state since %s, type:%s", pMsg, terrstr(), TMSG_INFO(pMsg->msgType)); - SEpSet epSet = {0}; mndGetMnodeEpSet(pMsg->info.node, &epSet); - int32_t contLen = tSerializeSEpSet(NULL, 0, &epSet); - pMsg->info.rsp = 
rpcMallocCont(contLen); - if (pMsg->info.rsp != NULL) { - tSerializeSEpSet(pMsg->info.rsp, contLen, &epSet); - pMsg->info.rspLen = contLen; - terrno = TSDB_CODE_RPC_REDIRECT; + const STraceId *trace = &pMsg->info.traceId; + mError("msg:%p, failed to check mnode state since %s, type:%s, numOfMnodes:%d inUse:%d", pMsg, terrstr(), + TMSG_INFO(pMsg->msgType), epSet.numOfEps, epSet.inUse); + + if (epSet.numOfEps > 0) { + for (int32_t i = 0; i < epSet.numOfEps; ++i) { + mInfo("mnode index:%d, ep:%s:%u", i, epSet.eps[i].fqdn, epSet.eps[i].port); + } + + int32_t contLen = tSerializeSEpSet(NULL, 0, &epSet); + pMsg->info.rsp = rpcMallocCont(contLen); + if (pMsg->info.rsp != NULL) { + tSerializeSEpSet(pMsg->info.rsp, contLen, &epSet); + pMsg->info.rspLen = contLen; + terrno = TSDB_CODE_RPC_REDIRECT; + } else { + terrno = TSDB_CODE_OUT_OF_MEMORY; + } } else { - terrno = TSDB_CODE_OUT_OF_MEMORY; + terrno = TSDB_CODE_APP_NOT_READY; } return -1; From 994232c7ac372b0bcecea97601e8246d06482d9f Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Fri, 24 Jun 2022 19:10:00 +0800 Subject: [PATCH 11/16] feat: subplan adds 'user' field --- source/libs/planner/src/planPhysiCreater.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c index a16287cd4a..37b69ffdfe 100644 --- a/source/libs/planner/src/planPhysiCreater.c +++ b/source/libs/planner/src/planPhysiCreater.c @@ -1453,7 +1453,9 @@ static SSubplan* makeSubplan(SPhysiPlanContext* pCxt, SLogicSubplan* pLogicSubpl pSubplan->id = pLogicSubplan->id; pSubplan->subplanType = pLogicSubplan->subplanType; pSubplan->level = pLogicSubplan->level; - strcpy(pSubplan->user, pCxt->pPlanCxt->pUser); + if (NULL != pCxt->pPlanCxt->pUser) { + strcpy(pSubplan->user, pCxt->pPlanCxt->pUser); + } return pSubplan; } From bd13d0b7ad27909026852ededf0fe34ed6822729 Mon Sep 17 00:00:00 2001 From: plum-lihui Date: Fri, 24 Jun 2022 19:31:45 +0800 Subject: [PATCH 12/16] test: modify case --- tests/system-test/6-cluster/5dnode3mnodeStop.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/system-test/6-cluster/5dnode3mnodeStop.py b/tests/system-test/6-cluster/5dnode3mnodeStop.py index 69b9c3d879..3a85207af6 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeStop.py +++ b/tests/system-test/6-cluster/5dnode3mnodeStop.py @@ -227,6 +227,7 @@ class TDTestCase: # fisr add three mnodes; tdSql.execute("create mnode on dnode 2") + time.sleep(10) tdSql.execute("create mnode on dnode 3") # fisrt check statut ready From f8bb36ce2c8b5c2e3b536a2a513b316fa1896536 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Fri, 24 Jun 2022 20:08:07 +0800 Subject: [PATCH 13/16] enh(query): add _group_key function for partion by TD-16805 --- include/libs/function/functionMgt.h | 1 + source/libs/function/inc/builtinsimpl.h | 3 +++ source/libs/function/src/builtins.c | 22 ++++++++++++++++++- source/libs/function/src/builtinsimpl.c | 28 +++++++++++++++++++++++++ 4 files changed, 53 insertions(+), 1 deletion(-) diff --git a/include/libs/function/functionMgt.h b/include/libs/function/functionMgt.h index 1ca51b3043..1ed78750d1 100644 --- a/include/libs/function/functionMgt.h +++ b/include/libs/function/functionMgt.h @@ -124,6 +124,7 @@ typedef enum EFunctionType { FUNCTION_TYPE_BLOCK_DIST, // block distribution aggregate function FUNCTION_TYPE_BLOCK_DIST_INFO, // block distribution pseudo column function FUNCTION_TYPE_TO_COLUMN, + FUNCTION_TYPE_GROUP_KEY, // distributed splitting functions 
FUNCTION_TYPE_APERCENTILE_PARTIAL = 4000, diff --git a/source/libs/function/inc/builtinsimpl.h b/source/libs/function/inc/builtinsimpl.h index 4d2f926c12..b3ce6abe87 100644 --- a/source/libs/function/inc/builtinsimpl.h +++ b/source/libs/function/inc/builtinsimpl.h @@ -202,6 +202,9 @@ bool blockDistSetup(SqlFunctionCtx *pCtx, SResultRowEntryInfo* pResultInfo); int32_t blockDistFunction(SqlFunctionCtx *pCtx); int32_t blockDistFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock); +bool getGroupKeyFuncEnv(SFunctionNode* pFunc, SFuncExecEnv* pEnv); +int32_t groupKeyFunction(SqlFunctionCtx* pCtx); + #ifdef __cplusplus } #endif diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index 6516a10795..f3f1097148 100644 --- a/source/libs/function/src/builtins.c +++ b/source/libs/function/src/builtins.c @@ -1515,6 +1515,16 @@ static bool getBlockDistFuncEnv(SFunctionNode* UNUSED_PARAM(pFunc), SFuncExecEnv return true; } +static int32_t translateGroupKey(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { + if (1 != LIST_LENGTH(pFunc->pParameterList)) { + return TSDB_CODE_SUCCESS; + } + + SNode* pPara = nodesListGetNode(pFunc->pParameterList, 0); + pFunc->node.resType = ((SExprNode*)pPara)->resType; + return TSDB_CODE_SUCCESS; +} + // clang-format off const SBuiltinFuncDefinition funcMgtBuiltins[] = { { @@ -2499,7 +2509,17 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .type = FUNCTION_TYPE_BLOCK_DIST_INFO, .classification = FUNC_MGT_PSEUDO_COLUMN_FUNC | FUNC_MGT_SCAN_PC_FUNC, .translateFunc = translateBlockDistInfoFunc, - } + }, + { + .name = "_group_key", + .type = FUNCTION_TYPE_GROUP_KEY, + .classification = FUNC_MGT_AGG_FUNC, + .translateFunc = translateGroupKey, + .getEnvFunc = getGroupKeyFuncEnv, + .initFunc = functionSetup, + .processFunc = groupKeyFunction, + .finalizeFunc = functionFinalize, + }, }; // clang-format on diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index 4ec6432fc8..42c316b01d 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -2402,6 +2402,12 @@ bool getSelectivityFuncEnv(SFunctionNode* pFunc, SFuncExecEnv* pEnv) { return true; } +bool getGroupKeyFuncEnv(SFunctionNode* pFunc, SFuncExecEnv* pEnv) { + SColumnNode* pNode = (SColumnNode*)nodesListGetNode(pFunc->pParameterList, 0); + pEnv->calcMemSize = pNode->node.resType.bytes; + return true; +} + static FORCE_INLINE TSKEY getRowPTs(SColumnInfoData* pTsColInfo, int32_t rowIndex) { if (pTsColInfo == NULL) { return 0; @@ -5349,6 +5355,28 @@ int32_t irateFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { return pResInfo->numOfRes; } +int32_t groupKeyFunction(SqlFunctionCtx* pCtx) { + SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx); + char* buf = GET_ROWCELL_INTERBUF(pResInfo); + + SInputColumnInfoData* pInput = &pCtx->input; + SColumnInfoData* pInputCol = pInput->pData[0]; + + int32_t bytes = pInputCol->info.bytes; + + int32_t startIndex = pInput->startRowIndex; + if (colDataIsNull_s(pInputCol, startIndex)) { + pResInfo->numOfRes = 0; + return TSDB_CODE_SUCCESS; + } + + char* data = colDataGetData(pInputCol, startIndex); + memcpy(buf, data, bytes); + SET_VAL(pResInfo, 1, 1); + + return TSDB_CODE_SUCCESS; +} + int32_t interpFunction(SqlFunctionCtx* pCtx) { #if 0 int32_t fillType = (int32_t) pCtx->param[2].i64; From 5c3fae340ecbd16fc34a9fdd6a27fdc0454dedec Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Fri, 24 Jun 2022 20:26:46 +0800 Subject: [PATCH 14/16] add NULL logic for _group_key --- 
source/libs/function/inc/builtinsimpl.h | 1 + source/libs/function/src/builtins.c | 2 +- source/libs/function/src/builtinsimpl.c | 33 ++++++++++++++++++++----- 3 files changed, 29 insertions(+), 7 deletions(-) diff --git a/source/libs/function/inc/builtinsimpl.h b/source/libs/function/inc/builtinsimpl.h index b3ce6abe87..0820e884ce 100644 --- a/source/libs/function/inc/builtinsimpl.h +++ b/source/libs/function/inc/builtinsimpl.h @@ -204,6 +204,7 @@ int32_t blockDistFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock); bool getGroupKeyFuncEnv(SFunctionNode* pFunc, SFuncExecEnv* pEnv); int32_t groupKeyFunction(SqlFunctionCtx* pCtx); +int32_t groupKeyFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock); #ifdef __cplusplus } diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index f3f1097148..6744ebe4b9 100644 --- a/source/libs/function/src/builtins.c +++ b/source/libs/function/src/builtins.c @@ -2518,7 +2518,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .getEnvFunc = getGroupKeyFuncEnv, .initFunc = functionSetup, .processFunc = groupKeyFunction, - .finalizeFunc = functionFinalize, + .finalizeFunc = groupKeyFinalize, }, }; // clang-format on diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index 42c316b01d..61f3154811 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -262,6 +262,12 @@ typedef struct SRateInfo { int8_t hasResult; // flag to denote has value } SRateInfo; +typedef struct SGroupKeyInfo{ + bool hasResult; + char data[]; +} SGroupKeyInfo; + + #define SET_VAL(_info, numOfElem, res) \ do { \ if ((numOfElem) <= 0) { \ @@ -2404,7 +2410,7 @@ bool getSelectivityFuncEnv(SFunctionNode* pFunc, SFuncExecEnv* pEnv) { bool getGroupKeyFuncEnv(SFunctionNode* pFunc, SFuncExecEnv* pEnv) { SColumnNode* pNode = (SColumnNode*)nodesListGetNode(pFunc->pParameterList, 0); - pEnv->calcMemSize = pNode->node.resType.bytes; + pEnv->calcMemSize = sizeof(SGroupKeyInfo) + pNode->node.resType.bytes; return true; } @@ -5357,7 +5363,7 @@ int32_t irateFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { int32_t groupKeyFunction(SqlFunctionCtx* pCtx) { SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx); - char* buf = GET_ROWCELL_INTERBUF(pResInfo); + SGroupKeyInfo* pInfo = GET_ROWCELL_INTERBUF(pResInfo); SInputColumnInfoData* pInput = &pCtx->input; SColumnInfoData* pInputCol = pInput->pData[0]; @@ -5366,17 +5372,32 @@ int32_t groupKeyFunction(SqlFunctionCtx* pCtx) { int32_t startIndex = pInput->startRowIndex; if (colDataIsNull_s(pInputCol, startIndex)) { - pResInfo->numOfRes = 0; - return TSDB_CODE_SUCCESS; + pInfo->hasResult = false; + goto _group_key_over; } + pInfo->hasResult = true; char* data = colDataGetData(pInputCol, startIndex); - memcpy(buf, data, bytes); - SET_VAL(pResInfo, 1, 1); + memcpy(pInfo->data, data, bytes); +_group_key_over: + + SET_VAL(pResInfo, 1, 1); return TSDB_CODE_SUCCESS; } +int32_t groupKeyFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { + int32_t slotId = pCtx->pExpr->base.resSchema.slotId; + SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, slotId); + + SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx); + + SGroupKeyInfo* pInfo = GET_ROWCELL_INTERBUF(pResInfo); + colDataAppend(pCol, pBlock->info.rows, pInfo->data, pInfo->hasResult ? 
false : true); + + return pResInfo->numOfRes; +} + int32_t interpFunction(SqlFunctionCtx* pCtx) { #if 0 int32_t fillType = (int32_t) pCtx->param[2].i64; From 0964b072290ae1c64bcd9168db31c750b30af046 Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Fri, 24 Jun 2022 21:44:53 +0800 Subject: [PATCH 15/16] fix(stream): support none type in submit msg --- examples/c/stream_demo.c | 17 ++++++++++++----- source/dnode/vnode/src/tq/tqRead.c | 2 +- source/libs/stream/src/streamDispatch.c | 3 +-- 3 files changed, 14 insertions(+), 8 deletions(-) diff --git a/examples/c/stream_demo.c b/examples/c/stream_demo.c index 961eb6c93a..a4fc30ff65 100644 --- a/examples/c/stream_demo.c +++ b/examples/c/stream_demo.c @@ -25,7 +25,7 @@ int32_t init_env() { return -1; } - TAOS_RES* pRes = taos_query(pConn, "create database if not exists abc1 vgroups 1"); + TAOS_RES* pRes = taos_query(pConn, "create database if not exists abc1 vgroups 2"); if (taos_errno(pRes) != 0) { printf("error in create db, reason:%s\n", taos_errstr(pRes)); return -1; @@ -68,6 +68,14 @@ int32_t init_env() { return -1; } taos_free_result(pRes); + + pRes = taos_query(pConn, "create table if not exists tu3 using st1 tags(3)"); + if (taos_errno(pRes) != 0) { + printf("failed to create child table tu3, reason:%s\n", taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); + return 0; } @@ -90,10 +98,9 @@ int32_t create_stream() { /*const char* sql = "select min(k), max(k), sum(k) as sum_of_k from st1";*/ /*const char* sql = "select sum(k) from tu1 interval(10m)";*/ /*pRes = tmq_create_stream(pConn, "stream1", "out1", sql);*/ - pRes = taos_query( - pConn, - "create stream stream1 trigger window_close watermark 10s into outstb as select _wstartts, sum(k) from st1 " - "interval(10s) "); + pRes = taos_query(pConn, + "create stream stream1 trigger at_once into outstb as select _wstartts, sum(k) from st1 " + "partition by tbname interval(10s) "); if (taos_errno(pRes) != 0) { printf("failed to create stream stream1, reason:%s\n", taos_errstr(pRes)); return -1; diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c index 0243f2a9f0..6184d8f810 100644 --- a/source/dnode/vnode/src/tq/tqRead.c +++ b/source/dnode/vnode/src/tq/tqRead.c @@ -243,7 +243,7 @@ int32_t tqRetrieveDataBlock(SSDataBlock* pBlock, STqReadHandle* pHandle, uint64_ if (!tdSTSRowIterNext(&iter, pColData->info.colId, pColData->info.type, &sVal)) { break; } - if (colDataAppend(pColData, curRow, sVal.val, sVal.valType == TD_VTYPE_NULL) < 0) { + if (colDataAppend(pColData, curRow, sVal.val, sVal.valType != TD_VTYPE_NORM) < 0) { goto FAIL; } } diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index 179dc88d2a..a8b18210dd 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -63,7 +63,6 @@ int32_t tDecodeStreamDispatchReq(SDecoder* pDecoder, SStreamDispatchReq* pReq) { } int32_t tEncodeStreamRetrieveReq(SEncoder* pEncoder, const SStreamRetrieveReq* pReq) { - // if (tStartEncode(pEncoder) < 0) return -1; if (tEncodeI64(pEncoder, pReq->streamId) < 0) return -1; if (tEncodeI32(pEncoder, pReq->dstNodeId) < 0) return -1; @@ -84,7 +83,7 @@ int32_t tDecodeStreamRetrieveReq(SDecoder* pDecoder, SStreamRetrieveReq* pReq) { if (tDecodeI32(pDecoder, &pReq->srcTaskId) < 0) return -1; uint64_t len = 0; if (tDecodeBinaryAlloc(pDecoder, (void**)&pReq->pRetrieve, &len) < 0) return -1; - pReq->retrieveLen = len; + pReq->retrieveLen = (int32_t)len; tEndDecode(pDecoder); return 0; } From 
0cc28d6ee2fd3e86a1080ef8b560bc5289cd7323 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Sat, 25 Jun 2022 08:27:11 +0800 Subject: [PATCH 16/16] test: minor changes --- tests/script/tsim/mnode/basic5.sim | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/script/tsim/mnode/basic5.sim b/tests/script/tsim/mnode/basic5.sim index 23f5f6d782..c017d7f23f 100644 --- a/tests/script/tsim/mnode/basic5.sim +++ b/tests/script/tsim/mnode/basic5.sim @@ -157,7 +157,7 @@ step61: if $x == 10 then return -1 endi -sql show mnodes +sql show mnodes -x step61 print ===> $data00 $data01 $data02 $data03 $data04 $data05 print ===> $data10 $data11 $data12 $data13 $data14 $data15 print ===> $data20 $data21 $data22 $data23 $data24 $data25
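
Patches 13 and 14 in this series add the `_group_key` aggregate: the process step copies the group key from the first row of each group into an `SGroupKeyInfo` buffer, and a `hasResult` flag lets the finalize step reproduce a NULL key as NULL instead of as zero bytes. The sketch below is a minimal, standalone illustration of that capture-then-finalize pattern; the `SMockCol` column type, the demo function names, and `main` are assumptions made only for this example and are not part of the TDengine function-manager API.

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Mock column: fixed-width values plus a per-row NULL flag (assumption for the demo). */
typedef struct {
  int32_t bytes;   /* width of one value in bytes */
  char   *data;    /* row-major values            */
  bool   *isNull;  /* per-row NULL flags          */
} SMockCol;

/* Mirrors the SGroupKeyInfo idea from the patch: a flag followed by the raw key bytes. */
typedef struct {
  bool hasResult;
  char data[];     /* flexible array member sized to the key width */
} SGroupKeyInfo;

/* "process" step: capture the key from the first row of the group. */
static void groupKeyProcessDemo(SGroupKeyInfo *pInfo, const SMockCol *pCol, int32_t startRow) {
  if (pCol->isNull[startRow]) {
    pInfo->hasResult = false;  /* a NULL key stays NULL in the output */
    return;
  }
  pInfo->hasResult = true;
  memcpy(pInfo->data, pCol->data + (size_t)startRow * pCol->bytes, pCol->bytes);
}

/* "finalize" step: emit either NULL or the captured key. */
static void groupKeyFinalizeDemo(const SGroupKeyInfo *pInfo) {
  if (!pInfo->hasResult) {
    printf("_group_key -> NULL\n");
    return;
  }
  int32_t v;
  memcpy(&v, pInfo->data, sizeof(v));
  printf("_group_key -> %" PRId32 "\n", v);
}

int main(void) {
  int32_t  keys[]  = {42, 42, 42};
  bool     nulls[] = {false, false, false};
  SMockCol col     = {.bytes = sizeof(int32_t), .data = (char *)keys, .isNull = nulls};

  SGroupKeyInfo *pInfo = calloc(1, sizeof(SGroupKeyInfo) + col.bytes);
  if (pInfo == NULL) return 1;

  groupKeyProcessDemo(pInfo, &col, 0);  /* startRowIndex of the group */
  groupKeyFinalizeDemo(pInfo);          /* prints: _group_key -> 42   */

  free(pInfo);
  return 0;
}

Compared with writing the key straight into the result buffer, keeping the flag in front of the flexible array member distinguishes "the key was NULL" from a key whose bytes happen to be zero, which is the case the NULL-handling follow-up (patch 14) addresses.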