From 32b0ac885dd4754c2a8fee00df223da3108aae54 Mon Sep 17 00:00:00 2001
From: "wenzhouwww@live.cn"
Date: Tue, 26 Jul 2022 13:53:01 +0800
Subject: [PATCH 01/63] add case about query and constantly insert datas

---
 tests/system-test/2-query/max_partition.py    |   8 +-
 ...mnode_basic_replica1_insertdatas_querys.py | 201 ++++++++++++++++++
 ...mnode_basic_replica3_insertdatas_querys.py | 201 ++++++++++++++++++
 3 files changed, 403 insertions(+), 7 deletions(-)
 create mode 100644 tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas_querys.py
 create mode 100644 tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys.py

diff --git a/tests/system-test/2-query/max_partition.py b/tests/system-test/2-query/max_partition.py
index 109c9075f5..2e27a6a787 100644
--- a/tests/system-test/2-query/max_partition.py
+++ b/tests/system-test/2-query/max_partition.py
@@ -194,13 +194,7 @@ class TDTestCase:
         tdSql.checkRows(90)
         # bug need fix
         # tdSql.checkData(0,1,None)
-
-
-
-
-
-
-
+
         # bug need fix
         # tdSql.query(" select tbname , max(c1) from stb partition by tbname order by tbname slimit 5 soffset 0 ")
         # tdSql.checkRows(5)
diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas_querys.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas_querys.py
new file mode 100644
index 0000000000..df3e6f24e9
--- /dev/null
+++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas_querys.py
@@ -0,0 +1,201 @@
+# author : wenzhouwww
+from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
+import taos
+import sys
+import time
+import os
+
+from util.log import *
+from util.sql import *
+from util.cases import *
+from util.dnodes import TDDnodes
+from util.dnodes import TDDnode
+from util.cluster import *
+
+import time
+import socket
+import subprocess ,threading
+sys.path.append(os.path.dirname(__file__))
+
+class TDTestCase:
+    def init(self,conn ,logSql):
+        tdLog.debug(f"start to excute {__file__}")
+        tdSql.init(conn.cursor())
+        self.host = socket.gethostname()
+        self.mnode_list = {}
+        self.dnode_list = {}
+        self.ts = 1483200000000
+        self.db_name ='testdb'
+        self.replica = 1
+        self.vgroups = 1
+        self.tb_nums = 10
+        self.row_nums = 2000
+
+    def getBuildPath(self):
+        selfPath = os.path.dirname(os.path.realpath(__file__))
+        if ("community" in selfPath):
+            projPath = selfPath[:selfPath.find("community")]
+        else:
+            projPath = selfPath[:selfPath.find("tests")]
+
+        for root, dirs, files in os.walk(projPath):
+            if ("taosd" in files):
+                rootRealPath = os.path.dirname(os.path.realpath(root))
+                if ("packaging" not in rootRealPath):
+                    buildPath = root[:len(root) - len("/build/bin")]
+                    break
+        return buildPath
+
+    def check_setup_cluster_status(self):
+        tdSql.query("show mnodes")
+        for mnode in tdSql.queryResult:
+            name = mnode[1]
+            info = mnode
+            self.mnode_list[name] = info
+
+        tdSql.query("show dnodes")
+        for dnode in tdSql.queryResult:
+            name = dnode[1]
+            info = dnode
+            self.dnode_list[name] = info
+
+        count = 0
+        is_leader = False
+        mnode_name = ''
+        for k,v in self.mnode_list.items():
+            count +=1
+            # only for 1 mnode
+            mnode_name = k
+
+            if v[2] =='leader':
+                is_leader=True
+
+        if count==1 and is_leader:
+            tdLog.notice("===== depoly cluster success with 1 mnode as leader =====")
+        else:
+            tdLog.exit("===== depoly cluster fail with 1 mnode as leader =====")
+
+        for k ,v in self.dnode_list.items():
+            if k == mnode_name:
+                if v[3]==0:
+                    tdLog.notice("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3]))
+                else:
+                    tdLog.exit("===== depoly cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name,v[3]))
+            else:
+                continue
+
+    def create_db_check_vgroups(self):
+
+        tdSql.execute("drop database if exists test")
+        tdSql.execute("create database if not exists test replica 1 duration 300")
+        tdSql.execute("use test")
+        tdSql.execute(
+            '''create table stb1
+            (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
+            tags (t1 int)
+            '''
+        )
+        tdSql.execute(
+            '''
+            create table t1
+            (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
+            '''
+        )
+
+        for i in range(5):
+            tdSql.execute("create table sub_tb_{} using stb1 tags({})".format(i,i))
+        tdSql.query("show stables")
+        tdSql.checkRows(1)
+        tdSql.query("show tables")
+        tdSql.checkRows(6)
+
+        tdSql.query("show test.vgroups;")
+        vgroups_infos = {}  # key is id: value is info list
+        for vgroup_info in tdSql.queryResult:
+            vgroup_id = vgroup_info[0]
+            tmp_list = []
+            for role in vgroup_info[3:-4]:
+                if role in ['leader','follower']:
+                    tmp_list.append(role)
+            vgroups_infos[vgroup_id]=tmp_list
+
+        for k , v in vgroups_infos.items():
+            if len(v) ==1 and v[0]=="leader":
+                tdLog.notice(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
+            else:
+                tdLog.exit(" === create database replica only 1 role leader check fail of vgroup_id {} ======".format(k))
+
+    def create_db_replica_3_insertdatas(self, dbname, replica_num ,vgroup_nums ,tb_nums , row_nums ):
+        newTdSql=tdCom.newTdSql()
+        drop_db_sql = "drop database if exists {}".format(dbname)
+        create_db_sql = "create database {} replica {} vgroups {}".format(dbname,replica_num,vgroup_nums)
+
+        tdLog.notice(" ==== create database {} and insert rows begin =====".format(dbname))
+        newTdSql.execute(drop_db_sql)
+        newTdSql.execute(create_db_sql)
+        newTdSql.execute("use {}".format(dbname))
+        newTdSql.execute(
+            '''create table stb1
+            (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp)
+            tags (t1 int)
+            '''
+        )
+        newTdSql.execute(
+            '''
+            create table t1
+            (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp)
+            '''
+        )
+
+        for i in range(tb_nums):
+            sub_tbname = "sub_tb_{}".format(i)
+            newTdSql.execute("create table {} using stb1 tags({})".format(sub_tbname,i))
+            # insert datas about new database
+
+            for row_num in range(row_nums):
+                ts = self.ts + 1000*row_num
+                newTdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
+
+        tdLog.notice(" ==== create database {} and insert rows execute end =====".format(dbname))
+
+
+
+    def check_insert_status(self, dbname, tb_nums , row_nums):
+        newTdSql=tdCom.newTdSql()
+        newTdSql.execute("use {}".format(dbname))
+        newTdSql.query("select count(*) from {}.{}".format(dbname,'stb1'))
+        # tdSql.checkData(0 , 0 , tb_nums*row_nums)
+        newTdSql.query("select distinct tbname from {}.{}".format(dbname,'stb1'))
+        # tdSql.checkRows(tb_nums)
+
+    def loop_query_constantly(self, times , db_name, tb_nums ,row_nums):
+
+        for loop_time in range(times):
+            tdLog.debug(" === query is going ,this is {}_th query === ".format(loop_time))
+            self.check_insert_status( db_name, tb_nums , row_nums)
+
+    def run(self):
+
self.check_setup_cluster_status() + self.create_db_check_vgroups() + + # start writing constantly + writing = threading.Thread(target = self.create_db_replica_3_insertdatas, args=(self.db_name , self.replica , self.vgroups , self.tb_nums , self.row_nums)) + writing.start() + tdSql.query(" show {}.stables ".format(self.db_name)) + while not tdSql.queryResult: + print(tdSql.queryResult) + time.sleep(0.1) + tdSql.query(" show {}.stables ".format(self.db_name)) + + reading = threading.Thread(target = self.loop_query_constantly, args=(1000,self.db_name , self.tb_nums , self.row_nums)) + reading.start() + + writing.join() + reading.join() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys.py new file mode 100644 index 0000000000..b92378060f --- /dev/null +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys.py @@ -0,0 +1,201 @@ +# author : wenzhouwww +from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE +import taos +import sys +import time +import os + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import TDDnodes +from util.dnodes import TDDnode +from util.cluster import * + +import time +import socket +import subprocess ,threading +sys.path.append(os.path.dirname(__file__)) + +class TDTestCase: + def init(self,conn ,logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + self.host = socket.gethostname() + self.mnode_list = {} + self.dnode_list = {} + self.ts = 1483200000000 + self.db_name ='testdb' + self.replica = 3 + self.vgroups = 1 + self.tb_nums = 10 + self.row_nums = 2000 + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def check_setup_cluster_status(self): + tdSql.query("show mnodes") + for mnode in tdSql.queryResult: + name = mnode[1] + info = mnode + self.mnode_list[name] = info + + tdSql.query("show dnodes") + for dnode in tdSql.queryResult: + name = dnode[1] + info = dnode + self.dnode_list[name] = info + + count = 0 + is_leader = False + mnode_name = '' + for k,v in self.mnode_list.items(): + count +=1 + # only for 1 mnode + mnode_name = k + + if v[2] =='leader': + is_leader=True + + if count==1 and is_leader: + tdLog.notice("===== depoly cluster success with 1 mnode as leader =====") + else: + tdLog.exit("===== depoly cluster fail with 1 mnode as leader =====") + + for k ,v in self.dnode_list.items(): + if k == mnode_name: + if v[3]==0: + tdLog.notice("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3])) + else: + tdLog.exit("===== depoly cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name,v[3])) + else: + continue + + def create_db_check_vgroups(self): + + tdSql.execute("drop database if exists test") + tdSql.execute("create database if not exists test replica 
1 duration 300") + tdSql.execute("use test") + tdSql.execute( + '''create table stb1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + tags (t1 int) + ''' + ) + tdSql.execute( + ''' + create table t1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + ''' + ) + + for i in range(5): + tdSql.execute("create table sub_tb_{} using stb1 tags({})".format(i,i)) + tdSql.query("show stables") + tdSql.checkRows(1) + tdSql.query("show tables") + tdSql.checkRows(6) + + tdSql.query("show test.vgroups;") + vgroups_infos = {} # key is id: value is info list + for vgroup_info in tdSql.queryResult: + vgroup_id = vgroup_info[0] + tmp_list = [] + for role in vgroup_info[3:-4]: + if role in ['leader','follower']: + tmp_list.append(role) + vgroups_infos[vgroup_id]=tmp_list + + for k , v in vgroups_infos.items(): + if len(v) ==1 and v[0]=="leader": + tdLog.notice(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k)) + else: + tdLog.exit(" === create database replica only 1 role leader check fail of vgroup_id {} ======".format(k)) + + def create_db_replica_3_insertdatas(self, dbname, replica_num ,vgroup_nums ,tb_nums , row_nums ): + newTdSql=tdCom.newTdSql() + drop_db_sql = "drop database if exists {}".format(dbname) + create_db_sql = "create database {} replica {} vgroups {}".format(dbname,replica_num,vgroup_nums) + + tdLog.notice(" ==== create database {} and insert rows begin =====".format(dbname)) + newTdSql.execute(drop_db_sql) + newTdSql.execute(create_db_sql) + newTdSql.execute("use {}".format(dbname)) + newTdSql.execute( + '''create table stb1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) + tags (t1 int) + ''' + ) + newTdSql.execute( + ''' + create table t1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) + ''' + ) + + for i in range(tb_nums): + sub_tbname = "sub_tb_{}".format(i) + newTdSql.execute("create table {} using stb1 tags({})".format(sub_tbname,i)) + # insert datas about new database + + for row_num in range(row_nums): + ts = self.ts + 1000*row_num + newTdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ") + + tdLog.notice(" ==== create database {} and insert rows execute end =====".format(dbname)) + + + + def check_insert_status(self, dbname, tb_nums , row_nums): + newTdSql=tdCom.newTdSql() + newTdSql.execute("use {}".format(dbname)) + newTdSql.query("select count(*) from {}.{}".format(dbname,'stb1')) + # tdSql.checkData(0 , 0 , tb_nums*row_nums) + newTdSql.query("select distinct tbname from {}.{}".format(dbname,'stb1')) + # tdSql.checkRows(tb_nums) + + def loop_query_constantly(self, times , db_name, tb_nums ,row_nums): + + for loop_time in range(times): + tdLog.debug(" === query is going ,this is {}_th query === ".format(loop_time)) + self.check_insert_status( db_name, tb_nums , row_nums) + + def run(self): + self.check_setup_cluster_status() + self.create_db_check_vgroups() + + # start writing constantly + writing = threading.Thread(target = self.create_db_replica_3_insertdatas, args=(self.db_name , self.replica , self.vgroups , self.tb_nums , self.row_nums)) + writing.start() + tdSql.query(" show {}.stables 
".format(self.db_name)) + while not tdSql.queryResult: + print(tdSql.queryResult) + time.sleep(0.1) + tdSql.query(" show {}.stables ".format(self.db_name)) + + reading = threading.Thread(target = self.loop_query_constantly, args=(1000,self.db_name , self.tb_nums , self.row_nums)) + reading.start() + + writing.join() + reading.join() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file From 217b555794509a75a0d0dce6443ab024a785a78c Mon Sep 17 00:00:00 2001 From: wenzhouwww Date: Tue, 26 Jul 2022 13:54:20 +0800 Subject: [PATCH 02/63] Delete max_partition.py --- tests/system-test/2-query/max_partition.py | 230 --------------------- 1 file changed, 230 deletions(-) delete mode 100644 tests/system-test/2-query/max_partition.py diff --git a/tests/system-test/2-query/max_partition.py b/tests/system-test/2-query/max_partition.py deleted file mode 100644 index 2e27a6a787..0000000000 --- a/tests/system-test/2-query/max_partition.py +++ /dev/null @@ -1,230 +0,0 @@ -# author : wenzhouwww -from util.log import * -from util.sql import * -from util.cases import * - -class TDTestCase: - def init(self, conn, logSql): - tdLog.debug("start to execute %s" % __file__) - tdSql.init(conn.cursor()) - - self.row_nums = 10 - self.tb_nums = 10 - self.ts = 1537146000000 - - def prepare_datas(self, stb_name , tb_nums , row_nums ): - tdSql.execute(" use db ") - tdSql.execute(f" create stable {stb_name} (ts timestamp , c1 int , c2 bigint , c3 float , c4 double , c5 smallint , c6 tinyint , c7 bool , c8 binary(36) , c9 nchar(36) , uc1 int unsigned,\ - uc2 bigint unsigned ,uc3 smallint unsigned , uc4 tinyint unsigned ) tags(t1 timestamp , t2 int , t3 bigint , t4 float , t5 double , t6 smallint , t7 tinyint , t8 bool , t9 binary(36)\ - , t10 nchar(36) , t11 int unsigned , t12 bigint unsigned ,t13 smallint unsigned , t14 tinyint unsigned ) ") - - for i in range(tb_nums): - tbname = f"sub_{stb_name}_{i}" - ts = self.ts + i*10000 - tdSql.execute(f"create table {tbname} using {stb_name} tags ({ts} , {i} , {i}*10 ,{i}*1.0,{i}*1.0 , 1 , 2, 'true', 'binary_{i}' ,'nchar_{i}',{i},{i},10,20 )") - - for row in range(row_nums): - ts = self.ts + row*1000 - tdSql.execute(f"insert into {tbname} values({ts} , {row} , {row} , {row} , {row} , 1 , 2 , 'true' , 'binary_{row}' , 'nchar_{row}' , {row} , {row} , 1 ,2 )") - - for null in range(5): - ts = self.ts + row_nums*1000 + null*1000 - tdSql.execute(f"insert into {tbname} values({ts} , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL )") - - def basic_query(self): - tdSql.query("select count(*) from stb") - tdSql.checkData(0,0,(self.row_nums + 5 )*self.tb_nums) - tdSql.query("select max(c1) from stb") - tdSql.checkData(0,0,(self.row_nums -1)) - tdSql.query(" select tbname , max(c1) from stb partition by tbname ") - tdSql.checkRows(self.tb_nums) - tdSql.query(" select max(c1) from stb group by t1 order by t1 ") - tdSql.checkRows(self.tb_nums) - tdSql.query(" select max(c1) from stb group by c1 order by t1 ") - tdSql.query(" select max(t2) from stb group by c1 order by t1 ") - tdSql.query(" select max(c1) from stb group by tbname order by tbname ") - tdSql.checkRows(self.tb_nums) - # bug need fix - tdSql.query(" select max(t2) from stb group by t2 order by t2 ") - tdSql.checkRows(self.tb_nums) - tdSql.query(" select max(c1) from stb group by c1 order by c1 ") - tdSql.checkRows(self.row_nums+1) - - 
tdSql.query(" select c1 , max(c1) from stb group by c1 order by c1 ") - tdSql.checkRows(self.row_nums+1) - - # support selective functions - tdSql.query(" select c1 ,c2 ,c3 , max(c1) ,c4 ,c5 ,t11 from stb group by c1 order by c1 desc ") - tdSql.checkRows(self.row_nums+1) - - tdSql.query(" select c1, tbname , max(c1) ,c4 ,c5 ,t11 from stb group by c1 order by c1 desc ") - tdSql.checkRows(self.row_nums+1) - - # bug need fix - # tdSql.query(" select tbname , max(c1) from sub_stb_1 where c1 is null group by c1 order by c1 desc ") - # tdSql.checkRows(1) - # tdSql.checkData(0,0,"sub_stb_1") - - tdSql.query("select max(c1) ,c2 ,t2,tbname from stb group by abs(c1) order by abs(c1)") - tdSql.checkRows(self.row_nums+1) - tdSql.query("select abs(c1+c3), count(c1+c3) ,max(c1+t2) from stb group by abs(c1+c3) order by abs(c1+c3)") - tdSql.checkRows(self.row_nums+1) - tdSql.query("select max(c1+c3)+min(c2) ,abs(c1) from stb group by abs(c1) order by abs(c1)") - tdSql.checkRows(self.row_nums+1) - tdSql.error("select count(c1+c3)+max(c2) ,abs(c1) ,abs(t1) from stb group by abs(c1) order by abs(t1)+c2") - tdSql.error("select count(c1+c3)+max(c2) ,abs(c1) from stb group by abs(c1) order by abs(c1)+c2") - tdSql.query("select abs(c1+c3)+abs(c2) , count(c1+c3)+max(c2) from stb group by abs(c1+c3)+abs(c2) order by abs(c1+c3)+abs(c2)") - tdSql.checkRows(self.row_nums+1) - - tdSql.query(" select max(c1) , max(t2) from stb where abs(c1+t2)=1 partition by tbname ") - tdSql.checkRows(2) - tdSql.query(" select max(c1) from stb where abs(c1+t2)=1 partition by tbname ") - tdSql.checkRows(2) - - tdSql.query(" select tbname , max(c1) from stb partition by tbname order by tbname ") - tdSql.checkRows(self.tb_nums) - tdSql.checkData(0,1,self.row_nums-1) - - tdSql.query("select tbname , max(c2) from stb partition by t1 order by t1") - tdSql.query("select tbname , max(t2) from stb partition by t1 order by t1") - tdSql.query("select tbname , max(t2) from stb partition by t2 order by t2") - - # # bug need fix - tdSql.query("select t2 , max(t2) from stb partition by t2 order by t2") - tdSql.checkRows(self.tb_nums) - - tdSql.query("select tbname , max(c1) from stb partition by tbname order by tbname") - tdSql.checkRows(self.tb_nums) - tdSql.checkData(0,1,self.row_nums-1) - - - tdSql.query("select tbname , max(c1) from stb partition by t2 order by t2") - - tdSql.query("select c2, max(c1) from stb partition by c2 order by c2 desc") - tdSql.checkRows(self.tb_nums+1) - tdSql.checkData(0,1,self.row_nums-1) - - tdSql.query("select tbname , max(c1) from stb partition by c1 order by c2") - - - tdSql.query("select tbname , abs(t2) from stb partition by c2 order by t2") - tdSql.checkRows(self.tb_nums*(self.row_nums+5)) - - tdSql.query("select max(c1) , count(t2) from stb partition by c2 ") - tdSql.checkRows(self.row_nums+1) - tdSql.checkData(0,1,self.row_nums) - - tdSql.query("select count(c1) , max(t2) ,c2 from stb partition by c2 order by c2") - tdSql.checkRows(self.row_nums+1) - - tdSql.query("select count(c1) , count(t1) ,max(c2) ,tbname from stb partition by tbname order by tbname") - tdSql.checkRows(self.tb_nums) - tdSql.checkCols(4) - - tdSql.query("select count(c1) , max(t2) ,t1 from stb partition by t1 order by t1") - tdSql.checkRows(self.tb_nums) - tdSql.checkData(0,0,self.row_nums) - - # bug need fix - tdSql.query("select count(c1) , max(t2) ,abs(c1) from stb partition by abs(c1) order by abs(c1)") - tdSql.checkRows(self.row_nums+1) - - - tdSql.query("select max(ceil(c2)) , max(floor(t2)) ,max(floor(c2)) from stb partition by 
abs(c2) order by abs(c2)") - tdSql.checkRows(self.row_nums+1) - - - tdSql.query("select max(ceil(c1-2)) , max(floor(t2+1)) ,max(c2-c1) from stb partition by abs(floor(c1)) order by abs(floor(c1))") - tdSql.checkRows(self.row_nums+1) - - tdSql.query("select tbname , max(c1) ,c1 from stb partition by tbname order by tbname") - tdSql.checkRows(self.tb_nums) - tdSql.checkData(0,0,'sub_stb_0') - tdSql.checkData(0,1,9) - tdSql.checkData(0,2,9) - - tdSql.query("select tbname ,top(c1,1) ,c1 from stb partition by tbname order by tbname") - tdSql.checkRows(self.tb_nums) - - tdSql.query(" select c1 , sample(c1,2) from stb partition by tbname order by tbname ") - tdSql.checkRows(self.tb_nums*2) - - - # interval - tdSql.query("select max(c1) from stb interval(2s) sliding(1s)") - - # bug need fix - - tdSql.query('select max(c1) from stb where ts>="2022-07-06 16:00:00.000 " and ts < "2022-07-06 17:00:00.000 " interval(50s) sliding(30s) fill(NULL)') - - tdSql.query(" select tbname , count(c1) from stb partition by tbname interval(10s) slimit 5 soffset 1 ") - - tdSql.query("select tbname , max(c1) from stb partition by tbname interval(10s)") - tdSql.checkRows(self.row_nums*2) - - tdSql.query("select unique(c1) from stb partition by tbname order by tbname") - - tdSql.query("select tbname , count(c1) from sub_stb_1 partition by tbname interval(10s)") - tdSql.checkData(0,0,'sub_stb_1') - tdSql.checkData(0,1,self.row_nums) - - tdSql.query("select c1 , mavg(c1 ,2 ) from stb partition by c1") - tdSql.checkRows(90) - - tdSql.query("select c1 , diff(c1 , 0) from stb partition by c1") - tdSql.checkRows(90) - - tdSql.query("select c1 , csum(c1) from stb partition by c1") - tdSql.checkRows(100) - - tdSql.query("select c1 , sample(c1,2) from stb partition by c1 order by c1") - tdSql.checkRows(21) - # bug need fix - # tdSql.checkData(0,1,None) - - tdSql.query("select c1 , twa(c1) from stb partition by c1 order by c1") - tdSql.checkRows(11) - tdSql.checkData(0,1,None) - - tdSql.query("select c1 , irate(c1) from stb partition by c1 order by c1") - tdSql.checkRows(11) - tdSql.checkData(0,1,None) - - tdSql.query("select c1 , DERIVATIVE(c1,2,1) from stb partition by c1 order by c1") - tdSql.checkRows(90) - # bug need fix - # tdSql.checkData(0,1,None) - - # bug need fix - # tdSql.query(" select tbname , max(c1) from stb partition by tbname order by tbname slimit 5 soffset 0 ") - # tdSql.checkRows(5) - - # tdSql.query(" select tbname , max(c1) from stb partition by tbname order by tbname slimit 5 soffset 1 ") - # tdSql.checkRows(5) - - tdSql.query(" select tbname , max(c1) from sub_stb_1 partition by tbname interval(10s) sliding(5s) ") - - tdSql.query(f'select max(c1) from stb where ts>={self.ts} and ts < {self.ts}+1000 interval(50s) sliding(30s)') - tdSql.query(f'select tbname , max(c1) from stb where ts>={self.ts} and ts < {self.ts}+1000 interval(50s) sliding(30s)') - - - def run(self): - tdSql.prepare() - self.prepare_datas("stb",self.tb_nums,self.row_nums) - self.basic_query() - - # # coverage case for taosd crash about bug fix - tdSql.query(" select sum(c1) from stb where t2+10 >1 ") - tdSql.query(" select count(c1),count(t1) from stb where -t2<1 ") - tdSql.query(" select tbname ,max(ceil(c1)) from stb group by tbname ") - tdSql.query(" select avg(abs(c1)) , tbname from stb group by tbname ") - tdSql.query(" select t1,c1 from stb where abs(t2+c1)=1 ") - - - def stop(self): - tdSql.close() - tdLog.success("%s successfully executed" % __file__) - - -tdCases.addWindows(__file__, TDTestCase()) -tdCases.addLinux(__file__, 
TDTestCase()) From f0e6fe0fb4eedeb91a54e5bd5e4f645cb9966f88 Mon Sep 17 00:00:00 2001 From: "wenzhouwww@live.cn" Date: Tue, 26 Jul 2022 16:53:26 +0800 Subject: [PATCH 03/63] test case update --- ...sertdatas_querys_loop_restart_all_vnode.py | 354 ++++++++++++++++++ ...nsertdatas_querys_loop_restart_follower.py | 300 +++++++++++++++ ..._insertdatas_querys_loop_restart_leader.py | 354 ++++++++++++++++++ ...replica3_insertdatas_stop_follower_sync.py | 6 +- ...ca3_insertdatas_stop_leader_forece_stop.py | 1 + ...e_basic_replica3_querydatas_stop_leader.py | 24 +- 6 files changed, 1025 insertions(+), 14 deletions(-) create mode 100644 tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_all_vnode.py create mode 100644 tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_follower.py create mode 100644 tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_leader.py diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_all_vnode.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_all_vnode.py new file mode 100644 index 0000000000..d693e0c38d --- /dev/null +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_all_vnode.py @@ -0,0 +1,354 @@ +# author : wenzhouwww +from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE +import taos +import sys +import time +import os + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import TDDnodes +from util.dnodes import TDDnode +from util.cluster import * + +import time +import socket +import subprocess ,threading +sys.path.append(os.path.dirname(__file__)) + +class TDTestCase: + def init(self,conn ,logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + self.host = socket.gethostname() + self.mnode_list = {} + self.dnode_list = {} + self.ts = 1483200000000 + self.db_name ='testdb' + self.replica = 3 + self.vgroups = 1 + self.tb_nums = 10 + self.row_nums = 1000 + self.max_restart_time = 20 + self.restart_server_times = 10 + self.dnode_index = 0 + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def check_setup_cluster_status(self): + tdSql.query("show mnodes") + for mnode in tdSql.queryResult: + name = mnode[1] + info = mnode + self.mnode_list[name] = info + + tdSql.query("show dnodes") + for dnode in tdSql.queryResult: + name = dnode[1] + info = dnode + self.dnode_list[name] = info + + count = 0 + is_leader = False + mnode_name = '' + for k,v in self.mnode_list.items(): + count +=1 + # only for 1 mnode + mnode_name = k + + if v[2] =='leader': + is_leader=True + + if count==1 and is_leader: + tdLog.notice("===== depoly cluster success with 1 mnode as leader =====") + else: + tdLog.exit("===== depoly cluster fail with 1 mnode as leader =====") + + for k ,v in self.dnode_list.items(): + if k == mnode_name: + if v[3]==0: + tdLog.notice("===== depoly cluster mnode only success at {} , support_vnodes is {} 
".format(mnode_name,v[3])) + else: + tdLog.exit("===== depoly cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name,v[3])) + else: + continue + + def create_db_check_vgroups(self): + + tdSql.execute("drop database if exists test") + tdSql.execute("create database if not exists test replica 1 duration 300") + tdSql.execute("use test") + tdSql.execute( + '''create table stb1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + tags (t1 int) + ''' + ) + tdSql.execute( + ''' + create table t1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + ''' + ) + + for i in range(5): + tdSql.execute("create table sub_tb_{} using stb1 tags({})".format(i,i)) + tdSql.query("show stables") + tdSql.checkRows(1) + tdSql.query("show tables") + tdSql.checkRows(6) + + tdSql.query("show test.vgroups;") + vgroups_infos = {} # key is id: value is info list + + for vgroup_info in tdSql.queryResult: + vgroup_id = vgroup_info[0] + tmp_list = [] + for role in vgroup_info[3:-4]: + if role in ['leader','follower']: + tmp_list.append(role) + vgroups_infos[vgroup_id]=tmp_list + + for k , v in vgroups_infos.items(): + if len(v) ==1 and v[0]=="leader": + tdLog.notice(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k)) + else: + tdLog.exit(" === create database replica only 1 role leader check fail of vgroup_id {} ======".format(k)) + + def create_db_replica_3_insertdatas(self, dbname, replica_num ,vgroup_nums ,tb_nums , row_nums ): + + newTdSql=tdCom.newTdSql() + drop_db_sql = "drop database if exists {}".format(dbname) + create_db_sql = "create database {} replica {} vgroups {}".format(dbname,replica_num,vgroup_nums) + + tdLog.notice(" ==== create database {} and insert rows begin =====".format(dbname)) + newTdSql.execute(drop_db_sql) + newTdSql.execute(create_db_sql) + newTdSql.execute("use {}".format(dbname)) + newTdSql.execute( + '''create table stb1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) + tags (t1 int) + ''' + ) + newTdSql.execute( + ''' + create table t1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) + ''' + ) + + for i in range(tb_nums): + sub_tbname = "sub_tb_{}".format(i) + newTdSql.execute("create table {} using stb1 tags({})".format(sub_tbname,i)) + # insert datas about new database + + for row_num in range(row_nums): + if row_num % (int(row_nums*0.1)) == 0 : + tdLog.notice( " === database {} writing records {} rows".format(dbname , row_num ) ) + ts = self.ts + 1000*row_num + + newTdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ") + + tdLog.notice(" ==== create database {} and insert rows execute end =====".format(dbname)) + + + def _get_stop_dnode_id(self): + + + dnode_lists = list(set(self.dnode_list.keys()) -set(self.mnode_list.keys())) + # print(dnode_lists) + self.stop_dnode_id = self.dnode_list[dnode_lists[self.dnode_index % 3]][0] + + self.dnode_index += 1 + + return self.stop_dnode_id + + def wait_stop_dnode_OK(self , newTdSql): + + def _get_status(): + # newTdSql=tdCom.newTdSql() + + status = "" + newTdSql.query("show dnodes") + dnode_infos = newTdSql.queryResult + for dnode_info in dnode_infos: + 
id = dnode_info[0] + dnode_status = dnode_info[4] + if id == self.stop_dnode_id: + status = dnode_status + break + return status + + status = _get_status() + + while status !="offline": + time.sleep(0.1) + status = _get_status() + # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode)) + tdLog.notice("==== stop_dnode has stopped , id is {} ====".format(self.stop_dnode_id)) + + def wait_start_dnode_OK(self , newTdSql ): + + def _get_status(): + # newTdSql=tdCom.newTdSql() + status = "" + newTdSql.query("show dnodes") + dnode_infos = newTdSql.queryResult + for dnode_info in dnode_infos: + id = dnode_info[0] + dnode_status = dnode_info[4] + if id == self.stop_dnode_id: + status = dnode_status + break + return status + + status = _get_status() + while status !="ready": + time.sleep(0.1) + status = _get_status() + # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode)) + tdLog.notice("==== stop_dnode has restart , id is {} ====".format(self.stop_dnode_id)) + + def get_leader_infos(self , newTdSql , dbname): + + # newTdSql=tdCom.newTdSql() + newTdSql.query("show {}.vgroups".format(dbname)) + vgroup_infos = newTdSql.queryResult + + leader_infos = set() + for vgroup_info in vgroup_infos: + leader_infos.add(vgroup_info[3:-4]) + + return leader_infos + + def check_revote_leader_success(self, dbname, before_leader_infos , after_leader_infos): + + check_status = False + vote_act = set(set(after_leader_infos)-set(before_leader_infos)) + if not vote_act: + print("=======before_revote_leader_infos ======\n" , before_leader_infos) + print("=======after_revote_leader_infos ======\n" , after_leader_infos) + tdLog.exit(" ===maybe revote not occured , there is no dnode offline ====") + else: + for vgroup_info in vote_act: + for ind , role in enumerate(vgroup_info): + if role==self.stop_dnode_id: + + if vgroup_info[ind+1] =="offline" and "leader" in vgroup_info: + tdLog.notice(" === revote leader ok , leader is {} now ====".format(vgroup_info[list(vgroup_info).index("leader")-1])) + check_status = True + elif vgroup_info[ind+1] !="offline": + tdLog.notice(" === dnode {} should be offline ".format(self.stop_dnode_id)) + else: + continue + break + return check_status + + + def check_insert_status(self, newTdSql , dbname, tb_nums , row_nums): + + newTdSql.execute("use {}".format(dbname)) + newTdSql.query("select count(*) from {}.{}".format(dbname,'stb1')) + # tdSql.checkData(0 , 0 , tb_nums*row_nums) + newTdSql.query("select distinct tbname from {}.{}".format(dbname,'stb1')) + # tdSql.checkRows(tb_nums) + + def loop_query_constantly(self, times , db_name, tb_nums ,row_nums): + + newTdSql=tdCom.newTdSql() + for loop_time in range(times): + tdLog.debug(" === query is going ,this is {}_th query === ".format(loop_time)) + + self.check_insert_status( newTdSql ,db_name, tb_nums , row_nums) + + + def loop_restart_follower_constantly(self, times , db_name): + + tdDnodes = cluster.dnodes + newTdSql=tdCom.newTdSql() + + for loop_time in range(times): + + self.stop_dnode_id = self._get_stop_dnode_id() + + # print(self.stop_dnode_id) + # begin stop dnode + + # before_leader_infos = self.get_leader_infos( newTdSql ,db_name) + tdDnodes[self.stop_dnode_id-1].stoptaosd() + self.wait_stop_dnode_OK(newTdSql) + + # start = time.time() + # # get leader info after stop + # after_leader_infos = self.get_leader_infos(newTdSql , db_name) + + # revote_status = self.check_revote_leader_success(db_name ,before_leader_infos , after_leader_infos) + + # # append 
rows of stablename when dnode stop make sure revote leaders + + # while not revote_status: + # after_leader_infos = self.get_leader_infos(newTdSql , db_name) + # revote_status = self.check_revote_leader_success(db_name ,before_leader_infos , after_leader_infos) + + # end = time.time() + # time_cost = end - start + # tdLog.notice(" ==== revote leader of database {} cost time {} ====".format(db_name , time_cost)) + + tdLog.notice(" === this is {}_th restart taosd === ".format(loop_time)) + + # begin start dnode + tdDnodes[self.stop_dnode_id-1].starttaosd() + self.wait_start_dnode_OK(newTdSql) + time.sleep(5) + end = time.time() + time_cost = int(end -start) + if time_cost > self.max_restart_time: + tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id)) + + + + def run(self): + + self.check_setup_cluster_status() + self.create_db_check_vgroups() + + # start writing constantly + writing = threading.Thread(target = self.create_db_replica_3_insertdatas, args=(self.db_name , self.replica , self.vgroups , self.tb_nums , self.row_nums)) + writing.start() + tdSql.query(" show {}.stables ".format(self.db_name)) + while not tdSql.queryResult: + print(tdSql.queryResult) + time.sleep(0.1) + tdSql.query(" show {}.stables ".format(self.db_name)) + + restart_servers = threading.Thread(target = self.loop_restart_follower_constantly, args = (self.restart_server_times ,self.db_name)) + restart_servers.start() + + # reading = threading.Thread(target = self.loop_query_constantly, args=(1000,self.db_name , self.tb_nums , self.row_nums)) + # reading.start() + + writing.join() + # reading.join() + restart_servers.join() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_follower.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_follower.py new file mode 100644 index 0000000000..edf2c7b488 --- /dev/null +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_follower.py @@ -0,0 +1,300 @@ +# author : wenzhouwww +from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE +import taos +import sys +import time +import os + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import TDDnodes +from util.dnodes import TDDnode +from util.cluster import * + +import time +import socket +import subprocess ,threading +sys.path.append(os.path.dirname(__file__)) + +class TDTestCase: + def init(self,conn ,logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + self.host = socket.gethostname() + self.mnode_list = {} + self.dnode_list = {} + self.ts = 1483200000000 + self.db_name ='testdb' + self.replica = 3 + self.vgroups = 10 + self.tb_nums = 10 + self.row_nums = 1000 + self.max_restart_time = 20 + self.restart_server_times = 10 + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + 
break + return buildPath + + def check_setup_cluster_status(self): + tdSql.query("show mnodes") + for mnode in tdSql.queryResult: + name = mnode[1] + info = mnode + self.mnode_list[name] = info + + tdSql.query("show dnodes") + for dnode in tdSql.queryResult: + name = dnode[1] + info = dnode + self.dnode_list[name] = info + + count = 0 + is_leader = False + mnode_name = '' + for k,v in self.mnode_list.items(): + count +=1 + # only for 1 mnode + mnode_name = k + + if v[2] =='leader': + is_leader=True + + if count==1 and is_leader: + tdLog.notice("===== depoly cluster success with 1 mnode as leader =====") + else: + tdLog.exit("===== depoly cluster fail with 1 mnode as leader =====") + + for k ,v in self.dnode_list.items(): + if k == mnode_name: + if v[3]==0: + tdLog.notice("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3])) + else: + tdLog.exit("===== depoly cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name,v[3])) + else: + continue + + def create_db_check_vgroups(self): + + tdSql.execute("drop database if exists test") + tdSql.execute("create database if not exists test replica 1 duration 300") + tdSql.execute("use test") + tdSql.execute( + '''create table stb1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + tags (t1 int) + ''' + ) + tdSql.execute( + ''' + create table t1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + ''' + ) + + for i in range(5): + tdSql.execute("create table sub_tb_{} using stb1 tags({})".format(i,i)) + tdSql.query("show stables") + tdSql.checkRows(1) + tdSql.query("show tables") + tdSql.checkRows(6) + + tdSql.query("show test.vgroups;") + vgroups_infos = {} # key is id: value is info list + for vgroup_info in tdSql.queryResult: + vgroup_id = vgroup_info[0] + tmp_list = [] + for role in vgroup_info[3:-4]: + if role in ['leader','follower']: + tmp_list.append(role) + vgroups_infos[vgroup_id]=tmp_list + + for k , v in vgroups_infos.items(): + if len(v) ==1 and v[0]=="leader": + tdLog.notice(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k)) + else: + tdLog.exit(" === create database replica only 1 role leader check fail of vgroup_id {} ======".format(k)) + + def create_db_replica_3_insertdatas(self, dbname, replica_num ,vgroup_nums ,tb_nums , row_nums ): + newTdSql=tdCom.newTdSql() + drop_db_sql = "drop database if exists {}".format(dbname) + create_db_sql = "create database {} replica {} vgroups {}".format(dbname,replica_num,vgroup_nums) + + tdLog.notice(" ==== create database {} and insert rows begin =====".format(dbname)) + newTdSql.execute(drop_db_sql) + newTdSql.execute(create_db_sql) + newTdSql.execute("use {}".format(dbname)) + newTdSql.execute( + '''create table stb1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) + tags (t1 int) + ''' + ) + newTdSql.execute( + ''' + create table t1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) + ''' + ) + + for i in range(tb_nums): + sub_tbname = "sub_tb_{}".format(i) + newTdSql.execute("create table {} using stb1 tags({})".format(sub_tbname,i)) + # insert datas about new database + + for row_num in range(row_nums): + if row_num % (int(row_nums*0.1)) == 0 : + tdLog.notice( 
" === database {} writing records {} rows".format(dbname , row_num ) ) + ts = self.ts + 1000*row_num + newTdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ") + + tdLog.notice(" ==== create database {} and insert rows execute end =====".format(dbname)) + + + def _get_stop_dnode_id(self , dbname , dnode_role): + + tdSql.query("show {}.vgroups".format(dbname)) + vgroup_infos = tdSql.queryResult + for vgroup_info in vgroup_infos: + leader_infos = vgroup_info[3:-4] + # print(vgroup_info) + for ind ,role in enumerate(leader_infos): + if role == dnode_role: + # print(ind,leader_infos) + self.stop_dnode_id = leader_infos[ind-1] + break + + return self.stop_dnode_id + + def wait_stop_dnode_OK(self , newTdSql): + + def _get_status(): + # newTdSql=tdCom.newTdSql() + + status = "" + newTdSql.query("show dnodes") + dnode_infos = newTdSql.queryResult + for dnode_info in dnode_infos: + id = dnode_info[0] + dnode_status = dnode_info[4] + if id == self.stop_dnode_id: + status = dnode_status + break + return status + + status = _get_status() + + while status !="offline": + time.sleep(0.1) + status = _get_status() + # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode)) + tdLog.notice("==== stop_dnode has stopped , id is {} ====".format(self.stop_dnode_id)) + + def wait_start_dnode_OK(self , newTdSql ): + + def _get_status(): + # newTdSql=tdCom.newTdSql() + status = "" + newTdSql.query("show dnodes") + dnode_infos = newTdSql.queryResult + for dnode_info in dnode_infos: + id = dnode_info[0] + dnode_status = dnode_info[4] + if id == self.stop_dnode_id: + status = dnode_status + break + return status + + status = _get_status() + while status !="ready": + time.sleep(0.1) + status = _get_status() + # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode)) + tdLog.notice("==== stop_dnode has restart , id is {} ====".format(self.stop_dnode_id)) + + def check_insert_status(self, newTdSql , dbname, tb_nums , row_nums): + + newTdSql.execute("use {}".format(dbname)) + newTdSql.query("select count(*) from {}.{}".format(dbname,'stb1')) + # tdSql.checkData(0 , 0 , tb_nums*row_nums) + newTdSql.query("select distinct tbname from {}.{}".format(dbname,'stb1')) + # tdSql.checkRows(tb_nums) + + def loop_query_constantly(self, times , db_name, tb_nums ,row_nums): + + newTdSql=tdCom.newTdSql() + for loop_time in range(times): + tdLog.debug(" === query is going ,this is {}_th query === ".format(loop_time)) + + self.check_insert_status( newTdSql ,db_name, tb_nums , row_nums) + + + def loop_restart_follower_constantly(self, times , db_name): + + tdDnodes = cluster.dnodes + newTdSql=tdCom.newTdSql() + + for loop_time in range(times): + + self.stop_dnode_id = self._get_stop_dnode_id(db_name , "follower") + + print(self.stop_dnode_id) + # begin stop dnode + start = time.time() + tdDnodes[self.stop_dnode_id-1].stoptaosd() + self.wait_stop_dnode_OK(newTdSql) + + tdLog.notice(" === this is {}_th restart taosd === ".format(loop_time)) + + # begin start dnode + tdDnodes[self.stop_dnode_id-1].starttaosd() + self.wait_start_dnode_OK(newTdSql) + end = time.time() + time_cost = int(end -start) + if time_cost > self.max_restart_time: + tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id)) + + + + def run(self): + + self.check_setup_cluster_status() + self.create_db_check_vgroups() + + # start writing constantly + writing 
= threading.Thread(target = self.create_db_replica_3_insertdatas, args=(self.db_name , self.replica , self.vgroups , self.tb_nums , self.row_nums)) + writing.start() + tdSql.query(" show {}.stables ".format(self.db_name)) + while not tdSql.queryResult: + print(tdSql.queryResult) + time.sleep(0.1) + tdSql.query(" show {}.stables ".format(self.db_name)) + + restart_servers = threading.Thread(target = self.loop_restart_follower_constantly, args = (self.restart_server_times ,self.db_name)) + restart_servers.start() + + reading = threading.Thread(target = self.loop_query_constantly, args=(1000,self.db_name , self.tb_nums , self.row_nums)) + reading.start() + + writing.join() + reading.join() + restart_servers.join() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_leader.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_leader.py new file mode 100644 index 0000000000..3d2be11b90 --- /dev/null +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_leader.py @@ -0,0 +1,354 @@ +# author : wenzhouwww +from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE +import taos +import sys +import time +import os + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import TDDnodes +from util.dnodes import TDDnode +from util.cluster import * + +import time +import socket +import subprocess ,threading +sys.path.append(os.path.dirname(__file__)) + +class TDTestCase: + def init(self,conn ,logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + self.host = socket.gethostname() + self.mnode_list = {} + self.dnode_list = {} + self.ts = 1483200000000 + self.db_name ='testdb' + self.replica = 3 + self.vgroups = 10 + self.tb_nums = 10 + self.row_nums = 1000 + self.max_restart_time = 20 + self.restart_server_times = 10 + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def check_setup_cluster_status(self): + tdSql.query("show mnodes") + for mnode in tdSql.queryResult: + name = mnode[1] + info = mnode + self.mnode_list[name] = info + + tdSql.query("show dnodes") + for dnode in tdSql.queryResult: + name = dnode[1] + info = dnode + self.dnode_list[name] = info + + count = 0 + is_leader = False + mnode_name = '' + for k,v in self.mnode_list.items(): + count +=1 + # only for 1 mnode + mnode_name = k + + if v[2] =='leader': + is_leader=True + + if count==1 and is_leader: + tdLog.notice("===== depoly cluster success with 1 mnode as leader =====") + else: + tdLog.exit("===== depoly cluster fail with 1 mnode as leader =====") + + for k ,v in self.dnode_list.items(): + if k == mnode_name: + if v[3]==0: + tdLog.notice("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3])) + else: + tdLog.exit("===== depoly cluster mnode only fail at {} , 
support_vnodes is {} ".format(mnode_name,v[3])) + else: + continue + + def create_db_check_vgroups(self): + + tdSql.execute("drop database if exists test") + tdSql.execute("create database if not exists test replica 1 duration 300") + tdSql.execute("use test") + tdSql.execute( + '''create table stb1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + tags (t1 int) + ''' + ) + tdSql.execute( + ''' + create table t1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + ''' + ) + + for i in range(5): + tdSql.execute("create table sub_tb_{} using stb1 tags({})".format(i,i)) + tdSql.query("show stables") + tdSql.checkRows(1) + tdSql.query("show tables") + tdSql.checkRows(6) + + tdSql.query("show test.vgroups;") + vgroups_infos = {} # key is id: value is info list + for vgroup_info in tdSql.queryResult: + vgroup_id = vgroup_info[0] + tmp_list = [] + for role in vgroup_info[3:-4]: + if role in ['leader','follower']: + tmp_list.append(role) + vgroups_infos[vgroup_id]=tmp_list + + for k , v in vgroups_infos.items(): + if len(v) ==1 and v[0]=="leader": + tdLog.notice(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k)) + else: + tdLog.exit(" === create database replica only 1 role leader check fail of vgroup_id {} ======".format(k)) + + def create_db_replica_3_insertdatas(self, dbname, replica_num ,vgroup_nums ,tb_nums , row_nums ): + newTdSql=tdCom.newTdSql() + drop_db_sql = "drop database if exists {}".format(dbname) + create_db_sql = "create database {} replica {} vgroups {}".format(dbname,replica_num,vgroup_nums) + + tdLog.notice(" ==== create database {} and insert rows begin =====".format(dbname)) + newTdSql.execute(drop_db_sql) + newTdSql.execute(create_db_sql) + newTdSql.execute("use {}".format(dbname)) + newTdSql.execute( + '''create table stb1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) + tags (t1 int) + ''' + ) + newTdSql.execute( + ''' + create table t1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) + ''' + ) + + for i in range(tb_nums): + sub_tbname = "sub_tb_{}".format(i) + newTdSql.execute("create table {} using stb1 tags({})".format(sub_tbname,i)) + # insert datas about new database + + for row_num in range(row_nums): + if row_num % (int(row_nums*0.1)) == 0 : + tdLog.notice( " === database {} writing records {} rows".format(dbname , row_num ) ) + ts = self.ts + 1000*row_num + newTdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ") + + tdLog.notice(" ==== create database {} and insert rows execute end =====".format(dbname)) + + + def _get_stop_dnode_id(self , dbname , dnode_role): + + tdSql.query("show {}.vgroups".format(dbname)) + vgroup_infos = tdSql.queryResult + for vgroup_info in vgroup_infos: + leader_infos = vgroup_info[3:-4] + # print(vgroup_info) + for ind ,role in enumerate(leader_infos): + if role == dnode_role: + # print(ind,leader_infos) + self.stop_dnode_id = leader_infos[ind-1] + break + + return self.stop_dnode_id + + def wait_stop_dnode_OK(self , newTdSql): + + def _get_status(): + # newTdSql=tdCom.newTdSql() + + status = "" + newTdSql.query("show dnodes") + dnode_infos = 
newTdSql.queryResult + for dnode_info in dnode_infos: + id = dnode_info[0] + dnode_status = dnode_info[4] + if id == self.stop_dnode_id: + status = dnode_status + break + return status + + status = _get_status() + + while status !="offline": + time.sleep(0.1) + status = _get_status() + # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode)) + tdLog.notice("==== stop_dnode has stopped , id is {} ====".format(self.stop_dnode_id)) + + def wait_start_dnode_OK(self , newTdSql ): + + def _get_status(): + # newTdSql=tdCom.newTdSql() + status = "" + newTdSql.query("show dnodes") + dnode_infos = newTdSql.queryResult + for dnode_info in dnode_infos: + id = dnode_info[0] + dnode_status = dnode_info[4] + if id == self.stop_dnode_id: + status = dnode_status + break + return status + + status = _get_status() + while status !="ready": + time.sleep(0.1) + status = _get_status() + # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode)) + tdLog.notice("==== stop_dnode has restart , id is {} ====".format(self.stop_dnode_id)) + + def get_leader_infos(self , newTdSql , dbname): + + # newTdSql=tdCom.newTdSql() + newTdSql.query("show {}.vgroups".format(dbname)) + vgroup_infos = newTdSql.queryResult + + leader_infos = set() + for vgroup_info in vgroup_infos: + leader_infos.add(vgroup_info[3:-4]) + + return leader_infos + + def check_revote_leader_success(self, dbname, before_leader_infos , after_leader_infos): + + check_status = False + vote_act = set(set(after_leader_infos)-set(before_leader_infos)) + if not vote_act: + print("=======before_revote_leader_infos ======\n" , before_leader_infos) + print("=======after_revote_leader_infos ======\n" , after_leader_infos) + tdLog.exit(" ===maybe revote not occured , there is no dnode offline ====") + else: + for vgroup_info in vote_act: + for ind , role in enumerate(vgroup_info): + if role==self.stop_dnode_id: + + if vgroup_info[ind+1] =="offline" and "leader" in vgroup_info: + tdLog.notice(" === revote leader ok , leader is {} now ====".format(vgroup_info[list(vgroup_info).index("leader")-1])) + check_status = True + elif vgroup_info[ind+1] !="offline": + tdLog.notice(" === dnode {} should be offline ".format(self.stop_dnode_id)) + else: + continue + break + return check_status + + + def check_insert_status(self, newTdSql , dbname, tb_nums , row_nums): + + newTdSql.execute("use {}".format(dbname)) + newTdSql.query("select count(*) from {}.{}".format(dbname,'stb1')) + # tdSql.checkData(0 , 0 , tb_nums*row_nums) + newTdSql.query("select distinct tbname from {}.{}".format(dbname,'stb1')) + # tdSql.checkRows(tb_nums) + + def loop_query_constantly(self, times , db_name, tb_nums ,row_nums): + + newTdSql=tdCom.newTdSql() + for loop_time in range(times): + tdLog.debug(" === query is going ,this is {}_th query === ".format(loop_time)) + + self.check_insert_status( newTdSql ,db_name, tb_nums , row_nums) + + + def loop_restart_follower_constantly(self, times , db_name): + + tdDnodes = cluster.dnodes + newTdSql=tdCom.newTdSql() + + for loop_time in range(times): + + self.stop_dnode_id = self._get_stop_dnode_id(db_name , "leader") + + # print(self.stop_dnode_id) + # begin stop dnode + start = time.time() + + before_leader_infos = self.get_leader_infos( newTdSql ,db_name) + tdDnodes[self.stop_dnode_id-1].stoptaosd() + self.wait_stop_dnode_OK(newTdSql) + + start = time.time() + # get leader info after stop + after_leader_infos = self.get_leader_infos(newTdSql , db_name) + + revote_status = 
self.check_revote_leader_success(db_name ,before_leader_infos , after_leader_infos) + + # append rows of stablename when dnode stop make sure revote leaders + + while not revote_status: + after_leader_infos = self.get_leader_infos(newTdSql , db_name) + revote_status = self.check_revote_leader_success(db_name ,before_leader_infos , after_leader_infos) + + end = time.time() + time_cost = end - start + tdLog.notice(" ==== revote leader of database {} cost time {} ====".format(db_name , time_cost)) + + tdLog.notice(" === this is {}_th restart taosd === ".format(loop_time)) + + # begin start dnode + tdDnodes[self.stop_dnode_id-1].starttaosd() + self.wait_start_dnode_OK(newTdSql) + end = time.time() + time_cost = int(end -start) + if time_cost > self.max_restart_time: + tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id)) + + + + def run(self): + + self.check_setup_cluster_status() + self.create_db_check_vgroups() + + # start writing constantly + writing = threading.Thread(target = self.create_db_replica_3_insertdatas, args=(self.db_name , self.replica , self.vgroups , self.tb_nums , self.row_nums)) + writing.start() + tdSql.query(" show {}.stables ".format(self.db_name)) + while not tdSql.queryResult: + print(tdSql.queryResult) + time.sleep(0.1) + tdSql.query(" show {}.stables ".format(self.db_name)) + + restart_servers = threading.Thread(target = self.loop_restart_follower_constantly, args = (self.restart_server_times ,self.db_name)) + restart_servers.start() + + reading = threading.Thread(target = self.loop_query_constantly, args=(1000,self.db_name , self.tb_nums , self.row_nums)) + reading.start() + + writing.join() + reading.join() + restart_servers.join() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_sync.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_sync.py index 3d6b548bdd..58606ee67f 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_sync.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_sync.py @@ -220,14 +220,16 @@ class TDTestCase: status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums) tdLog.debug(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count , dbname)) count += 1 - def _get_stop_dnode_id(self,dbname): + + def _get_stop_dnode_id(self , dbname , role): + tdSql.query("show {}.vgroups".format(dbname)) vgroup_infos = tdSql.queryResult for vgroup_info in vgroup_infos: leader_infos = vgroup_info[3:-4] # print(vgroup_info) for ind ,role in enumerate(leader_infos): - if role =='follower': + if role == role: # print(ind,leader_infos) self.stop_dnode_id = leader_infos[ind-1] break diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader_forece_stop.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader_forece_stop.py index c19a308f1c..3aeb751ab1 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader_forece_stop.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader_forece_stop.py @@ 
-451,6 +451,7 @@ class TDTestCase: # append rows of stablename when dnode stop make sure revote leaders while not revote_status: + after_leader_infos = self.get_leader_infos(db_name) revote_status = self.check_revote_leader_success(db_name ,before_leader_infos , after_leader_infos) diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader.py index 41606946f6..03dbe9cac8 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader.py @@ -197,10 +197,10 @@ class TDTestCase: return self.stop_dnode_id - def wait_stop_dnode_OK(self): + def wait_stop_dnode_OK(self,newTdSql): def _get_status(): - newTdSql=tdCom.newTdSql() + # newTdSql=tdCom.newTdSql() status = "" newTdSql.query("show dnodes") @@ -242,10 +242,10 @@ class TDTestCase: break return check_status - def wait_start_dnode_OK(self): + def wait_start_dnode_OK(self ,newTdSql): def _get_status(): - newTdSql=tdCom.newTdSql() + # newTdSql=tdCom.newTdSql() status = "" newTdSql.query("show dnodes") dnode_infos = newTdSql.queryResult @@ -351,9 +351,9 @@ class TDTestCase: return check_status - def get_leader_infos(self ,dbname): + def get_leader_infos(self , newTdSql ,dbname): + - newTdSql=tdCom.newTdSql() newTdSql.query("show {}.vgroups".format(dbname)) vgroup_infos = newTdSql.queryResult @@ -412,36 +412,36 @@ class TDTestCase: # let query task start self.thread_list = self.multi_thread_query_task(10 ,self.db_name ,'stb1' ) + newTdSql=tdCom.newTdSql() # force stop follower for loop in range(self.loop_restart_times): tdLog.debug(" ==== this is {}_th restart follower of database {} ==== ".format(loop ,self.db_name)) # get leader info before stop - before_leader_infos = self.get_leader_infos(self.db_name) + before_leader_infos = self.get_leader_infos(newTdSql , self.db_name) self.stop_dnode_id = self._get_stop_dnode_id(self.db_name) tdDnodes[self.stop_dnode_id-1].stoptaosd() - start = time.time() # get leader info after stop - after_leader_infos = self.get_leader_infos(self.db_name) + after_leader_infos = self.get_leader_infos(newTdSql , self.db_name) revote_status = self.check_revote_leader_success(self.db_name ,before_leader_infos , after_leader_infos) while not revote_status: - after_leader_infos = self.get_leader_infos(self.db_name) + after_leader_infos = self.get_leader_infos(newTdSql , self.db_name) revote_status = self.check_revote_leader_success(self.db_name ,before_leader_infos , after_leader_infos) end = time.time() time_cost = end - start tdLog.debug(" ==== revote leader of database {} cost time {} ====".format(self.db_name , time_cost)) - self.wait_stop_dnode_OK() + self.wait_stop_dnode_OK(newTdSql) start = time.time() tdDnodes[self.stop_dnode_id-1].starttaosd() - self.wait_start_dnode_OK() + self.wait_start_dnode_OK(newTdSql) end = time.time() time_cost = int(end-start) From 2b599bfcf1108fb0d18bd54c18c101f85159139d Mon Sep 17 00:00:00 2001 From: "wenzhouwww@live.cn" Date: Tue, 26 Jul 2022 18:14:54 +0800 Subject: [PATCH 04/63] add test case for stop all dnodes --- ...ic_replica3_insertdatas_stop_all_dnodes.py | 393 ++++++++++++++++++ 1 file changed, 393 insertions(+) create mode 100644 tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_all_dnodes.py diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_all_dnodes.py 
b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_all_dnodes.py new file mode 100644 index 0000000000..36abe973f8 --- /dev/null +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_all_dnodes.py @@ -0,0 +1,393 @@ +# author : wenzhouwww +from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE +import taos +import sys +import time +import os + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import TDDnodes +from util.dnodes import TDDnode +from util.cluster import * + +import datetime +import inspect +import time +import socket +import subprocess +import threading +sys.path.append(os.path.dirname(__file__)) + +class TDTestCase: + def init(self,conn ,logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + self.host = socket.gethostname() + self.mnode_list = {} + self.dnode_list = {} + self.ts = 1483200000000 + self.ts_step =1000 + self.db_name ='testdb' + self.replica = 3 + self.vgroups = 1 + self.tb_nums = 10 + self.row_nums = 100 + self.stop_dnode_id = None + self.loop_restart_times = 5 + self.current_thread = None + self.max_restart_time = 10 + self.try_check_times = 10 + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def check_setup_cluster_status(self): + + tdSql.query("show mnodes") + for mnode in tdSql.queryResult: + name = mnode[1] + info = mnode + self.mnode_list[name] = info + + tdSql.query("show dnodes") + for dnode in tdSql.queryResult: + name = dnode[1] + info = dnode + self.dnode_list[name] = info + + count = 0 + is_leader = False + mnode_name = '' + for k,v in self.mnode_list.items(): + count +=1 + # only for 1 mnode + mnode_name = k + + if v[2] =='leader': + is_leader=True + + if count==1 and is_leader: + tdLog.notice("===== depoly cluster success with 1 mnode as leader =====") + else: + tdLog.exit("===== depoly cluster fail with 1 mnode as leader =====") + + for k ,v in self.dnode_list.items(): + if k == mnode_name: + if v[3]==0: + tdLog.notice("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3])) + else: + tdLog.exit("===== depoly cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name,v[3])) + else: + continue + + def create_database(self, dbname, replica_num ,vgroup_nums ): + drop_db_sql = "drop database if exists {}".format(dbname) + create_db_sql = "create database {} replica {} vgroups {}".format(dbname,replica_num,vgroup_nums) + + tdLog.notice(" ==== create database {} and insert rows begin =====".format(dbname)) + tdSql.execute(drop_db_sql) + tdSql.execute(create_db_sql) + tdSql.execute("use {}".format(dbname)) + + def create_stable_insert_datas(self,dbname ,stablename , tb_nums , row_nums): + tdSql.execute("use {}".format(dbname)) + tdSql.execute( + '''create table {} + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) + tags (t1 int) + '''.format(stablename) + ) + + for i in range(tb_nums): + sub_tbname = "sub_{}_{}".format(stablename,i) + tdSql.execute("create table {} 
using {} tags({})".format(sub_tbname, stablename ,i)) + # insert datas about new database + + for row_num in range(row_nums): + ts = self.ts + self.ts_step*row_num + tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ") + + tdLog.notice(" ==== stable {} insert rows execute end =====".format(stablename)) + + def append_rows_of_exists_tables(self,dbname ,stablename , tbname , append_nums ): + + tdSql.execute("use {}".format(dbname)) + + for row_num in range(append_nums): + tdSql.execute(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ") + # print(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ") + tdLog.notice(" ==== append new rows of table {} belongs to stable {} execute end =====".format(tbname,stablename)) + os.system("taos -s 'select count(*) from {}.{}';".format(dbname,stablename)) + + def check_insert_rows(self, dbname, stablename , tb_nums , row_nums, append_rows): + + tdSql.execute("use {}".format(dbname)) + + tdSql.query("select count(*) from {}.{}".format(dbname,stablename)) + + while not tdSql.queryResult: + time.sleep(0.1) + tdSql.query("select count(*) from {}.{}".format(dbname,stablename)) + + status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows) + + count = 0 + while not status_OK : + if count > self.try_check_times: + os.system("taos -s ' show {}.vgroups; '".format(dbname)) + tdLog.exit(" ==== check insert rows failed after {} try check {} times of database {}".format(count , self.try_check_times ,dbname)) + break + time.sleep(0.1) + tdSql.query("select count(*) from {}.{}".format(dbname,stablename)) + while not tdSql.queryResult: + time.sleep(0.1) + tdSql.query("select count(*) from {}.{}".format(dbname,stablename)) + status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows) + tdLog.debug(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count , dbname)) + count += 1 + + + tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename)) + while not tdSql.queryResult: + time.sleep(0.1) + tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename)) + status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums) + count = 0 + while not status_OK : + if count > self.try_check_times: + os.system("taos -s ' show {}.vgroups;'".format(dbname)) + tdLog.exit(" ==== check insert rows failed after {} try check {} times of database {}".format(count , self.try_check_times ,dbname)) + break + time.sleep(0.1) + tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename)) + while not tdSql.queryResult: + time.sleep(0.1) + tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename)) + status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums) + tdLog.debug(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count , dbname)) + count += 1 + + def _get_stop_dnode_id(self , dbname , role): + + tdSql.query("show {}.vgroups".format(dbname)) + vgroup_infos = tdSql.queryResult + for vgroup_info in vgroup_infos: + leader_infos = vgroup_info[3:-4] + # print(vgroup_info) + for ind ,role in 
enumerate(leader_infos): + if role == role: + # print(ind,leader_infos) + self.stop_dnode_id = leader_infos[ind-1] + break + + + return self.stop_dnode_id + + def wait_stop_dnode_OK(self ,newTdSql ): + + def _get_status(): + # newTdSql=tdCom.newTdSql() + + status = "" + newTdSql.query("show dnodes") + dnode_infos = newTdSql.queryResult + for dnode_info in dnode_infos: + id = dnode_info[0] + dnode_status = dnode_info[4] + if id == self.stop_dnode_id: + status = dnode_status + break + return status + + status = _get_status() + while status !="offline": + time.sleep(0.1) + status = _get_status() + # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode)) + tdLog.notice("==== stop_dnode has stopped , id is {} ====".format(self.stop_dnode_id)) + + def wait_start_dnode_OK(self , newTdSql): + + def _get_status(): + # newTdSql=tdCom.newTdSql() + status = "" + newTdSql.query("show dnodes") + dnode_infos = newTdSql.queryResult + for dnode_info in dnode_infos: + id = dnode_info[0] + dnode_status = dnode_info[4] + if id == self.stop_dnode_id: + status = dnode_status + break + return status + + status = _get_status() + while status !="ready": + time.sleep(0.1) + status = _get_status() + # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode)) + tdLog.notice("==== stop_dnode has restart , id is {} ====".format(self.stop_dnode_id)) + + def _parse_datetime(self,timestr): + try: + return datetime.datetime.strptime(timestr, '%Y-%m-%d %H:%M:%S.%f') + except ValueError: + pass + try: + return datetime.datetime.strptime(timestr, '%Y-%m-%d %H:%M:%S') + except ValueError: + pass + + def mycheckRowCol(self, sql, row, col): + caller = inspect.getframeinfo(inspect.stack()[2][0]) + if row < 0: + args = (caller.filename, caller.lineno, sql, row) + tdLog.exit("%s(%d) failed: sql:%s, row:%d is smaller than zero" % args) + if col < 0: + args = (caller.filename, caller.lineno, sql, row) + tdLog.exit("%s(%d) failed: sql:%s, col:%d is smaller than zero" % args) + if row > tdSql.queryRows: + args = (caller.filename, caller.lineno, sql, row, tdSql.queryRows) + tdLog.exit("%s(%d) failed: sql:%s, row:%d is larger than queryRows:%d" % args) + if col > tdSql.queryCols: + args = (caller.filename, caller.lineno, sql, col, tdSql.queryCols) + tdLog.exit("%s(%d) failed: sql:%s, col:%d is larger than queryCols:%d" % args) + + def mycheckData(self, sql ,row, col, data): + check_status = True + self.mycheckRowCol(sql ,row, col) + if tdSql.queryResult[row][col] != data: + if tdSql.cursor.istype(col, "TIMESTAMP"): + # suppose user want to check nanosecond timestamp if a longer data passed + if (len(data) >= 28): + if pd.to_datetime(tdSql.queryResult[row][col]) == pd.to_datetime(data): + tdLog.info("sql:%s, row:%d col:%d data:%d == expect:%s" % + (sql, row, col, tdSql.queryResult[row][col], data)) + else: + if tdSql.queryResult[row][col] == self._parse_datetime(data): + tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % + (sql, row, col, tdSql.queryResult[row][col], data)) + return + + if str(tdSql.queryResult[row][col]) == str(data): + tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % + (sql, row, col, tdSql.queryResult[row][col], data)) + return + elif isinstance(data, float) and abs(tdSql.queryResult[row][col] - data) <= 0.000001: + tdLog.info("sql:%s, row:%d col:%d data:%f == expect:%f" % + (sql, row, col, tdSql.queryResult[row][col], data)) + return + else: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, 
caller.lineno, sql, row, col, tdSql.queryResult[row][col], data) + tdLog.info("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args) + + check_status = False + + if data is None: + tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % + (sql, row, col, tdSql.queryResult[row][col], data)) + elif isinstance(data, str): + tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % + (sql, row, col, tdSql.queryResult[row][col], data)) + # elif isinstance(data, datetime.date): + # tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % + # (sql, row, col, tdSql.queryResult[row][col], data)) + elif isinstance(data, float): + tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % + (sql, row, col, tdSql.queryResult[row][col], data)) + else: + tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%d" % + (sql, row, col, tdSql.queryResult[row][col], data)) + + return check_status + + def mycheckRows(self, sql, expectRows): + check_status = True + if len(tdSql.queryResult) == expectRows: + tdLog.info("sql:%s, queryRows:%d == expect:%d" % (sql, len(tdSql.queryResult), expectRows)) + return True + else: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, sql, len(tdSql.queryResult), expectRows) + tdLog.info("%s(%d) failed: sql:%s, queryRows:%d != expect:%d" % args) + check_status = False + return check_status + + def stop_All(self): + + tdDnodes = cluster.dnodes + newTdSql=tdCom.newTdSql() + # ==== stop all dnode ===== + for k ,v in self.dnode_list.items(): + dnode_id = v[0] + tdDnodes[dnode_id-1].stoptaosd() + self.wait_stop_dnode_OK(newTdSql) + + def start_All(self): + tdDnodes = cluster.dnodes + newTdSql=tdCom.newTdSql() + for k ,v in self.dnode_list.items(): + dnode_id = v[0] + start = time.time() + tdDnodes[dnode_id-1].starttaosd() + self.wait_start_dnode_OK(newTdSql) + end = time.time() + time_cost = int(end -start) + if time_cost > self.max_restart_time: + tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id)) + + + + def stop_All_dnodes_check_datas(self): + + # stop follower and insert datas , update tables and create new stables + self.create_database(dbname = self.db_name ,replica_num= self.replica , vgroup_nums= 1) + self.create_stable_insert_datas(dbname = self.db_name , stablename = "stb1" , tb_nums= self.tb_nums ,row_nums= self.row_nums ) + self.check_insert_rows(self.db_name ,'stb1' ,tb_nums = self.tb_nums , row_nums= self.row_nums ,append_rows=0) + os.system(" taos -s ' select count(*) from {}.{} ;'".format(self.db_name ,'stb1' )) + + for i in range(self.loop_restart_times): + # begin to stop All taosd + self.stop_All() + # begin to start All taosd + self.start_All() + + tdLog.debug(" ==== cluster has restart , this is {}_th restart cluster ==== ".format(i)) + + self.check_insert_rows(self.db_name ,'stb1' ,tb_nums = self.tb_nums , row_nums= self.row_nums ,append_rows=0) + os.system(" taos -s ' select count(*) from {}.{} ;'".format(self.db_name ,'stb1' )) + + self.append_rows_of_exists_tables(self.db_name ,"stb1" , "sub_stb1_0" , 100 ) + os.system(" taos -s ' select count(*) from {}.{} ;'".format(self.db_name ,'stb1' )) + + + def run(self): + + # basic insert and check of cluster + self.check_setup_cluster_status() + self.stop_All_dnodes_check_datas() + + + + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file From 
e83401527d984c9af9e9cf2edbee80c41c6790ce Mon Sep 17 00:00:00 2001 From: "wenzhouwww@live.cn" Date: Tue, 26 Jul 2022 18:49:11 +0800 Subject: [PATCH 05/63] updatre --- ...4dnode1mnode_basic_replica3_insertdatas_stop_all_dnodes.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_all_dnodes.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_all_dnodes.py index 36abe973f8..bd1ab6ebba 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_all_dnodes.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_all_dnodes.py @@ -336,7 +336,7 @@ class TDTestCase: for k ,v in self.dnode_list.items(): dnode_id = v[0] tdDnodes[dnode_id-1].stoptaosd() - self.wait_stop_dnode_OK(newTdSql) + # self.wait_stop_dnode_OK(newTdSql) def start_All(self): tdDnodes = cluster.dnodes @@ -345,7 +345,7 @@ class TDTestCase: dnode_id = v[0] start = time.time() tdDnodes[dnode_id-1].starttaosd() - self.wait_start_dnode_OK(newTdSql) + # self.wait_start_dnode_OK(newTdSql) end = time.time() time_cost = int(end -start) if time_cost > self.max_restart_time: From da834ea55b640a54150d8dc0d5a38542a4c71074 Mon Sep 17 00:00:00 2001 From: "wenzhouwww@live.cn" Date: Tue, 26 Jul 2022 18:50:37 +0800 Subject: [PATCH 06/63] update --- ...4dnode1mnode_basic_replica3_insertdatas_stop_all_dnodes.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_all_dnodes.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_all_dnodes.py index bd1ab6ebba..80de212af4 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_all_dnodes.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_all_dnodes.py @@ -331,7 +331,7 @@ class TDTestCase: def stop_All(self): tdDnodes = cluster.dnodes - newTdSql=tdCom.newTdSql() + # newTdSql=tdCom.newTdSql() # ==== stop all dnode ===== for k ,v in self.dnode_list.items(): dnode_id = v[0] @@ -340,7 +340,7 @@ class TDTestCase: def start_All(self): tdDnodes = cluster.dnodes - newTdSql=tdCom.newTdSql() + # newTdSql=tdCom.newTdSql() for k ,v in self.dnode_list.items(): dnode_id = v[0] start = time.time() From be16074c965a042285ecbfb09a0e6f67755c1f5e Mon Sep 17 00:00:00 2001 From: "wenzhouwww@live.cn" Date: Tue, 26 Jul 2022 18:55:55 +0800 Subject: [PATCH 07/63] add test case about force stop --- ...lica3_insertdatas_force_stop_all_dnodes.py | 413 ++++++++++++++++++ 1 file changed, 413 insertions(+) create mode 100644 tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_force_stop_all_dnodes.py diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_force_stop_all_dnodes.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_force_stop_all_dnodes.py new file mode 100644 index 0000000000..bbe440dbdd --- /dev/null +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_force_stop_all_dnodes.py @@ -0,0 +1,413 @@ +# author : wenzhouwww +from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE +import taos +import sys +import time +import os + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import TDDnodes +from util.dnodes import TDDnode +from util.cluster import * + +import datetime +import inspect 
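+# Note on this case (comments only, inferred from the code that follows): it
+# force-stops every dnode with kill -9 (the taosd PID is looked up from its listen
+# port via netstat), restarts them all, and re-checks the row counts of stb1 on each
+# loop. Also note that _get_stop_dnode_id() below shadows its `role` parameter with
+# the loop variable, so `if role == role:` matches on the first iteration regardless
+# of the requested role; elsewhere in this series the parameter is renamed to
+# `dnode_role` so the comparison is meaningful.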
+import time +import socket +import subprocess +import threading +sys.path.append(os.path.dirname(__file__)) + +class TDTestCase: + def init(self,conn ,logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + self.host = socket.gethostname() + self.mnode_list = {} + self.dnode_list = {} + self.ts = 1483200000000 + self.ts_step =1000 + self.db_name ='testdb' + self.replica = 3 + self.vgroups = 1 + self.tb_nums = 10 + self.row_nums = 100 + self.stop_dnode_id = None + self.loop_restart_times = 5 + self.current_thread = None + self.max_restart_time = 10 + self.try_check_times = 10 + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def check_setup_cluster_status(self): + + tdSql.query("show mnodes") + for mnode in tdSql.queryResult: + name = mnode[1] + info = mnode + self.mnode_list[name] = info + + tdSql.query("show dnodes") + for dnode in tdSql.queryResult: + name = dnode[1] + info = dnode + self.dnode_list[name] = info + + count = 0 + is_leader = False + mnode_name = '' + for k,v in self.mnode_list.items(): + count +=1 + # only for 1 mnode + mnode_name = k + + if v[2] =='leader': + is_leader=True + + if count==1 and is_leader: + tdLog.notice("===== depoly cluster success with 1 mnode as leader =====") + else: + tdLog.exit("===== depoly cluster fail with 1 mnode as leader =====") + + for k ,v in self.dnode_list.items(): + if k == mnode_name: + if v[3]==0: + tdLog.notice("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3])) + else: + tdLog.exit("===== depoly cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name,v[3])) + else: + continue + + def create_database(self, dbname, replica_num ,vgroup_nums ): + drop_db_sql = "drop database if exists {}".format(dbname) + create_db_sql = "create database {} replica {} vgroups {}".format(dbname,replica_num,vgroup_nums) + + tdLog.notice(" ==== create database {} and insert rows begin =====".format(dbname)) + tdSql.execute(drop_db_sql) + tdSql.execute(create_db_sql) + tdSql.execute("use {}".format(dbname)) + + def create_stable_insert_datas(self,dbname ,stablename , tb_nums , row_nums): + tdSql.execute("use {}".format(dbname)) + tdSql.execute( + '''create table {} + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) + tags (t1 int) + '''.format(stablename) + ) + + for i in range(tb_nums): + sub_tbname = "sub_{}_{}".format(stablename,i) + tdSql.execute("create table {} using {} tags({})".format(sub_tbname, stablename ,i)) + # insert datas about new database + + for row_num in range(row_nums): + ts = self.ts + self.ts_step*row_num + tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ") + + tdLog.notice(" ==== stable {} insert rows execute end =====".format(stablename)) + + def append_rows_of_exists_tables(self,dbname ,stablename , tbname , append_nums ): + + tdSql.execute("use {}".format(dbname)) + + for row_num in range(append_nums): + tdSql.execute(f"insert into {tbname} values 
(now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ") + # print(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ") + tdLog.notice(" ==== append new rows of table {} belongs to stable {} execute end =====".format(tbname,stablename)) + os.system("taos -s 'select count(*) from {}.{}';".format(dbname,stablename)) + + def check_insert_rows(self, dbname, stablename , tb_nums , row_nums, append_rows): + + tdSql.execute("use {}".format(dbname)) + + tdSql.query("select count(*) from {}.{}".format(dbname,stablename)) + + while not tdSql.queryResult: + time.sleep(0.1) + tdSql.query("select count(*) from {}.{}".format(dbname,stablename)) + + status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows) + + count = 0 + while not status_OK : + if count > self.try_check_times: + os.system("taos -s ' show {}.vgroups; '".format(dbname)) + tdLog.exit(" ==== check insert rows failed after {} try check {} times of database {}".format(count , self.try_check_times ,dbname)) + break + time.sleep(0.1) + tdSql.query("select count(*) from {}.{}".format(dbname,stablename)) + while not tdSql.queryResult: + time.sleep(0.1) + tdSql.query("select count(*) from {}.{}".format(dbname,stablename)) + status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows) + tdLog.debug(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count , dbname)) + count += 1 + + + tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename)) + while not tdSql.queryResult: + time.sleep(0.1) + tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename)) + status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums) + count = 0 + while not status_OK : + if count > self.try_check_times: + os.system("taos -s ' show {}.vgroups;'".format(dbname)) + tdLog.exit(" ==== check insert rows failed after {} try check {} times of database {}".format(count , self.try_check_times ,dbname)) + break + time.sleep(0.1) + tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename)) + while not tdSql.queryResult: + time.sleep(0.1) + tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename)) + status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums) + tdLog.debug(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count , dbname)) + count += 1 + + def _get_stop_dnode_id(self , dbname , role): + + tdSql.query("show {}.vgroups".format(dbname)) + vgroup_infos = tdSql.queryResult + for vgroup_info in vgroup_infos: + leader_infos = vgroup_info[3:-4] + # print(vgroup_info) + for ind ,role in enumerate(leader_infos): + if role == role: + # print(ind,leader_infos) + self.stop_dnode_id = leader_infos[ind-1] + break + + + return self.stop_dnode_id + + def wait_stop_dnode_OK(self ,newTdSql ): + + def _get_status(): + # newTdSql=tdCom.newTdSql() + + status = "" + newTdSql.query("show dnodes") + dnode_infos = newTdSql.queryResult + for dnode_info in dnode_infos: + id = dnode_info[0] + dnode_status = dnode_info[4] + if id == self.stop_dnode_id: + status = dnode_status + break + return status + + status = _get_status() + while status !="offline": + time.sleep(0.1) + status = _get_status() + # 
tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode)) + tdLog.notice("==== stop_dnode has stopped , id is {} ====".format(self.stop_dnode_id)) + + def wait_start_dnode_OK(self , newTdSql): + + def _get_status(): + # newTdSql=tdCom.newTdSql() + status = "" + newTdSql.query("show dnodes") + dnode_infos = newTdSql.queryResult + for dnode_info in dnode_infos: + id = dnode_info[0] + dnode_status = dnode_info[4] + if id == self.stop_dnode_id: + status = dnode_status + break + return status + + status = _get_status() + while status !="ready": + time.sleep(0.1) + status = _get_status() + # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode)) + tdLog.notice("==== stop_dnode has restart , id is {} ====".format(self.stop_dnode_id)) + + def _parse_datetime(self,timestr): + try: + return datetime.datetime.strptime(timestr, '%Y-%m-%d %H:%M:%S.%f') + except ValueError: + pass + try: + return datetime.datetime.strptime(timestr, '%Y-%m-%d %H:%M:%S') + except ValueError: + pass + + def mycheckRowCol(self, sql, row, col): + caller = inspect.getframeinfo(inspect.stack()[2][0]) + if row < 0: + args = (caller.filename, caller.lineno, sql, row) + tdLog.exit("%s(%d) failed: sql:%s, row:%d is smaller than zero" % args) + if col < 0: + args = (caller.filename, caller.lineno, sql, row) + tdLog.exit("%s(%d) failed: sql:%s, col:%d is smaller than zero" % args) + if row > tdSql.queryRows: + args = (caller.filename, caller.lineno, sql, row, tdSql.queryRows) + tdLog.exit("%s(%d) failed: sql:%s, row:%d is larger than queryRows:%d" % args) + if col > tdSql.queryCols: + args = (caller.filename, caller.lineno, sql, col, tdSql.queryCols) + tdLog.exit("%s(%d) failed: sql:%s, col:%d is larger than queryCols:%d" % args) + + def mycheckData(self, sql ,row, col, data): + check_status = True + self.mycheckRowCol(sql ,row, col) + if tdSql.queryResult[row][col] != data: + if tdSql.cursor.istype(col, "TIMESTAMP"): + # suppose user want to check nanosecond timestamp if a longer data passed + if (len(data) >= 28): + if pd.to_datetime(tdSql.queryResult[row][col]) == pd.to_datetime(data): + tdLog.info("sql:%s, row:%d col:%d data:%d == expect:%s" % + (sql, row, col, tdSql.queryResult[row][col], data)) + else: + if tdSql.queryResult[row][col] == self._parse_datetime(data): + tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % + (sql, row, col, tdSql.queryResult[row][col], data)) + return + + if str(tdSql.queryResult[row][col]) == str(data): + tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % + (sql, row, col, tdSql.queryResult[row][col], data)) + return + elif isinstance(data, float) and abs(tdSql.queryResult[row][col] - data) <= 0.000001: + tdLog.info("sql:%s, row:%d col:%d data:%f == expect:%f" % + (sql, row, col, tdSql.queryResult[row][col], data)) + return + else: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, sql, row, col, tdSql.queryResult[row][col], data) + tdLog.info("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args) + + check_status = False + + if data is None: + tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % + (sql, row, col, tdSql.queryResult[row][col], data)) + elif isinstance(data, str): + tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % + (sql, row, col, tdSql.queryResult[row][col], data)) + # elif isinstance(data, datetime.date): + # tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % + # (sql, row, col, tdSql.queryResult[row][col], data)) + 
elif isinstance(data, float): + tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % + (sql, row, col, tdSql.queryResult[row][col], data)) + else: + tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%d" % + (sql, row, col, tdSql.queryResult[row][col], data)) + + return check_status + + def mycheckRows(self, sql, expectRows): + check_status = True + if len(tdSql.queryResult) == expectRows: + tdLog.info("sql:%s, queryRows:%d == expect:%d" % (sql, len(tdSql.queryResult), expectRows)) + return True + else: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, sql, len(tdSql.queryResult), expectRows) + tdLog.info("%s(%d) failed: sql:%s, queryRows:%d != expect:%d" % args) + check_status = False + return check_status + + def force_stop_dnode(self, dnode_id ): + + tdSql.query("show dnodes") + port = None + for dnode_info in tdSql.queryResult: + if dnode_id == dnode_info[0]: + port = dnode_info[1].split(":")[-1] + break + else: + continue + if port: + tdLog.notice(" ==== dnode {} will be force stop by kill -9 ====".format(dnode_id)) + psCmd = '''netstat -anp|grep -w LISTEN|grep -w %s |grep -o "LISTEN.*"|awk '{print $2}'|cut -d/ -f1|head -n1''' %(port) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + ps_kill_taosd = ''' kill -9 {} '''.format(processID) + # print(ps_kill_taosd) + os.system(ps_kill_taosd) + + def stop_All(self): + + tdDnodes = cluster.dnodes + # newTdSql=tdCom.newTdSql() + # ==== stop all dnode ===== + for k ,v in self.dnode_list.items(): + dnode_id = v[0] + # tdDnodes[dnode_id-1].stoptaosd() + self.force_stop_dnode(dnode_id) + # self.wait_stop_dnode_OK(newTdSql) + + def start_All(self): + tdDnodes = cluster.dnodes + # newTdSql=tdCom.newTdSql() + for k ,v in self.dnode_list.items(): + dnode_id = v[0] + start = time.time() + tdDnodes[dnode_id-1].starttaosd() + # self.wait_start_dnode_OK(newTdSql) + end = time.time() + time_cost = int(end -start) + if time_cost > self.max_restart_time: + tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id)) + + + + def stop_All_dnodes_check_datas(self): + + # stop follower and insert datas , update tables and create new stables + self.create_database(dbname = self.db_name ,replica_num= self.replica , vgroup_nums= 1) + self.create_stable_insert_datas(dbname = self.db_name , stablename = "stb1" , tb_nums= self.tb_nums ,row_nums= self.row_nums ) + self.check_insert_rows(self.db_name ,'stb1' ,tb_nums = self.tb_nums , row_nums= self.row_nums ,append_rows=0) + os.system(" taos -s ' select count(*) from {}.{} ;'".format(self.db_name ,'stb1' )) + + for i in range(self.loop_restart_times): + # begin to stop All taosd + self.stop_All() + # begin to start All taosd + self.start_All() + + tdLog.debug(" ==== cluster has restart , this is {}_th restart cluster ==== ".format(i)) + + self.check_insert_rows(self.db_name ,'stb1' ,tb_nums = self.tb_nums , row_nums= self.row_nums ,append_rows=0) + os.system(" taos -s ' select count(*) from {}.{} ;'".format(self.db_name ,'stb1' )) + + self.append_rows_of_exists_tables(self.db_name ,"stb1" , "sub_stb1_0" , 100 ) + os.system(" taos -s ' select count(*) from {}.{} ;'".format(self.db_name ,'stb1' )) + + + def run(self): + + # basic insert and check of cluster + self.check_setup_cluster_status() + self.stop_All_dnodes_check_datas() + + + + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, 
TDTestCase()) \ No newline at end of file From 580cfc7d82df2837151f9515187c6890fed79279 Mon Sep 17 00:00:00 2001 From: "wenzhouwww@live.cn" Date: Wed, 27 Jul 2022 11:17:52 +0800 Subject: [PATCH 08/63] update case --- ...asic_replica3_insertdatas_force_stop_all_dnodes.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_force_stop_all_dnodes.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_force_stop_all_dnodes.py index bbe440dbdd..5cb9277c24 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_force_stop_all_dnodes.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_force_stop_all_dnodes.py @@ -330,14 +330,13 @@ class TDTestCase: def force_stop_dnode(self, dnode_id ): - tdSql.query("show dnodes") port = None - for dnode_info in tdSql.queryResult: - if dnode_id == dnode_info[0]: - port = dnode_info[1].split(":")[-1] - break + for k ,v in self.dnode_list.items(): + if v[0] == dnode_id: + port = k.split(":")[-1] else: continue + if port: tdLog.notice(" ==== dnode {} will be force stop by kill -9 ====".format(dnode_id)) psCmd = '''netstat -anp|grep -w LISTEN|grep -w %s |grep -o "LISTEN.*"|awk '{print $2}'|cut -d/ -f1|head -n1''' %(port) @@ -346,6 +345,8 @@ class TDTestCase: ps_kill_taosd = ''' kill -9 {} '''.format(processID) # print(ps_kill_taosd) os.system(ps_kill_taosd) + else : + tdLog.exit(" ==== port of dnode {} not found ====".format(dnode_id)) def stop_All(self): From 236af882fe19b1cedb1216f40dfac4cf95c89f66 Mon Sep 17 00:00:00 2001 From: "wenzhouwww@live.cn" Date: Wed, 27 Jul 2022 11:46:27 +0800 Subject: [PATCH 09/63] update case --- ...asic_replica3_mnode3_insertdatas_querys.py | 217 ++++++++++++++++++ 1 file changed, 217 insertions(+) create mode 100644 tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_mnode3_insertdatas_querys.py diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_mnode3_insertdatas_querys.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_mnode3_insertdatas_querys.py new file mode 100644 index 0000000000..daa47c86e7 --- /dev/null +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_mnode3_insertdatas_querys.py @@ -0,0 +1,217 @@ +# author : wenzhouwww +from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE +import taos +import sys +import time +import os + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import TDDnodes +from util.dnodes import TDDnode +from util.cluster import * + +import time +import socket +import subprocess ,threading +sys.path.append(os.path.dirname(__file__)) + +class TDTestCase: + def init(self,conn ,logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + self.host = socket.gethostname() + self.mnode_list = {} + self.dnode_list = {} + self.ts = 1483200000000 + self.db_name ='testdb' + self.replica = 3 + self.vgroups = 1 + self.tb_nums = 10 + self.row_nums = 2000 + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + 
return buildPath + + def check_setup_cluster_status(self): + tdSql.query("show mnodes") + for mnode in tdSql.queryResult: + name = mnode[1] + info = mnode + self.mnode_list[name] = info + + tdSql.query("show dnodes") + for dnode in tdSql.queryResult: + name = dnode[1] + info = dnode + self.dnode_list[name] = info + + count = 0 + is_leader = False + mnode_name = '' + for k,v in self.mnode_list.items(): + count +=1 + # only for 1 mnode + mnode_name = k + + if v[2] =='leader': + is_leader=True + + if count==1 and is_leader: + tdLog.notice("===== depoly cluster success with 1 mnode as leader =====") + else: + tdLog.exit("===== depoly cluster fail with 1 mnode as leader =====") + + for k ,v in self.dnode_list.items(): + if k == mnode_name: + if v[3]==0: + tdLog.notice("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3])) + else: + tdLog.exit("===== depoly cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name,v[3])) + else: + continue + + def create_db_check_vgroups(self): + + tdSql.execute("drop database if exists test") + tdSql.execute("create database if not exists test replica 1 duration 300") + tdSql.execute("use test") + tdSql.execute( + '''create table stb1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + tags (t1 int) + ''' + ) + tdSql.execute( + ''' + create table t1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + ''' + ) + + for i in range(5): + tdSql.execute("create table sub_tb_{} using stb1 tags({})".format(i,i)) + tdSql.query("show stables") + tdSql.checkRows(1) + tdSql.query("show tables") + tdSql.checkRows(6) + + tdSql.query("show test.vgroups;") + vgroups_infos = {} # key is id: value is info list + for vgroup_info in tdSql.queryResult: + vgroup_id = vgroup_info[0] + tmp_list = [] + for role in vgroup_info[3:-4]: + if role in ['leader','follower']: + tmp_list.append(role) + vgroups_infos[vgroup_id]=tmp_list + + for k , v in vgroups_infos.items(): + if len(v) ==1 and v[0]=="leader": + tdLog.notice(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k)) + else: + tdLog.exit(" === create database replica only 1 role leader check fail of vgroup_id {} ======".format(k)) + + def create_db_replica_3_insertdatas(self, dbname, replica_num ,vgroup_nums ,tb_nums , row_nums ): + newTdSql=tdCom.newTdSql() + drop_db_sql = "drop database if exists {}".format(dbname) + create_db_sql = "create database {} replica {} vgroups {}".format(dbname,replica_num,vgroup_nums) + + tdLog.notice(" ==== create database {} and insert rows begin =====".format(dbname)) + newTdSql.execute(drop_db_sql) + newTdSql.execute(create_db_sql) + newTdSql.execute("use {}".format(dbname)) + newTdSql.execute( + '''create table stb1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) + tags (t1 int) + ''' + ) + newTdSql.execute( + ''' + create table t1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) + ''' + ) + + for i in range(tb_nums): + sub_tbname = "sub_tb_{}".format(i) + newTdSql.execute("create table {} using stb1 tags({})".format(sub_tbname,i)) + # insert datas about new database + + for row_num in range(row_nums): + ts = self.ts + 1000*row_num + newTdSql.execute(f"insert into 
{sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ") + + tdLog.notice(" ==== create database {} and insert rows execute end =====".format(dbname)) + + + + def check_insert_status(self, dbname, tb_nums , row_nums): + newTdSql=tdCom.newTdSql() + newTdSql.execute("use {}".format(dbname)) + newTdSql.query("select count(*) from {}.{}".format(dbname,'stb1')) + # tdSql.checkData(0 , 0 , tb_nums*row_nums) + newTdSql.query("select distinct tbname from {}.{}".format(dbname,'stb1')) + # tdSql.checkRows(tb_nums) + + def loop_query_constantly(self, times , db_name, tb_nums ,row_nums): + + for loop_time in range(times): + tdLog.debug(" === query is going ,this is {}_th query === ".format(loop_time)) + self.check_insert_status( db_name, tb_nums , row_nums) + + def loop_create_databases(self, times , tb_nums , row_nums): + + for loop_time in range(times): + tdLog.debug(" === create database and insert datas is going ,this is {}_th create === ".format(loop_time)) + db_name = 'loop_db_{}'.format(loop_time) + self.create_db_replica_3_insertdatas(db_name , self.replica , self.vgroups , tb_nums , row_nums) + self.check_insert_status( db_name, tb_nums , row_nums) + + def run(self): + self.check_setup_cluster_status() + self.create_db_check_vgroups() + + # create mnode + tdSql.execute("create mnode on dnode 2 ") + tdSql.execute("create mnode on dnode 3 ") + os.system("taos -s 'show mnodes;'") + # start writing constantly + writing = threading.Thread(target = self.create_db_replica_3_insertdatas, args=(self.db_name , self.replica , self.vgroups , self.tb_nums , self.row_nums)) + writing.start() + tdSql.query(" show {}.stables ".format(self.db_name)) + while not tdSql.queryResult: + print(tdSql.queryResult) + time.sleep(0.1) + tdSql.query(" show {}.stables ".format(self.db_name)) + + reading = threading.Thread(target = self.loop_query_constantly, args=(1000,self.db_name , self.tb_nums , self.row_nums)) + reading.start() + + create_db = threading.Thread(target = self.loop_create_databases, args=(10, 10 , 10)) + create_db.start() + + writing.join() + reading.join() + create_db.join() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file From 9cb3f9b3af25befc2130eac1df6df34b562ba8da Mon Sep 17 00:00:00 2001 From: "wenzhouwww@live.cn" Date: Wed, 27 Jul 2022 15:05:44 +0800 Subject: [PATCH 10/63] solve conflix --- tests/system-test/2-query/max_partition.py | 227 +++++++++++++++++++++ 1 file changed, 227 insertions(+) create mode 100644 tests/system-test/2-query/max_partition.py diff --git a/tests/system-test/2-query/max_partition.py b/tests/system-test/2-query/max_partition.py new file mode 100644 index 0000000000..4b9996d9c3 --- /dev/null +++ b/tests/system-test/2-query/max_partition.py @@ -0,0 +1,227 @@ +# author : wenzhouwww +from util.log import * +from util.sql import * +from util.cases import * + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + self.row_nums = 10 + self.tb_nums = 10 + self.ts = 1537146000000 + + def prepare_datas(self, stb_name , tb_nums , row_nums ): + tdSql.execute(" use db ") + tdSql.execute(f" create stable {stb_name} (ts timestamp , c1 int , c2 bigint , c3 float , c4 double , c5 smallint , c6 tinyint , c7 bool , c8 binary(36) , c9 nchar(36) , uc1 int unsigned,\ + uc2 bigint unsigned ,uc3 
smallint unsigned , uc4 tinyint unsigned ) tags(t1 timestamp , t2 int , t3 bigint , t4 float , t5 double , t6 smallint , t7 tinyint , t8 bool , t9 binary(36)\ + , t10 nchar(36) , t11 int unsigned , t12 bigint unsigned ,t13 smallint unsigned , t14 tinyint unsigned ) ") + + for i in range(tb_nums): + tbname = f"sub_{stb_name}_{i}" + ts = self.ts + i*10000 + tdSql.execute(f"create table {tbname} using {stb_name} tags ({ts} , {i} , {i}*10 ,{i}*1.0,{i}*1.0 , 1 , 2, 'true', 'binary_{i}' ,'nchar_{i}',{i},{i},10,20 )") + + for row in range(row_nums): + ts = self.ts + row*1000 + tdSql.execute(f"insert into {tbname} values({ts} , {row} , {row} , {row} , {row} , 1 , 2 , 'true' , 'binary_{row}' , 'nchar_{row}' , {row} , {row} , 1 ,2 )") + + for null in range(5): + ts = self.ts + row_nums*1000 + null*1000 + tdSql.execute(f"insert into {tbname} values({ts} , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL )") + + def basic_query(self): + tdSql.query("select count(*) from stb") + tdSql.checkData(0,0,(self.row_nums + 5 )*self.tb_nums) + tdSql.query("select max(c1) from stb") + tdSql.checkData(0,0,(self.row_nums -1)) + tdSql.query(" select tbname , max(c1) from stb partition by tbname ") + tdSql.checkRows(self.tb_nums) + tdSql.query(" select max(c1) from stb group by t1 order by t1 ") + tdSql.checkRows(self.tb_nums) + tdSql.query(" select max(c1) from stb group by c1 order by t1 ") + tdSql.query(" select max(t2) from stb group by c1 order by t1 ") + tdSql.query(" select max(c1) from stb group by tbname order by tbname ") + tdSql.checkRows(self.tb_nums) + # bug need fix + tdSql.query(" select max(t2) from stb group by t2 order by t2 ") + tdSql.checkRows(self.tb_nums) + tdSql.query(" select max(c1) from stb group by c1 order by c1 ") + tdSql.checkRows(self.row_nums+1) + + tdSql.query(" select c1 , max(c1) from stb group by c1 order by c1 ") + tdSql.checkRows(self.row_nums+1) + + # support selective functions + tdSql.query(" select c1 ,c2 ,c3 , max(c1) ,c4 ,c5 ,t11 from stb group by c1 order by c1 desc ") + tdSql.checkRows(self.row_nums+1) + + tdSql.query(" select c1, tbname , max(c1) ,c4 ,c5 ,t11 from stb group by c1 order by c1 desc ") + tdSql.checkRows(self.row_nums+1) + + # bug need fix + # tdSql.query(" select tbname , max(c1) from sub_stb_1 where c1 is null group by c1 order by c1 desc ") + # tdSql.checkRows(1) + # tdSql.checkData(0,0,"sub_stb_1") + + tdSql.query("select max(c1) ,c2 ,t2,tbname from stb group by abs(c1) order by abs(c1)") + tdSql.checkRows(self.row_nums+1) + tdSql.query("select abs(c1+c3), count(c1+c3) ,max(c1+t2) from stb group by abs(c1+c3) order by abs(c1+c3)") + tdSql.checkRows(self.row_nums+1) + tdSql.query("select max(c1+c3)+min(c2) ,abs(c1) from stb group by abs(c1) order by abs(c1)") + tdSql.checkRows(self.row_nums+1) + tdSql.error("select count(c1+c3)+max(c2) ,abs(c1) ,abs(t1) from stb group by abs(c1) order by abs(t1)+c2") + tdSql.error("select count(c1+c3)+max(c2) ,abs(c1) from stb group by abs(c1) order by abs(c1)+c2") + tdSql.query("select abs(c1+c3)+abs(c2) , count(c1+c3)+max(c2) from stb group by abs(c1+c3)+abs(c2) order by abs(c1+c3)+abs(c2)") + tdSql.checkRows(self.row_nums+1) + + tdSql.query(" select max(c1) , max(t2) from stb where abs(c1+t2)=1 partition by tbname ") + tdSql.checkRows(2) + tdSql.query(" select max(c1) from stb where abs(c1+t2)=1 partition by tbname ") + tdSql.checkRows(2) + + tdSql.query(" select tbname , max(c1) from stb partition by tbname order by tbname ") + tdSql.checkRows(self.tb_nums) + 
tdSql.checkData(0,1,self.row_nums-1) + + tdSql.query("select tbname , max(c2) from stb partition by t1 order by t1") + tdSql.query("select tbname , max(t2) from stb partition by t1 order by t1") + tdSql.query("select tbname , max(t2) from stb partition by t2 order by t2") + + # # bug need fix + tdSql.query("select t2 , max(t2) from stb partition by t2 order by t2") + tdSql.checkRows(self.tb_nums) + + tdSql.query("select tbname , max(c1) from stb partition by tbname order by tbname") + tdSql.checkRows(self.tb_nums) + tdSql.checkData(0,1,self.row_nums-1) + + + tdSql.query("select tbname , max(c1) from stb partition by t2 order by t2") + + tdSql.query("select c2, max(c1) from stb partition by c2 order by c2 desc") + tdSql.checkRows(self.tb_nums+1) + tdSql.checkData(0,1,self.row_nums-1) + + tdSql.query("select tbname , max(c1) from stb partition by c1 order by c2") + + + tdSql.query("select tbname , abs(t2) from stb partition by c2 order by t2") + tdSql.checkRows(self.tb_nums*(self.row_nums+5)) + + tdSql.query("select max(c1) , count(t2) from stb partition by c2 ") + tdSql.checkRows(self.row_nums+1) + tdSql.checkData(0,1,self.row_nums) + + tdSql.query("select count(c1) , max(t2) ,c2 from stb partition by c2 order by c2") + tdSql.checkRows(self.row_nums+1) + + tdSql.query("select count(c1) , count(t1) ,max(c2) ,tbname from stb partition by tbname order by tbname") + tdSql.checkRows(self.tb_nums) + tdSql.checkCols(4) + + tdSql.query("select count(c1) , max(t2) ,t1 from stb partition by t1 order by t1") + tdSql.checkRows(self.tb_nums) + tdSql.checkData(0,0,self.row_nums) + + # bug need fix + tdSql.query("select count(c1) , max(t2) ,abs(c1) from stb partition by abs(c1) order by abs(c1)") + tdSql.checkRows(self.row_nums+1) + + + tdSql.query("select max(ceil(c2)) , max(floor(t2)) ,max(floor(c2)) from stb partition by abs(c2) order by abs(c2)") + tdSql.checkRows(self.row_nums+1) + + + tdSql.query("select max(ceil(c1-2)) , max(floor(t2+1)) ,max(c2-c1) from stb partition by abs(floor(c1)) order by abs(floor(c1))") + tdSql.checkRows(self.row_nums+1) + + tdSql.query("select tbname , max(c1) ,c1 from stb partition by tbname order by tbname") + tdSql.checkRows(self.tb_nums) + tdSql.checkData(0,0,'sub_stb_0') + tdSql.checkData(0,1,9) + tdSql.checkData(0,2,9) + + tdSql.query("select tbname ,top(c1,1) ,c1 from stb partition by tbname order by tbname") + tdSql.checkRows(self.tb_nums) + + tdSql.query(" select c1 , sample(c1,2) from stb partition by tbname order by tbname ") + tdSql.checkRows(self.tb_nums*2) + + + # interval + tdSql.query("select max(c1) from stb interval(2s) sliding(1s)") + + # bug need fix + + tdSql.query('select max(c1) from stb where ts>="2022-07-06 16:00:00.000 " and ts < "2022-07-06 17:00:00.000 " interval(50s) sliding(30s) fill(NULL)') + + tdSql.query(" select tbname , count(c1) from stb partition by tbname interval(10s) slimit 5 soffset 1 ") + + tdSql.query("select tbname , max(c1) from stb partition by tbname interval(10s)") + tdSql.checkRows(self.row_nums*2) + + tdSql.query("select unique(c1) from stb partition by tbname order by tbname") + + tdSql.query("select tbname , count(c1) from sub_stb_1 partition by tbname interval(10s)") + tdSql.checkData(0,0,'sub_stb_1') + tdSql.checkData(0,1,self.row_nums) + + tdSql.query("select c1 , mavg(c1 ,2 ) from stb partition by c1") + tdSql.checkRows(90) + + tdSql.query("select c1 , diff(c1 , 0) from stb partition by c1") + tdSql.checkRows(90) + + tdSql.query("select c1 , csum(c1) from stb partition by c1") + tdSql.checkRows(100) + + 
tdSql.query("select c1 , sample(c1,2) from stb partition by c1 order by c1") + tdSql.checkRows(21) + # bug need fix + # tdSql.checkData(0,1,None) + + tdSql.query("select c1 , twa(c1) from stb partition by c1 order by c1") + tdSql.checkRows(11) + tdSql.checkData(0,1,None) + + tdSql.query("select c1 , irate(c1) from stb partition by c1 order by c1") + tdSql.checkRows(11) + tdSql.checkData(0,1,None) + + tdSql.query("select c1 , DERIVATIVE(c1,2,1) from stb partition by c1 order by c1") + tdSql.checkRows(90) + # bug need fix + tdSql.checkData(0,1,None) + + + tdSql.query(" select tbname , max(c1) from stb partition by tbname order by tbname slimit 5 soffset 0 ") + tdSql.checkRows(10) + + tdSql.query(" select tbname , max(c1) from sub_stb_1 partition by tbname interval(10s) sliding(5s) ") + + tdSql.query(f'select max(c1) from stb where ts>={self.ts} and ts < {self.ts}+1000 interval(50s) sliding(30s)') + tdSql.query(f'select tbname , max(c1) from stb where ts>={self.ts} and ts < {self.ts}+1000 interval(50s) sliding(30s)') + + + def run(self): + tdSql.prepare() + self.prepare_datas("stb",self.tb_nums,self.row_nums) + self.basic_query() + + # # coverage case for taosd crash about bug fix + tdSql.query(" select sum(c1) from stb where t2+10 >1 ") + tdSql.query(" select count(c1),count(t1) from stb where -t2<1 ") + tdSql.query(" select tbname ,max(ceil(c1)) from stb group by tbname ") + tdSql.query(" select avg(abs(c1)) , tbname from stb group by tbname ") + tdSql.query(" select t1,c1 from stb where abs(t2+c1)=1 ") + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) From f1e28fb865a0a93e9e4f85317e70913e2496fc33 Mon Sep 17 00:00:00 2001 From: "wenzhouwww@live.cn" Date: Wed, 27 Jul 2022 17:09:41 +0800 Subject: [PATCH 11/63] update case --- ...replica3_insertdatas_stop_follower_sync.py | 21 ++++++++++--------- ...plica3_insertdatas_stop_follower_unsync.py | 18 +++++++++------- 2 files changed, 21 insertions(+), 18 deletions(-) diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_sync.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_sync.py index 58606ee67f..68986e0f63 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_sync.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_sync.py @@ -221,7 +221,7 @@ class TDTestCase: tdLog.debug(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count , dbname)) count += 1 - def _get_stop_dnode_id(self , dbname , role): + def _get_stop_dnode_id(self , dbname , dnode_role): tdSql.query("show {}.vgroups".format(dbname)) vgroup_infos = tdSql.queryResult @@ -229,7 +229,7 @@ class TDTestCase: leader_infos = vgroup_info[3:-4] # print(vgroup_info) for ind ,role in enumerate(leader_infos): - if role == role: + if dnode_role == role: # print(ind,leader_infos) self.stop_dnode_id = leader_infos[ind-1] break @@ -237,10 +237,10 @@ class TDTestCase: return self.stop_dnode_id - def wait_stop_dnode_OK(self): + def wait_stop_dnode_OK(self ,newTdSql): def _get_status(): - newTdSql=tdCom.newTdSql() + # newTdSql=tdCom.newTdSql() status = "" newTdSql.query("show dnodes") @@ -260,10 +260,10 @@ class TDTestCase: # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode)) tdLog.notice("==== 
stop_dnode has stopped , id is {} ====".format(self.stop_dnode_id)) - def wait_start_dnode_OK(self): + def wait_start_dnode_OK(self ,newTdSql): def _get_status(): - newTdSql=tdCom.newTdSql() + # newTdSql=tdCom.newTdSql() status = "" newTdSql.query("show dnodes") dnode_infos = newTdSql.queryResult @@ -371,12 +371,13 @@ class TDTestCase: def sync_run_case(self): # stop follower and insert datas , update tables and create new stables tdDnodes=cluster.dnodes + newTdSql = tdCom.newTdSql() for loop in range(self.loop_restart_times): db_name = "sync_db_{}".format(loop) stablename = 'stable_{}'.format(loop) self.create_database(dbname = db_name ,replica_num= self.replica , vgroup_nums= 1) self.create_stable_insert_datas(dbname = db_name , stablename = stablename , tb_nums= 10 ,row_nums= 10 ) - self.stop_dnode_id = self._get_stop_dnode_id(db_name) + self.stop_dnode_id = self._get_stop_dnode_id(db_name , "follower") # check rows of datas @@ -386,7 +387,7 @@ class TDTestCase: start = time.time() tdDnodes[self.stop_dnode_id-1].stoptaosd() - self.wait_stop_dnode_OK() + self.wait_stop_dnode_OK(newTdSql) # append rows of stablename when dnode stop @@ -404,7 +405,7 @@ class TDTestCase: # begin start dnode tdDnodes[self.stop_dnode_id-1].starttaosd() - self.wait_start_dnode_OK() + self.wait_start_dnode_OK(newTdSql) end = time.time() time_cost = int(end -start) if time_cost > self.max_restart_time: @@ -421,7 +422,7 @@ class TDTestCase: def _restart_dnode_of_db_unsync(dbname): start = time.time() tdDnodes=cluster.dnodes - self.stop_dnode_id = self._get_stop_dnode_id(dbname) + self.stop_dnode_id = self._get_stop_dnode_id(dbname ,"follower") # begin restart dnode tdDnodes[self.stop_dnode_id-1].stoptaosd() self.wait_stop_dnode_OK() diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync.py index 8ec6349879..32534ee9c0 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync.py @@ -236,10 +236,10 @@ class TDTestCase: return self.stop_dnode_id - def wait_stop_dnode_OK(self): + def wait_stop_dnode_OK(self ,newTdSql): def _get_status(): - newTdSql=tdCom.newTdSql() + # newTdSql=tdCom.newTdSql() status = "" newTdSql.query("show dnodes") @@ -259,10 +259,10 @@ class TDTestCase: # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode)) tdLog.notice("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id)) - def wait_start_dnode_OK(self): + def wait_start_dnode_OK(self ,newTdSql): def _get_status(): - newTdSql=tdCom.newTdSql() + # newTdSql=tdCom.newTdSql() status = "" newTdSql.query("show dnodes") dnode_infos = newTdSql.queryResult @@ -370,6 +370,7 @@ class TDTestCase: def sync_run_case(self): # stop follower and insert datas , update tables and create new stables tdDnodes=cluster.dnodes + newTdSql=tdCom.newTdSql() for loop in range(self.loop_restart_times): db_name = "sync_db_{}".format(loop) stablename = 'stable_{}'.format(loop) @@ -385,7 +386,7 @@ class TDTestCase: start = time.time() tdDnodes[self.stop_dnode_id-1].stoptaosd() - self.wait_stop_dnode_OK() + self.wait_stop_dnode_OK(newTdSql) # append rows of stablename when dnode stop @@ -403,7 +404,7 @@ class TDTestCase: # begin start dnode tdDnodes[self.stop_dnode_id-1].starttaosd() - self.wait_start_dnode_OK() + 
self.wait_start_dnode_OK(newTdSql) end = time.time() time_cost = int(end -start) if time_cost > self.max_restart_time: @@ -418,14 +419,15 @@ class TDTestCase: def unsync_run_case(self): def _restart_dnode_of_db_unsync(dbname): + newTdSql=tdCom.newTdSql() start = time.time() tdDnodes=cluster.dnodes self.stop_dnode_id = self._get_stop_dnode_id(dbname) # begin restart dnode tdDnodes[self.stop_dnode_id-1].stoptaosd() - self.wait_stop_dnode_OK() + self.wait_stop_dnode_OK(newTdSql) tdDnodes[self.stop_dnode_id-1].starttaosd() - self.wait_start_dnode_OK() + self.wait_start_dnode_OK(newTdSql) end = time.time() time_cost = int(end-start) From 2d3b32bcebe559d13e6ae4368b0a85590b6a7dc1 Mon Sep 17 00:00:00 2001 From: "wenzhouwww@live.cn" Date: Thu, 28 Jul 2022 09:25:22 +0800 Subject: [PATCH 12/63] update case --- ...ode1mnode_basic_replica3_insertdatas_stop_follower_unsync.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync.py index 32534ee9c0..3877649be8 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync.py @@ -377,6 +377,7 @@ class TDTestCase: self.create_database(dbname = db_name ,replica_num= self.replica , vgroup_nums= 1) self.create_stable_insert_datas(dbname = db_name , stablename = stablename , tb_nums= 10 ,row_nums= 10 ) self.stop_dnode_id = self._get_stop_dnode_id(db_name) + print("dnode_id:" , self.stop_dnode_id ) # check rows of datas @@ -423,6 +424,7 @@ class TDTestCase: start = time.time() tdDnodes=cluster.dnodes self.stop_dnode_id = self._get_stop_dnode_id(dbname) + print("dnode_id:" , self.stop_dnode_id ) # begin restart dnode tdDnodes[self.stop_dnode_id-1].stoptaosd() self.wait_stop_dnode_OK(newTdSql) From 6c1cd18c655ccf892701c8d83772dbd76dbf5963 Mon Sep 17 00:00:00 2001 From: "wenzhouwww@live.cn" Date: Thu, 28 Jul 2022 09:33:58 +0800 Subject: [PATCH 13/63] update case --- ...e1mnode_basic_replica3_insertdatas_stop_follower_unsync.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync.py index 3877649be8..74bd9f5a44 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync.py @@ -377,7 +377,7 @@ class TDTestCase: self.create_database(dbname = db_name ,replica_num= self.replica , vgroup_nums= 1) self.create_stable_insert_datas(dbname = db_name , stablename = stablename , tb_nums= 10 ,row_nums= 10 ) self.stop_dnode_id = self._get_stop_dnode_id(db_name) - print("dnode_id:" , self.stop_dnode_id ) + # print("dnode_id:" , self.stop_dnode_id ) # check rows of datas @@ -424,7 +424,7 @@ class TDTestCase: start = time.time() tdDnodes=cluster.dnodes self.stop_dnode_id = self._get_stop_dnode_id(dbname) - print("dnode_id:" , self.stop_dnode_id ) + # print("dnode_id:" , self.stop_dnode_id ) # begin restart dnode tdDnodes[self.stop_dnode_id-1].stoptaosd() self.wait_stop_dnode_OK(newTdSql) From 85552c46bca568f5c9a1c819a377da6dedfb5921 Mon Sep 17 00:00:00 2001 From: "wenzhouwww@live.cn" 
Date: Thu, 28 Jul 2022 17:22:24 +0800 Subject: [PATCH 14/63] update case and push to CI run --- ...mnode_basic_replica1_insertdatas_querys.py | 13 ++--- ...lica3_insertdatas_force_stop_all_dnodes.py | 22 +++++++-- ...mnode_basic_replica3_insertdatas_querys.py | 15 +++--- ...sertdatas_querys_loop_restart_all_vnode.py | 3 +- ...nsertdatas_querys_loop_restart_follower.py | 19 +++++++- ..._insertdatas_querys_loop_restart_leader.py | 20 ++++++-- ...ic_replica3_insertdatas_stop_all_dnodes.py | 22 +++++++-- ...replica3_insertdatas_stop_follower_sync.py | 23 +++++++-- ...plica3_insertdatas_stop_follower_unsync.py | 25 ++++++++-- ...rtdatas_stop_follower_unsync_force_stop.py | 48 +++++++++++++------ ..._basic_replica3_insertdatas_stop_leader.py | 36 +++++++++----- ...ca3_insertdatas_stop_leader_forece_stop.py | 48 ++++++++++++------- ...asic_replica3_mnode3_insertdatas_querys.py | 11 +++-- ...basic_replica3_querydatas_stop_follower.py | 36 ++++++++++---- ...ca3_querydatas_stop_follower_force_stop.py | 21 ++++++-- ...e_basic_replica3_querydatas_stop_leader.py | 21 ++++++-- ...lica3_querydatas_stop_leader_force_stop.py | 36 ++++++++++---- tests/system-test/clusterCase.sh | 26 +++++++++- 18 files changed, 337 insertions(+), 108 deletions(-) diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas_querys.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas_querys.py index df3e6f24e9..dafd4d7745 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas_querys.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas_querys.py @@ -29,7 +29,8 @@ class TDTestCase: self.replica = 1 self.vgroups = 1 self.tb_nums = 10 - self.row_nums = 2000 + self.row_nums = 1000 + self.query_times = 1000 def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -160,8 +161,8 @@ class TDTestCase: - def check_insert_status(self, dbname, tb_nums , row_nums): - newTdSql=tdCom.newTdSql() + def check_insert_status(self, newTdSql ,dbname, tb_nums , row_nums): + # newTdSql=tdCom.newTdSql() newTdSql.execute("use {}".format(dbname)) newTdSql.query("select count(*) from {}.{}".format(dbname,'stb1')) # tdSql.checkData(0 , 0 , tb_nums*row_nums) @@ -169,10 +170,10 @@ class TDTestCase: # tdSql.checkRows(tb_nums) def loop_query_constantly(self, times , db_name, tb_nums ,row_nums): - + newTdSql=tdCom.newTdSql() for loop_time in range(times): tdLog.debug(" === query is going ,this is {}_th query === ".format(loop_time)) - self.check_insert_status( db_name, tb_nums , row_nums) + self.check_insert_status( newTdSql , db_name, tb_nums , row_nums) def run(self): self.check_setup_cluster_status() @@ -187,7 +188,7 @@ class TDTestCase: time.sleep(0.1) tdSql.query(" show {}.stables ".format(self.db_name)) - reading = threading.Thread(target = self.loop_query_constantly, args=(1000,self.db_name , self.tb_nums , self.row_nums)) + reading = threading.Thread(target = self.loop_query_constantly, args=(self.query_times,self.db_name , self.tb_nums , self.row_nums)) reading.start() writing.join() diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_force_stop_all_dnodes.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_force_stop_all_dnodes.py index 5cb9277c24..3d01015af6 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_force_stop_all_dnodes.py +++ 
b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_force_stop_all_dnodes.py @@ -181,20 +181,34 @@ class TDTestCase: tdLog.debug(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count , dbname)) count += 1 - def _get_stop_dnode_id(self , dbname , role): - + def _get_stop_dnode_id(self,dbname ,dnode_role): tdSql.query("show {}.vgroups".format(dbname)) vgroup_infos = tdSql.queryResult + status = False + for vgroup_info in vgroup_infos: + if "error" not in vgroup_info: + status = True + else: + status = False + while status!=True : + time.sleep(0.1) + tdSql.query("show {}.vgroups".format(dbname)) + vgroup_infos = tdSql.queryResult + for vgroup_info in vgroup_infos: + if "error" not in vgroup_info: + status = True + else: + status = False + # print(status) for vgroup_info in vgroup_infos: leader_infos = vgroup_info[3:-4] # print(vgroup_info) for ind ,role in enumerate(leader_infos): - if role == role: + if role == dnode_role: # print(ind,leader_infos) self.stop_dnode_id = leader_infos[ind-1] break - return self.stop_dnode_id def wait_stop_dnode_OK(self ,newTdSql ): diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys.py index b92378060f..3649617c21 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys.py @@ -29,7 +29,8 @@ class TDTestCase: self.replica = 3 self.vgroups = 1 self.tb_nums = 10 - self.row_nums = 2000 + self.row_nums = 1000 + self.query_times = 1000 def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -153,6 +154,8 @@ class TDTestCase: # insert datas about new database for row_num in range(row_nums): + if row_num %100 ==0: + tdLog.info(" === writing is going now === ") ts = self.ts + 1000*row_num newTdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ") @@ -160,8 +163,8 @@ class TDTestCase: - def check_insert_status(self, dbname, tb_nums , row_nums): - newTdSql=tdCom.newTdSql() + def check_insert_status(self, newTdSql ,dbname, tb_nums , row_nums): + newTdSql.execute("use {}".format(dbname)) newTdSql.query("select count(*) from {}.{}".format(dbname,'stb1')) # tdSql.checkData(0 , 0 , tb_nums*row_nums) @@ -169,10 +172,10 @@ class TDTestCase: # tdSql.checkRows(tb_nums) def loop_query_constantly(self, times , db_name, tb_nums ,row_nums): - + newTdSql=tdCom.newTdSql() for loop_time in range(times): tdLog.debug(" === query is going ,this is {}_th query === ".format(loop_time)) - self.check_insert_status( db_name, tb_nums , row_nums) + self.check_insert_status( newTdSql ,db_name, tb_nums , row_nums) def run(self): self.check_setup_cluster_status() @@ -187,7 +190,7 @@ class TDTestCase: time.sleep(0.1) tdSql.query(" show {}.stables ".format(self.db_name)) - reading = threading.Thread(target = self.loop_query_constantly, args=(1000,self.db_name , self.tb_nums , self.row_nums)) + reading = threading.Thread(target = self.loop_query_constantly, args=(self.query_times,self.db_name , self.tb_nums , self.row_nums)) reading.start() writing.join() diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_all_vnode.py 
b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_all_vnode.py index d693e0c38d..db05eca9ce 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_all_vnode.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_all_vnode.py @@ -29,7 +29,7 @@ class TDTestCase: self.replica = 3 self.vgroups = 1 self.tb_nums = 10 - self.row_nums = 1000 + self.row_nums = 500 self.max_restart_time = 20 self.restart_server_times = 10 self.dnode_index = 0 @@ -312,6 +312,7 @@ class TDTestCase: tdLog.notice(" === this is {}_th restart taosd === ".format(loop_time)) # begin start dnode + start = time.time() tdDnodes[self.stop_dnode_id-1].starttaosd() self.wait_start_dnode_OK(newTdSql) time.sleep(5) diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_follower.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_follower.py index edf2c7b488..19f9179e1e 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_follower.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_follower.py @@ -163,10 +163,25 @@ class TDTestCase: tdLog.notice(" ==== create database {} and insert rows execute end =====".format(dbname)) - def _get_stop_dnode_id(self , dbname , dnode_role): - + def _get_stop_dnode_id(self,dbname ,dnode_role): tdSql.query("show {}.vgroups".format(dbname)) vgroup_infos = tdSql.queryResult + status = False + for vgroup_info in vgroup_infos: + if "error" not in vgroup_info: + status = True + else: + status = False + while status!=True : + time.sleep(0.1) + tdSql.query("show {}.vgroups".format(dbname)) + vgroup_infos = tdSql.queryResult + for vgroup_info in vgroup_infos: + if "error" not in vgroup_info: + status = True + else: + status = False + # print(status) for vgroup_info in vgroup_infos: leader_infos = vgroup_info[3:-4] # print(vgroup_info) diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_leader.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_leader.py index 3d2be11b90..f7d80766c7 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_leader.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_leader.py @@ -162,11 +162,25 @@ class TDTestCase: tdLog.notice(" ==== create database {} and insert rows execute end =====".format(dbname)) - - def _get_stop_dnode_id(self , dbname , dnode_role): - + def _get_stop_dnode_id(self,dbname ,dnode_role): tdSql.query("show {}.vgroups".format(dbname)) vgroup_infos = tdSql.queryResult + status = False + for vgroup_info in vgroup_infos: + if "error" not in vgroup_info: + status = True + else: + status = False + while status!=True : + time.sleep(0.1) + tdSql.query("show {}.vgroups".format(dbname)) + vgroup_infos = tdSql.queryResult + for vgroup_info in vgroup_infos: + if "error" not in vgroup_info: + status = True + else: + status = False + # print(status) for vgroup_info in vgroup_infos: leader_infos = vgroup_info[3:-4] # print(vgroup_info) diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_all_dnodes.py 
b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_all_dnodes.py index 80de212af4..63c4942c9e 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_all_dnodes.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_all_dnodes.py @@ -181,20 +181,34 @@ class TDTestCase: tdLog.debug(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count , dbname)) count += 1 - def _get_stop_dnode_id(self , dbname , role): - + def _get_stop_dnode_id(self,dbname ,dnode_role): tdSql.query("show {}.vgroups".format(dbname)) vgroup_infos = tdSql.queryResult + status = False + for vgroup_info in vgroup_infos: + if "error" not in vgroup_info: + status = True + else: + status = False + while status!=True : + time.sleep(0.1) + tdSql.query("show {}.vgroups".format(dbname)) + vgroup_infos = tdSql.queryResult + for vgroup_info in vgroup_infos: + if "error" not in vgroup_info: + status = True + else: + status = False + # print(status) for vgroup_info in vgroup_infos: leader_infos = vgroup_info[3:-4] # print(vgroup_info) for ind ,role in enumerate(leader_infos): - if role == role: + if role == dnode_role: # print(ind,leader_infos) self.stop_dnode_id = leader_infos[ind-1] break - return self.stop_dnode_id def wait_stop_dnode_OK(self ,newTdSql ): diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_sync.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_sync.py index 68986e0f63..9bb6c6b066 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_sync.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_sync.py @@ -221,20 +221,35 @@ class TDTestCase: tdLog.debug(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count , dbname)) count += 1 - def _get_stop_dnode_id(self , dbname , dnode_role): - + + def _get_stop_dnode_id(self,dbname ,dnode_role): tdSql.query("show {}.vgroups".format(dbname)) vgroup_infos = tdSql.queryResult + status = False + for vgroup_info in vgroup_infos: + if "error" not in vgroup_info: + status = True + else: + status = False + while status!=True : + time.sleep(0.1) + tdSql.query("show {}.vgroups".format(dbname)) + vgroup_infos = tdSql.queryResult + for vgroup_info in vgroup_infos: + if "error" not in vgroup_info: + status = True + else: + status = False + # print(status) for vgroup_info in vgroup_infos: leader_infos = vgroup_info[3:-4] # print(vgroup_info) for ind ,role in enumerate(leader_infos): - if dnode_role == role: + if role == dnode_role: # print(ind,leader_infos) self.stop_dnode_id = leader_infos[ind-1] break - return self.stop_dnode_id def wait_stop_dnode_OK(self ,newTdSql): diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync.py index 74bd9f5a44..dba5e4582b 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync.py @@ -221,19 +221,34 @@ class TDTestCase: tdLog.notice(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count , dbname)) count += 1 - def 
_get_stop_dnode_id(self,dbname): + def _get_stop_dnode_id(self,dbname ,dnode_role): tdSql.query("show {}.vgroups".format(dbname)) vgroup_infos = tdSql.queryResult + status = False + for vgroup_info in vgroup_infos: + if "error" not in vgroup_info: + status = True + else: + status = False + while status!=True : + time.sleep(0.1) + tdSql.query("show {}.vgroups".format(dbname)) + vgroup_infos = tdSql.queryResult + for vgroup_info in vgroup_infos: + if "error" not in vgroup_info: + status = True + else: + status = False + # print(status) for vgroup_info in vgroup_infos: leader_infos = vgroup_info[3:-4] # print(vgroup_info) for ind ,role in enumerate(leader_infos): - if role =='follower': + if role == dnode_role: # print(ind,leader_infos) self.stop_dnode_id = leader_infos[ind-1] break - return self.stop_dnode_id def wait_stop_dnode_OK(self ,newTdSql): @@ -376,7 +391,7 @@ class TDTestCase: stablename = 'stable_{}'.format(loop) self.create_database(dbname = db_name ,replica_num= self.replica , vgroup_nums= 1) self.create_stable_insert_datas(dbname = db_name , stablename = stablename , tb_nums= 10 ,row_nums= 10 ) - self.stop_dnode_id = self._get_stop_dnode_id(db_name) + self.stop_dnode_id = self._get_stop_dnode_id(db_name ,"follower") # print("dnode_id:" , self.stop_dnode_id ) # check rows of datas @@ -423,7 +438,7 @@ class TDTestCase: newTdSql=tdCom.newTdSql() start = time.time() tdDnodes=cluster.dnodes - self.stop_dnode_id = self._get_stop_dnode_id(dbname) + self.stop_dnode_id = self._get_stop_dnode_id(dbname,"follower") # print("dnode_id:" , self.stop_dnode_id ) # begin restart dnode tdDnodes[self.stop_dnode_id-1].stoptaosd() diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync_force_stop.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync_force_stop.py index fa7e5292de..08c3eb54f8 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync_force_stop.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync_force_stop.py @@ -221,25 +221,40 @@ class TDTestCase: tdLog.notice(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count , dbname)) count += 1 - def _get_stop_dnode_id(self,dbname): + def _get_stop_dnode_id(self,dbname ,dnode_role): tdSql.query("show {}.vgroups".format(dbname)) vgroup_infos = tdSql.queryResult + status = False + for vgroup_info in vgroup_infos: + if "error" not in vgroup_info: + status = True + else: + status = False + while status!=True : + time.sleep(0.1) + tdSql.query("show {}.vgroups".format(dbname)) + vgroup_infos = tdSql.queryResult + for vgroup_info in vgroup_infos: + if "error" not in vgroup_info: + status = True + else: + status = False + # print(status) for vgroup_info in vgroup_infos: leader_infos = vgroup_info[3:-4] # print(vgroup_info) for ind ,role in enumerate(leader_infos): - if role =='follower': + if role == dnode_role: # print(ind,leader_infos) self.stop_dnode_id = leader_infos[ind-1] break - return self.stop_dnode_id - def wait_stop_dnode_OK(self): + def wait_stop_dnode_OK(self , newTdSql): def _get_status(): - newTdSql=tdCom.newTdSql() + # newTdSql=tdCom.newTdSql() status = "" newTdSql.query("show dnodes") @@ -259,10 +274,10 @@ class TDTestCase: # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode)) tdLog.notice("==== stop_dnode has stopped , id is 
{}".format(self.stop_dnode_id)) - def wait_start_dnode_OK(self): + def wait_start_dnode_OK(self , newTdSql): def _get_status(): - newTdSql=tdCom.newTdSql() + # newTdSql=tdCom.newTdSql() status = "" newTdSql.query("show dnodes") dnode_infos = newTdSql.queryResult @@ -370,12 +385,13 @@ class TDTestCase: def sync_run_case(self): # stop follower and insert datas , update tables and create new stables tdDnodes=cluster.dnodes + newTdSql=tdCom.newTdSql() for loop in range(self.loop_restart_times): db_name = "sync_db_{}".format(loop) stablename = 'stable_{}'.format(loop) self.create_database(dbname = db_name ,replica_num= self.replica , vgroup_nums= 1) self.create_stable_insert_datas(dbname = db_name , stablename = stablename , tb_nums= 10 ,row_nums= 10 ) - self.stop_dnode_id = self._get_stop_dnode_id(db_name) + self.stop_dnode_id = self._get_stop_dnode_id(db_name,"follower") # check rows of datas @@ -385,7 +401,7 @@ class TDTestCase: start = time.time() tdDnodes[self.stop_dnode_id-1].forcestop() - self.wait_stop_dnode_OK() + self.wait_stop_dnode_OK(newTdSql) # append rows of stablename when dnode stop @@ -403,7 +419,7 @@ class TDTestCase: # begin start dnode tdDnodes[self.stop_dnode_id-1].starttaosd() - self.wait_start_dnode_OK() + self.wait_start_dnode_OK(newTdSql) end = time.time() time_cost = int(end -start) if time_cost > self.max_restart_time: @@ -420,17 +436,21 @@ class TDTestCase: def _restart_dnode_of_db_unsync(dbname): start = time.time() tdDnodes=cluster.dnodes - self.stop_dnode_id = self._get_stop_dnode_id(dbname) + newTdSql=tdCom.newTdSql() + self.stop_dnode_id = self._get_stop_dnode_id(dbname,"follower") + # print(self.stop_dnode_id) while not self.stop_dnode_id: + # print(self.stop_dnode_id) time.sleep(0.5) - self.stop_dnode_id = self._get_stop_dnode_id(dbname) + self.stop_dnode_id = self._get_stop_dnode_id(dbname,"follower") # begin restart dnode # force stop taosd by kill -9 self.force_stop_dnode(self.stop_dnode_id) - self.wait_stop_dnode_OK() + self.wait_stop_dnode_OK(newTdSql) + os.system(" taos -s 'show dnodes;' ") tdDnodes[self.stop_dnode_id-1].starttaosd() - self.wait_start_dnode_OK() + self.wait_start_dnode_OK(newTdSql) end = time.time() time_cost = int(end-start) diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader.py index 2e4b299fc6..2d211a9584 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader.py @@ -177,26 +177,40 @@ class TDTestCase: continue - def _get_stop_dnode_id(self,dbname): - newTdSql=tdCom.newTdSql() - newTdSql.query("show {}.vgroups".format(dbname)) - vgroup_infos = newTdSql.queryResult + def _get_stop_dnode_id(self,dbname ,dnode_role): + tdSql.query("show {}.vgroups".format(dbname)) + vgroup_infos = tdSql.queryResult + status = False + for vgroup_info in vgroup_infos: + if "error" not in vgroup_info: + status = True + else: + status = False + while status!=True : + time.sleep(0.1) + tdSql.query("show {}.vgroups".format(dbname)) + vgroup_infos = tdSql.queryResult + for vgroup_info in vgroup_infos: + if "error" not in vgroup_info: + status = True + else: + status = False + # print(status) for vgroup_info in vgroup_infos: leader_infos = vgroup_info[3:-4] # print(vgroup_info) for ind ,role in enumerate(leader_infos): - if role =='leader': + if role == dnode_role: # print(ind,leader_infos) 
self.stop_dnode_id = leader_infos[ind-1] break - return self.stop_dnode_id - def wait_stop_dnode_OK(self): + def wait_stop_dnode_OK(self , newTdSql): def _get_status(): - newTdSql=tdCom.newTdSql() + # newTdSql=tdCom.newTdSql() status = "" newTdSql.query("show dnodes") @@ -216,10 +230,10 @@ class TDTestCase: # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode)) tdLog.notice("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id)) - def wait_start_dnode_OK(self): + def wait_start_dnode_OK(self,newTdSql): def _get_status(): - newTdSql=tdCom.newTdSql() + # newTdSql=tdCom.newTdSql() status = "" newTdSql.query("show dnodes") dnode_infos = newTdSql.queryResult @@ -316,7 +330,7 @@ class TDTestCase: tdLog.debug(" === database {} has write {} rows at least ====".format(dbname,total_rows/10)) - self.stop_dnode_id = self._get_stop_dnode_id(dbname) + self.stop_dnode_id = self._get_stop_dnode_id(dbname ,"leader") # prepare stop leader of database before_leader_infos = self.get_leader_infos(dbname) diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader_forece_stop.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader_forece_stop.py index 3aeb751ab1..918a628c6b 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader_forece_stop.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader_forece_stop.py @@ -304,26 +304,40 @@ class TDTestCase: tdLog.notice(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count , dbname)) count += 1 - def _get_stop_dnode_id(self,dbname): - newTdSql=tdCom.newTdSql() - newTdSql.query("show {}.vgroups".format(dbname)) - vgroup_infos = newTdSql.queryResult + def _get_stop_dnode_id(self,dbname ,dnode_role): + tdSql.query("show {}.vgroups".format(dbname)) + vgroup_infos = tdSql.queryResult + status = False + for vgroup_info in vgroup_infos: + if "error" not in vgroup_info: + status = True + else: + status = False + while status!=True : + time.sleep(0.1) + tdSql.query("show {}.vgroups".format(dbname)) + vgroup_infos = tdSql.queryResult + for vgroup_info in vgroup_infos: + if "error" not in vgroup_info: + status = True + else: + status = False + # print(status) for vgroup_info in vgroup_infos: leader_infos = vgroup_info[3:-4] # print(vgroup_info) for ind ,role in enumerate(leader_infos): - if role =='leader': + if role == dnode_role: # print(ind,leader_infos) self.stop_dnode_id = leader_infos[ind-1] break - return self.stop_dnode_id - def wait_stop_dnode_OK(self): + def wait_stop_dnode_OK(self ,newTdSql): def _get_status(): - newTdSql=tdCom.newTdSql() + # newTdSql=tdCom.newTdSql() status = "" newTdSql.query("show dnodes") @@ -343,10 +357,10 @@ class TDTestCase: # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode)) tdLog.notice("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id)) - def wait_start_dnode_OK(self): + def wait_start_dnode_OK(self,newTdSql): def _get_status(): - newTdSql=tdCom.newTdSql() + # newTdSql=tdCom.newTdSql() status = "" newTdSql.query("show dnodes") dnode_infos = newTdSql.queryResult @@ -421,12 +435,13 @@ class TDTestCase: def sync_run_case(self): # stop follower and insert datas , update tables and create new stables tdDnodes=cluster.dnodes + newTdSql=tdCom.newTdSql() for loop in range(self.loop_restart_times): db_name = "sync_db_{}".format(loop) 
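The _get_stop_dnode_id rewrite that repeats through these diffs boils down to two steps: poll "show <db>.vgroups" until no replica row still reports an error state, then walk the flattened (dnode id, role) pairs in the row to find the dnode holding the requested role. A minimal standalone sketch of that pattern, assuming a generic query() helper in place of the repo's tdSql wrapper and the same row layout the diffs rely on:

    # Hedged sketch: poll vgroup status, then pick the dnode currently holding the
    # given role ("leader" or "follower"). query() and the row layout (roles at the
    # offsets sliced by [3:-4], dnode id immediately before each role) are taken
    # from the diffs above as assumptions, not from a documented TDengine API.
    import time

    def get_stop_dnode_id(query, dbname, dnode_role, poll_interval=0.1):
        while True:
            rows = query("show {}.vgroups".format(dbname))
            if all("error" not in row for row in rows):
                break                      # every replica has settled into a role
            time.sleep(poll_interval)      # otherwise wait and re-read the vgroups
        for row in rows:
            leader_infos = row[3:-4]       # flattened (dnode_id, role, ...) fields
            for ind, role in enumerate(leader_infos):
                if role == dnode_role:
                    return leader_infos[ind - 1]
        return None

The call sites above now pass the role explicitly as "leader" or "follower", replacing the earlier comparison of the loop variable against itself, which was always true and so always matched the first entry.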
stablename = 'stable_{}'.format(loop) self.create_database(dbname = db_name ,replica_num= self.replica , vgroup_nums= 1) self.create_stable_insert_datas(dbname = db_name , stablename = stablename , tb_nums= 10 ,row_nums= 10 ) - self.stop_dnode_id = self._get_stop_dnode_id(db_name) + self.stop_dnode_id = self._get_stop_dnode_id(db_name ,"leader") # check rows of datas @@ -439,7 +454,7 @@ class TDTestCase: # force stop taosd by kill -9 self.force_stop_dnode(self.stop_dnode_id) - self.wait_stop_dnode_OK() + self.wait_stop_dnode_OK(newTdSql) # vote leaders check @@ -474,7 +489,7 @@ class TDTestCase: # begin start dnode start = time.time() tdDnodes[self.stop_dnode_id-1].starttaosd() - self.wait_start_dnode_OK() + self.wait_start_dnode_OK(newTdSql) end = time.time() time_cost = int(end -start) if time_cost > self.max_restart_time: @@ -491,14 +506,15 @@ class TDTestCase: def _restart_dnode_of_db_unsync(dbname): tdDnodes=cluster.dnodes - self.stop_dnode_id = self._get_stop_dnode_id(dbname) + newTdSql=tdCom.newTdSql() + self.stop_dnode_id = self._get_stop_dnode_id(dbname ,"leader" ) # begin restart dnode # force stop taosd by kill -9 # get leader info before stop before_leader_infos = self.get_leader_infos(db_name) self.force_stop_dnode(self.stop_dnode_id) - self.wait_stop_dnode_OK() + self.wait_stop_dnode_OK(newTdSql) # check revote leader when restart servers # get leader info after stop @@ -530,7 +546,7 @@ class TDTestCase: tdDnodes[self.stop_dnode_id-1].starttaosd() start = time.time() - self.wait_start_dnode_OK() + self.wait_start_dnode_OK(newTdSql) end = time.time() time_cost = int(end-start) diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_mnode3_insertdatas_querys.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_mnode3_insertdatas_querys.py index daa47c86e7..a0d8f21cff 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_mnode3_insertdatas_querys.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_mnode3_insertdatas_querys.py @@ -30,6 +30,7 @@ class TDTestCase: self.vgroups = 1 self.tb_nums = 10 self.row_nums = 2000 + self.query_times = 100 def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -160,8 +161,8 @@ class TDTestCase: - def check_insert_status(self, dbname, tb_nums , row_nums): - newTdSql=tdCom.newTdSql() + def check_insert_status(self,newTdSql , dbname, tb_nums , row_nums): + # newTdSql=tdCom.newTdSql() newTdSql.execute("use {}".format(dbname)) newTdSql.query("select count(*) from {}.{}".format(dbname,'stb1')) # tdSql.checkData(0 , 0 , tb_nums*row_nums) @@ -175,12 +176,12 @@ class TDTestCase: self.check_insert_status( db_name, tb_nums , row_nums) def loop_create_databases(self, times , tb_nums , row_nums): - + newTdSql=tdCom.newTdSql() for loop_time in range(times): tdLog.debug(" === create database and insert datas is going ,this is {}_th create === ".format(loop_time)) db_name = 'loop_db_{}'.format(loop_time) self.create_db_replica_3_insertdatas(db_name , self.replica , self.vgroups , tb_nums , row_nums) - self.check_insert_status( db_name, tb_nums , row_nums) + self.check_insert_status( newTdSql ,db_name, tb_nums , row_nums) def run(self): self.check_setup_cluster_status() @@ -199,7 +200,7 @@ class TDTestCase: time.sleep(0.1) tdSql.query(" show {}.stables ".format(self.db_name)) - reading = threading.Thread(target = self.loop_query_constantly, args=(1000,self.db_name , self.tb_nums , self.row_nums)) + reading = threading.Thread(target = self.loop_query_constantly, 
args=(self.query_times,self.db_name , self.tb_nums , self.row_nums)) reading.start() create_db = threading.Thread(target = self.loop_create_databases, args=(10, 10 , 10)) diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower.py index 2bfe544749..a8d6a2d4d1 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower.py @@ -182,25 +182,40 @@ class TDTestCase: tdLog.notice(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count , dbname)) count += 1 - def _get_stop_dnode_id(self,dbname): + def _get_stop_dnode_id(self,dbname ,dnode_role): tdSql.query("show {}.vgroups".format(dbname)) vgroup_infos = tdSql.queryResult + status = False + for vgroup_info in vgroup_infos: + if "error" not in vgroup_info: + status = True + else: + status = False + while status!=True : + time.sleep(0.1) + tdSql.query("show {}.vgroups".format(dbname)) + vgroup_infos = tdSql.queryResult + for vgroup_info in vgroup_infos: + if "error" not in vgroup_info: + status = True + else: + status = False + # print(status) for vgroup_info in vgroup_infos: leader_infos = vgroup_info[3:-4] # print(vgroup_info) for ind ,role in enumerate(leader_infos): - if role =='follower': + if role == dnode_role: # print(ind,leader_infos) self.stop_dnode_id = leader_infos[ind-1] break - return self.stop_dnode_id - def wait_stop_dnode_OK(self): + def wait_stop_dnode_OK(self ,newTdSql): def _get_status(): - newTdSql=tdCom.newTdSql() + # newTdSql=tdCom.newTdSql() status = "" newTdSql.query("show dnodes") @@ -220,10 +235,10 @@ class TDTestCase: # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode)) tdLog.notice("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id)) - def wait_start_dnode_OK(self): + def wait_start_dnode_OK(self ,newTdSql): def _get_status(): - newTdSql=tdCom.newTdSql() + # newTdSql=tdCom.newTdSql() status = "" newTdSql.query("show dnodes") dnode_infos = newTdSql.queryResult @@ -372,6 +387,7 @@ class TDTestCase: def stop_follower_when_query_going(self): tdDnodes = cluster.dnodes + newTdSql=tdCom.newTdSql() self.create_database(dbname = self.db_name ,replica_num= self.replica , vgroup_nums= 1) self.create_stable_insert_datas(dbname = self.db_name , stablename = "stb1" , tb_nums= self.tb_nums ,row_nums= self.row_nums) @@ -381,13 +397,13 @@ class TDTestCase: # force stop follower for loop in range(self.loop_restart_times): tdLog.debug(" ==== this is {}_th restart follower of database {} ==== ".format(loop ,self.db_name)) - self.stop_dnode_id = self._get_stop_dnode_id(self.db_name) + self.stop_dnode_id = self._get_stop_dnode_id(self.db_name,"follower" ) tdDnodes[self.stop_dnode_id-1].stoptaosd() - self.wait_stop_dnode_OK() + self.wait_stop_dnode_OK(newTdSql) start = time.time() tdDnodes[self.stop_dnode_id-1].starttaosd() - self.wait_start_dnode_OK() + self.wait_start_dnode_OK(newTdSql) end = time.time() time_cost = int(end-start) diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower_force_stop.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower_force_stop.py index 2a4e43d904..13ac24c000 100644 --- 
a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower_force_stop.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower_force_stop.py @@ -182,19 +182,34 @@ class TDTestCase: tdLog.notice(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count , dbname)) count += 1 - def _get_stop_dnode_id(self,dbname): + def _get_stop_dnode_id(self,dbname ,dnode_role): tdSql.query("show {}.vgroups".format(dbname)) vgroup_infos = tdSql.queryResult + status = False + for vgroup_info in vgroup_infos: + if "error" not in vgroup_info: + status = True + else: + status = False + while status!=True : + time.sleep(0.1) + tdSql.query("show {}.vgroups".format(dbname)) + vgroup_infos = tdSql.queryResult + for vgroup_info in vgroup_infos: + if "error" not in vgroup_info: + status = True + else: + status = False + # print(status) for vgroup_info in vgroup_infos: leader_infos = vgroup_info[3:-4] # print(vgroup_info) for ind ,role in enumerate(leader_infos): - if role =='follower': + if role == dnode_role: # print(ind,leader_infos) self.stop_dnode_id = leader_infos[ind-1] break - return self.stop_dnode_id def wait_stop_dnode_OK(self): diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader.py index 03dbe9cac8..f72dfb40eb 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader.py @@ -182,19 +182,34 @@ class TDTestCase: tdLog.notice(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count , dbname)) count += 1 - def _get_stop_dnode_id(self,dbname): + def _get_stop_dnode_id(self,dbname ,dnode_role): tdSql.query("show {}.vgroups".format(dbname)) vgroup_infos = tdSql.queryResult + status = False + for vgroup_info in vgroup_infos: + if "error" not in vgroup_info: + status = True + else: + status = False + while status!=True : + time.sleep(0.1) + tdSql.query("show {}.vgroups".format(dbname)) + vgroup_infos = tdSql.queryResult + for vgroup_info in vgroup_infos: + if "error" not in vgroup_info: + status = True + else: + status = False + # print(status) for vgroup_info in vgroup_infos: leader_infos = vgroup_info[3:-4] # print(vgroup_info) for ind ,role in enumerate(leader_infos): - if role =='leader': + if role == dnode_role: # print(ind,leader_infos) self.stop_dnode_id = leader_infos[ind-1] break - return self.stop_dnode_id def wait_stop_dnode_OK(self,newTdSql): diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader_force_stop.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader_force_stop.py index 5ddcf1c70e..191b8eb104 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader_force_stop.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader_force_stop.py @@ -182,25 +182,40 @@ class TDTestCase: tdLog.notice(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count , dbname)) count += 1 - def _get_stop_dnode_id(self,dbname): + def _get_stop_dnode_id(self,dbname ,dnode_role): tdSql.query("show {}.vgroups".format(dbname)) vgroup_infos = tdSql.queryResult + status = False + for vgroup_info in 
vgroup_infos: + if "error" not in vgroup_info: + status = True + else: + status = False + while status!=True : + time.sleep(0.1) + tdSql.query("show {}.vgroups".format(dbname)) + vgroup_infos = tdSql.queryResult + for vgroup_info in vgroup_infos: + if "error" not in vgroup_info: + status = True + else: + status = False + # print(status) for vgroup_info in vgroup_infos: leader_infos = vgroup_info[3:-4] # print(vgroup_info) for ind ,role in enumerate(leader_infos): - if role =='leader': + if role == dnode_role: # print(ind,leader_infos) self.stop_dnode_id = leader_infos[ind-1] break - return self.stop_dnode_id - def wait_stop_dnode_OK(self): + def wait_stop_dnode_OK(self ,newTdSql): def _get_status(): - newTdSql=tdCom.newTdSql() + # newTdSql=tdCom.newTdSql() status = "" newTdSql.query("show dnodes") @@ -242,10 +257,10 @@ class TDTestCase: break return check_status - def wait_start_dnode_OK(self): + def wait_start_dnode_OK(self ,newTdSql): def _get_status(): - newTdSql=tdCom.newTdSql() + # newTdSql=tdCom.newTdSql() status = "" newTdSql.query("show dnodes") dnode_infos = newTdSql.queryResult @@ -406,6 +421,7 @@ class TDTestCase: def stop_follower_when_query_going(self): tdDnodes = cluster.dnodes + newTdSql=tdCom.newTdSql() self.create_database(dbname = self.db_name ,replica_num= self.replica , vgroup_nums= 1) self.create_stable_insert_datas(dbname = self.db_name , stablename = "stb1" , tb_nums= self.tb_nums ,row_nums= self.row_nums) @@ -419,7 +435,7 @@ class TDTestCase: # get leader info before stop before_leader_infos = self.get_leader_infos(self.db_name) - self.stop_dnode_id = self._get_stop_dnode_id(self.db_name) + self.stop_dnode_id = self._get_stop_dnode_id(self.db_name ,"leader") self.force_stop_dnode(self.stop_dnode_id) @@ -437,11 +453,11 @@ class TDTestCase: time_cost = end - start tdLog.debug(" ==== revote leader of database {} cost time {} ====".format(self.db_name , time_cost)) - self.wait_stop_dnode_OK() + self.wait_stop_dnode_OK(newTdSql) start = time.time() tdDnodes[self.stop_dnode_id-1].starttaosd() - self.wait_start_dnode_OK() + self.wait_start_dnode_OK(newTdSql) end = time.time() time_cost = int(end-start) diff --git a/tests/system-test/clusterCase.sh b/tests/system-test/clusterCase.sh index cfc44f7f95..a4bec11d03 100755 --- a/tests/system-test/clusterCase.sh +++ b/tests/system-test/clusterCase.sh @@ -19,4 +19,28 @@ python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateStb.py -N 5 # python3 ./test.py -f 6-cluster/5dnode3mnodeRestartDnodeInsertData.py -N 5 -M 3 python3 ./test.py -f 6-cluster/5dnode3mnodeAdd1Ddnoe.py -N 6 -M 3 -C 5 - +# test case of vnode +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_createDb_replica1.py -N 4 -M 1 +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas.py -N 4 -M 1 +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups.py -N 4 -M 1 +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups_stopOne.py -N 4 -M 1 +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas.py -N 4 -M 1 +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_sync.py -N 4 -M 1 +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync.py -N 4 -M 1 +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync_force_stop.py -N 4 -M 1 +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader.py -N 4 -M 1 +python3 ./test.py -f 
6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader_forece_stop.py -N 4 -M 1 +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower.py  -N 4 -M 1 +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower_force_stop.py  -N 4 -M 1 +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader.py -N 4 -M 1 +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader_force_stop.py -N 4 -M 1 +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas.py -N 4 -M 1 +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas_querys.py -N 4 -M 1  +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys.py -N 4 -M 1  +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_follower.py -N 4 -M 1  +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_leader.py -N 4 -M 1 +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_all_vnode.py   -N 4 -M 1  +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_all_dnodes.py -N 4 -M 1  +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_force_stop_all_dnodes.py -N 4 -M 1  +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_mnode3_insertdatas_querys.py -N 4 -M 1 + From ca1691bd12f9ef2d884351c0fc4265f63c9b21e9 Mon Sep 17 00:00:00 2001 From: "wenzhouwww@live.cn" Date: Thu, 28 Jul 2022 17:26:27 +0800 Subject: [PATCH 15/63] run CI test --- tests/system-test/fulltest.sh | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh index f074bd8850..f4532a7def 100755 --- a/tests/system-test/fulltest.sh +++ b/tests/system-test/fulltest.sh @@ -406,3 +406,29 @@ python3 ./test.py -f 2-query/function_null.py -Q 3 python3 ./test.py -f 2-query/count_partition.py -Q 3 python3 ./test.py -f 2-query/max_partition.py -Q 3 python3 ./test.py -f 2-query/last_row.py -Q 3 + + +# test case of vnode +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_createDb_replica1.py -N 4 -M 1 +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas.py -N 4 -M 1 +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups.py -N 4 -M 1 +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups_stopOne.py -N 4 -M 1 +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas.py -N 4 -M 1 +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_sync.py -N 4 -M 1 +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync.py -N 4 -M 1 +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync_force_stop.py -N 4 -M 1 +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader.py -N 4 -M 1 +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader_forece_stop.py -N 4 -M 1 +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower.py  -N 4 -M 1 +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower_force_stop.py  -N 4 -M 1 +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader.py -N 4 -M 1 
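A second pattern running through these diffs is moving tdCom.newTdSql() out of the wait_stop_dnode_OK / wait_start_dnode_OK helpers: each restart loop or worker thread now opens one connection up front and passes it down, instead of reconnecting on every poll. A minimal sketch of that shape, assuming a placeholder connect() factory and dnode hooks rather than the repo's real tdCom and cluster helpers:

    # Hedged sketch of the connection-reuse shape: one client connection per worker,
    # handed to the polling helper, rather than a new connection inside every call.
    # connect(), the dnode object, and the "show dnodes" column contents are
    # placeholders/assumptions, not the repo's actual helpers or schema.
    import time

    def wait_dnode_status(conn, dnode_id, expected, poll_interval=0.1):
        # poll "show dnodes" on the shared connection until the target dnode
        # reports the expected status string
        while True:
            for row in conn.query("show dnodes"):
                if row[0] == dnode_id and expected in row:
                    return
            time.sleep(poll_interval)

    def restart_loop(connect, dnode, dnode_id, loops):
        conn = connect()                   # one connection for the whole loop
        for _ in range(loops):
            dnode.stoptaosd()
            wait_dnode_status(conn, dnode_id, "offline")
            dnode.starttaosd()
            wait_dnode_status(conn, dnode_id, "ready")

The same idea appears in the reader and writer threads above, where loop_query_constantly and check_insert_status now take a newTdSql handle created once by the calling thread.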
+python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader_force_stop.py -N 4 -M 1 +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas.py -N 4 -M 1 +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas_querys.py -N 4 -M 1  +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys.py -N 4 -M 1  +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_follower.py -N 4 -M 1  +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_leader.py -N 4 -M 1 +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_all_vnode.py   -N 4 -M 1  +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_all_dnodes.py -N 4 -M 1  +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_force_stop_all_dnodes.py -N 4 -M 1  +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_mnode3_insertdatas_querys.py -N 4 -M 1 \ No newline at end of file From d132a6d4f31bc261c0a1c1c4f9bb8fdfda5a1c92 Mon Sep 17 00:00:00 2001 From: "wenzhouwww@live.cn" Date: Thu, 28 Jul 2022 17:58:22 +0800 Subject: [PATCH 16/63] update case --- ...eplica3_querydatas_stop_follower_force_stop.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower_force_stop.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower_force_stop.py index 13ac24c000..8e5d11999c 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower_force_stop.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower_force_stop.py @@ -212,10 +212,10 @@ class TDTestCase: return self.stop_dnode_id - def wait_stop_dnode_OK(self): + def wait_stop_dnode_OK(self ,newTdSql): def _get_status(): - newTdSql=tdCom.newTdSql() + # newTdSql=tdCom.newTdSql() status = "" newTdSql.query("show dnodes") @@ -235,10 +235,10 @@ class TDTestCase: # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode)) tdLog.notice("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id)) - def wait_start_dnode_OK(self): + def wait_start_dnode_OK(self ,newTdSql): def _get_status(): - newTdSql=tdCom.newTdSql() + # newTdSql=tdCom.newTdSql() status = "" newTdSql.query("show dnodes") dnode_infos = newTdSql.queryResult @@ -394,15 +394,16 @@ class TDTestCase: self.thread_list = self.multi_thread_query_task(10 ,self.db_name ,'stb1' ) # force stop follower + newTdSql=tdCom.newTdSql() for loop in range(self.loop_restart_times): tdLog.debug(" ==== this is {}_th restart follower of database {} ==== ".format(loop ,self.db_name)) - self.stop_dnode_id = self._get_stop_dnode_id(self.db_name) + self.stop_dnode_id = self._get_stop_dnode_id(self.db_name,"follower" ) self.force_stop_dnode(self.stop_dnode_id) - self.wait_stop_dnode_OK() + self.wait_stop_dnode_OK(newTdSql) start = time.time() tdDnodes[self.stop_dnode_id-1].starttaosd() - self.wait_start_dnode_OK() + self.wait_start_dnode_OK(newTdSql) end = time.time() time_cost = int(end-start) From d42a76aeacc39ddc82040594cd473a59619568c3 Mon Sep 17 00:00:00 2001 From: "wenzhouwww@live.cn" Date: Thu, 28 Jul 2022 18:02:09 +0800 Subject: [PATCH 17/63] update case --- .../4dnode1mnode_basic_replica3_insertdatas_stop_leader.py | 2 +- 1 
file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader.py index 2d211a9584..c941bbcdcc 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader.py @@ -351,7 +351,7 @@ class TDTestCase: self.current_thread.join() tdDnodes[self.stop_dnode_id-1].starttaosd() - self.wait_start_dnode_OK() + self.wait_start_dnode_OK(newTdSql) tdSql.query(" select count(*) from {}.{} ".format(dbname,"stb1")) tdLog.debug(" ==== expected insert {} rows of database {} , really is {}".format(total_rows, dbname , tdSql.queryResult[0][0])) From f67c6213cec581efa1eb7c74bbe1dabcbfdc9204 Mon Sep 17 00:00:00 2001 From: "wenzhouwww@live.cn" Date: Thu, 28 Jul 2022 18:06:53 +0800 Subject: [PATCH 18/63] run cluster case --- tests/system-test/fulltest.sh | 406 ---------------------------------- 1 file changed, 406 deletions(-) diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh index f4532a7def..735398592d 100755 --- a/tests/system-test/fulltest.sh +++ b/tests/system-test/fulltest.sh @@ -2,412 +2,6 @@ set -e set -x -python3 ./test.py -f 0-others/taosShell.py -python3 ./test.py -f 0-others/taosShellError.py -python3 ./test.py -f 0-others/taosShellNetChk.py -python3 ./test.py -f 0-others/telemetry.py -python3 ./test.py -f 0-others/taosdMonitor.py -python3 ./test.py -f 0-others/udfTest.py -python3 ./test.py -f 0-others/udf_create.py -python3 ./test.py -f 0-others/udf_restart_taosd.py -python3 ./test.py -f 0-others/cachemodel.py -python3 ./test.py -f 0-others/udf_cfg1.py -python3 ./test.py -f 0-others/udf_cfg2.py - -python3 ./test.py -f 0-others/sysinfo.py -python3 ./test.py -f 0-others/user_control.py -python3 ./test.py -f 0-others/fsync.py - -python3 ./test.py -f 1-insert/influxdb_line_taosc_insert.py -python3 ./test.py -f 1-insert/opentsdb_telnet_line_taosc_insert.py -python3 ./test.py -f 1-insert/opentsdb_json_taosc_insert.py -python3 ./test.py -f 1-insert/test_stmt_muti_insert_query.py -python3 ./test.py -f 1-insert/test_stmt_set_tbname_tag.py -python3 ./test.py -f 1-insert/alter_stable.py -python3 ./test.py -f 1-insert/alter_table.py -python3 ./test.py -f 1-insert/insertWithMoreVgroup.py -python3 ./test.py -f 1-insert/table_comment.py -python3 ./test.py -f 1-insert/time_range_wise.py -python3 ./test.py -f 1-insert/block_wise.py -python3 ./test.py -f 1-insert/create_retentions.py -python3 ./test.py -f 1-insert/table_param_ttl.py - -python3 ./test.py -f 1-insert/update_data_muti_rows.py - - -python3 ./test.py -f 2-query/abs.py -python3 ./test.py -f 2-query/abs.py -R -python3 ./test.py -f 2-query/and_or_for_byte.py -python3 ./test.py -f 2-query/and_or_for_byte.py -R -python3 ./test.py -f 2-query/apercentile.py -python3 ./test.py -f 2-query/apercentile.py -R -python3 ./test.py -f 2-query/arccos.py -python3 ./test.py -f 2-query/arccos.py -R -python3 ./test.py -f 2-query/arcsin.py -python3 ./test.py -f 2-query/arcsin.py -R -python3 ./test.py -f 2-query/arctan.py -python3 ./test.py -f 2-query/arctan.py -R -python3 ./test.py -f 2-query/avg.py -python3 ./test.py -f 2-query/avg.py -R -python3 ./test.py -f 2-query/between.py -python3 ./test.py -f 2-query/between.py -R -python3 ./test.py -f 2-query/bottom.py -python3 ./test.py -f 2-query/bottom.py -R -python3 ./test.py -f 2-query/cast.py -python3 
./test.py -f 2-query/cast.py -R -python3 ./test.py -f 2-query/ceil.py -python3 ./test.py -f 2-query/ceil.py -R -python3 ./test.py -f 2-query/char_length.py -python3 ./test.py -f 2-query/char_length.py -R -python3 ./test.py -f 2-query/check_tsdb.py -python3 ./test.py -f 2-query/check_tsdb.py -R - -python3 ./test.py -f 1-insert/update_data.py - -python3 ./test.py -f 1-insert/delete_data.py -python3 ./test.py -f 2-query/db.py - - -python3 ./test.py -f 2-query/db.py -python3 ./test.py -f 2-query/distinct.py -python3 ./test.py -f 2-query/varchar.py -python3 ./test.py -f 2-query/ltrim.py -python3 ./test.py -f 2-query/rtrim.py -python3 ./test.py -f 2-query/length.py -python3 ./test.py -f 2-query/upper.py -python3 ./test.py -f 2-query/lower.py -python3 ./test.py -f 2-query/join.py -python3 ./test.py -f 2-query/join2.py -python3 ./test.py -f 2-query/substr.py -python3 ./test.py -f 2-query/union.py -python3 ./test.py -f 2-query/union1.py -python3 ./test.py -f 2-query/concat.py -python3 ./test.py -f 2-query/concat2.py -python3 ./test.py -f 2-query/concat_ws.py -python3 ./test.py -f 2-query/concat_ws2.py -python3 ./test.py -f 2-query/spread.py -python3 ./test.py -f 2-query/hyperloglog.py -python3 ./test.py -f 2-query/explain.py -python3 ./test.py -f 2-query/leastsquares.py -python3 ./test.py -f 2-query/histogram.py - - -python3 ./test.py -f 2-query/timezone.py -python3 ./test.py -f 2-query/Now.py -python3 ./test.py -f 2-query/Today.py -python3 ./test.py -f 2-query/max.py -python3 ./test.py -f 2-query/min.py -python3 ./test.py -f 2-query/count.py -python3 ./test.py -f 2-query/last.py -python3 ./test.py -f 2-query/first.py -python3 ./test.py -f 2-query/To_iso8601.py -python3 ./test.py -f 2-query/To_unixtimestamp.py -python3 ./test.py -f 2-query/timetruncate.py -python3 ./test.py -f 2-query/diff.py -python3 ./test.py -f 2-query/Timediff.py -python3 ./test.py -f 2-query/json_tag.py - -python3 ./test.py -f 2-query/top.py -python3 ./test.py -f 2-query/percentile.py -python3 ./test.py -f 2-query/floor.py -python3 ./test.py -f 2-query/round.py -python3 ./test.py -f 2-query/log.py -python3 ./test.py -f 2-query/pow.py -python3 ./test.py -f 2-query/sqrt.py -python3 ./test.py -f 2-query/sin.py -python3 ./test.py -f 2-query/cos.py -python3 ./test.py -f 2-query/tan.py -python3 ./test.py -f 2-query/query_cols_tags_and_or.py -# python3 ./test.py -f 2-query/nestedQuery.py -# TD-15983 subquery output duplicate name column. 
-# Please Xiangyang Guo modify the following script -# python3 ./test.py -f 2-query/nestedQuery_str.py - -python3 ./test.py -f 2-query/elapsed.py -python3 ./test.py -f 2-query/csum.py -python3 ./test.py -f 2-query/mavg.py -python3 ./test.py -f 2-query/diff.py -python3 ./test.py -f 2-query/sample.py -python3 ./test.py -f 2-query/function_diff.py -python3 ./test.py -f 2-query/unique.py -python3 ./test.py -f 2-query/stateduration.py -python3 ./test.py -f 2-query/function_stateduration.py -python3 ./test.py -f 2-query/statecount.py -python3 ./test.py -f 2-query/tail.py -python3 ./test.py -f 2-query/ttl_comment.py -python3 ./test.py -f 2-query/distribute_agg_count.py -python3 ./test.py -f 2-query/distribute_agg_max.py -python3 ./test.py -f 2-query/distribute_agg_min.py -python3 ./test.py -f 2-query/distribute_agg_sum.py -python3 ./test.py -f 2-query/distribute_agg_spread.py -python3 ./test.py -f 2-query/distribute_agg_apercentile.py -python3 ./test.py -f 2-query/distribute_agg_avg.py -python3 ./test.py -f 2-query/distribute_agg_stddev.py -python3 ./test.py -f 2-query/twa.py -python3 ./test.py -f 2-query/irate.py -python3 ./test.py -f 2-query/count_partition.py -python3 ./test.py -f 2-query/function_null.py -python3 ./test.py -f 2-query/queryQnode.py -python3 ./test.py -f 2-query/max_partition.py -python3 ./test.py -f 2-query/last_row.py - -python3 ./test.py -f 6-cluster/5dnode1mnode.py -python3 ./test.py -f 6-cluster/5dnode2mnode.py -N 5 -M 3 -python3 ./test.py -f 6-cluster/5dnode3mnodeStop.py -N 5 -M 3 -python3 ./test.py -f 6-cluster/5dnode3mnodeStop2Follower.py -N 5 -M 3 -python3 ./test.py -f 6-cluster/5dnode3mnodeStopLoop.py -N 5 -M 3 -python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateDb.py -N 5 -M 3 -python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDb.py -N 5 -M 3 -python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateDb.py -N 5 -M 3 - -# python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateStb.py -N 5 -M 3 -python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateStb.py -N 5 -M 3 -python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateStb.py -N 5 -M 3 - -python3 ./test.py -f 6-cluster/5dnode3mnodeRestartDnodeInsertData.py -N 5 -M 3 -# python3 ./test.py -f 6-cluster/5dnode3mnodeRestartMnodeInsertData.py -N 5 -M 3 -# python3 ./test.py -f 6-cluster/5dnode3mnodeRestartVnodeInsertData.py -N 5 -M 3 - -python3 ./test.py -f 6-cluster/5dnode3mnodeAdd1Ddnoe.py -N 6 -M 3 -C 5 -# BUG python3 ./test.py -f 6-cluster/5dnode3mnodeStopInsert.py -# python3 ./test.py -f 6-cluster/5dnode3mnodeDrop.py -N 5 -# python3 test.py -f 6-cluster/5dnode3mnodeStopConnect.py -N 5 -M 3 - - -python3 ./test.py -f 7-tmq/basic5.py -python3 ./test.py -f 7-tmq/subscribeDb.py -python3 ./test.py -f 7-tmq/subscribeDb0.py -python3 ./test.py -f 7-tmq/subscribeDb1.py -python3 ./test.py -f 7-tmq/subscribeDb2.py -python3 ./test.py -f 7-tmq/subscribeDb3.py -#python3 ./test.py -f 7-tmq/subscribeDb4.py -python3 ./test.py -f 7-tmq/subscribeStb.py -python3 ./test.py -f 7-tmq/subscribeStb0.py -python3 ./test.py -f 7-tmq/subscribeStb1.py -python3 ./test.py -f 7-tmq/subscribeStb2.py -python3 ./test.py -f 7-tmq/subscribeStb3.py -python3 ./test.py -f 7-tmq/subscribeStb4.py -python3 ./test.py -f 7-tmq/db.py -python3 ./test.py -f 7-tmq/tmqError.py -python3 ./test.py -f 7-tmq/schema.py -python3 ./test.py -f 7-tmq/stbFilter.py -python3 ./test.py -f 7-tmq/tmqCheckData.py -python3 ./test.py -f 7-tmq/tmqCheckData1.py -#python3 ./test.py -f 7-tmq/tmq3mnodeSwitch.py -N 5 
-python3 ./test.py -f 7-tmq/tmqConsumerGroup.py
-python3 ./test.py -f 7-tmq/tmqShow.py
-python3 ./test.py -f 7-tmq/tmqAlterSchema.py
-python3 ./test.py -f 7-tmq/tmqConsFromTsdb.py
-python3 ./test.py -f 7-tmq/tmqConsFromTsdb1.py
-python3 ./test.py -f 7-tmq/tmqConsFromTsdb-mutilVg.py
-python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg.py
-python3 ./test.py -f 7-tmq/tmqConsFromTsdb-1ctb.py
-python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-1ctb.py
-python3 ./test.py -f 7-tmq/tmqConsFromTsdb-1ctb-funcNFilter.py
-python3 ./test.py -f 7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb-funcNFilter.py
-python3 ./test.py -f 7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb.py
-python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-1ctb-funcNFilter.py
-python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb-funcNFilter.py
-python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb.py
-python3 ./test.py -f 7-tmq/tmqAutoCreateTbl.py
-python3 ./test.py -f 7-tmq/tmqDnodeRestart.py
-python3 ./test.py -f 7-tmq/tmqUpdate-1ctb.py
-python3 ./test.py -f 7-tmq/tmqUpdateWithConsume.py
-python3 ./test.py -f 7-tmq/tmqUpdate-multiCtb-snapshot0.py
-python3 ./test.py -f 7-tmq/tmqUpdate-multiCtb-snapshot1.py
-python3 ./test.py -f 7-tmq/tmqDelete-1ctb.py
-python3 ./test.py -f 7-tmq/tmqDelete-multiCtb.py
-python3 ./test.py -f 7-tmq/tmqDropStb.py
-python3 ./test.py -f 7-tmq/tmqDropStbCtb.py
-python3 ./test.py -f 7-tmq/tmqDropNtb-snapshot0.py
-python3 ./test.py -f 7-tmq/tmqDropNtb-snapshot1.py
-python3 ./test.py -f 7-tmq/tmqUdf.py
-python3 ./test.py -f 7-tmq/tmqUdf-multCtb-snapshot0.py
-python3 ./test.py -f 7-tmq/tmqUdf-multCtb-snapshot1.py
-python3 ./test.py -f 7-tmq/stbTagFilter-1ctb.py
-python3 ./test.py -f 7-tmq/dataFromTsdbNWal.py
-python3 ./test.py -f 7-tmq/dataFromTsdbNWal-multiCtb.py
-# python3 ./test.py -f 7-tmq/stbTagFilter-multiCtb.py
-
-#------------querPolicy 2-----------
-
-python3 ./test.py -f 2-query/between.py -Q 2
-python3 ./test.py -f 2-query/distinct.py -Q 2
-python3 ./test.py -f 2-query/varchar.py -Q 2
-python3 ./test.py -f 2-query/ltrim.py -Q 2
-python3 ./test.py -f 2-query/rtrim.py -Q 2
-python3 ./test.py -f 2-query/length.py -Q 2
-python3 ./test.py -f 2-query/char_length.py -Q 2
-python3 ./test.py -f 2-query/upper.py -Q 2
-python3 ./test.py -f 2-query/lower.py -Q 2
-python3 ./test.py -f 2-query/join.py -Q 2
-python3 ./test.py -f 2-query/join2.py -Q 2
-python3 ./test.py -f 2-query/cast.py -Q 2
-python3 ./test.py -f 2-query/substr.py -Q 2
-python3 ./test.py -f 2-query/union.py -Q 2
-python3 ./test.py -f 2-query/union1.py -Q 2
-python3 ./test.py -f 2-query/concat.py -Q 2
-python3 ./test.py -f 2-query/concat2.py -Q 2
-python3 ./test.py -f 2-query/concat_ws.py -Q 2
-python3 ./test.py -f 2-query/concat_ws2.py -Q 2
-python3 ./test.py -f 2-query/check_tsdb.py -Q 2
-python3 ./test.py -f 2-query/spread.py -Q 2
-python3 ./test.py -f 2-query/hyperloglog.py -Q 2
-python3 ./test.py -f 2-query/explain.py -Q 2
-python3 ./test.py -f 2-query/leastsquares.py -Q 2
-python3 ./test.py -f 2-query/timezone.py -Q 2
-python3 ./test.py -f 2-query/Now.py -Q 2
-python3 ./test.py -f 2-query/Today.py -Q 2
-python3 ./test.py -f 2-query/max.py -Q 2
-python3 ./test.py -f 2-query/min.py -Q 2
-python3 ./test.py -f 2-query/count.py -Q 2
-python3 ./test.py -f 2-query/last.py -Q 2
-python3 ./test.py -f 2-query/first.py -Q 2
-python3 ./test.py -f 2-query/To_iso8601.py -Q 2
-python3 ./test.py -f 2-query/To_unixtimestamp.py -Q 2
-python3 ./test.py -f 2-query/timetruncate.py -Q 2
-python3 ./test.py -f 2-query/diff.py -Q 2
-python3 ./test.py -f 2-query/Timediff.py -Q 2
-python3 ./test.py -f 2-query/json_tag.py -Q 2
-python3 ./test.py -f 2-query/top.py -Q 2
-python3 ./test.py -f 2-query/bottom.py -Q 2
-python3 ./test.py -f 2-query/percentile.py -Q 2
-python3 ./test.py -f 2-query/apercentile.py -Q 2
-python3 ./test.py -f 2-query/abs.py -Q 2
-python3 ./test.py -f 2-query/ceil.py -Q 2
-python3 ./test.py -f 2-query/floor.py -Q 2
-python3 ./test.py -f 2-query/round.py -Q 2
-python3 ./test.py -f 2-query/log.py -Q 2
-python3 ./test.py -f 2-query/pow.py -Q 2
-python3 ./test.py -f 2-query/sqrt.py -Q 2
-python3 ./test.py -f 2-query/sin.py -Q 2
-python3 ./test.py -f 2-query/cos.py -Q 2
-python3 ./test.py -f 2-query/tan.py -Q 2
-python3 ./test.py -f 2-query/arcsin.py -Q 2
-python3 ./test.py -f 2-query/arccos.py -Q 2
-python3 ./test.py -f 2-query/arctan.py -Q 2
-python3 ./test.py -f 2-query/query_cols_tags_and_or.py -Q 2
-
-# python3 ./test.py -f 2-query/nestedQuery.py -Q 2
-# python3 ./test.py -f 2-query/nestedQuery_str.py -Q 2
-
-python3 ./test.py -f 2-query/avg.py -Q 2
-# python3 ./test.py -f 2-query/elapsed.py -Q 2
-python3 ./test.py -f 2-query/csum.py -Q 2
-python3 ./test.py -f 2-query/mavg.py -Q 2
-python3 ./test.py -f 2-query/diff.py -Q 2
-python3 ./test.py -f 2-query/sample.py -Q 2
-python3 ./test.py -f 2-query/function_diff.py -Q 2
-python3 ./test.py -f 2-query/unique.py -Q 2
-python3 ./test.py -f 2-query/stateduration.py -Q 2
-python3 ./test.py -f 2-query/function_stateduration.py -Q 2
-python3 ./test.py -f 2-query/statecount.py -Q 2
-python3 ./test.py -f 2-query/tail.py -Q 2
-python3 ./test.py -f 2-query/ttl_comment.py -Q 2
-python3 ./test.py -f 2-query/distribute_agg_count.py -Q 2
-python3 ./test.py -f 2-query/distribute_agg_max.py -Q 2
-python3 ./test.py -f 2-query/distribute_agg_min.py -Q 2
-python3 ./test.py -f 2-query/distribute_agg_sum.py -Q 2
-python3 ./test.py -f 2-query/distribute_agg_spread.py -Q 2
-python3 ./test.py -f 2-query/distribute_agg_apercentile.py -Q 2
-python3 ./test.py -f 2-query/distribute_agg_avg.py -Q 2
-python3 ./test.py -f 2-query/distribute_agg_stddev.py -Q 2
-python3 ./test.py -f 2-query/twa.py -Q 2
-python3 ./test.py -f 2-query/irate.py -Q 2
-python3 ./test.py -f 2-query/function_null.py -Q 2
-python3 ./test.py -f 2-query/count_partition.py -Q 2
-python3 ./test.py -f 2-query/max_partition.py -Q 2
-python3 ./test.py -f 2-query/last_row.py -Q 2
-
-#------------querPolicy 3-----------
-
-python3 ./test.py -f 2-query/between.py -Q 3
-python3 ./test.py -f 2-query/distinct.py -Q 3
-python3 ./test.py -f 2-query/varchar.py -Q 3
-python3 ./test.py -f 2-query/ltrim.py -Q 3
-python3 ./test.py -f 2-query/rtrim.py -Q 3
-python3 ./test.py -f 2-query/length.py -Q 3
-python3 ./test.py -f 2-query/char_length.py -Q 3
-python3 ./test.py -f 2-query/upper.py -Q 3
-python3 ./test.py -f 2-query/lower.py -Q 3
-python3 ./test.py -f 2-query/join.py -Q 3
-python3 ./test.py -f 2-query/join2.py -Q 3
-python3 ./test.py -f 2-query/cast.py -Q 3
-python3 ./test.py -f 2-query/substr.py -Q 3
-python3 ./test.py -f 2-query/union.py -Q 3
-python3 ./test.py -f 2-query/union1.py -Q 3
-python3 ./test.py -f 2-query/concat.py -Q 3
-python3 ./test.py -f 2-query/concat2.py -Q 3
-python3 ./test.py -f 2-query/concat_ws.py -Q 3
-python3 ./test.py -f 2-query/concat_ws2.py -Q 3
-python3 ./test.py -f 2-query/check_tsdb.py -Q 3
-python3 ./test.py -f 2-query/spread.py -Q 3
-python3 ./test.py -f 2-query/hyperloglog.py -Q 3
-python3 ./test.py -f 2-query/explain.py -Q 3
-python3 ./test.py -f 2-query/leastsquares.py -Q 3
-python3 ./test.py -f 2-query/timezone.py -Q 3
-python3 ./test.py -f 2-query/Now.py -Q 3
-python3 ./test.py -f 2-query/Today.py -Q 3
-python3 ./test.py -f 2-query/max.py -Q 3
-python3 ./test.py -f 2-query/min.py -Q 3
-python3 ./test.py -f 2-query/count.py -Q 3
-#python3 ./test.py -f 2-query/last.py -Q 3
-python3 ./test.py -f 2-query/first.py -Q 3
-python3 ./test.py -f 2-query/To_iso8601.py -Q 3
-python3 ./test.py -f 2-query/To_unixtimestamp.py -Q 3
-python3 ./test.py -f 2-query/timetruncate.py -Q 3
-python3 ./test.py -f 2-query/diff.py -Q 3
-python3 ./test.py -f 2-query/Timediff.py -Q 3
-python3 ./test.py -f 2-query/json_tag.py -Q 3
-python3 ./test.py -f 2-query/top.py -Q 3
-python3 ./test.py -f 2-query/bottom.py -Q 3
-python3 ./test.py -f 2-query/percentile.py -Q 3
-python3 ./test.py -f 2-query/apercentile.py -Q 3
-python3 ./test.py -f 2-query/abs.py -Q 3
-python3 ./test.py -f 2-query/ceil.py -Q 3
-python3 ./test.py -f 2-query/floor.py -Q 3
-python3 ./test.py -f 2-query/round.py -Q 3
-python3 ./test.py -f 2-query/log.py -Q 3
-python3 ./test.py -f 2-query/pow.py -Q 3
-python3 ./test.py -f 2-query/sqrt.py -Q 3
-python3 ./test.py -f 2-query/sin.py -Q 3
-python3 ./test.py -f 2-query/cos.py -Q 3
-python3 ./test.py -f 2-query/tan.py -Q 3
-python3 ./test.py -f 2-query/arcsin.py -Q 3
-python3 ./test.py -f 2-query/arccos.py -Q 3
-python3 ./test.py -f 2-query/arctan.py -Q 3
-python3 ./test.py -f 2-query/query_cols_tags_and_or.py -Q 3
-# python3 ./test.py -f 2-query/nestedQuery.py -Q 3
-# python3 ./test.py -f 2-query/nestedQuery_str.py -Q 3
-# python3 ./test.py -f 2-query/avg.py -Q 3
-# python3 ./test.py -f 2-query/elapsed.py -Q 3
-python3 ./test.py -f 2-query/csum.py -Q 3
-python3 ./test.py -f 2-query/mavg.py -Q 3
-python3 ./test.py -f 2-query/diff.py -Q 3
-python3 ./test.py -f 2-query/sample.py -Q 3
-python3 ./test.py -f 2-query/function_diff.py -Q 3
-python3 ./test.py -f 2-query/unique.py -Q 3
-python3 ./test.py -f 2-query/stateduration.py -Q 3
-python3 ./test.py -f 2-query/function_stateduration.py -Q 3
-python3 ./test.py -f 2-query/statecount.py -Q 3
-python3 ./test.py -f 2-query/tail.py -Q 3
-python3 ./test.py -f 2-query/ttl_comment.py -Q 3
-python3 ./test.py -f 2-query/distribute_agg_count.py -Q 3
-python3 ./test.py -f 2-query/distribute_agg_max.py -Q 3
-python3 ./test.py -f 2-query/distribute_agg_min.py -Q 3
-python3 ./test.py -f 2-query/distribute_agg_sum.py -Q 3
-python3 ./test.py -f 2-query/distribute_agg_spread.py -Q 3
-python3 ./test.py -f 2-query/distribute_agg_apercentile.py -Q 3
-python3 ./test.py -f 2-query/distribute_agg_avg.py -Q 3
-python3 ./test.py -f 2-query/distribute_agg_stddev.py -Q 3
-python3 ./test.py -f 2-query/twa.py -Q 3
-python3 ./test.py -f 2-query/irate.py -Q 3
-python3 ./test.py -f 2-query/function_null.py -Q 3
-python3 ./test.py -f 2-query/count_partition.py -Q 3
-python3 ./test.py -f 2-query/max_partition.py -Q 3
-python3 ./test.py -f 2-query/last_row.py -Q 3
-
-
 # test case of vnode
 python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_createDb_replica1.py -N 4 -M 1
 python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas.py -N 4 -M 1

From 4f3efe7af607a7b5f9aae2cf6ab4b9c143e9cbda Mon Sep 17 00:00:00 2001
From: "wenzhouwww@live.cn"
Date: Fri, 29 Jul 2022 16:59:10 +0800
Subject: [PATCH 19/63] update case

---
 .../4dnode1mnode_basic_replica3_insertdatas_stop_leader.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader.py
index c941bbcdcc..a50fd1b8ca 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader.py @@ -292,6 +292,9 @@ class TDTestCase: os.system(" {} -f {} >>/dev/null 2>&1 ".format(benchmark_build_path , json_file)) def stop_leader_when_Benchmark_inserts(self,dbname , total_rows , json_file ): + + newTdSql=tdCom.newTdSql() + # stop follower and insert datas , update tables and create new stables tdDnodes=cluster.dnodes tdSql.execute(" drop database if exists {} ".format(dbname)) From d9c51e871f413efb7bb678696620b6fff3ce8c3f Mon Sep 17 00:00:00 2001 From: "wenzhouwww@live.cn" Date: Fri, 29 Jul 2022 17:23:59 +0800 Subject: [PATCH 20/63] update --- tests/system-test/fulltest.sh | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh index 496adbf339..458951b815 100755 --- a/tests/system-test/fulltest.sh +++ b/tests/system-test/fulltest.sh @@ -2,6 +2,17 @@ set -e set -x +python3 ./test.py -f 0-others/taosShell.py +python3 ./test.py -f 0-others/taosShellError.py +python3 ./test.py -f 0-others/taosShellNetChk.py +python3 ./test.py -f 0-others/telemetry.py +python3 ./test.py -f 0-others/taosdMonitor.py +python3 ./test.py -f 0-others/udfTest.py +python3 ./test.py -f 0-others/udf_create.py +python3 ./test.py -f 0-others/udf_restart_taosd.py +python3 ./test.py -f 0-others/cachemodel.py +python3 ./test.py -f 0-others/udf_cfg1.py +python3 ./test.py -f 0-others/udf_cfg2.py python3 ./test.py -f 0-others/sysinfo.py python3 ./test.py -f 0-others/user_control.py @@ -415,5 +426,4 @@ python3 ./test.py -f 2-query/function_null.py -Q 3 python3 ./test.py -f 2-query/count_partition.py -Q 3 python3 ./test.py -f 2-query/max_partition.py -Q 3 python3 ./test.py -f 2-query/last_row.py -Q 3 -python3 ./test.py -f 2-query/tsbsQuery.py -Q 3 - +python3 ./test.py -f 2-query/tsbsQuery.py -Q 3 \ No newline at end of file From 563d4880bf8e40daf4e027d3f512b8391a3e10ff Mon Sep 17 00:00:00 2001 From: "wenzhouwww@live.cn" Date: Fri, 29 Jul 2022 17:28:19 +0800 Subject: [PATCH 21/63] fix crash of snapshot --- source/dnode/vnode/src/meta/metaSnapshot.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/source/dnode/vnode/src/meta/metaSnapshot.c b/source/dnode/vnode/src/meta/metaSnapshot.c index 85261d302e..8b762215db 100644 --- a/source/dnode/vnode/src/meta/metaSnapshot.c +++ b/source/dnode/vnode/src/meta/metaSnapshot.c @@ -144,6 +144,8 @@ int32_t metaSnapWriterOpen(SMeta* pMeta, int64_t sver, int64_t ever, SMetaSnapWr pWriter->pMeta = pMeta; pWriter->sver = sver; pWriter->ever = ever; + + metaBegin(pMeta); *ppWriter = pWriter; return code; From 0c111cd2bf7a1a600f5037b45a622e8c6cb1d55d Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Fri, 29 Jul 2022 18:15:44 +0800 Subject: [PATCH 22/63] fix: update schema to newest version to parsing rows --- source/dnode/vnode/src/tsdb/tsdbRead.c | 122 +++++++++++++------------ 1 file changed, 63 insertions(+), 59 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c index 03985654f8..0eec715bd1 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead.c @@ -80,15 +80,15 @@ typedef struct SFilesetIter { typedef struct SFileDataBlockInfo { // index position in STableBlockScanInfo in order to check whether neighbor block overlaps with it uint64_t uid; - int32_t tbBlockIdx; + int32_t tbBlockIdx; } 
SFileDataBlockInfo; typedef struct SDataBlockIter { - int32_t numOfBlocks; - int32_t index; - SArray* blockList; // SArray - int32_t order; - SBlock block; // current SBlock data + int32_t numOfBlocks; + int32_t index; + SArray* blockList; // SArray + int32_t order; + SBlock block; // current SBlock data SHashObj* pTableMap; } SDataBlockIter; @@ -124,8 +124,8 @@ struct STsdbReader { SSDataBlock* pResBlock; int32_t capacity; SReaderStatus status; - char* idStr; // query info handle, for debug purpose - int32_t type; // query type: 1. retrieve all data blocks, 2. retrieve direct prev|next rows + char* idStr; // query info handle, for debug purpose + int32_t type; // query type: 1. retrieve all data blocks, 2. retrieve direct prev|next rows SBlockLoadSuppInfo suppInfo; STsdbReadSnap* pReadSnap; SIOCostSummary cost; @@ -133,8 +133,8 @@ struct STsdbReader { SDataFReader* pFileReader; SVersionRange verRange; - int32_t step; - STsdbReader* innerReader[2]; + int32_t step; + STsdbReader* innerReader[2]; }; static SFileDataBlockInfo* getCurrentBlockInfo(SDataBlockIter* pBlockIter); @@ -211,8 +211,8 @@ static SHashObj* createDataBlockScanInfo(STsdbReader* pTsdbReader, const STableK pTsdbReader->idStr); } - tsdbDebug("%p create %d tables scan-info, size:%.2f Kb, %s", pTsdbReader, numOfTables, (sizeof(STableBlockScanInfo)*numOfTables)/1024.0, - pTsdbReader->idStr); + tsdbDebug("%p create %d tables scan-info, size:%.2f Kb, %s", pTsdbReader, numOfTables, + (sizeof(STableBlockScanInfo) * numOfTables) / 1024.0, pTsdbReader->idStr); return pTableMap; } @@ -396,7 +396,8 @@ static SSDataBlock* createResBlock(SQueryTableDataCond* pCond, int32_t capacity) return pResBlock; } -static int32_t tsdbReaderCreate(SVnode* pVnode, SQueryTableDataCond* pCond, STsdbReader** ppReader, int32_t capacity, const char* idstr) { +static int32_t tsdbReaderCreate(SVnode* pVnode, SQueryTableDataCond* pCond, STsdbReader** ppReader, int32_t capacity, + const char* idstr) { int32_t code = 0; int8_t level = 0; STsdbReader* pReader = (STsdbReader*)taosMemoryCalloc(1, sizeof(*pReader)); @@ -576,7 +577,7 @@ static int32_t doLoadBlockIndex(STsdbReader* pReader, SDataFReader* pFileReader, int64_t et2 = taosGetTimestampUs(); tsdbDebug("load block index for %d tables completed, elapsed time:%.2f ms, set blockIdx:%.2f ms, size:%.2f Kb %s", - (int32_t)num, (et1 - st)/1000.0, (et2-et1)/1000.0, num * sizeof(SBlockIdx)/1024.0, pReader->idStr); + (int32_t)num, (et1 - st) / 1000.0, (et2 - et1) / 1000.0, num * sizeof(SBlockIdx) / 1024.0, pReader->idStr); pReader->cost.headFileLoadTime += (et1 - st) / 1000.0; @@ -591,7 +592,7 @@ static int32_t doLoadFileBlock(STsdbReader* pReader, SArray* pIndexList, uint32_ *numOfValidTables = 0; int64_t st = taosGetTimestampUs(); - size_t size = 0; + size_t size = 0; STableBlockScanInfo* px = NULL; while (1) { @@ -641,9 +642,9 @@ static int32_t doLoadFileBlock(STsdbReader* pReader, SArray* pIndexList, uint32_ } } - double el = (taosGetTimestampUs() - st)/1000.0; + double el = (taosGetTimestampUs() - st) / 1000.0; tsdbDebug("load block of %d tables completed, blocks:%d in %d tables, size:%.2f Kb, elapsed time:%.2f ms %s", - numOfTables, *numOfBlocks, *numOfValidTables, size/1000.0, el, pReader->idStr); + numOfTables, *numOfBlocks, *numOfValidTables, size / 1000.0, el, pReader->idStr); pReader->cost.numOfBlocks += (*numOfBlocks); pReader->cost.headFileLoadTime += el; @@ -679,9 +680,7 @@ static SFileDataBlockInfo* getCurrentBlockInfo(SDataBlockIter* pBlockIter) { return pFBlockInfo; } -static SBlock* 
getCurrentBlock(SDataBlockIter* pBlockIter) { - return &pBlockIter->block; -} +static SBlock* getCurrentBlock(SDataBlockIter* pBlockIter) { return &pBlockIter->block; } static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo) { SReaderStatus* pStatus = &pReader->status; @@ -689,7 +688,7 @@ static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader, STableBlockScanIn SBlockData* pBlockData = &pStatus->fileBlockData; SFileDataBlockInfo* pFBlock = getCurrentBlockInfo(pBlockIter); - SBlock* pBlock = getCurrentBlock(pBlockIter); + SBlock* pBlock = getCurrentBlock(pBlockIter); SSDataBlock* pResBlock = pReader->pResBlock; int32_t numOfCols = blockDataGetNumOfCols(pResBlock); @@ -772,17 +771,17 @@ static int32_t doLoadFileBlockData(STsdbReader* pReader, SDataBlockIter* pBlockI int64_t st = taosGetTimestampUs(); SFileDataBlockInfo* pFBlock = getCurrentBlockInfo(pBlockIter); - SBlock* pBlock = getCurrentBlock(pBlockIter); + SBlock* pBlock = getCurrentBlock(pBlockIter); - SSDataBlock* pResBlock = pReader->pResBlock; - int32_t numOfCols = blockDataGetNumOfCols(pResBlock); + SSDataBlock* pResBlock = pReader->pResBlock; + int32_t numOfCols = blockDataGetNumOfCols(pResBlock); SBlockLoadSuppInfo* pSupInfo = &pReader->suppInfo; SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo; - SBlockIdx blockIdx = {.suid = pReader->suid, .uid = pBlockScanInfo->uid}; - int32_t code = tsdbReadColData(pReader->pFileReader, &blockIdx, pBlock, pSupInfo->colIds, numOfCols, - pBlockData, NULL, NULL); + SBlockIdx blockIdx = {.suid = pReader->suid, .uid = pBlockScanInfo->uid}; + int32_t code = + tsdbReadColData(pReader->pFileReader, &blockIdx, pBlock, pSupInfo->colIds, numOfCols, pBlockData, NULL, NULL); if (code != TSDB_CODE_SUCCESS) { goto _error; } @@ -857,7 +856,7 @@ static int32_t fileDataBlockOrderCompar(const void* pLeft, const void* pRight, v } static int32_t doSetCurrentBlock(SDataBlockIter* pBlockIter) { - SFileDataBlockInfo* pFBlock = getCurrentBlockInfo(pBlockIter); + SFileDataBlockInfo* pFBlock = getCurrentBlockInfo(pBlockIter); STableBlockScanInfo* pScanInfo = taosHashGet(pBlockIter->pTableMap, &pFBlock->uid, sizeof(pFBlock->uid)); int32_t* mapDataIndex = taosArrayGet(pScanInfo->pBlockList, pFBlock->tbBlockIdx); @@ -882,7 +881,7 @@ static int32_t initBlockIterator(STsdbReader* pReader, SDataBlockIter* pBlockIte int64_t st = taosGetTimestampUs(); SBlockOrderSupporter sup = {0}; - int32_t code = initBlockOrderSupporter(&sup, numOfTables); + int32_t code = initBlockOrderSupporter(&sup, numOfTables); if (code != TSDB_CODE_SUCCESS) { return code; } @@ -937,8 +936,8 @@ static int32_t initBlockIterator(STsdbReader* pReader, SDataBlockIter* pBlockIte } int64_t et = taosGetTimestampUs(); - tsdbDebug("%p create blocks info struct completed for one table, %d blocks not sorted, elapsed time:%.2f ms %s", pReader, cnt, - (et - st)/1000.0, pReader->idStr); + tsdbDebug("%p create blocks info struct completed for one table, %d blocks not sorted, elapsed time:%.2f ms %s", + pReader, cnt, (et - st) / 1000.0, pReader->idStr); pBlockIter->index = asc ? 
0 : (numOfBlocks - 1); cleanupBlockOrderSupporter(&sup); @@ -976,7 +975,8 @@ static int32_t initBlockIterator(STsdbReader* pReader, SDataBlockIter* pBlockIte } int64_t et = taosGetTimestampUs(); - tsdbDebug("%p %d data blocks access order completed, elapsed time:%.2f ms %s", pReader, cnt, (et-st)/1000.0, pReader->idStr); + tsdbDebug("%p %d data blocks access order completed, elapsed time:%.2f ms %s", pReader, cnt, (et - st) / 1000.0, + pReader->idStr); cleanupBlockOrderSupporter(&sup); taosMemoryFree(pTree); @@ -1024,7 +1024,7 @@ static SBlock* getNeighborBlockOfSameTable(SFileDataBlockInfo* pFBlockInfo, STab int32_t step = asc ? 1 : -1; *nextIndex = pFBlockInfo->tbBlockIdx + step; - SBlock *pBlock = taosMemoryCalloc(1, sizeof(SBlock)); + SBlock* pBlock = taosMemoryCalloc(1, sizeof(SBlock)); int32_t* indexInMapdata = taosArrayGet(pTableBlockScanInfo->pBlockList, *nextIndex); tMapDataGetItemByIdx(&pTableBlockScanInfo->mapData, *indexInMapdata, pBlock, tGetBlock); @@ -1196,10 +1196,10 @@ static int32_t buildDataBlockFromBuf(STsdbReader* pReader, STableBlockScanInfo* setComposedBlockFlag(pReader, true); double elapsedTime = (taosGetTimestampUs() - st) / 1000.0; - tsdbDebug( - "%p build data block from cache completed, elapsed time:%.2f ms, numOfRows:%d, brange: %" PRId64 - " - %" PRId64 " %s", - pReader, elapsedTime, pBlock->info.rows, pBlock->info.window.skey, pBlock->info.window.ekey, pReader->idStr); + tsdbDebug("%p build data block from cache completed, elapsed time:%.2f ms, numOfRows:%d, brange: %" PRId64 + " - %" PRId64 " %s", + pReader, elapsedTime, pBlock->info.rows, pBlock->info.window.skey, pBlock->info.window.ekey, + pReader->idStr); pReader->cost.buildmemBlock += elapsedTime; return code; @@ -1499,9 +1499,10 @@ static int32_t buildComposedDataBlock(STsdbReader* pReader, STableBlockScanInfo* setComposedBlockFlag(pReader, true); int64_t et = taosGetTimestampUs(); - tsdbDebug("%p uid:%" PRIu64 ", composed data block created, brange:%" PRIu64 "-%" PRIu64 " rows:%d, elapsed time:%.2f ms %s", pReader, - pBlockScanInfo->uid, pResBlock->info.window.skey, pResBlock->info.window.ekey, pResBlock->info.rows, - (et - st)/1000.0, pReader->idStr); + tsdbDebug("%p uid:%" PRIu64 ", composed data block created, brange:%" PRIu64 "-%" PRIu64 + " rows:%d, elapsed time:%.2f ms %s", + pReader, pBlockScanInfo->uid, pResBlock->info.window.skey, pResBlock->info.window.ekey, + pResBlock->info.rows, (et - st) / 1000.0, pReader->idStr); return TSDB_CODE_SUCCESS; } @@ -1679,7 +1680,7 @@ static TSDBKEY getCurrentKeyInBuf(SDataBlockIter* pBlockIter, STsdbReader* pRead static int32_t moveToNextFile(STsdbReader* pReader, int32_t* numOfBlocks) { SReaderStatus* pStatus = &pReader->status; - size_t numOfTables = taosHashGetSize(pReader->status.pTableMap); + size_t numOfTables = taosHashGetSize(pReader->status.pTableMap); SArray* pIndexList = taosArrayInit(numOfTables, sizeof(SBlockIdx)); while (1) { @@ -2212,7 +2213,7 @@ int32_t doMergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pSc CHECK_FILEBLOCK_STATE st; SFileDataBlockInfo* pFileBlockInfo = getCurrentBlockInfo(&pReader->status.blockIter); - SBlock* pCurrentBlock = getCurrentBlock(&pReader->status.blockIter); + SBlock* pCurrentBlock = getCurrentBlock(&pReader->status.blockIter); checkForNeighborFileBlock(pReader, pScanInfo, pCurrentBlock, pFileBlockInfo, pMerger, key, &st); if (st == CHECK_FILEBLOCK_QUIT) { break; @@ -2228,7 +2229,7 @@ void updateSchema(TSDBROW* pRow, uint64_t uid, STsdbReader* pReader) { if (pReader->pSchema == NULL) { 
metaGetTbTSchemaEx(pReader->pTsdb->pVnode->pMeta, pReader->suid, uid, sversion, &pReader->pSchema); - } else if (pReader->pSchema->version != sversion) { + } else if (pReader->pSchema->version < sversion) { taosMemoryFreeClear(pReader->pSchema); metaGetTbTSchemaEx(pReader->pTsdb->pVnode->pMeta, pReader->suid, uid, sversion, &pReader->pSchema); } @@ -2466,7 +2467,7 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, SArray* pTabl if (pCond->type == TIMEWINDOW_RANGE_EXTERNAL) { // update the SQueryTableDataCond to create inner reader STimeWindow w = pCond->twindows; - int32_t order = pCond->order; + int32_t order = pCond->order; if (order == TSDB_ORDER_ASC) { pCond->twindows.ekey = pCond->twindows.skey; pCond->twindows.skey = INT64_MIN; @@ -2485,7 +2486,7 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, SArray* pTabl if (order == TSDB_ORDER_ASC) { pCond->twindows.skey = w.ekey; pCond->twindows.ekey = INT64_MAX; - } else { + } else { pCond->twindows.skey = INT64_MIN; pCond->twindows.ekey = w.ekey; } @@ -2533,10 +2534,11 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, SArray* pTabl } } } else { - STsdbReader* pPrevReader = pReader->innerReader[0]; + STsdbReader* pPrevReader = pReader->innerReader[0]; SDataBlockIter* pBlockIter = &pPrevReader->status.blockIter; - initFilesetIterator(&pPrevReader->status.fileIter, pPrevReader->pReadSnap->fs.aDFileSet, pPrevReader->order, pPrevReader->idStr); + initFilesetIterator(&pPrevReader->status.fileIter, pPrevReader->pReadSnap->fs.aDFileSet, pPrevReader->order, + pPrevReader->idStr); resetDataBlockIterator(&pPrevReader->status.blockIter, pPrevReader->order, pReader->status.pTableMap); // no data in files, let's try buffer in memory @@ -2591,12 +2593,14 @@ void tsdbReaderClose(STsdbReader* pReader) { SIOCostSummary* pCost = &pReader->cost; - tsdbDebug("%p :io-cost summary: head-file:%" PRIu64 ", head-file time:%.2f ms, SMA:%"PRId64" SMA-time:%.2f ms, " - "fileBlocks:%"PRId64", fileBlocks-time:%.2f ms, build in-memory-block-time:%.2f ms, STableBlockScanInfo " - "size:%.2f Kb %s", + tsdbDebug("%p :io-cost summary: head-file:%" PRIu64 ", head-file time:%.2f ms, SMA:%" PRId64 + " SMA-time:%.2f ms, " + "fileBlocks:%" PRId64 + ", fileBlocks-time:%.2f ms, build in-memory-block-time:%.2f ms, STableBlockScanInfo " + "size:%.2f Kb %s", pReader, pCost->headFileLoad, pCost->headFileLoadTime, pCost->smaData, pCost->smaLoadTime, pCost->numOfBlocks, pCost->blockLoadTime, pCost->buildmemBlock, - numOfTables * sizeof(STableBlockScanInfo) /1000.0, pReader->idStr); + numOfTables * sizeof(STableBlockScanInfo) / 1000.0, pReader->idStr); taosMemoryFree(pReader->idStr); taosMemoryFree(pReader->pSchema); @@ -2675,9 +2679,9 @@ static void setBlockInfo(STsdbReader* pReader, SDataBlockInfo* pDataBlockInfo) { void tsdbRetrieveDataBlockInfo(STsdbReader* pReader, SDataBlockInfo* pDataBlockInfo) { if (pReader->type == TIMEWINDOW_RANGE_EXTERNAL) { - if (pReader->step == EXTERNAL_ROWS_MAIN) { + if (pReader->step == EXTERNAL_ROWS_MAIN) { setBlockInfo(pReader, pDataBlockInfo); - } else if (pReader->step == EXTERNAL_ROWS_PREV) { + } else if (pReader->step == EXTERNAL_ROWS_PREV) { setBlockInfo(pReader->innerReader[0], pDataBlockInfo); } else { setBlockInfo(pReader->innerReader[1], pDataBlockInfo); @@ -2691,7 +2695,7 @@ int32_t tsdbRetrieveDatablockSMA(STsdbReader* pReader, SColumnDataAgg*** pBlockS int32_t code = 0; *allHave = false; - if(pReader->type == TIMEWINDOW_RANGE_EXTERNAL) { + if (pReader->type == TIMEWINDOW_RANGE_EXTERNAL) 
{ *pBlockStatis = NULL; return TSDB_CODE_SUCCESS; } @@ -2702,7 +2706,7 @@ int32_t tsdbRetrieveDatablockSMA(STsdbReader* pReader, SColumnDataAgg*** pBlockS return TSDB_CODE_SUCCESS; } - SFileDataBlockInfo* pFBlock = getCurrentBlockInfo(&pReader->status.blockIter); + SFileDataBlockInfo* pFBlock = getCurrentBlockInfo(&pReader->status.blockIter); SBlock* pBlock = getCurrentBlock(&pReader->status.blockIter); int64_t stime = taosGetTimestampUs(); @@ -2807,7 +2811,7 @@ int32_t tsdbReaderReset(STsdbReader* pReader, SQueryTableDataCond* pCond) { } pReader->order = pCond->order; - pReader->type = TIMEWINDOW_RANGE_CONTAINED; + pReader->type = TIMEWINDOW_RANGE_CONTAINED; pReader->status.loadFromFile = true; pReader->status.pTableIter = NULL; pReader->window = updateQueryTimeWindow(pReader->pTsdb, &pCond->twindows); @@ -2826,7 +2830,7 @@ int32_t tsdbReaderReset(STsdbReader* pReader, SQueryTableDataCond* pCond) { resetDataBlockIterator(&pReader->status.blockIter, pReader->order, pReader->status.pTableMap); resetDataBlockScanInfo(pReader->status.pTableMap); - int32_t code = 0; + int32_t code = 0; SDataBlockIter* pBlockIter = &pReader->status.blockIter; // no data in files, let's try buffer in memory @@ -2835,8 +2839,8 @@ int32_t tsdbReaderReset(STsdbReader* pReader, SQueryTableDataCond* pCond) { } else { code = initForFirstBlockInFile(pReader, pBlockIter); if (code != TSDB_CODE_SUCCESS) { - tsdbError("%p reset reader failed, numOfTables:%d, query range:%" PRId64 " - %" PRId64 " in query %s", - pReader, numOfTables, pReader->window.skey, pReader->window.ekey, pReader->idStr); + tsdbError("%p reset reader failed, numOfTables:%d, query range:%" PRId64 " - %" PRId64 " in query %s", pReader, + numOfTables, pReader->window.skey, pReader->window.ekey, pReader->idStr); return code; } } From c0003de3168d1ab540901e7b4629d8f5f926065c Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Fri, 29 Jul 2022 18:26:31 +0800 Subject: [PATCH 23/63] fix sim error --- source/libs/index/src/indexFilter.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/source/libs/index/src/indexFilter.c b/source/libs/index/src/indexFilter.c index e1c8ac0204..1f257bb05f 100644 --- a/source/libs/index/src/indexFilter.c +++ b/source/libs/index/src/indexFilter.c @@ -385,6 +385,15 @@ static int32_t sifDoIndex(SIFParam *left, SIFParam *right, int8_t operType, SIFP .reverse = reverse, .filterFunc = filterFunc}; + char buf[128] = {0}; + if (IS_VAR_DATA_TYPE(left->colValType)) { + if (!IS_VAR_DATA_TYPE(right->colValType)) { + NUM_TO_STRING(right->colValType, right->condValue, sizeof(buf), buf + VARSTR_HEADER_SIZE); + varDataSetLen(buf, strlen(buf + VARSTR_HEADER_SIZE)); + + param.val = buf; + } + } ret = metaFilterTableIds(arg->metaEx, ¶m, output->result); } return ret; From 4de8d28f639b502b038247d01d2fca8648c7acbc Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Fri, 29 Jul 2022 18:35:23 +0800 Subject: [PATCH 24/63] fix: fix ucs4 convertion issue --- include/os/osString.h | 2 + source/client/src/clientEnv.c | 2 + source/client/src/clientHb.c | 1 + source/client/src/clientMain.c | 2 + source/libs/scheduler/src/schJob.c | 2 +- source/os/src/osString.c | 68 ++++++++++++++++++++++++++++-- 6 files changed, 72 insertions(+), 5 deletions(-) diff --git a/include/os/osString.h b/include/os/osString.h index 3a4ff18694..74d5e52a1b 100644 --- a/include/os/osString.h +++ b/include/os/osString.h @@ -62,6 +62,8 @@ typedef int32_t TdUcs4; int32_t taosUcs4len(TdUcs4 *ucs4); int64_t taosStr2int64(const char *str); +int32_t taosConvInit(int32_t maxNum); +void 
taosConvDestroy(); int32_t taosUcs4ToMbs(TdUcs4 *ucs4, int32_t ucs4_max_len, char *mbs); bool taosMbsToUcs4(const char *mbs, size_t mbs_len, TdUcs4 *ucs4, int32_t ucs4_max_len, int32_t *len); int32_t tasoUcs4Compare(TdUcs4 *f1_ucs4, TdUcs4 *f2_ucs4, int32_t bytes); diff --git a/source/client/src/clientEnv.c b/source/client/src/clientEnv.c index 893aa39ce3..c012533056 100644 --- a/source/client/src/clientEnv.c +++ b/source/client/src/clientEnv.c @@ -361,6 +361,8 @@ void taos_init_imp(void) { initQueryModuleMsgHandle(); + taosConvInit(256); + rpcInit(); SCatalogCfg cfg = {.maxDBCacheNum = 100, .maxTblCacheNum = 100}; diff --git a/source/client/src/clientHb.c b/source/client/src/clientHb.c index 3d6cf1c626..06bd3f3887 100644 --- a/source/client/src/clientHb.c +++ b/source/client/src/clientHb.c @@ -353,6 +353,7 @@ int32_t hbBuildQueryDesc(SQueryHbReqBasic *hbBasic, STscObj *pObj) { desc.subDesc = NULL; desc.subPlanNum = 0; } + desc.subPlanNum = taosArrayGetSize(desc.subDesc); ASSERT(desc.subPlanNum == taosArrayGetSize(desc.subDesc)); } else { desc.subDesc = NULL; diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c index b04b4ea2aa..a4eaf057d1 100644 --- a/source/client/src/clientMain.c +++ b/source/client/src/clientMain.c @@ -75,6 +75,8 @@ void taos_cleanup(void) { cleanupTaskQueue(); + taosConvDestroy(); + tscInfo("all local resources released"); taosCleanupCfg(); taosCloseLog(); diff --git a/source/libs/scheduler/src/schJob.c b/source/libs/scheduler/src/schJob.c index 46140ccdff..82146c3741 100644 --- a/source/libs/scheduler/src/schJob.c +++ b/source/libs/scheduler/src/schJob.c @@ -278,7 +278,7 @@ int32_t schValidateAndBuildJob(SQueryPlan *pDag, SSchJob *pJob) { } SHashObj *planToTask = taosHashInit( - SCHEDULE_DEFAULT_MAX_TASK_NUM, + pDag->numOfSubplans, taosGetDefaultHashFunction(POINTER_BYTES == sizeof(int64_t) ? 
TSDB_DATA_TYPE_BIGINT : TSDB_DATA_TYPE_INT), false, HASH_NO_LOCK); if (NULL == planToTask) { diff --git a/source/os/src/osString.c b/source/os/src/osString.c index 32f0fdf7b3..d6b0bafe8f 100644 --- a/source/os/src/osString.c +++ b/source/os/src/osString.c @@ -134,21 +134,81 @@ int32_t taosUcs4ToMbs(TdUcs4 *ucs4, int32_t ucs4_max_len, char *mbs) { #endif } +typedef struct { + iconv_t conv; + int8_t inUse; +} SConv; + +SConv *gConv = NULL; +int32_t convUsed = 0; +int32_t gConvMaxNum = 0; + +int32_t taosConvInit(int32_t maxNum) { + gConvMaxNum = maxNum * 2; + gConv = taosMemoryCalloc(gConvMaxNum, sizeof(SConv)); + for (int32_t i = 0; i < gConvMaxNum; ++i) { + gConv[i].conv = iconv_open(DEFAULT_UNICODE_ENCODEC, tsCharset); + } + + return 0; +} + +void taosConvDestroy() { + for (int32_t i = 0; i < gConvMaxNum; ++i) { + iconv_close(gConv[i].conv); + } + taosMemoryFreeClear(gConv); +} + +void taosAcquireConv(int32_t *idx) { + while (true) { + int32_t used = atomic_add_fetch_32(&convUsed, 1); + if (used > gConvMaxNum) { + used = atomic_sub_fetch_32(&convUsed, 1); + sched_yield(); + continue; + } + + break; + } + + int32_t startId = taosGetSelfPthreadId() % gConvMaxNum; + while (true) { + if (gConv[startId].inUse) { + startId = (startId + 1) % gConvMaxNum; + continue; + } + + int8_t old = atomic_val_compare_exchange_8(&gConv[startId].inUse, 0, 1); + if (0 == old) { + break; + } + } + + *idx = startId; +} + +void taosReleaseConv(int32_t idx) { + atomic_store_8(&gConv[idx].inUse, 0); +} + bool taosMbsToUcs4(const char *mbs, size_t mbsLength, TdUcs4 *ucs4, int32_t ucs4_max_len, int32_t *len) { #ifdef DISALLOW_NCHAR_WITHOUT_ICONV printf("Nchar cannot be read and written without iconv, please install iconv library and recompile TDengine.\n"); return -1; #else memset(ucs4, 0, ucs4_max_len); - iconv_t cd = iconv_open(DEFAULT_UNICODE_ENCODEC, tsCharset); + + int32_t idx = 0; + taosAcquireConv(&idx); size_t ucs4_input_len = mbsLength; size_t outLeft = ucs4_max_len; - if (iconv(cd, (char **)&mbs, &ucs4_input_len, (char **)&ucs4, &outLeft) == -1) { - iconv_close(cd); + if (iconv(gConv[idx].conv, (char **)&mbs, &ucs4_input_len, (char **)&ucs4, &outLeft) == -1) { + taosReleaseConv(idx); return false; } - iconv_close(cd); + taosReleaseConv(idx); if (len != NULL) { *len = (int32_t)(ucs4_max_len - outLeft); if (*len < 0) { From c2e4110ae1f43fad70fded02031408d7533c88b6 Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Fri, 29 Jul 2022 19:49:45 +0800 Subject: [PATCH 25/63] fix: fix mbs2ucs4 issue --- source/os/src/osString.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/source/os/src/osString.c b/source/os/src/osString.c index d6b0bafe8f..0642bd768b 100644 --- a/source/os/src/osString.c +++ b/source/os/src/osString.c @@ -161,6 +161,13 @@ void taosConvDestroy() { } void taosAcquireConv(int32_t *idx) { + if (0 == gConvMaxNum) { + gConv = taosMemoryCalloc(1, sizeof(SConv)); + gConv[0].conv = iconv_open(DEFAULT_UNICODE_ENCODEC, tsCharset); + *idx = 0; + return; + } + while (true) { int32_t used = atomic_add_fetch_32(&convUsed, 1); if (used > gConvMaxNum) { @@ -189,6 +196,12 @@ void taosAcquireConv(int32_t *idx) { } void taosReleaseConv(int32_t idx) { + if (0 == gConvMaxNum) { + iconv_close(gConv[0].conv); + taosMemoryFreeClear(gConv); + return; + } + atomic_store_8(&gConv[idx].inUse, 0); } From 6c250e75cd0e9f45a9ec1c80377c6c638fea91fb Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Fri, 29 Jul 2022 21:28:18 +0800 Subject: [PATCH 26/63] fix(stream): forbid histogram in stream --- examples/c/tmq.c | 4 ++-- 
source/libs/executor/src/executor.c | 1 - source/libs/executor/src/scanoperator.c | 10 ---------- source/libs/function/src/builtins.c | 12 ++++++------ source/libs/stream/src/streamDispatch.c | 3 ++- 5 files changed, 10 insertions(+), 20 deletions(-) diff --git a/examples/c/tmq.c b/examples/c/tmq.c index fd1f146618..3686251b4b 100644 --- a/examples/c/tmq.c +++ b/examples/c/tmq.c @@ -29,7 +29,7 @@ static void msg_process(TAOS_RES* msg) { printf("vg: %d\n", tmq_get_vgroup_id(msg)); if (tmq_get_res_type(msg) == TMQ_RES_TABLE_META) { tmq_raw_data raw = {0}; - int32_t code = tmq_get_raw(msg, &raw); + int32_t code = tmq_get_raw(msg, &raw); if (code == 0) { TAOS* pConn = taos_connect("192.168.1.86", "root", "taosdata", NULL, 0); if (pConn == NULL) { @@ -302,7 +302,7 @@ int32_t create_topic() { } taos_free_result(pRes); -// pRes = taos_query(pConn, "create topic topic_ctb_column with meta as database abc1"); + // pRes = taos_query(pConn, "create topic topic_ctb_column with meta as database abc1"); pRes = taos_query(pConn, "create topic topic_ctb_column as select ts, c1, c2, c3 from st1"); if (taos_errno(pRes) != 0) { printf("failed to create topic topic_ctb_column, reason:%s\n", taos_errstr(pRes)); diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c index 171f39db1b..6f8317c9cd 100644 --- a/source/libs/executor/src/executor.c +++ b/source/libs/executor/src/executor.c @@ -48,7 +48,6 @@ static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t nu pOperator->status = OP_NOT_OPENED; SStreamScanInfo* pInfo = pOperator->info; - /*pInfo->assignBlockUid = assignUid;*/ // TODO: if a block was set but not consumed, // prevent setting a different type of block diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 4a2f57d628..8f09389248 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -408,10 +408,6 @@ static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) { pBlock->info.groupId = *groupId; } - if (pTableScanInfo->assignBlockUid) { - pBlock->info.groupId = pBlock->info.uid; - } - pOperator->resultInfo.totalRows = pTableScanInfo->readRecorder.totalRows; pTableScanInfo->readRecorder.elapsedTime += (taosGetTimestampUs() - st) / 1000.0; @@ -1100,12 +1096,6 @@ static int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock pInfo->pRes->info.groupId = 0; } - // for generating rollup SMA result, each time is an independent time serie. 
- // TODO temporarily used, when the statement of "partition by tbname" is ready, remove this - if (pInfo->assignBlockUid) { - pInfo->pRes->info.groupId = pBlock->info.uid; - } - // todo extract method for (int32_t i = 0; i < taosArrayGetSize(pInfo->pColMatchInfo); ++i) { SColMatchInfo* pColMatchInfo = taosArrayGet(pInfo->pColMatchInfo, i); diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index 40e26f6be6..abac9e8073 100644 --- a/source/libs/function/src/builtins.c +++ b/source/libs/function/src/builtins.c @@ -2221,7 +2221,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = "interp", .type = FUNCTION_TYPE_INTERP, - .classification = FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_INTERVAL_INTERPO_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC, + .classification = FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_INTERVAL_INTERPO_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | FUNC_MGT_FORBID_STREAM_FUNC, .translateFunc = translateFirstLast, .getEnvFunc = getSelectivityFuncEnv, .initFunc = functionSetup, @@ -2231,7 +2231,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = "derivative", .type = FUNCTION_TYPE_DERIVATIVE, - .classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC, + .classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | FUNC_MGT_FORBID_STREAM_FUNC, .translateFunc = translateDerivative, .getEnvFunc = getDerivativeFuncEnv, .initFunc = derivativeFuncSetup, @@ -2264,7 +2264,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = "_cache_last_row", .type = FUNCTION_TYPE_CACHE_LAST_ROW, - .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC, + .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | FUNC_MGT_FORBID_STREAM_FUNC, .translateFunc = translateFirstLast, .getEnvFunc = getFirstLastFuncEnv, .initFunc = functionSetup, @@ -2358,7 +2358,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = "histogram", .type = FUNCTION_TYPE_HISTOGRAM, - .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_MULTI_ROWS_FUNC | FUNC_MGT_FORBID_FILL_FUNC, + .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_MULTI_ROWS_FUNC | FUNC_MGT_FORBID_FILL_FUNC | FUNC_MGT_FORBID_STREAM_FUNC, .translateFunc = translateHistogram, .getEnvFunc = getHistogramFuncEnv, .initFunc = histogramFunctionSetup, @@ -2502,8 +2502,8 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = "tail", .type = FUNCTION_TYPE_TAIL, - .classification = FUNC_MGT_SELECT_FUNC | FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | - FUNC_MGT_IMPLICIT_TS_FUNC, + .classification = FUNC_MGT_SELECT_FUNC | FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_TIMELINE_FUNC | + FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC, .translateFunc = translateTail, .getEnvFunc = getTailFuncEnv, .initFunc = tailFunctionSetup, diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index ec1dd693e1..95e5b61dbd 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -462,7 +462,8 @@ int32_t streamDispatch(SStreamTask* pTask) { if (streamDispatchAllBlocks(pTask, pBlock) < 0) { ASSERT(0); code = -1; - // TODO set status fail + streamQueueProcessFail(pTask->outputQueue); + atomic_store_8(&pTask->outputStatus, 
TASK_OUTPUT_STATUS__NORMAL); goto FREE; } /*atomic_store_8(&pTask->outputStatus, TASK_OUTPUT_STATUS__NORMAL);*/ From e3b73ba5849f9e7ecf4366a7ae0687044224f695 Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Sat, 30 Jul 2022 08:34:16 +0800 Subject: [PATCH 27/63] fix: fix release conv handle issue --- source/os/src/osString.c | 1 + 1 file changed, 1 insertion(+) diff --git a/source/os/src/osString.c b/source/os/src/osString.c index 0642bd768b..329b039084 100644 --- a/source/os/src/osString.c +++ b/source/os/src/osString.c @@ -203,6 +203,7 @@ void taosReleaseConv(int32_t idx) { } atomic_store_8(&gConv[idx].inUse, 0); + atomic_sub_fetch_32(&convUsed, 1); } bool taosMbsToUcs4(const char *mbs, size_t mbsLength, TdUcs4 *ucs4, int32_t ucs4_max_len, int32_t *len) { From 9fbefbb16748355b58842980dc2bb83666d17f3c Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Sat, 30 Jul 2022 09:00:21 +0800 Subject: [PATCH 28/63] fix: fix task queue thread issue --- source/common/src/tglobal.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 0cc4e31aed..ba7f9f93c1 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -49,7 +49,7 @@ int32_t tsNumOfShmThreads = 1; // queue & threads int32_t tsNumOfRpcThreads = 1; int32_t tsNumOfCommitThreads = 2; -int32_t tsNumOfTaskQueueThreads = 1; +int32_t tsNumOfTaskQueueThreads = 4; int32_t tsNumOfMnodeQueryThreads = 4; int32_t tsNumOfMnodeFetchThreads = 1; int32_t tsNumOfMnodeReadThreads = 1; @@ -317,9 +317,9 @@ static int32_t taosAddClientCfg(SConfig *pCfg) { if (cfgAddString(pCfg, "smlTagName", tsSmlTagName, 1) != 0) return -1; if (cfgAddBool(pCfg, "smlDataFormat", tsSmlDataFormat, 1) != 0) return -1; - tsNumOfTaskQueueThreads = tsNumOfCores / 4; - tsNumOfTaskQueueThreads = TRANGE(tsNumOfTaskQueueThreads, 1, 2); - if (cfgAddInt32(pCfg, "numOfTaskQueueThreads", tsNumOfTaskQueueThreads, 1, 1024, 0) != 0) return -1; + tsNumOfTaskQueueThreads = tsNumOfCores / 2; + tsNumOfTaskQueueThreads = TMAX(tsNumOfTaskQueueThreads, 4); + if (cfgAddInt32(pCfg, "numOfTaskQueueThreads", tsNumOfTaskQueueThreads, 4, 1024, 0) != 0) return -1; return 0; } From d992ab1401c17bdd39ebb3b75c34e574fea7fba6 Mon Sep 17 00:00:00 2001 From: slzhou Date: Sat, 30 Jul 2022 09:09:59 +0800 Subject: [PATCH 29/63] fix: nested query can be used as table for join together --- source/libs/planner/src/planSpliter.c | 15 ++- tests/script/tsim/parser/condition_query.sim | 122 +++++++++---------- 2 files changed, 75 insertions(+), 62 deletions(-) diff --git a/source/libs/planner/src/planSpliter.c b/source/libs/planner/src/planSpliter.c index bc5e50218b..37f9d7601e 100644 --- a/source/libs/planner/src/planSpliter.c +++ b/source/libs/planner/src/planSpliter.c @@ -248,12 +248,25 @@ static bool stbSplNeedSplitWindow(bool streamQuery, SLogicNode* pNode) { return false; } +static bool stbSplNeedSplitJoin(bool streamQuery, SJoinLogicNode* pJoin) { + if (pJoin->isSingleTableJoin) { + return false; + } + SNode* pChild = NULL; + FOREACH(pChild, pJoin->node.pChildren) { + if (QUERY_NODE_LOGIC_PLAN_SCAN != nodeType(pChild) && QUERY_NODE_LOGIC_PLAN_JOIN != nodeType(pChild)) { + return false; + } + } + return true; +} + static bool stbSplNeedSplit(bool streamQuery, SLogicNode* pNode) { switch (nodeType(pNode)) { case QUERY_NODE_LOGIC_PLAN_SCAN: return stbSplIsMultiTbScan(streamQuery, (SScanLogicNode*)pNode); case QUERY_NODE_LOGIC_PLAN_JOIN: - return !(((SJoinLogicNode*)pNode)->isSingleTableJoin); + return 
stbSplNeedSplitJoin(streamQuery, (SJoinLogicNode*)pNode); case QUERY_NODE_LOGIC_PLAN_PARTITION: return stbSplIsMultiTbScanChild(streamQuery, pNode); case QUERY_NODE_LOGIC_PLAN_AGG: diff --git a/tests/script/tsim/parser/condition_query.sim b/tests/script/tsim/parser/condition_query.sim index 87ea9f3cbe..bc10864c81 100644 --- a/tests/script/tsim/parser/condition_query.sim +++ b/tests/script/tsim/parser/condition_query.sim @@ -37,7 +37,7 @@ sql select * from stb1 where c8 < 'nuLl'; sql select * from stb1 where c9 > 'nuLl'; sql_error select * from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b; sql_error select a.ts,a.c1,a.c8 from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b where a.ts=b.ts and a.c1 > 50 or b.c1 < 60; -sql_error select a.ts,a.c1,a.c8 from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b where a.ts=b.ts and ((a.c1 > 50 and a.c1 < 60) or (b.c2 > 60)); +sql select a.ts,a.c1,a.c8 from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b where a.ts=b.ts and ((a.c1 > 50 and a.c1 < 60) or (b.c2 > 60)); sql select * from stb1 where 'c2' is null; sql select * from stb1 where 'c2' is not null; @@ -1475,66 +1475,66 @@ if $data20 != @21-05-05 18:19:25.000@ then return -1 endi -#sql select a.* from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b where a.ts=b.ts and a.c1 > 50 order by ts; -#sql select a.* from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b where a.ts=b.ts and a.c1 > 50; -#if $rows != 4 then -# return -1 -#endi -#if $data00 != @21-05-05 18:19:20.000@ then -# return -1 -#endi -#if $data10 != @21-05-05 18:19:21.000@ then -# return -1 -#endi -#if $data20 != @21-05-05 18:19:24.000@ then -# return -1 -#endi -#if $data30 != @21-05-05 18:19:25.000@ then -# return -1 -#endi +sql select a.* from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b where a.ts=b.ts and a.c1 > 50 order by ts; +sql select a.* from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b where a.ts=b.ts and a.c1 > 50; +if $rows != 4 then + return -1 +endi +if $data00 != @21-05-05 18:19:20.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:21.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:24.000@ then + return -1 +endi +if $data30 != @21-05-05 18:19:25.000@ then + return -1 +endi -#sql select a.ts,a.c1,a.c8,a.c9 from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b where a.ts=b.ts and a.c1 > 50 and b.c1 < 60 order by ts; -#sql select a.ts,a.c1,a.c8 from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b where a.ts=b.ts and a.c1 > 50 and b.c1 < 60 order by ts; -#if $rows != 2 then -# return -1 -#endi -#if $data00 != @21-05-05 18:19:20.000@ then -# return -1 -#endi -#if $data10 != @21-05-05 18:19:21.000@ then -# return -1 -#endi -# -#sql select a.ts,b.ts,a.c1,b.u1,b.u2 from (select * from stb1) a, (select * from stb2) b where a.ts=b.ts and (a.c1 < 10 or a.c1 > 30) and (b.u1 < 5 or b.u1 > 5) order by ts; -#if $rows != 4 then -# return -1 -#endi -#if $data00 != @21-05-05 18:19:00.000@ then -# return -1 -#endi -#if $data10 != @21-05-05 18:19:02.000@ then -# return -1 -#endi -#if $data20 != @21-05-05 18:19:12.000@ then -# return -1 -#endi -#if $data30 != @21-05-05 18:19:14.000@ then -# return -1 -#endi -# -#sql select a.ts,b.ts,a.c1,b.u1,b.u2 from (select * from stb1) a, (select * from stb2) b where a.ts=b.ts and a.c1 < 30 and b.u1 > 1 and a.c1 > 
10 and b.u1 < 8 and b.u1<>5 order by ts; -#if $rows != 3 then -# return -1 -#endi -#if $data00 != @21-05-05 18:19:04.000@ then -# return -1 -#endi -#if $data10 != @21-05-05 18:19:06.000@ then -# return -1 -#endi -#if $data20 != @21-05-05 18:19:10.000@ then -# return -1 -#endi +sql select a.ts,a.c1,a.c8,a.c9 from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b where a.ts=b.ts and a.c1 > 50 and b.c1 < 60 order by ts; +sql select a.ts,a.c1,a.c8 from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b where a.ts=b.ts and a.c1 > 50 and b.c1 < 60 order by ts; +if $rows != 2 then + return -1 +endi +if $data00 != @21-05-05 18:19:20.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:21.000@ then + return -1 +endi + +sql select a.ts,b.ts,a.c1,b.u1,b.u2 from (select * from stb1) a, (select * from stb2) b where a.ts=b.ts and (a.c1 < 10 or a.c1 > 30) and (b.u1 < 5 or b.u1 > 5) order by ts; +if $rows != 4 then + return -1 +endi +if $data00 != @21-05-05 18:19:00.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:02.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:12.000@ then + return -1 +endi +if $data30 != @21-05-05 18:19:14.000@ then + return -1 +endi + +sql select a.ts,b.ts,a.c1,b.u1,b.u2 from (select * from stb1) a, (select * from stb2) b where a.ts=b.ts and a.c1 < 30 and b.u1 > 1 and a.c1 > 10 and b.u1 < 8 and b.u1<>5 order by ts; +if $rows != 3 then + return -1 +endi +if $data00 != @21-05-05 18:19:04.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:06.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:10.000@ then + return -1 +endi sql select * from stb1 where c1 is null and c1 is not null; if $rows != 0 then @@ -2574,7 +2574,7 @@ sql select * from stb1 where c1 > 0 or t1 > 0 and c1 > 1 sql select * from stb1 where t1 > 0 or t1 > 0 and c1 > 1 sql select * from stb1 where (c1 > 0 and t1 > 0 ) or (t1 > 1 and c1 > 3) sql select * from stb1 where (c1 > 0 and t1 > 0 ) or t1 > 1 -sql_error select a.ts,b.ts,a.c1,b.u1,b.u2 from (select * from stb1) a, (select * from stb2) b where a.ts=b.ts and a.t1=b.t1; +sql select a.ts,b.ts,a.c1,b.u1,b.u2 from (select * from stb1) a, (select * from stb2) b where a.ts=b.ts and a.t1=b.t1; sql select * from stb1 where c1 < 63 and t1 > 5 if $rows != 2 then From 2f1c9b3c1673dc6db848775cfa3936e9025a44dc Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Sat, 30 Jul 2022 09:50:19 +0800 Subject: [PATCH 30/63] enh: add local forbidden cfg processing --- include/common/tglobal.h | 1 + source/common/src/tglobal.c | 14 ++++++++++++++ source/libs/command/src/command.c | 7 +++++++ 3 files changed, 22 insertions(+) diff --git a/include/common/tglobal.h b/include/common/tglobal.h index 57d1199e17..9111728e1a 100644 --- a/include/common/tglobal.h +++ b/include/common/tglobal.h @@ -146,6 +146,7 @@ struct SConfig *taosGetCfg(); void taosSetAllDebugFlag(int32_t flag); void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal); int32_t taosSetCfg(SConfig *pCfg, char *name); +void taosLocalCfgForbiddenToChange(char* name, bool* forbidden); #ifdef __cplusplus } diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index ba7f9f93c1..6f4a3060ed 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -594,6 +594,20 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { return 0; } +void taosLocalCfgForbiddenToChange(char* name, bool* forbidden) { + int32_t len = strlen(name); + char lowcaseName[CFG_NAME_MAX_LEN + 1] = {0}; + strntolower(lowcaseName, 
name, TMIN(CFG_NAME_MAX_LEN, len)); + + if (strcasecmp("charset", name) == 0) { + *forbidden = true; + return; + } + + *forbidden = false; +} + + int32_t taosSetCfg(SConfig *pCfg, char *name) { int32_t len = strlen(name); char lowcaseName[CFG_NAME_MAX_LEN + 1] = {0}; diff --git a/source/libs/command/src/command.c b/source/libs/command/src/command.c index 7c95e71823..a76b457422 100644 --- a/source/libs/command/src/command.c +++ b/source/libs/command/src/command.c @@ -517,6 +517,13 @@ static int32_t execAlterLocal(SAlterLocalStmt* pStmt) { goto _return; } + bool forbidden = false; + taosLocalCfgForbiddenToChange(pStmt->config, &forbidden); + if (forbidden) { + terrno = TSDB_CODE_OPS_NOT_SUPPORT; + return terrno; + } + if (cfgSetItem(tsCfg, pStmt->config, pStmt->value, CFG_STYPE_ALTER_CMD)) { return terrno; } From ac169b8d90d5c1b571a19b11aa8f532db64e4f31 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Sat, 30 Jul 2022 10:30:58 +0800 Subject: [PATCH 31/63] fix index bugf --- include/common/ttypes.h | 161 ++++++++++++------------ include/util/types.h | 8 +- source/dnode/vnode/inc/vnode.h | 4 +- source/dnode/vnode/src/meta/metaOpen.c | 3 +- source/dnode/vnode/src/meta/metaQuery.c | 4 +- source/libs/index/src/indexFilter.c | 19 ++- 6 files changed, 106 insertions(+), 93 deletions(-) diff --git a/include/common/ttypes.h b/include/common/ttypes.h index ec70dffd34..4b3e11f947 100644 --- a/include/common/ttypes.h +++ b/include/common/ttypes.h @@ -143,84 +143,84 @@ typedef struct { } \ } while (0) -#define SET_TYPED_DATA_MIN(_v, _type) \ - do { \ - switch (_type) { \ - case TSDB_DATA_TYPE_BOOL: \ - case TSDB_DATA_TYPE_TINYINT: \ - *(int8_t *)(_v) = INT8_MIN; \ - break; \ - case TSDB_DATA_TYPE_SMALLINT: \ - *(int16_t *)(_v) = INT16_MIN; \ - break; \ - case TSDB_DATA_TYPE_INT: \ - *(int32_t *)(_v) = INT32_MIN; \ - break; \ - case TSDB_DATA_TYPE_BIGINT: \ - case TSDB_DATA_TYPE_TIMESTAMP: \ - *(int64_t *)(_v) = INT64_MIN; \ - break; \ - case TSDB_DATA_TYPE_FLOAT: \ - *(float *)(_v) = FLT_MIN; \ - break; \ - case TSDB_DATA_TYPE_DOUBLE: \ - *(double *)(_v) = DBL_MIN; \ - break; \ - case TSDB_DATA_TYPE_UTINYINT: \ - *(uint8_t *)(_v) = 0; \ - break; \ - case TSDB_DATA_TYPE_USMALLINT: \ - *(uint16_t *)(_v) = 0; \ - break; \ - case TSDB_DATA_TYPE_UBIGINT: \ - *(uint64_t *)(_v) = 0; \ - break; \ - case TSDB_DATA_TYPE_UINT: \ - *(uint32_t *)(_v) = 0; \ - break; \ - default: \ - break; \ - } \ +#define SET_TYPED_DATA_MIN(_v, _type) \ + do { \ + switch (_type) { \ + case TSDB_DATA_TYPE_BOOL: \ + case TSDB_DATA_TYPE_TINYINT: \ + *(int8_t *)(_v) = INT8_MIN; \ + break; \ + case TSDB_DATA_TYPE_SMALLINT: \ + *(int16_t *)(_v) = INT16_MIN; \ + break; \ + case TSDB_DATA_TYPE_INT: \ + *(int32_t *)(_v) = INT32_MIN; \ + break; \ + case TSDB_DATA_TYPE_BIGINT: \ + case TSDB_DATA_TYPE_TIMESTAMP: \ + *(int64_t *)(_v) = INT64_MIN; \ + break; \ + case TSDB_DATA_TYPE_FLOAT: \ + *(float *)(_v) = FLT_MIN; \ + break; \ + case TSDB_DATA_TYPE_DOUBLE: \ + *(double *)(_v) = DBL_MIN; \ + break; \ + case TSDB_DATA_TYPE_UTINYINT: \ + *(uint8_t *)(_v) = 0; \ + break; \ + case TSDB_DATA_TYPE_USMALLINT: \ + *(uint16_t *)(_v) = 0; \ + break; \ + case TSDB_DATA_TYPE_UBIGINT: \ + *(uint64_t *)(_v) = 0; \ + break; \ + case TSDB_DATA_TYPE_UINT: \ + *(uint32_t *)(_v) = 0; \ + break; \ + default: \ + break; \ + } \ } while (0) -#define SET_TYPED_DATA_MAX(_v, _type) \ - do { \ - switch (_type) { \ - case TSDB_DATA_TYPE_BOOL: \ - case TSDB_DATA_TYPE_TINYINT: \ - *(int8_t *)(_v) = INT8_MAX; \ - break; \ - case TSDB_DATA_TYPE_SMALLINT: \ - *(int16_t *)(_v) 
= INT16_MAX; \ - break; \ - case TSDB_DATA_TYPE_INT: \ - *(int32_t *)(_v) = INT32_MAX; \ - break; \ - case TSDB_DATA_TYPE_BIGINT: \ - case TSDB_DATA_TYPE_TIMESTAMP: \ - *(int64_t *)(_v) = INT64_MAX; \ - break; \ - case TSDB_DATA_TYPE_FLOAT: \ - *(float *)(_v) = FLT_MAX; \ - break; \ - case TSDB_DATA_TYPE_DOUBLE: \ - *(double *)(_v) = DBL_MAX; \ - break; \ - case TSDB_DATA_TYPE_UTINYINT: \ - *(uint8_t *)(_v) = UINT8_MAX; \ - break; \ - case TSDB_DATA_TYPE_USMALLINT: \ - *(uint16_t *)(_v) = UINT16_MAX; \ - break; \ - case TSDB_DATA_TYPE_UINT: \ - *(uint32_t *)(_v) = UINT32_MAX; \ - break; \ - case TSDB_DATA_TYPE_UBIGINT: \ - *(uint64_t *)(_v) = UINT64_MAX; \ - break; \ - default: \ - break; \ - } \ +#define SET_TYPED_DATA_MAX(_v, _type) \ + do { \ + switch (_type) { \ + case TSDB_DATA_TYPE_BOOL: \ + case TSDB_DATA_TYPE_TINYINT: \ + *(int8_t *)(_v) = INT8_MAX; \ + break; \ + case TSDB_DATA_TYPE_SMALLINT: \ + *(int16_t *)(_v) = INT16_MAX; \ + break; \ + case TSDB_DATA_TYPE_INT: \ + *(int32_t *)(_v) = INT32_MAX; \ + break; \ + case TSDB_DATA_TYPE_BIGINT: \ + case TSDB_DATA_TYPE_TIMESTAMP: \ + *(int64_t *)(_v) = INT64_MAX; \ + break; \ + case TSDB_DATA_TYPE_FLOAT: \ + *(float *)(_v) = FLT_MAX; \ + break; \ + case TSDB_DATA_TYPE_DOUBLE: \ + *(double *)(_v) = DBL_MAX; \ + break; \ + case TSDB_DATA_TYPE_UTINYINT: \ + *(uint8_t *)(_v) = UINT8_MAX; \ + break; \ + case TSDB_DATA_TYPE_USMALLINT: \ + *(uint16_t *)(_v) = UINT16_MAX; \ + break; \ + case TSDB_DATA_TYPE_UINT: \ + *(uint32_t *)(_v) = UINT32_MAX; \ + break; \ + case TSDB_DATA_TYPE_UBIGINT: \ + *(uint64_t *)(_v) = UINT64_MAX; \ + break; \ + default: \ + break; \ + } \ } while (0) #define NUM_TO_STRING(_inputType, _input, _outputBytes, _output) \ @@ -260,10 +260,9 @@ typedef struct { } \ } while (0) - -//TODO: use varchar(0) to represent NULL type -#define IS_VAR_NULL_TYPE(_t, _b) ((_t) == TSDB_DATA_TYPE_VARCHAR && (_b) == 0) -#define IS_NULL_TYPE(_t) ((_t) == TSDB_DATA_TYPE_NULL) +// TODO: use varchar(0) to represent NULL type +#define IS_VAR_NULL_TYPE(_t, _b) ((_t) == TSDB_DATA_TYPE_VARCHAR && (_b) == 0) +#define IS_NULL_TYPE(_t) ((_t) == TSDB_DATA_TYPE_NULL) #define IS_SIGNED_NUMERIC_TYPE(_t) ((_t) >= TSDB_DATA_TYPE_TINYINT && (_t) <= TSDB_DATA_TYPE_BIGINT) #define IS_UNSIGNED_NUMERIC_TYPE(_t) ((_t) >= TSDB_DATA_TYPE_UTINYINT && (_t) <= TSDB_DATA_TYPE_UBIGINT) @@ -329,7 +328,7 @@ typedef struct tDataTypeDescriptor { int16_t type; int16_t nameLen; int32_t bytes; - char * name; + char *name; int64_t minValue; int64_t maxValue; int32_t (*compFunc)(const char *const input, int32_t inputSize, const int32_t nelements, char *const output, diff --git a/include/util/types.h b/include/util/types.h index 1360307156..8dd0947e9c 100644 --- a/include/util/types.h +++ b/include/util/types.h @@ -81,11 +81,11 @@ static FORCE_INLINE double taos_align_get_double(const char *pBuf) { typedef uint16_t VarDataLenT; // maxVarDataLen: 32767 #define VARSTR_HEADER_SIZE sizeof(VarDataLenT) -#define varDataLen(v) ((VarDataLenT *)(v))[0] -#define varDataVal(v) ((char *)(v) + VARSTR_HEADER_SIZE) +#define varDataLen(v) ((VarDataLenT *)(v))[0] +#define varDataVal(v) ((char *)(v) + VARSTR_HEADER_SIZE) #define varDataTLen(v) (sizeof(VarDataLenT) + varDataLen(v)) -#define NCHAR_WIDTH_TO_BYTES(n) ((n) * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE) +#define NCHAR_WIDTH_TO_BYTES(n) ((n)*TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE) typedef int32_t VarDataOffsetT; @@ -98,4 +98,4 @@ typedef struct tstr { } #endif -#endif /*_TD_TYPES_H_*/ \ No newline at end of file +#endif /*_TD_TYPES_H_*/ diff 
--git a/source/dnode/vnode/inc/vnode.h b/source/dnode/vnode/inc/vnode.h index 4d95a9d7a5..dcf374f4c4 100644 --- a/source/dnode/vnode/inc/vnode.h +++ b/source/dnode/vnode/inc/vnode.h @@ -89,13 +89,13 @@ void metaReaderClear(SMetaReader *pReader); int32_t metaGetTableEntryByUid(SMetaReader *pReader, tb_uid_t uid); int32_t metaReadNext(SMetaReader *pReader); const void *metaGetTableTagVal(SMetaEntry *pEntry, int16_t type, STagVal *tagVal); -int metaGetTableNameByUid(void* meta, uint64_t uid, char* tbName); +int metaGetTableNameByUid(void *meta, uint64_t uid, char *tbName); typedef struct SMetaFltParam { tb_uid_t suid; int16_t cid; int16_t type; - char *val; + void *val; bool reverse; int (*filterFunc)(void *a, void *b, int16_t type); diff --git a/source/dnode/vnode/src/meta/metaOpen.c b/source/dnode/vnode/src/meta/metaOpen.c index 7c7d14e337..195723562c 100644 --- a/source/dnode/vnode/src/meta/metaOpen.c +++ b/source/dnode/vnode/src/meta/metaOpen.c @@ -315,7 +315,8 @@ static int tagIdxKeyCmpr(const void *pKey1, int kLen1, const void *pKey2, int kL return 1; } else if (!pTagIdxKey1->isNull && !pTagIdxKey2->isNull) { // all not NULL, compr tag vals - c = doCompare(pTagIdxKey1->data, pTagIdxKey2->data, pTagIdxKey1->type, 0); + __compar_fn_t func = getComparFunc(pTagIdxKey1->type, 0); + c = func(pTagIdxKey1->data, pTagIdxKey2->data); if (c) return c; } diff --git a/source/dnode/vnode/src/meta/metaQuery.c b/source/dnode/vnode/src/meta/metaQuery.c index 142ad181d5..6a961b7593 100644 --- a/source/dnode/vnode/src/meta/metaQuery.c +++ b/source/dnode/vnode/src/meta/metaQuery.c @@ -98,9 +98,9 @@ tb_uid_t metaGetTableEntryUidByName(SMeta *pMeta, const char *name) { return uid; } -int metaGetTableNameByUid(void* meta, uint64_t uid, char* tbName) { +int metaGetTableNameByUid(void *meta, uint64_t uid, char *tbName) { SMetaReader mr = {0}; - metaReaderInit(&mr, (SMeta*)meta, 0); + metaReaderInit(&mr, (SMeta *)meta, 0); metaGetTableEntryByUid(&mr, uid); STR_TO_VARSTR(tbName, mr.me.name); diff --git a/source/libs/index/src/indexFilter.c b/source/libs/index/src/indexFilter.c index 1f257bb05f..6fa7422b66 100644 --- a/source/libs/index/src/indexFilter.c +++ b/source/libs/index/src/indexFilter.c @@ -385,14 +385,27 @@ static int32_t sifDoIndex(SIFParam *left, SIFParam *right, int8_t operType, SIFP .reverse = reverse, .filterFunc = filterFunc}; - char buf[128] = {0}; + char buf[128] = {0}; + float f = 0.0; if (IS_VAR_DATA_TYPE(left->colValType)) { if (!IS_VAR_DATA_TYPE(right->colValType)) { - NUM_TO_STRING(right->colValType, right->condValue, sizeof(buf), buf + VARSTR_HEADER_SIZE); + NUM_TO_STRING(right->colValType, right->condValue, sizeof(buf) - 2, buf + VARSTR_HEADER_SIZE); varDataSetLen(buf, strlen(buf + VARSTR_HEADER_SIZE)); - param.val = buf; } + } else { + if (left->colValType == TSDB_DATA_TYPE_FLOAT) { + if (right->colValType == TSDB_DATA_TYPE_DOUBLE) { + f = GET_DOUBLE_VAL(right->condValue); + param.val = &f; + } else if (right->colValType == TSDB_DATA_TYPE_BIGINT) { + f = *(int64_t *)(right->condValue); + param.val = &f; + } else { + f = *(int32_t *)(right->condValue); + param.val = &f; + } + } } ret = metaFilterTableIds(arg->metaEx, ¶m, output->result); } From 3383a572814e177130ceca1e289eb2d340cb935d Mon Sep 17 00:00:00 2001 From: slzhou Date: Sat, 30 Jul 2022 10:44:54 +0800 Subject: [PATCH 32/63] fix: fixed order by tbname for first last query --- tests/script/tsim/parser/first_last_query.sim | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git 
a/tests/script/tsim/parser/first_last_query.sim b/tests/script/tsim/parser/first_last_query.sim index adad554fb2..73943a812c 100644 --- a/tests/script/tsim/parser/first_last_query.sim +++ b/tests/script/tsim/parser/first_last_query.sim @@ -283,7 +283,8 @@ print ================== server restart completed sql connect sql use first_db0; -sql select last(*), tbname from m1 group by tbname; +sql select last(*), tbname from m1 group by tbname order by tbname; +print $data00 $data01 $data02 $data10 $data11 $data12 if $rows != 2 then return -1 endi @@ -312,4 +313,4 @@ if $data12 != @tm1@ then return -1 endi -sql drop table m1 \ No newline at end of file +sql drop table m1 From e6c649e6748a5bf4f373052b3254841890c31de2 Mon Sep 17 00:00:00 2001 From: "wenzhouwww@live.cn" Date: Sat, 30 Jul 2022 10:45:27 +0800 Subject: [PATCH 33/63] update --- .../4dnode1mnode_basic_replica3_insertdatas_stop_leader.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader.py index a50fd1b8ca..a31f9750df 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader.py @@ -352,9 +352,9 @@ class TDTestCase: tdLog.debug(" ==== revote leader of database {} cost time {} ====".format(dbname , time_cost)) self.current_thread.join() - tdDnodes[self.stop_dnode_id-1].starttaosd() self.wait_start_dnode_OK(newTdSql) + tdSql.query(" select count(*) from {}.{} ".format(dbname,"stb1")) tdLog.debug(" ==== expected insert {} rows of database {} , really is {}".format(total_rows, dbname , tdSql.queryResult[0][0])) From 965cc65bef2ddecbced8c684c3f75e476d020621 Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Sat, 30 Jul 2022 11:04:23 +0800 Subject: [PATCH 34/63] fix: fix catalog deaklock issue --- source/libs/catalog/inc/catalogInt.h | 2 + source/libs/catalog/src/catalog.c | 111 +++++++++++++++------------ source/libs/catalog/src/ctgAsync.c | 2 +- 3 files changed, 65 insertions(+), 50 deletions(-) diff --git a/source/libs/catalog/inc/catalogInt.h b/source/libs/catalog/inc/catalogInt.h index 1aaa1ecfd7..5b5c6010e8 100644 --- a/source/libs/catalog/inc/catalogInt.h +++ b/source/libs/catalog/inc/catalogInt.h @@ -679,6 +679,8 @@ void ctgClearSubTaskRes(SCtgSubRes *pRes); void ctgFreeQNode(SCtgQNode *node); void ctgClearHandle(SCatalog* pCtg); void ctgFreeTbCacheImpl(SCtgTbCache *pCache); +int32_t ctgRemoveTbMeta(SCatalog* pCtg, SName* pTableName); +int32_t ctgGetTbHashVgroup(SCatalog *pCtg, SRequestConnInfo *pConn, const SName *pTableName, SVgroupInfo *pVgroup); extern SCatalogMgmt gCtgMgmt; diff --git a/source/libs/catalog/src/catalog.c b/source/libs/catalog/src/catalog.c index 59f11898fa..933e65e582 100644 --- a/source/libs/catalog/src/catalog.c +++ b/source/libs/catalog/src/catalog.c @@ -92,7 +92,7 @@ int32_t ctgRefreshTbMeta(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgTbMetaCtx* int32_t code = 0; if (!CTG_FLAG_IS_SYS_DB(ctx->flag)) { - CTG_ERR_RET(catalogGetTableHashVgroup(pCtg, pConn, ctx->pName, &vgroupInfo)); + CTG_ERR_RET(ctgGetTbHashVgroup(pCtg, pConn, ctx->pName, &vgroupInfo)); } STableMetaOutput moutput = {0}; @@ -337,7 +337,10 @@ int32_t ctgGetTbType(SCatalog* pCtg, SRequestConnInfo *pConn, SName* pTableName, } STableMeta* pMeta = NULL; - CTG_ERR_RET(catalogGetTableMeta(pCtg, pConn, pTableName, &pMeta)); + 
SCtgTbMetaCtx ctx = {0}; + ctx.pName = (SName*)pTableName; + ctx.flag = CTG_FLAG_UNKNOWN_STB; + CTG_ERR_RET(ctgGetTbMeta(pCtg, pConn, &ctx, &pMeta)); *tbType = pMeta->tableType; taosMemoryFree(pMeta); @@ -391,7 +394,7 @@ int32_t ctgGetTbCfg(SCatalog* pCtg, SRequestConnInfo *pConn, SName* pTableName, CTG_ERR_RET(ctgGetTableCfgFromMnode(pCtg, pConn, pTableName, pCfg, NULL)); } else { SVgroupInfo vgroupInfo = {0}; - CTG_ERR_RET(catalogGetTableHashVgroup(pCtg, pConn, pTableName, &vgroupInfo)); + CTG_ERR_RET(ctgGetTbHashVgroup(pCtg, pConn, pTableName, &vgroupInfo)); CTG_ERR_RET(ctgGetTableCfgFromVnode(pCtg, pConn, pTableName, &vgroupInfo, pCfg, NULL)); } @@ -477,6 +480,57 @@ _return: CTG_RET(code); } + +int32_t ctgGetTbHashVgroup(SCatalog *pCtg, SRequestConnInfo *pConn, const SName *pTableName, SVgroupInfo *pVgroup) { + if (IS_SYS_DBNAME(pTableName->dbname)) { + ctgError("no valid vgInfo for db, dbname:%s", pTableName->dbname); + CTG_ERR_RET(TSDB_CODE_CTG_INVALID_INPUT); + } + + SCtgDBCache* dbCache = NULL; + int32_t code = 0; + char db[TSDB_DB_FNAME_LEN] = {0}; + tNameGetFullDbName(pTableName, db); + + SDBVgInfo *vgInfo = NULL; + CTG_ERR_JRET(ctgGetDBVgInfo(pCtg, pConn, db, &dbCache, &vgInfo)); + + CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, vgInfo ? vgInfo : dbCache->vgCache.vgInfo, pTableName, pVgroup)); + +_return: + + if (dbCache) { + ctgRUnlockVgInfo(dbCache); + ctgReleaseDBCache(pCtg, dbCache); + } + + if (vgInfo) { + taosHashCleanup(vgInfo->vgHash); + taosMemoryFreeClear(vgInfo); + } + + CTG_RET(code); +} + +int32_t ctgRemoveTbMeta(SCatalog* pCtg, SName* pTableName) { + int32_t code = 0; + + if (NULL == pCtg || NULL == pTableName) { + CTG_ERR_RET(TSDB_CODE_CTG_INVALID_INPUT); + } + + if (NULL == pCtg->dbCache) { + return TSDB_CODE_SUCCESS; + } + + CTG_ERR_JRET(ctgRemoveTbMetaFromCache(pCtg, pTableName, true)); + +_return: + + CTG_RET(code); +} + + int32_t catalogInit(SCatalogCfg* cfg) { if (gCtgMgmt.pCluster) { qError("catalog already initialized"); @@ -772,21 +826,7 @@ _return: int32_t catalogRemoveTableMeta(SCatalog* pCtg, SName* pTableName) { CTG_API_ENTER(); - int32_t code = 0; - - if (NULL == pCtg || NULL == pTableName) { - CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT); - } - - if (NULL == pCtg->dbCache) { - CTG_API_LEAVE(TSDB_CODE_SUCCESS); - } - - CTG_ERR_JRET(ctgRemoveTbMetaFromCache(pCtg, pTableName, true)); - -_return: - - CTG_API_LEAVE(code); + CTG_API_LEAVE(ctgRemoveTbMeta(pCtg, pTableName)); } int32_t catalogRemoveStbMeta(SCatalog* pCtg, const char* dbFName, uint64_t dbId, const char* stbName, uint64_t suid) { @@ -878,12 +918,12 @@ int32_t catalogChkTbMetaVersion(SCatalog* pCtg, SRequestConnInfo *pConn, SArray* case TSDB_CHILD_TABLE: { SName stb = name; strcpy(stb.tname, stbName); - catalogRemoveTableMeta(pCtg, &stb); + ctgRemoveTbMeta(pCtg, &stb); break; } case TSDB_SUPER_TABLE: case TSDB_NORMAL_TABLE: - catalogRemoveTableMeta(pCtg, &name); + ctgRemoveTbMeta(pCtg, &name); break; default: ctgError("ignore table type %d", tbType); @@ -947,34 +987,7 @@ int32_t catalogGetTableDistVgInfo(SCatalog* pCtg, SRequestConnInfo *pConn, const int32_t catalogGetTableHashVgroup(SCatalog *pCtg, SRequestConnInfo *pConn, const SName *pTableName, SVgroupInfo *pVgroup) { CTG_API_ENTER(); - if (IS_SYS_DBNAME(pTableName->dbname)) { - ctgError("no valid vgInfo for db, dbname:%s", pTableName->dbname); - CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT); - } - - SCtgDBCache* dbCache = NULL; - int32_t code = 0; - char db[TSDB_DB_FNAME_LEN] = {0}; - tNameGetFullDbName(pTableName, db); - - SDBVgInfo *vgInfo = 
NULL; - CTG_ERR_JRET(ctgGetDBVgInfo(pCtg, pConn, db, &dbCache, &vgInfo)); - - CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, vgInfo ? vgInfo : dbCache->vgCache.vgInfo, pTableName, pVgroup)); - -_return: - - if (dbCache) { - ctgRUnlockVgInfo(dbCache); - ctgReleaseDBCache(pCtg, dbCache); - } - - if (vgInfo) { - taosHashCleanup(vgInfo->vgHash); - taosMemoryFreeClear(vgInfo); - } - - CTG_API_LEAVE(code); + CTG_API_LEAVE(ctgGetTbHashVgroup(pCtg, pConn, pTableName, pVgroup)); } int32_t catalogGetAllMeta(SCatalog* pCtg, SRequestConnInfo *pConn, const SCatalogReq* pReq, SMetaData* pRsp) { @@ -1200,7 +1213,7 @@ int32_t catalogRefreshGetTableCfg(SCatalog* pCtg, SRequestConnInfo *pConn, const } int32_t code = 0; - CTG_ERR_JRET(catalogRemoveTableMeta(pCtg, (SName*)pTableName)); + CTG_ERR_JRET(ctgRemoveTbMeta(pCtg, (SName*)pTableName)); CTG_ERR_JRET(ctgGetTbCfg(pCtg, pConn, (SName*)pTableName, pCfg)); diff --git a/source/libs/catalog/src/ctgAsync.c b/source/libs/catalog/src/ctgAsync.c index f4cee13ec0..0184ac3a60 100644 --- a/source/libs/catalog/src/ctgAsync.c +++ b/source/libs/catalog/src/ctgAsync.c @@ -398,7 +398,7 @@ int32_t ctgHandleForceUpdate(SCatalog* pCtg, int32_t taskNum, SCtgJob *pJob, con SName* name = taosHashIterate(pTb, NULL); while (name) { - catalogRemoveTableMeta(pCtg, name); + ctgRemoveTbMeta(pCtg, name); name = taosHashIterate(pTb, name); } From 80a32d623535afdf99111c8cf0debafcbe127998 Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Sat, 30 Jul 2022 11:31:05 +0800 Subject: [PATCH 35/63] fix: fix mbs2ucs4 crash issue in taosd --- include/os/osString.h | 1 - source/client/src/clientEnv.c | 2 -- source/dnode/mgmt/node_mgmt/src/dmMgmt.c | 1 + source/os/src/osString.c | 14 +++++++------- 4 files changed, 8 insertions(+), 10 deletions(-) diff --git a/include/os/osString.h b/include/os/osString.h index 74d5e52a1b..2a22c6d8ff 100644 --- a/include/os/osString.h +++ b/include/os/osString.h @@ -62,7 +62,6 @@ typedef int32_t TdUcs4; int32_t taosUcs4len(TdUcs4 *ucs4); int64_t taosStr2int64(const char *str); -int32_t taosConvInit(int32_t maxNum); void taosConvDestroy(); int32_t taosUcs4ToMbs(TdUcs4 *ucs4, int32_t ucs4_max_len, char *mbs); bool taosMbsToUcs4(const char *mbs, size_t mbs_len, TdUcs4 *ucs4, int32_t ucs4_max_len, int32_t *len); diff --git a/source/client/src/clientEnv.c b/source/client/src/clientEnv.c index c012533056..893aa39ce3 100644 --- a/source/client/src/clientEnv.c +++ b/source/client/src/clientEnv.c @@ -361,8 +361,6 @@ void taos_init_imp(void) { initQueryModuleMsgHandle(); - taosConvInit(256); - rpcInit(); SCatalogCfg cfg = {.maxDBCacheNum = 100, .maxTblCacheNum = 100}; diff --git a/source/dnode/mgmt/node_mgmt/src/dmMgmt.c b/source/dnode/mgmt/node_mgmt/src/dmMgmt.c index c2e8a55271..582b16ce99 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmMgmt.c +++ b/source/dnode/mgmt/node_mgmt/src/dmMgmt.c @@ -215,6 +215,7 @@ void dmCleanupDnode(SDnode *pDnode) { dmClearVars(pDnode); rpcCleanup(); indexCleanup(); + taosConvDestroy(); dDebug("dnode is closed, ptr:%p", pDnode); } diff --git a/source/os/src/osString.c b/source/os/src/osString.c index 329b039084..503b8e0c6b 100644 --- a/source/os/src/osString.c +++ b/source/os/src/osString.c @@ -143,14 +143,17 @@ SConv *gConv = NULL; int32_t convUsed = 0; int32_t gConvMaxNum = 0; -int32_t taosConvInit(int32_t maxNum) { - gConvMaxNum = maxNum * 2; +void taosConvInitImpl(void) { + gConvMaxNum = 512; gConv = taosMemoryCalloc(gConvMaxNum, sizeof(SConv)); for (int32_t i = 0; i < gConvMaxNum; ++i) { gConv[i].conv = iconv_open(DEFAULT_UNICODE_ENCODEC, 
tsCharset); } +} - return 0; +static TdThreadOnce convInit = PTHREAD_ONCE_INIT; +void taosConvInit() { + taosThreadOnce(&convInit, taosConvInitImpl); } void taosConvDestroy() { @@ -162,10 +165,7 @@ void taosConvDestroy() { void taosAcquireConv(int32_t *idx) { if (0 == gConvMaxNum) { - gConv = taosMemoryCalloc(1, sizeof(SConv)); - gConv[0].conv = iconv_open(DEFAULT_UNICODE_ENCODEC, tsCharset); - *idx = 0; - return; + taosConvInit(); } while (true) { From 56d26973080fd6da11b594862138e81baaf9378e Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Sat, 30 Jul 2022 12:46:40 +0800 Subject: [PATCH 36/63] fix: new tRowMergerInit2 for ts row merging --- source/dnode/vnode/src/inc/tsdb.h | 3 + source/dnode/vnode/src/tsdb/tsdbRead.c | 50 ++++++++---- source/dnode/vnode/src/tsdb/tsdbUtil.c | 108 ++++++++++++++++++++++--- 3 files changed, 135 insertions(+), 26 deletions(-) diff --git a/source/dnode/vnode/src/inc/tsdb.h b/source/dnode/vnode/src/inc/tsdb.h index d8c84e952b..bbf100cf3a 100644 --- a/source/dnode/vnode/src/inc/tsdb.h +++ b/source/dnode/vnode/src/inc/tsdb.h @@ -90,6 +90,9 @@ int32_t tsdbRowCmprFn(const void *p1, const void *p2); void tRowIterInit(SRowIter *pIter, TSDBROW *pRow, STSchema *pTSchema); SColVal *tRowIterNext(SRowIter *pIter); // SRowMerger +int32_t tRowMergerInit2(SRowMerger *pMerger, STSchema *pResTSchema, TSDBROW *pRow, STSchema *pTSchema); +int32_t tRowMergerAdd(SRowMerger *pMerger, TSDBROW *pRow, STSchema *pTSchema); + int32_t tRowMergerInit(SRowMerger *pMerger, TSDBROW *pRow, STSchema *pTSchema); void tRowMergerClear(SRowMerger *pMerger); int32_t tRowMerge(SRowMerger *pMerger, TSDBROW *pRow); diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c index 0eec715bd1..9545ded927 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead.c @@ -143,7 +143,7 @@ static int buildDataBlockFromBufImpl(STableBlockScanInfo* pBlockScanInfo, i static TSDBROW* getValidRow(SIterInfo* pIter, const SArray* pDelList, STsdbReader* pReader); static int32_t doMergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pScanInfo, STsdbReader* pReader, SRowMerger* pMerger); -static int32_t doMergeRowsInBuf(SIterInfo* pIter, int64_t ts, SArray* pDelList, SRowMerger* pMerger, +static int32_t doMergeRowsInBuf(SIterInfo* pIter, uint64_t uid, int64_t ts, SArray* pDelList, SRowMerger* pMerger, STsdbReader* pReader); static int32_t doAppendOneRow(SSDataBlock* pBlock, STsdbReader* pReader, STSRow* pTSRow); static void setComposedBlockFlag(STsdbReader* pReader, bool composed); @@ -1230,7 +1230,7 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo* doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge); tRowMerge(&merge, pRow); - doMergeRowsInBuf(pIter, k.ts, pBlockScanInfo->delSkyline, &merge, pReader); + doMergeRowsInBuf(pIter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, &merge, pReader); tRowMergerGetRow(&merge, &pTSRow); } @@ -1246,7 +1246,7 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo* updateSchema(pRow, pBlockScanInfo->uid, pReader); tRowMergerInit(&merge, pRow, pReader->pSchema); - doMergeRowsInBuf(pIter, k.ts, pBlockScanInfo->delSkyline, &merge, pReader); + doMergeRowsInBuf(pIter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, &merge, pReader); tRowMerge(&merge, &fRow); doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge); @@ -1291,12 +1291,12 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, 
STableBlockScanInfo* if (ik.ts == key) { tRowMerge(&merge, piRow); - doMergeRowsInBuf(&pBlockScanInfo->iiter, key, pBlockScanInfo->delSkyline, &merge, pReader); + doMergeRowsInBuf(&pBlockScanInfo->iiter, uid, key, pBlockScanInfo->delSkyline, &merge, pReader); } if (k.ts == key) { tRowMerge(&merge, pRow); - doMergeRowsInBuf(&pBlockScanInfo->iter, key, pBlockScanInfo->delSkyline, &merge, pReader); + doMergeRowsInBuf(&pBlockScanInfo->iter, uid, key, pBlockScanInfo->delSkyline, &merge, pReader); } tRowMergerGetRow(&merge, &pTSRow); @@ -1336,11 +1336,11 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo* updateSchema(pRow, uid, pReader); tRowMergerInit(&merge, pRow, pReader->pSchema); - doMergeRowsInBuf(&pBlockScanInfo->iter, key, pBlockScanInfo->delSkyline, &merge, pReader); + doMergeRowsInBuf(&pBlockScanInfo->iter, uid, key, pBlockScanInfo->delSkyline, &merge, pReader); if (ik.ts == k.ts) { tRowMerge(&merge, piRow); - doMergeRowsInBuf(&pBlockScanInfo->iiter, key, pBlockScanInfo->delSkyline, &merge, pReader); + doMergeRowsInBuf(&pBlockScanInfo->iiter, uid, key, pBlockScanInfo->delSkyline, &merge, pReader); } if (k.ts == key) { @@ -2097,7 +2097,8 @@ TSDBROW* getValidRow(SIterInfo* pIter, const SArray* pDelList, STsdbReader* pRea } } -int32_t doMergeRowsInBuf(SIterInfo* pIter, int64_t ts, SArray* pDelList, SRowMerger* pMerger, STsdbReader* pReader) { +int32_t doMergeRowsInBuf(SIterInfo* pIter, uint64_t uid, int64_t ts, SArray* pDelList, SRowMerger* pMerger, + STsdbReader* pReader) { while (1) { pIter->hasVal = tsdbTbDataIterNext(pIter->iter); if (!pIter->hasVal) { @@ -2116,7 +2117,15 @@ int32_t doMergeRowsInBuf(SIterInfo* pIter, int64_t ts, SArray* pDelList, SRowMer break; } - tRowMerge(pMerger, pRow); + int32_t sversion = TSDBROW_SVERSION(pRow); + STSchema* pTSchema = NULL; + if (sversion != pReader->pSchema->version) { + metaGetTbTSchemaEx(pReader->pTsdb->pVnode->pMeta, pReader->suid, uid, sversion, &pTSchema); + } else { + pTSchema = pReader->pSchema; + } + + tRowMergerAdd(pMerger, pRow, pTSchema); } return TSDB_CODE_SUCCESS; @@ -2229,7 +2238,7 @@ void updateSchema(TSDBROW* pRow, uint64_t uid, STsdbReader* pReader) { if (pReader->pSchema == NULL) { metaGetTbTSchemaEx(pReader->pTsdb->pVnode->pMeta, pReader->suid, uid, sversion, &pReader->pSchema); - } else if (pReader->pSchema->version < sversion) { + } else if (pReader->pSchema->version != sversion) { taosMemoryFreeClear(pReader->pSchema); metaGetTbTSchemaEx(pReader->pTsdb->pVnode->pMeta, pReader->suid, uid, sversion, &pReader->pSchema); } @@ -2240,10 +2249,17 @@ void doMergeMultiRows(TSDBROW* pRow, uint64_t uid, SIterInfo* pIter, SArray* pDe SRowMerger merge = {0}; TSDBKEY k = TSDBROW_KEY(pRow); - updateSchema(pRow, uid, pReader); + // updateSchema(pRow, uid, pReader); + int32_t sversion = TSDBROW_SVERSION(pRow); + STSchema* pTSchema = NULL; + if (sversion != pReader->pSchema->version) { + metaGetTbTSchemaEx(pReader->pTsdb->pVnode->pMeta, pReader->suid, uid, sversion, &pTSchema); + } else { + pTSchema = pReader->pSchema; + } - tRowMergerInit(&merge, pRow, pReader->pSchema); - doMergeRowsInBuf(pIter, k.ts, pDelList, &merge, pReader); + tRowMergerInit2(&merge, pReader->pSchema, pRow, pTSchema); + doMergeRowsInBuf(pIter, uid, k.ts, pDelList, &merge, pReader); tRowMergerGetRow(&merge, pTSRow); tRowMergerClear(&merge); } @@ -2259,18 +2275,18 @@ void doMergeMemIMemRows(TSDBROW* pRow, TSDBROW* piRow, STableBlockScanInfo* pBlo updateSchema(piRow, pBlockScanInfo->uid, pReader); tRowMergerInit(&merge, piRow, 
pReader->pSchema); - doMergeRowsInBuf(&pBlockScanInfo->iiter, ik.ts, pBlockScanInfo->delSkyline, &merge, pReader); + doMergeRowsInBuf(&pBlockScanInfo->iiter, pBlockScanInfo->uid, ik.ts, pBlockScanInfo->delSkyline, &merge, pReader); tRowMerge(&merge, pRow); - doMergeRowsInBuf(&pBlockScanInfo->iter, k.ts, pBlockScanInfo->delSkyline, &merge, pReader); + doMergeRowsInBuf(&pBlockScanInfo->iter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, &merge, pReader); } else { updateSchema(pRow, pBlockScanInfo->uid, pReader); tRowMergerInit(&merge, pRow, pReader->pSchema); - doMergeRowsInBuf(&pBlockScanInfo->iter, k.ts, pBlockScanInfo->delSkyline, &merge, pReader); + doMergeRowsInBuf(&pBlockScanInfo->iter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, &merge, pReader); tRowMerge(&merge, piRow); - doMergeRowsInBuf(&pBlockScanInfo->iiter, k.ts, pBlockScanInfo->delSkyline, &merge, pReader); + doMergeRowsInBuf(&pBlockScanInfo->iiter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, &merge, pReader); } tRowMergerGetRow(&merge, pTSRow); diff --git a/source/dnode/vnode/src/tsdb/tsdbUtil.c b/source/dnode/vnode/src/tsdb/tsdbUtil.c index 3e05b75dd0..211a330599 100644 --- a/source/dnode/vnode/src/tsdb/tsdbUtil.c +++ b/source/dnode/vnode/src/tsdb/tsdbUtil.c @@ -13,6 +13,7 @@ * along with this program. If not, see . */ +#include "tdataformat.h" #include "tsdb.h" // SMapData ======================================================================= @@ -568,6 +569,95 @@ SColVal *tRowIterNext(SRowIter *pIter) { } // SRowMerger ====================================================== + +int32_t tRowMergerInit2(SRowMerger *pMerger, STSchema *pResTSchema, TSDBROW *pRow, STSchema *pTSchema) { + int32_t code = 0; + TSDBKEY key = TSDBROW_KEY(pRow); + SColVal *pColVal = &(SColVal){0}; + STColumn *pTColumn; + int32_t iCol = 0; + + pMerger->pTSchema = pResTSchema; + pMerger->version = key.version; + + pMerger->pArray = taosArrayInit(pResTSchema->numOfCols, sizeof(SColVal)); + if (pMerger->pArray == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto _exit; + } + + // ts + pTColumn = &pTSchema->columns[iCol++]; + + ASSERT(pTColumn->type == TSDB_DATA_TYPE_TIMESTAMP); + + *pColVal = COL_VAL_VALUE(pTColumn->colId, pTColumn->type, (SValue){.ts = key.ts}); + if (taosArrayPush(pMerger->pArray, pColVal) == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto _exit; + } + + // other + for (; iCol < pTSchema->numOfCols && iCol < pResTSchema->numOfCols; ++iCol) { + pTColumn = &pResTSchema->columns[iCol]; + if (pTSchema->columns[iCol].colId != pTColumn->colId) { + taosArrayPush(pMerger->pArray, &COL_VAL_NONE(pTColumn->colId, pTColumn->type)); + continue; + } + + tsdbRowGetColVal(pRow, pTSchema, iCol, pColVal); + if (taosArrayPush(pMerger->pArray, pColVal) == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto _exit; + } + } + + for (; iCol < pResTSchema->numOfCols; ++iCol) { + pTColumn = &pResTSchema->columns[iCol]; + taosArrayPush(pMerger->pArray, &COL_VAL_NONE(pTColumn->colId, pTColumn->type)); + } + +_exit: + return code; +} + +int32_t tRowMergerAdd(SRowMerger *pMerger, TSDBROW *pRow, STSchema *pTSchema) { + int32_t code = 0; + TSDBKEY key = TSDBROW_KEY(pRow); + SColVal *pColVal = &(SColVal){0}; + STColumn *pTColumn; + int32_t iCol; + + ASSERT(((SColVal *)pMerger->pArray->pData)->value.ts == key.ts); + + for (iCol = 1; iCol < pMerger->pTSchema->numOfCols && iCol < pTSchema->numOfCols; ++iCol) { + pTColumn = &pMerger->pTSchema->columns[iCol]; + if (pTSchema->columns[iCol].colId != pTColumn->colId) { + continue; + } + + 
tsdbRowGetColVal(pRow, pMerger->pTSchema, iCol, pColVal); + + if (key.version > pMerger->version) { + if (!pColVal->isNone) { + taosArraySet(pMerger->pArray, iCol, pColVal); + } + } else if (key.version < pMerger->version) { + SColVal *tColVal = (SColVal *)taosArrayGet(pMerger->pArray, iCol); + if (tColVal->isNone && !pColVal->isNone) { + taosArraySet(pMerger->pArray, iCol, pColVal); + } + } else { + ASSERT(0); + } + } + + pMerger->version = key.version; + +_exit: + return code; +} + int32_t tRowMergerInit(SRowMerger *pMerger, TSDBROW *pRow, STSchema *pTSchema) { int32_t code = 0; TSDBKEY key = TSDBROW_KEY(pRow); @@ -1401,7 +1491,7 @@ void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg) { break; case TSDB_DATA_TYPE_BOOL: break; - case TSDB_DATA_TYPE_TINYINT:{ + case TSDB_DATA_TYPE_TINYINT: { pColAgg->sum += colVal.value.i8; if (pColAgg->min > colVal.value.i8) { pColAgg->min = colVal.value.i8; @@ -1411,7 +1501,7 @@ void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg) { } break; } - case TSDB_DATA_TYPE_SMALLINT:{ + case TSDB_DATA_TYPE_SMALLINT: { pColAgg->sum += colVal.value.i16; if (pColAgg->min > colVal.value.i16) { pColAgg->min = colVal.value.i16; @@ -1441,7 +1531,7 @@ void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg) { } break; } - case TSDB_DATA_TYPE_FLOAT:{ + case TSDB_DATA_TYPE_FLOAT: { pColAgg->sum += colVal.value.f; if (pColAgg->min > colVal.value.f) { pColAgg->min = colVal.value.f; @@ -1451,7 +1541,7 @@ void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg) { } break; } - case TSDB_DATA_TYPE_DOUBLE:{ + case TSDB_DATA_TYPE_DOUBLE: { pColAgg->sum += colVal.value.d; if (pColAgg->min > colVal.value.d) { pColAgg->min = colVal.value.d; @@ -1463,7 +1553,7 @@ void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg) { } case TSDB_DATA_TYPE_VARCHAR: break; - case TSDB_DATA_TYPE_TIMESTAMP:{ + case TSDB_DATA_TYPE_TIMESTAMP: { if (pColAgg->min > colVal.value.i64) { pColAgg->min = colVal.value.i64; } @@ -1474,7 +1564,7 @@ void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg) { } case TSDB_DATA_TYPE_NCHAR: break; - case TSDB_DATA_TYPE_UTINYINT:{ + case TSDB_DATA_TYPE_UTINYINT: { pColAgg->sum += colVal.value.u8; if (pColAgg->min > colVal.value.u8) { pColAgg->min = colVal.value.u8; @@ -1484,7 +1574,7 @@ void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg) { } break; } - case TSDB_DATA_TYPE_USMALLINT:{ + case TSDB_DATA_TYPE_USMALLINT: { pColAgg->sum += colVal.value.u16; if (pColAgg->min > colVal.value.u16) { pColAgg->min = colVal.value.u16; @@ -1494,7 +1584,7 @@ void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg) { } break; } - case TSDB_DATA_TYPE_UINT:{ + case TSDB_DATA_TYPE_UINT: { pColAgg->sum += colVal.value.u32; if (pColAgg->min > colVal.value.u32) { pColAgg->min = colVal.value.u32; @@ -1504,7 +1594,7 @@ void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg) { } break; } - case TSDB_DATA_TYPE_UBIGINT:{ + case TSDB_DATA_TYPE_UBIGINT: { pColAgg->sum += colVal.value.u64; if (pColAgg->min > colVal.value.u64) { pColAgg->min = colVal.value.u64; From b08a28d559ba4b88db56c638d8697de22706a9b3 Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Sat, 30 Jul 2022 13:14:44 +0800 Subject: [PATCH 37/63] fix: some problems of planner --- include/libs/nodes/querynodes.h | 2 - include/libs/scalar/filter.h | 4 + include/libs/scalar/scalar.h | 6 +- source/libs/nodes/src/nodesUtilFuncs.c | 184 ------------------ source/libs/parser/src/parAstCreater.c | 37 ++-- 
source/libs/parser/src/parTranslater.c | 43 ++++- source/libs/parser/test/parSelectTest.cpp | 2 + source/libs/planner/src/planOptimizer.c | 2 +- source/libs/planner/src/planSpliter.c | 2 + source/libs/planner/test/planOrderByTest.cpp | 2 + source/libs/scalar/src/filter.c | 192 +++++++++++++++++++ 11 files changed, 270 insertions(+), 206 deletions(-) diff --git a/include/libs/nodes/querynodes.h b/include/libs/nodes/querynodes.h index 5dc1e7512f..cd2a7d3f9f 100644 --- a/include/libs/nodes/querynodes.h +++ b/include/libs/nodes/querynodes.h @@ -418,8 +418,6 @@ void nodesValueNodeToVariant(const SValueNode* pNode, SVariant* pVal); char* nodesGetFillModeString(EFillMode mode); int32_t nodesMergeConds(SNode** pDst, SNodeList** pSrc); -int32_t nodesPartitionCond(SNode** pCondition, SNode** pPrimaryKeyCond, SNode** pTagIndexCond, SNode** pTagCond, - SNode** pOtherCond); #ifdef __cplusplus } diff --git a/include/libs/scalar/filter.h b/include/libs/scalar/filter.h index f1cd99f04a..44064f0a9f 100644 --- a/include/libs/scalar/filter.h +++ b/include/libs/scalar/filter.h @@ -46,6 +46,10 @@ extern int32_t filterFreeNcharColumns(SFilterInfo *pFilterInfo); extern void filterFreeInfo(SFilterInfo *info); extern bool filterRangeExecute(SFilterInfo *info, SColumnDataAgg *pDataStatis, int32_t numOfCols, int32_t numOfRows); +/* condition split interface */ +int32_t filterPartitionCond(SNode **pCondition, SNode **pPrimaryKeyCond, SNode **pTagIndexCond, SNode **pTagCond, + SNode **pOtherCond); + #ifdef __cplusplus } #endif diff --git a/include/libs/scalar/scalar.h b/include/libs/scalar/scalar.h index 770b0442ea..6322c6a1e9 100644 --- a/include/libs/scalar/scalar.h +++ b/include/libs/scalar/scalar.h @@ -25,7 +25,7 @@ extern "C" { typedef struct SFilterInfo SFilterInfo; -int32_t scalarGetOperatorResultType(SOperatorNode* pOp); +int32_t scalarGetOperatorResultType(SOperatorNode *pOp); /* pNode will be freed in API; @@ -43,7 +43,7 @@ int32_t scalarGetOperatorParamNum(EOperatorType type); int32_t scalarGenerateSetFromList(void **data, void *pNode, uint32_t type); int32_t vectorGetConvertType(int32_t type1, int32_t type2); -int32_t vectorConvertImpl(const SScalarParam* pIn, SScalarParam* pOut, int32_t* overflow); +int32_t vectorConvertImpl(const SScalarParam *pIn, SScalarParam *pOut, int32_t *overflow); /* Math functions */ int32_t absFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput); @@ -86,7 +86,7 @@ int32_t nowFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutpu int32_t todayFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput); int32_t timezoneFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput); -bool getTimePseudoFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv); +bool getTimePseudoFuncEnv(struct SFunctionNode *pFunc, SFuncExecEnv *pEnv); int32_t winStartTsFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput); int32_t winEndTsFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput); diff --git a/source/libs/nodes/src/nodesUtilFuncs.c b/source/libs/nodes/src/nodesUtilFuncs.c index 3c6fbe409c..b7bcd13aa0 100644 --- a/source/libs/nodes/src/nodesUtilFuncs.c +++ b/source/libs/nodes/src/nodesUtilFuncs.c @@ -1816,187 +1816,3 @@ int32_t nodesMergeConds(SNode** pDst, SNodeList** pSrc) { return TSDB_CODE_SUCCESS; } - -typedef struct SClassifyConditionCxt { - bool hasPrimaryKey; - bool hasTagIndexCol; - bool hasTagCol; - bool hasOtherCol; -} SClassifyConditionCxt; - -static EDealRes classifyConditionImpl(SNode* 
pNode, void* pContext) { - SClassifyConditionCxt* pCxt = (SClassifyConditionCxt*)pContext; - if (QUERY_NODE_COLUMN == nodeType(pNode)) { - SColumnNode* pCol = (SColumnNode*)pNode; - if (PRIMARYKEY_TIMESTAMP_COL_ID == pCol->colId && TSDB_SYSTEM_TABLE != pCol->tableType) { - pCxt->hasPrimaryKey = true; - } else if (pCol->hasIndex) { - pCxt->hasTagIndexCol = true; - pCxt->hasTagCol = true; - } else if (COLUMN_TYPE_TAG == pCol->colType || COLUMN_TYPE_TBNAME == pCol->colType) { - pCxt->hasTagCol = true; - } else { - pCxt->hasOtherCol = true; - } - } - return DEAL_RES_CONTINUE; -} - -typedef enum EConditionType { - COND_TYPE_PRIMARY_KEY = 1, - COND_TYPE_TAG_INDEX, - COND_TYPE_TAG, - COND_TYPE_NORMAL -} EConditionType; - -static EConditionType classifyCondition(SNode* pNode) { - SClassifyConditionCxt cxt = {.hasPrimaryKey = false, .hasTagIndexCol = false, .hasOtherCol = false}; - nodesWalkExpr(pNode, classifyConditionImpl, &cxt); - return cxt.hasOtherCol ? COND_TYPE_NORMAL - : (cxt.hasPrimaryKey && cxt.hasTagCol - ? COND_TYPE_NORMAL - : (cxt.hasPrimaryKey ? COND_TYPE_PRIMARY_KEY - : (cxt.hasTagIndexCol ? COND_TYPE_TAG_INDEX : COND_TYPE_TAG))); -} - -static int32_t partitionLogicCond(SNode** pCondition, SNode** pPrimaryKeyCond, SNode** pTagIndexCond, SNode** pTagCond, - SNode** pOtherCond) { - SLogicConditionNode* pLogicCond = (SLogicConditionNode*)(*pCondition); - - int32_t code = TSDB_CODE_SUCCESS; - - SNodeList* pPrimaryKeyConds = NULL; - SNodeList* pTagIndexConds = NULL; - SNodeList* pTagConds = NULL; - SNodeList* pOtherConds = NULL; - SNode* pCond = NULL; - FOREACH(pCond, pLogicCond->pParameterList) { - switch (classifyCondition(pCond)) { - case COND_TYPE_PRIMARY_KEY: - if (NULL != pPrimaryKeyCond) { - code = nodesListMakeAppend(&pPrimaryKeyConds, nodesCloneNode(pCond)); - } - break; - case COND_TYPE_TAG_INDEX: - if (NULL != pTagIndexCond) { - code = nodesListMakeAppend(&pTagIndexConds, nodesCloneNode(pCond)); - } - if (NULL != pTagCond) { - code = nodesListMakeAppend(&pTagConds, nodesCloneNode(pCond)); - } - break; - case COND_TYPE_TAG: - if (NULL != pTagCond) { - code = nodesListMakeAppend(&pTagConds, nodesCloneNode(pCond)); - } - break; - case COND_TYPE_NORMAL: - default: - if (NULL != pOtherCond) { - code = nodesListMakeAppend(&pOtherConds, nodesCloneNode(pCond)); - } - break; - } - if (TSDB_CODE_SUCCESS != code) { - break; - } - } - - SNode* pTempPrimaryKeyCond = NULL; - SNode* pTempTagIndexCond = NULL; - SNode* pTempTagCond = NULL; - SNode* pTempOtherCond = NULL; - if (TSDB_CODE_SUCCESS == code) { - code = nodesMergeConds(&pTempPrimaryKeyCond, &pPrimaryKeyConds); - } - if (TSDB_CODE_SUCCESS == code) { - code = nodesMergeConds(&pTempTagIndexCond, &pTagIndexConds); - } - if (TSDB_CODE_SUCCESS == code) { - code = nodesMergeConds(&pTempTagCond, &pTagConds); - } - if (TSDB_CODE_SUCCESS == code) { - code = nodesMergeConds(&pTempOtherCond, &pOtherConds); - } - - if (TSDB_CODE_SUCCESS == code) { - if (NULL != pPrimaryKeyCond) { - *pPrimaryKeyCond = pTempPrimaryKeyCond; - } - if (NULL != pTagIndexCond) { - *pTagIndexCond = pTempTagIndexCond; - } - if (NULL != pTagCond) { - *pTagCond = pTempTagCond; - } - if (NULL != pOtherCond) { - *pOtherCond = pTempOtherCond; - } - nodesDestroyNode(*pCondition); - *pCondition = NULL; - } else { - nodesDestroyList(pPrimaryKeyConds); - nodesDestroyList(pTagIndexConds); - nodesDestroyList(pTagConds); - nodesDestroyList(pOtherConds); - nodesDestroyNode(pTempPrimaryKeyCond); - nodesDestroyNode(pTempTagIndexCond); - nodesDestroyNode(pTempTagCond); - 
nodesDestroyNode(pTempOtherCond); - } - - return code; -} - -int32_t nodesPartitionCond(SNode** pCondition, SNode** pPrimaryKeyCond, SNode** pTagIndexCond, SNode** pTagCond, - SNode** pOtherCond) { - if (QUERY_NODE_LOGIC_CONDITION == nodeType(*pCondition) && - LOGIC_COND_TYPE_AND == ((SLogicConditionNode*)*pCondition)->condType) { - return partitionLogicCond(pCondition, pPrimaryKeyCond, pTagIndexCond, pTagCond, pOtherCond); - } - - bool needOutput = false; - switch (classifyCondition(*pCondition)) { - case COND_TYPE_PRIMARY_KEY: - if (NULL != pPrimaryKeyCond) { - *pPrimaryKeyCond = *pCondition; - needOutput = true; - } - break; - case COND_TYPE_TAG_INDEX: - if (NULL != pTagIndexCond) { - *pTagIndexCond = *pCondition; - needOutput = true; - } - if (NULL != pTagCond) { - SNode* pTempCond = *pCondition; - if (NULL != pTagIndexCond) { - pTempCond = nodesCloneNode(*pCondition); - if (NULL == pTempCond) { - return TSDB_CODE_OUT_OF_MEMORY; - } - } - *pTagCond = pTempCond; - needOutput = true; - } - break; - case COND_TYPE_TAG: - if (NULL != pTagCond) { - *pTagCond = *pCondition; - needOutput = true; - } - break; - case COND_TYPE_NORMAL: - default: - if (NULL != pOtherCond) { - *pOtherCond = *pCondition; - needOutput = true; - } - break; - } - if (needOutput) { - *pCondition = NULL; - } - - return TSDB_CODE_SUCCESS; -} diff --git a/source/libs/parser/src/parAstCreater.c b/source/libs/parser/src/parAstCreater.c index ff2157fe67..d555e86306 100644 --- a/source/libs/parser/src/parAstCreater.c +++ b/source/libs/parser/src/parAstCreater.c @@ -233,18 +233,22 @@ SNode* createRawExprNodeExt(SAstCreateContext* pCxt, const SToken* pStart, const SNode* releaseRawExprNode(SAstCreateContext* pCxt, SNode* pNode) { CHECK_PARSER_STATUS(pCxt); SRawExprNode* pRawExpr = (SRawExprNode*)pNode; - SNode* pExpr = pRawExpr->pNode; - if (nodesIsExprNode(pExpr)) { + SNode* pRealizedExpr = pRawExpr->pNode; + if (nodesIsExprNode(pRealizedExpr)) { + SExprNode* pExpr = (SExprNode*)pRealizedExpr; if (QUERY_NODE_COLUMN == nodeType(pExpr)) { - strcpy(((SExprNode*)pExpr)->aliasName, ((SColumnNode*)pExpr)->colName); + strcpy(pExpr->aliasName, ((SColumnNode*)pExpr)->colName); + strcpy(pExpr->userAlias, ((SColumnNode*)pExpr)->colName); } else { - int32_t len = TMIN(sizeof(((SExprNode*)pExpr)->aliasName) - 1, pRawExpr->n); - strncpy(((SExprNode*)pExpr)->aliasName, pRawExpr->p, len); - ((SExprNode*)pExpr)->aliasName[len] = '\0'; + int32_t len = TMIN(sizeof(pExpr->aliasName) - 1, pRawExpr->n); + strncpy(pExpr->aliasName, pRawExpr->p, len); + pExpr->aliasName[len] = '\0'; + strncpy(pExpr->userAlias, pRawExpr->p, len); + pExpr->userAlias[len] = '\0'; } } taosMemoryFreeClear(pNode); - return pExpr; + return pRealizedExpr; } SToken getTokenFromRawExprNode(SAstCreateContext* pCxt, SNode* pNode) { @@ -641,11 +645,12 @@ SNode* createInterpTimeRange(SAstCreateContext* pCxt, SNode* pStart, SNode* pEnd SNode* setProjectionAlias(SAstCreateContext* pCxt, SNode* pNode, SToken* pAlias) { CHECK_PARSER_STATUS(pCxt); trimEscape(pAlias); - int32_t len = TMIN(sizeof(((SExprNode*)pNode)->aliasName) - 1, pAlias->n); - strncpy(((SExprNode*)pNode)->aliasName, pAlias->z, len); - ((SExprNode*)pNode)->aliasName[len] = '\0'; - strncpy(((SExprNode*)pNode)->userAlias, pAlias->z, len); - ((SExprNode*)pNode)->userAlias[len] = '\0'; + SExprNode* pExpr = (SExprNode*)pNode; + int32_t len = TMIN(sizeof(pExpr->aliasName) - 1, pAlias->n); + strncpy(pExpr->aliasName, pAlias->z, len); + pExpr->aliasName[len] = '\0'; + strncpy(pExpr->userAlias, pAlias->z, len); + 
pExpr->userAlias[len] = '\0'; return pNode; } @@ -766,13 +771,21 @@ SNode* createSelectStmt(SAstCreateContext* pCxt, bool isDistinct, SNodeList* pPr return (SNode*)select; } +static void setSubquery(SNode* pStmt) { + if (QUERY_NODE_SELECT_STMT == nodeType(pStmt)) { + ((SSelectStmt*)pStmt)->isSubquery = true; + } +} + SNode* createSetOperator(SAstCreateContext* pCxt, ESetOperatorType type, SNode* pLeft, SNode* pRight) { CHECK_PARSER_STATUS(pCxt); SSetOperator* setOp = (SSetOperator*)nodesMakeNode(QUERY_NODE_SET_OPERATOR); CHECK_OUT_OF_MEM(setOp); setOp->opType = type; setOp->pLeft = pLeft; + setSubquery(setOp->pLeft); setOp->pRight = pRight; + setSubquery(setOp->pRight); sprintf(setOp->stmtName, "%p", setOp); return (SNode*)setOp; } diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 9e8b28f362..8495aa15cd 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -691,7 +691,7 @@ static EDealRes translateColumnUseAlias(STranslateContext* pCxt, SColumnNode** p SNode* pNode; FOREACH(pNode, pProjectionList) { SExprNode* pExpr = (SExprNode*)pNode; - if (0 == strcmp((*pCol)->colName, pExpr->aliasName)) { + if (0 == strcmp((*pCol)->colName, pExpr->userAlias)) { SColumnRefNode* pColRef = (SColumnRefNode*)nodesMakeNode(QUERY_NODE_COLUMN_REF); if (NULL == pColRef) { pCxt->errCode = TSDB_CODE_OUT_OF_MEMORY; @@ -1535,6 +1535,7 @@ static EDealRes rewriteColToSelectValFunc(STranslateContext* pCxt, SNode** pNode } strcpy(pFunc->functionName, "_select_value"); strcpy(pFunc->node.aliasName, ((SExprNode*)*pNode)->aliasName); + strcpy(pFunc->node.userAlias, ((SExprNode*)*pNode)->userAlias); pCxt->errCode = nodesListMakeAppend(&pFunc->pParameterList, *pNode); if (TSDB_CODE_SUCCESS == pCxt->errCode) { pCxt->errCode = getFuncInfo(pCxt, pFunc); @@ -2171,12 +2172,45 @@ static int32_t translateFillValues(STranslateContext* pCxt, SSelectStmt* pSelect return TSDB_CODE_SUCCESS; } +static int32_t rewriteProjectAlias(SNodeList* pProjectionList) { + int32_t no = 1; + SNode* pProject = NULL; + FOREACH(pProject, pProjectionList) { sprintf(((SExprNode*)pProject)->aliasName, "#expr_%d", no++); } + return TSDB_CODE_SUCCESS; +} + +static int32_t checkProjectAlias(STranslateContext* pCxt, SNodeList* pProjectionList) { + SHashObj* pUserAliasSet = taosHashInit(LIST_LENGTH(pProjectionList), + taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); + SNode* pProject = NULL; + FOREACH(pProject, pProjectionList) { + SExprNode* pExpr = (SExprNode*)pProject; + if (NULL != taosHashGet(pUserAliasSet, pExpr->userAlias, strlen(pExpr->userAlias))) { + taosHashCleanup(pUserAliasSet); + return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_AMBIGUOUS_COLUMN, pExpr->userAlias); + } + taosHashPut(pUserAliasSet, pExpr->userAlias, strlen(pExpr->userAlias), &pExpr, POINTER_BYTES); + } + taosHashCleanup(pUserAliasSet); + return TSDB_CODE_SUCCESS; +} + +static int32_t translateProjectionList(STranslateContext* pCxt, SSelectStmt* pSelect) { + if (pSelect->isSubquery) { + return checkProjectAlias(pCxt, pSelect->pProjectionList); + } + return rewriteProjectAlias(pSelect->pProjectionList); +} + static int32_t translateSelectList(STranslateContext* pCxt, SSelectStmt* pSelect) { pCxt->currClause = SQL_CLAUSE_SELECT; int32_t code = translateExprList(pCxt, pSelect->pProjectionList); if (TSDB_CODE_SUCCESS == code) { code = translateStar(pCxt, pSelect); } + if (TSDB_CODE_SUCCESS == code) { + code = translateProjectionList(pCxt, pSelect); + } if 
(TSDB_CODE_SUCCESS == code) { code = checkExprListForGroupBy(pCxt, pSelect, pSelect->pProjectionList); } @@ -2232,7 +2266,7 @@ static int32_t getQueryTimeRange(STranslateContext* pCxt, SNode* pWhere, STimeWi } SNode* pPrimaryKeyCond = NULL; - nodesPartitionCond(&pCond, &pPrimaryKeyCond, NULL, NULL, NULL); + filterPartitionCond(&pCond, &pPrimaryKeyCond, NULL, NULL, NULL); int32_t code = TSDB_CODE_SUCCESS; if (NULL != pPrimaryKeyCond) { @@ -2699,6 +2733,7 @@ static SNode* createSetOperProject(const char* pTableAlias, SNode* pNode) { strcpy(pCol->tableAlias, pTableAlias); strcpy(pCol->colName, ((SExprNode*)pNode)->aliasName); strcpy(pCol->node.aliasName, pCol->colName); + strcpy(pCol->node.userAlias, ((SExprNode*)pNode)->userAlias); return (SNode*)pCol; } @@ -2810,7 +2845,7 @@ static int32_t partitionDeleteWhere(STranslateContext* pCxt, SDeleteStmt* pDelet SNode* pPrimaryKeyCond = NULL; SNode* pOtherCond = NULL; - int32_t code = nodesPartitionCond(&pDelete->pWhere, &pPrimaryKeyCond, NULL, &pDelete->pTagCond, &pOtherCond); + int32_t code = filterPartitionCond(&pDelete->pWhere, &pPrimaryKeyCond, NULL, &pDelete->pTagCond, &pOtherCond); if (TSDB_CODE_SUCCESS == code && NULL != pOtherCond) { code = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_DELETE_WHERE); } @@ -4983,7 +5018,7 @@ static int32_t translateSubquery(STranslateContext* pCxt, SNode* pNode) { SNode* pCurrStmt = pCxt->pCurrStmt; int32_t currLevel = pCxt->currLevel; pCxt->currLevel = ++(pCxt->levelNo); - int32_t code = translateQuery(pCxt, pNode); + int32_t code = translateQuery(pCxt, pNode); pCxt->currClause = currClause; pCxt->pCurrStmt = pCurrStmt; pCxt->currLevel = currLevel; diff --git a/source/libs/parser/test/parSelectTest.cpp b/source/libs/parser/test/parSelectTest.cpp index c91e09c825..ebf8012cf7 100644 --- a/source/libs/parser/test/parSelectTest.cpp +++ b/source/libs/parser/test/parSelectTest.cpp @@ -70,6 +70,8 @@ TEST_F(ParserSelectTest, condition) { run("SELECT c1 FROM t1 WHERE NOT ts in (true, false)"); run("SELECT * FROM t1 WHERE c1 > 10 and c1 is not null"); + + run("SELECT * FROM t1 WHERE TBNAME like 'fda%' or TS > '2021-05-05 18:19:01.000'"); } TEST_F(ParserSelectTest, pseudoColumn) { diff --git a/source/libs/planner/src/planOptimizer.c b/source/libs/planner/src/planOptimizer.c index 577266a697..cb62c0390b 100644 --- a/source/libs/planner/src/planOptimizer.c +++ b/source/libs/planner/src/planOptimizer.c @@ -436,7 +436,7 @@ static int32_t pushDownCondOptDealScan(SOptimizeContext* pCxt, SScanLogicNode* p SNode* pPrimaryKeyCond = NULL; SNode* pOtherCond = NULL; - int32_t code = nodesPartitionCond(&pScan->node.pConditions, &pPrimaryKeyCond, &pScan->pTagIndexCond, &pScan->pTagCond, + int32_t code = filterPartitionCond(&pScan->node.pConditions, &pPrimaryKeyCond, &pScan->pTagIndexCond, &pScan->pTagCond, &pOtherCond); if (TSDB_CODE_SUCCESS == code && NULL != pScan->pTagCond) { code = pushDownCondOptRebuildTbanme(&pScan->pTagCond); diff --git a/source/libs/planner/src/planSpliter.c b/source/libs/planner/src/planSpliter.c index bc5e50218b..b077e6ef55 100644 --- a/source/libs/planner/src/planSpliter.c +++ b/source/libs/planner/src/planSpliter.c @@ -763,6 +763,8 @@ static SNode* stbSplCreateColumnNode(SExprNode* pExpr) { return NULL; } if (QUERY_NODE_COLUMN == nodeType(pExpr)) { + strcpy(pCol->dbName, ((SColumnNode*)pExpr)->dbName); + strcpy(pCol->tableName, ((SColumnNode*)pExpr)->tableName); strcpy(pCol->tableAlias, ((SColumnNode*)pExpr)->tableAlias); } strcpy(pCol->colName, pExpr->aliasName); diff --git 
a/source/libs/planner/test/planOrderByTest.cpp b/source/libs/planner/test/planOrderByTest.cpp index 13dfbad78c..6fef080bc1 100644 --- a/source/libs/planner/test/planOrderByTest.cpp +++ b/source/libs/planner/test/planOrderByTest.cpp @@ -39,6 +39,8 @@ TEST_F(PlanOrderByTest, expr) { useDb("root", "test"); run("SELECT * FROM t1 ORDER BY c1 + 10, c2"); + + run("SELECT c1 FROM st1 ORDER BY ts, _C0"); } TEST_F(PlanOrderByTest, nullsOrder) { diff --git a/source/libs/scalar/src/filter.c b/source/libs/scalar/src/filter.c index 29773b95c3..8e4e31d6cc 100644 --- a/source/libs/scalar/src/filter.c +++ b/source/libs/scalar/src/filter.c @@ -22,6 +22,7 @@ #include "tcompare.h" #include "tdatablock.h" #include "ttime.h" +#include "functionMgt.h" OptrStr gOptrStr[] = { {0, "invalid"}, @@ -3877,4 +3878,195 @@ bool filterExecute(SFilterInfo *info, SSDataBlock *pSrc, int8_t** p, SColumnData } +typedef struct SClassifyConditionCxt { + bool hasPrimaryKey; + bool hasTagIndexCol; + bool hasTagCol; + bool hasOtherCol; +} SClassifyConditionCxt; +static EDealRes classifyConditionImpl(SNode* pNode, void* pContext) { + SClassifyConditionCxt* pCxt = (SClassifyConditionCxt*)pContext; + if (QUERY_NODE_COLUMN == nodeType(pNode)) { + SColumnNode* pCol = (SColumnNode*)pNode; + if (PRIMARYKEY_TIMESTAMP_COL_ID == pCol->colId && TSDB_SYSTEM_TABLE != pCol->tableType) { + pCxt->hasPrimaryKey = true; + } else if (pCol->hasIndex) { + pCxt->hasTagIndexCol = true; + pCxt->hasTagCol = true; + } else if (COLUMN_TYPE_TAG == pCol->colType || COLUMN_TYPE_TBNAME == pCol->colType) { + pCxt->hasTagCol = true; + } else { + pCxt->hasOtherCol = true; + } + } else if (QUERY_NODE_FUNCTION == nodeType(pNode)) { + SFunctionNode* pFunc = (SFunctionNode*)pNode; + if (fmIsPseudoColumnFunc(pFunc->funcId)) { + if (FUNCTION_TYPE_TBNAME==pFunc->funcType) { + pCxt->hasTagCol = true; + } else { + pCxt->hasOtherCol = true; + } + } + } + return DEAL_RES_CONTINUE; +} + +typedef enum EConditionType { + COND_TYPE_PRIMARY_KEY = 1, + COND_TYPE_TAG_INDEX, + COND_TYPE_TAG, + COND_TYPE_NORMAL +} EConditionType; + +static EConditionType classifyCondition(SNode* pNode) { + SClassifyConditionCxt cxt = {.hasPrimaryKey = false, .hasTagIndexCol = false, .hasOtherCol = false}; + nodesWalkExpr(pNode, classifyConditionImpl, &cxt); + return cxt.hasOtherCol ? COND_TYPE_NORMAL + : (cxt.hasPrimaryKey && cxt.hasTagCol + ? COND_TYPE_NORMAL + : (cxt.hasPrimaryKey ? COND_TYPE_PRIMARY_KEY + : (cxt.hasTagIndexCol ? 
COND_TYPE_TAG_INDEX : COND_TYPE_TAG))); +} + +static int32_t partitionLogicCond(SNode** pCondition, SNode** pPrimaryKeyCond, SNode** pTagIndexCond, SNode** pTagCond, + SNode** pOtherCond) { + SLogicConditionNode* pLogicCond = (SLogicConditionNode*)(*pCondition); + + int32_t code = TSDB_CODE_SUCCESS; + + SNodeList* pPrimaryKeyConds = NULL; + SNodeList* pTagIndexConds = NULL; + SNodeList* pTagConds = NULL; + SNodeList* pOtherConds = NULL; + SNode* pCond = NULL; + FOREACH(pCond, pLogicCond->pParameterList) { + switch (classifyCondition(pCond)) { + case COND_TYPE_PRIMARY_KEY: + if (NULL != pPrimaryKeyCond) { + code = nodesListMakeAppend(&pPrimaryKeyConds, nodesCloneNode(pCond)); + } + break; + case COND_TYPE_TAG_INDEX: + if (NULL != pTagIndexCond) { + code = nodesListMakeAppend(&pTagIndexConds, nodesCloneNode(pCond)); + } + if (NULL != pTagCond) { + code = nodesListMakeAppend(&pTagConds, nodesCloneNode(pCond)); + } + break; + case COND_TYPE_TAG: + if (NULL != pTagCond) { + code = nodesListMakeAppend(&pTagConds, nodesCloneNode(pCond)); + } + break; + case COND_TYPE_NORMAL: + default: + if (NULL != pOtherCond) { + code = nodesListMakeAppend(&pOtherConds, nodesCloneNode(pCond)); + } + break; + } + if (TSDB_CODE_SUCCESS != code) { + break; + } + } + + SNode* pTempPrimaryKeyCond = NULL; + SNode* pTempTagIndexCond = NULL; + SNode* pTempTagCond = NULL; + SNode* pTempOtherCond = NULL; + if (TSDB_CODE_SUCCESS == code) { + code = nodesMergeConds(&pTempPrimaryKeyCond, &pPrimaryKeyConds); + } + if (TSDB_CODE_SUCCESS == code) { + code = nodesMergeConds(&pTempTagIndexCond, &pTagIndexConds); + } + if (TSDB_CODE_SUCCESS == code) { + code = nodesMergeConds(&pTempTagCond, &pTagConds); + } + if (TSDB_CODE_SUCCESS == code) { + code = nodesMergeConds(&pTempOtherCond, &pOtherConds); + } + + if (TSDB_CODE_SUCCESS == code) { + if (NULL != pPrimaryKeyCond) { + *pPrimaryKeyCond = pTempPrimaryKeyCond; + } + if (NULL != pTagIndexCond) { + *pTagIndexCond = pTempTagIndexCond; + } + if (NULL != pTagCond) { + *pTagCond = pTempTagCond; + } + if (NULL != pOtherCond) { + *pOtherCond = pTempOtherCond; + } + nodesDestroyNode(*pCondition); + *pCondition = NULL; + } else { + nodesDestroyList(pPrimaryKeyConds); + nodesDestroyList(pTagIndexConds); + nodesDestroyList(pTagConds); + nodesDestroyList(pOtherConds); + nodesDestroyNode(pTempPrimaryKeyCond); + nodesDestroyNode(pTempTagIndexCond); + nodesDestroyNode(pTempTagCond); + nodesDestroyNode(pTempOtherCond); + } + + return code; +} + +int32_t filterPartitionCond(SNode** pCondition, SNode** pPrimaryKeyCond, SNode** pTagIndexCond, SNode** pTagCond, + SNode** pOtherCond) { + if (QUERY_NODE_LOGIC_CONDITION == nodeType(*pCondition) && + LOGIC_COND_TYPE_AND == ((SLogicConditionNode*)*pCondition)->condType) { + return partitionLogicCond(pCondition, pPrimaryKeyCond, pTagIndexCond, pTagCond, pOtherCond); + } + + bool needOutput = false; + switch (classifyCondition(*pCondition)) { + case COND_TYPE_PRIMARY_KEY: + if (NULL != pPrimaryKeyCond) { + *pPrimaryKeyCond = *pCondition; + needOutput = true; + } + break; + case COND_TYPE_TAG_INDEX: + if (NULL != pTagIndexCond) { + *pTagIndexCond = *pCondition; + needOutput = true; + } + if (NULL != pTagCond) { + SNode* pTempCond = *pCondition; + if (NULL != pTagIndexCond) { + pTempCond = nodesCloneNode(*pCondition); + if (NULL == pTempCond) { + return TSDB_CODE_OUT_OF_MEMORY; + } + } + *pTagCond = pTempCond; + needOutput = true; + } + break; + case COND_TYPE_TAG: + if (NULL != pTagCond) { + *pTagCond = *pCondition; + needOutput = true; + } + break; + case 
COND_TYPE_NORMAL: + default: + if (NULL != pOtherCond) { + *pOtherCond = *pCondition; + needOutput = true; + } + break; + } + if (needOutput) { + *pCondition = NULL; + } + + return TSDB_CODE_SUCCESS; +} From 97d0e558bc32816a1e1556d5ef5f73f7193f7235 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 30 Jul 2022 14:05:39 +0800 Subject: [PATCH 38/63] refactor: add assert. --- source/libs/executor/src/groupoperator.c | 1 + 1 file changed, 1 insertion(+) diff --git a/source/libs/executor/src/groupoperator.c b/source/libs/executor/src/groupoperator.c index 5690389302..c1e5252089 100644 --- a/source/libs/executor/src/groupoperator.c +++ b/source/libs/executor/src/groupoperator.c @@ -507,6 +507,7 @@ static void doHashPartition(SOperatorInfo* pOperator, SSDataBlock* pBlock) { colDataSetNull_f(bitmap, (*rows)); } else { memcpy(data + (*columnLen), colDataGetData(pColInfoData, j), bytes); + ASSERT((data + (*columnLen) + bytes - (char*)pPage) <= getBufPageSize(pInfo->pBuf)); } contentLen = bytes; } From d2f57b4ad7e3dae777db4d2cb392b67be7231293 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Sat, 30 Jul 2022 15:09:53 +0800 Subject: [PATCH 39/63] fix alter tag --- source/dnode/vnode/src/meta/metaTable.c | 7 ++++--- source/libs/index/src/indexFilter.c | 13 +++++++++++-- tests/script/tsim/tag/set.sim | 2 +- 3 files changed, 16 insertions(+), 6 deletions(-) diff --git a/source/dnode/vnode/src/meta/metaTable.c b/source/dnode/vnode/src/meta/metaTable.c index 26c81976dc..2dca796ea6 100644 --- a/source/dnode/vnode/src/meta/metaTable.c +++ b/source/dnode/vnode/src/meta/metaTable.c @@ -793,9 +793,6 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA goto _err; } - if (iCol == 0) { - // TODO : need to update tag index - } ctbEntry.version = version; if (pTagSchema->nCols == 1 && pTagSchema->pSchema[0].type == TSDB_DATA_TYPE_JSON) { ctbEntry.ctbEntry.pTags = taosMemoryMalloc(pAlterTbReq->nTagVal); @@ -849,6 +846,10 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA // save to uid.idx tdbTbUpsert(pMeta->pUidIdx, &ctbEntry.uid, sizeof(tb_uid_t), &version, sizeof(version), &pMeta->txn); + if (iCol == 0) { + metaUpdateTagIdx(pMeta, &ctbEntry); + } + tDecoderClear(&dc1); tDecoderClear(&dc2); if (ctbEntry.ctbEntry.pTags) taosMemoryFree((void *)ctbEntry.ctbEntry.pTags); diff --git a/source/libs/index/src/indexFilter.c b/source/libs/index/src/indexFilter.c index 6fa7422b66..6dfdbf6840 100644 --- a/source/libs/index/src/indexFilter.c +++ b/source/libs/index/src/indexFilter.c @@ -385,8 +385,9 @@ static int32_t sifDoIndex(SIFParam *left, SIFParam *right, int8_t operType, SIFP .reverse = reverse, .filterFunc = filterFunc}; - char buf[128] = {0}; - float f = 0.0; + char buf[128] = {0}; + float f = 0.0; + double d = 0.0; if (IS_VAR_DATA_TYPE(left->colValType)) { if (!IS_VAR_DATA_TYPE(right->colValType)) { NUM_TO_STRING(right->colValType, right->condValue, sizeof(buf) - 2, buf + VARSTR_HEADER_SIZE); @@ -405,6 +406,14 @@ static int32_t sifDoIndex(SIFParam *left, SIFParam *right, int8_t operType, SIFP f = *(int32_t *)(right->condValue); param.val = &f; } + } else if (left->colValType == TSDB_DATA_TYPE_DOUBLE) { + if (right->colValType == TSDB_DATA_TYPE_DOUBLE) { + d = GET_DOUBLE_VAL(right->condValue); + param.val = &d; + } else if (right->colValType == TSDB_DATA_TYPE_BIGINT) { + d = *(int64_t *)(right->condValue); + param.val = &d; + } } } ret = metaFilterTableIds(arg->metaEx, ¶m, output->result); diff --git a/tests/script/tsim/tag/set.sim 
b/tests/script/tsim/tag/set.sim index 2c14313a07..5bd3463e3a 100644 --- a/tests/script/tsim/tag/set.sim +++ b/tests/script/tsim/tag/set.sim @@ -452,4 +452,4 @@ if $rows != 2 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT From 77837f6e6a9be731a3efd4b16c9131aaf8f77735 Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Sat, 30 Jul 2022 15:10:24 +0800 Subject: [PATCH 40/63] fix: fix crash issue --- include/os/osString.h | 1 + source/client/src/clientEnv.c | 2 ++ source/dnode/mgmt/exe/dmMain.c | 2 ++ source/os/src/osString.c | 36 +++++++++++++++++----------------- 4 files changed, 23 insertions(+), 18 deletions(-) diff --git a/include/os/osString.h b/include/os/osString.h index 2a22c6d8ff..8eb341faa7 100644 --- a/include/os/osString.h +++ b/include/os/osString.h @@ -62,6 +62,7 @@ typedef int32_t TdUcs4; int32_t taosUcs4len(TdUcs4 *ucs4); int64_t taosStr2int64(const char *str); +void taosConvInit(void); void taosConvDestroy(); int32_t taosUcs4ToMbs(TdUcs4 *ucs4, int32_t ucs4_max_len, char *mbs); bool taosMbsToUcs4(const char *mbs, size_t mbs_len, TdUcs4 *ucs4, int32_t ucs4_max_len, int32_t *len); diff --git a/source/client/src/clientEnv.c b/source/client/src/clientEnv.c index 893aa39ce3..3d539ea251 100644 --- a/source/client/src/clientEnv.c +++ b/source/client/src/clientEnv.c @@ -361,6 +361,8 @@ void taos_init_imp(void) { initQueryModuleMsgHandle(); + taosConvInit(); + rpcInit(); SCatalogCfg cfg = {.maxDBCacheNum = 100, .maxTblCacheNum = 100}; diff --git a/source/dnode/mgmt/exe/dmMain.c b/source/dnode/mgmt/exe/dmMain.c index 34c3b40556..4030eaa6fe 100644 --- a/source/dnode/mgmt/exe/dmMain.c +++ b/source/dnode/mgmt/exe/dmMain.c @@ -218,6 +218,8 @@ int mainWindows(int argc,char** argv) { taosCleanupArgs(); return -1; } + + taosConvInit(); if (global.dumpConfig) { dmDumpCfg(); diff --git a/source/os/src/osString.c b/source/os/src/osString.c index 503b8e0c6b..26aafd743f 100644 --- a/source/os/src/osString.c +++ b/source/os/src/osString.c @@ -143,29 +143,29 @@ SConv *gConv = NULL; int32_t convUsed = 0; int32_t gConvMaxNum = 0; -void taosConvInitImpl(void) { +void taosConvInit(void) { gConvMaxNum = 512; gConv = taosMemoryCalloc(gConvMaxNum, sizeof(SConv)); for (int32_t i = 0; i < gConvMaxNum; ++i) { gConv[i].conv = iconv_open(DEFAULT_UNICODE_ENCODEC, tsCharset); + if ((iconv_t)-1 == gConv[i].conv || (iconv_t)0 == gConv[i].conv) { + ASSERT(0); + } } } -static TdThreadOnce convInit = PTHREAD_ONCE_INIT; -void taosConvInit() { - taosThreadOnce(&convInit, taosConvInitImpl); -} - void taosConvDestroy() { for (int32_t i = 0; i < gConvMaxNum; ++i) { iconv_close(gConv[i].conv); } taosMemoryFreeClear(gConv); + gConvMaxNum = -1; } -void taosAcquireConv(int32_t *idx) { - if (0 == gConvMaxNum) { - taosConvInit(); +iconv_t taosAcquireConv(int32_t *idx) { + if (gConvMaxNum <= 0) { + *idx = -1; + return iconv_open(DEFAULT_UNICODE_ENCODEC, tsCharset); } while (true) { @@ -193,12 +193,12 @@ void taosAcquireConv(int32_t *idx) { } *idx = startId; + return gConv[startId].conv; } -void taosReleaseConv(int32_t idx) { - if (0 == gConvMaxNum) { - iconv_close(gConv[0].conv); - taosMemoryFreeClear(gConv); +void taosReleaseConv(int32_t idx, iconv_t conv) { + if (idx < 0) { + iconv_close(conv); return; } @@ -213,16 +213,16 @@ bool taosMbsToUcs4(const char *mbs, size_t mbsLength, TdUcs4 *ucs4, int32_t ucs4 #else memset(ucs4, 0, ucs4_max_len); - int32_t idx = 0; - taosAcquireConv(&idx); + int32_t idx = -1; + iconv_t conv = 
taosAcquireConv(&idx); size_t ucs4_input_len = mbsLength; size_t outLeft = ucs4_max_len; - if (iconv(gConv[idx].conv, (char **)&mbs, &ucs4_input_len, (char **)&ucs4, &outLeft) == -1) { - taosReleaseConv(idx); + if (iconv(conv, (char **)&mbs, &ucs4_input_len, (char **)&ucs4, &outLeft) == -1) { + taosReleaseConv(idx, conv); return false; } - taosReleaseConv(idx); + taosReleaseConv(idx, conv); if (len != NULL) { *len = (int32_t)(ucs4_max_len - outLeft); if (*len < 0) { From ac0a32d9e4217ba8312710cc290ef08a736be2d5 Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Sat, 30 Jul 2022 07:20:38 +0000 Subject: [PATCH 41/63] fix: multi-level storage coredump --- source/dnode/vnode/src/tsdb/tsdbCommit.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/source/dnode/vnode/src/tsdb/tsdbCommit.c b/source/dnode/vnode/src/tsdb/tsdbCommit.c index 24f066f703..24b300cb1e 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCommit.c +++ b/source/dnode/vnode/src/tsdb/tsdbCommit.c @@ -311,6 +311,8 @@ static int32_t tsdbCommitFileDataStart(SCommitter *pCommitter) { tfsAllocDisk(pTsdb->pVnode->pTfs, 0, &did); + tfsMkdirRecurAt(pTsdb->pVnode->pTfs, pTsdb->path, did); + wSet.diskId = did; wSet.fid = pCommitter->commitFid; fHead = (SHeadFile){.commitID = pCommitter->commitID, .offset = 0, .size = 0}; From e57f12d5817e5fc8b598d171f246b95d448028fa Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Sat, 30 Jul 2022 15:23:28 +0800 Subject: [PATCH 42/63] fix: use colId to merge rows --- source/dnode/vnode/src/tsdb/tsdbRead.c | 8 ++++++++ source/dnode/vnode/src/tsdb/tsdbUtil.c | 26 +++++++++++++++++--------- 2 files changed, 25 insertions(+), 9 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c index 9545ded927..47511cce98 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead.c @@ -2126,6 +2126,10 @@ int32_t doMergeRowsInBuf(SIterInfo* pIter, uint64_t uid, int64_t ts, SArray* pDe } tRowMergerAdd(pMerger, pRow, pTSchema); + + if (sversion != pReader->pSchema->version) { + taosMemoryFree(pTSchema); + } } return TSDB_CODE_SUCCESS; @@ -2262,6 +2266,10 @@ void doMergeMultiRows(TSDBROW* pRow, uint64_t uid, SIterInfo* pIter, SArray* pDe doMergeRowsInBuf(pIter, uid, k.ts, pDelList, &merge, pReader); tRowMergerGetRow(&merge, pTSRow); tRowMergerClear(&merge); + + if (sversion != pReader->pSchema->version) { + taosMemoryFree(pTSchema); + } } void doMergeMemIMemRows(TSDBROW* pRow, TSDBROW* piRow, STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader, diff --git a/source/dnode/vnode/src/tsdb/tsdbUtil.c b/source/dnode/vnode/src/tsdb/tsdbUtil.c index 211a330599..ae9f3ae7c3 100644 --- a/source/dnode/vnode/src/tsdb/tsdbUtil.c +++ b/source/dnode/vnode/src/tsdb/tsdbUtil.c @@ -575,7 +575,7 @@ int32_t tRowMergerInit2(SRowMerger *pMerger, STSchema *pResTSchema, TSDBROW *pRo TSDBKEY key = TSDBROW_KEY(pRow); SColVal *pColVal = &(SColVal){0}; STColumn *pTColumn; - int32_t iCol = 0; + int32_t iCol, jCol = 0; pMerger->pTSchema = pResTSchema; pMerger->version = key.version; @@ -587,7 +587,7 @@ int32_t tRowMergerInit2(SRowMerger *pMerger, STSchema *pResTSchema, TSDBROW *pRo } // ts - pTColumn = &pTSchema->columns[iCol++]; + pTColumn = &pTSchema->columns[jCol++]; ASSERT(pTColumn->type == TSDB_DATA_TYPE_TIMESTAMP); @@ -598,14 +598,18 @@ int32_t tRowMergerInit2(SRowMerger *pMerger, STSchema *pResTSchema, TSDBROW *pRo } // other - for (; iCol < pTSchema->numOfCols && iCol < pResTSchema->numOfCols; ++iCol) { + for (iCol = 1; jCol < pTSchema->numOfCols && iCol < 
pResTSchema->numOfCols; ++iCol) { pTColumn = &pResTSchema->columns[iCol]; - if (pTSchema->columns[iCol].colId != pTColumn->colId) { + if (pTSchema->columns[jCol].colId < pTColumn->colId) { + ++jCol; + --iCol; + continue; + } else if (pTSchema->columns[jCol].colId > pTColumn->colId) { taosArrayPush(pMerger->pArray, &COL_VAL_NONE(pTColumn->colId, pTColumn->type)); continue; } - tsdbRowGetColVal(pRow, pTSchema, iCol, pColVal); + tsdbRowGetColVal(pRow, pTSchema, jCol++, pColVal); if (taosArrayPush(pMerger->pArray, pColVal) == NULL) { code = TSDB_CODE_OUT_OF_MEMORY; goto _exit; @@ -626,17 +630,21 @@ int32_t tRowMergerAdd(SRowMerger *pMerger, TSDBROW *pRow, STSchema *pTSchema) { TSDBKEY key = TSDBROW_KEY(pRow); SColVal *pColVal = &(SColVal){0}; STColumn *pTColumn; - int32_t iCol; + int32_t iCol, jCol = 1; ASSERT(((SColVal *)pMerger->pArray->pData)->value.ts == key.ts); - for (iCol = 1; iCol < pMerger->pTSchema->numOfCols && iCol < pTSchema->numOfCols; ++iCol) { + for (iCol = 1; iCol < pMerger->pTSchema->numOfCols && jCol < pTSchema->numOfCols; ++iCol) { pTColumn = &pMerger->pTSchema->columns[iCol]; - if (pTSchema->columns[iCol].colId != pTColumn->colId) { + if (pTSchema->columns[jCol].colId < pTColumn->colId) { + ++jCol; + --iCol; + continue; + } else if (pTSchema->columns[jCol].colId > pTColumn->colId) { continue; } - tsdbRowGetColVal(pRow, pMerger->pTSchema, iCol, pColVal); + tsdbRowGetColVal(pRow, pTSchema, jCol++, pColVal); if (key.version > pMerger->version) { if (!pColVal->isNone) { From 15a08b9999f3a20ee7162f125ad7927a224b7f9f Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Sat, 30 Jul 2022 15:29:57 +0800 Subject: [PATCH 43/63] fix: some problems of planner --- source/libs/nodes/src/nodesCodeFuncs.c | 7 +++++++ source/libs/nodes/src/nodesUtilFuncs.c | 2 +- source/libs/planner/test/planSubqueryTest.cpp | 6 ++++++ tests/script/tsim/parser/condition_query.sim | 6 +++--- 4 files changed, 17 insertions(+), 4 deletions(-) diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c index b0c16f26ed..9d15b01acf 100644 --- a/source/libs/nodes/src/nodesCodeFuncs.c +++ b/source/libs/nodes/src/nodesCodeFuncs.c @@ -2684,6 +2684,7 @@ static int32_t jsonToDataType(const SJson* pJson, void* pObj) { static const char* jkExprDataType = "DataType"; static const char* jkExprAliasName = "AliasName"; +static const char* jkExprUserAlias = "UserAlias"; static int32_t exprNodeToJson(const void* pObj, SJson* pJson) { const SExprNode* pNode = (const SExprNode*)pObj; @@ -2692,6 +2693,9 @@ static int32_t exprNodeToJson(const void* pObj, SJson* pJson) { if (TSDB_CODE_SUCCESS == code) { code = tjsonAddStringToObject(pJson, jkExprAliasName, pNode->aliasName); } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddStringToObject(pJson, jkExprUserAlias, pNode->userAlias); + } return code; } @@ -2703,6 +2707,9 @@ static int32_t jsonToExprNode(const SJson* pJson, void* pObj) { if (TSDB_CODE_SUCCESS == code) { code = tjsonGetStringValue(pJson, jkExprAliasName, pNode->aliasName); } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetStringValue(pJson, jkExprUserAlias, pNode->userAlias); + } return code; } diff --git a/source/libs/nodes/src/nodesUtilFuncs.c b/source/libs/nodes/src/nodesUtilFuncs.c index b7bcd13aa0..37d80917b3 100644 --- a/source/libs/nodes/src/nodesUtilFuncs.c +++ b/source/libs/nodes/src/nodesUtilFuncs.c @@ -1502,7 +1502,7 @@ static EDealRes collectColumns(SNode* pNode, void* pContext) { SCollectColumnsCxt* pCxt = (SCollectColumnsCxt*)pContext; if (QUERY_NODE_COLUMN == 
nodeType(pNode)) { SColumnNode* pCol = (SColumnNode*)pNode; - if (isCollectType(pCxt->collectType, pCol->colType) && + if (isCollectType(pCxt->collectType, pCol->colType) && 0 != strcmp(pCol->colName, "*") && (NULL == pCxt->pTableAlias || 0 == strcmp(pCxt->pTableAlias, pCol->tableAlias))) { return doCollect(pCxt, pCol, pNode); } diff --git a/source/libs/planner/test/planSubqueryTest.cpp b/source/libs/planner/test/planSubqueryTest.cpp index 16dd91846c..aed77c0811 100644 --- a/source/libs/planner/test/planSubqueryTest.cpp +++ b/source/libs/planner/test/planSubqueryTest.cpp @@ -73,3 +73,9 @@ TEST_F(PlanSubqeuryTest, outerInterval) { run("SELECT COUNT(*) FROM (SELECT ts, TOP(c1, 10) FROM st1s1) INTERVAL(5s)"); } + +TEST_F(PlanSubqeuryTest, outerPartition) { + useDb("root", "test"); + + run("SELECT c1, COUNT(*) FROM (SELECT ts, c1 FROM st1) PARTITION BY c1"); +} diff --git a/tests/script/tsim/parser/condition_query.sim b/tests/script/tsim/parser/condition_query.sim index 87ea9f3cbe..a8a9be5cfd 100644 --- a/tests/script/tsim/parser/condition_query.sim +++ b/tests/script/tsim/parser/condition_query.sim @@ -2631,7 +2631,7 @@ sql_error select tb1.ts,tb1.c1,tb2_1.u1 from tb1, tb2_1 where tb1.ts=tb2_1.ts or print "ts&tbname test" -sql_error select count(*) from stb1 where ts > 0 or tbname like 'tb%'; +sql select count(*) from stb1 where ts > 0 or tbname like 'tb%'; print "ts&tag test" sql select count(*) from stb1 where ts > 0 or t1 > 0; @@ -2717,9 +2717,9 @@ print "tbname&tag&join test" print "column&ts&tbname&tag test" -sql_error select * from stb1 where (tbname like 'tb%' or ts > '2021-05-05 18:19:01.000') and (t1 > 5 or t1 < 4) and c1 > 0; +sql select * from stb1 where (tbname like 'tb%' or ts > '2021-05-05 18:19:01.000') and (t1 > 5 or t1 < 4) and c1 > 0; sql select * from stb1 where (ts > '2021-05-05 18:19:01.000') and (ts > '2021-05-05 18:19:02.000' or t1 > 3) and (t1 > 5 or t1 < 4) and c1 > 0; -sql_error select ts,c1,c7 from stb1 where ts > '2021-05-05 18:19:03.000' or ts > '2021-05-05 18:19:20.000' and col > 0 and t1 > 0; +sql select ts,c1,c7 from stb1 where ts > '2021-05-05 18:19:03.000' or ts > '2021-05-05 18:19:20.000' and c1 > 0 and t1 > 0; print "column&ts&tbname&join test" From dc6c0c1d1ad79dd386dd12db81c876062c4b450c Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Sat, 30 Jul 2022 15:46:43 +0800 Subject: [PATCH 44/63] fix: some problems of planner --- source/libs/parser/src/parTranslater.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 8495aa15cd..02a7ca0fbb 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -523,6 +523,9 @@ static void setColumnInfoBySchema(const SRealTableNode* pTable, const SSchema* p if ('\0' == pCol->node.aliasName[0]) { strcpy(pCol->node.aliasName, pColSchema->name); } + if ('\0' == pCol->node.userAlias[0]) { + strcpy(pCol->node.userAlias, pColSchema->name); + } pCol->tableId = pTable->pMeta->uid; pCol->tableType = pTable->pMeta->tableType; pCol->colId = pColSchema->colId; @@ -549,6 +552,9 @@ static void setColumnInfoByExpr(STempTableNode* pTable, SExprNode* pExpr, SColum if ('\0' == pCol->node.aliasName[0]) { strcpy(pCol->node.aliasName, pCol->colName); } + if ('\0' == pCol->node.userAlias[0]) { + strcpy(pCol->node.userAlias, pCol->colName); + } pCol->node.resType = pExpr->resType; } @@ -2175,7 +2181,13 @@ static int32_t translateFillValues(STranslateContext* pCxt, SSelectStmt* pSelect static 
int32_t rewriteProjectAlias(SNodeList* pProjectionList) { int32_t no = 1; SNode* pProject = NULL; - FOREACH(pProject, pProjectionList) { sprintf(((SExprNode*)pProject)->aliasName, "#expr_%d", no++); } + FOREACH(pProject, pProjectionList) { + SExprNode* pExpr = (SExprNode*)pProject; + if ('\0' == pExpr->userAlias[0]) { + strcpy(pExpr->userAlias, pExpr->aliasName); + } + sprintf(pExpr->aliasName, "#expr_%d", no++); + } return TSDB_CODE_SUCCESS; } From 9d62c839d9182c3995bc8daaf6ef7e8098c92a05 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Sat, 30 Jul 2022 15:52:43 +0800 Subject: [PATCH 45/63] feat: update taostools for3.0 (#15587) * feat: update taos-tools for 3.0 [TD-14141] * feat: update taos-tools for 3.0 * feat: update taos-tools for 3.0 * feat: update taos-tools for 3.0 * feat: update taos-tools for 3.0 * feat: update taos-tools for 3.0 * feat: update taos-tools for 3.0 * feat: update taos-tools for 3.0 * feat: update taos-tools for 3.0 * feat: update taos-tools for 3.0 * feat: update taos-tools 8e3b3ee * fix: remove submodules * feat: update taos-tools c529299 * feat: update taos-tools 9dc2fec for 3.0 * fix: optim upx --- .gitignore | 4 ++++ tools/CMakeLists.txt | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 80fd850cd4..c25f60075c 100644 --- a/.gitignore +++ b/.gitignore @@ -50,6 +50,10 @@ pysim/ tests/script/api/batchprepare taosadapter taosadapter-debug +tools/taos-tools/* +tools/taosws-rs/* +tools/taosadapter/* +tools/upx* # Doxygen Generated files html/ diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt index 58095940f3..1f066daabe 100644 --- a/tools/CMakeLists.txt +++ b/tools/CMakeLists.txt @@ -112,7 +112,7 @@ ELSE () COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -ldflags "-s -w -X github.com/taosdata/taosadapter/version.Version=${taos_version} -X github.com/taosdata/taosadapter/version.CommitID=${taosadapter_commit_sha1}" COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -o taosadapter-debug -ldflags "-X github.com/taosdata/taosadapter/version.Version=${taos_version} -X github.com/taosdata/taosadapter/version.CommitID=${taosadapter_commit_sha1}" INSTALL_COMMAND - COMMAND wget -c https://github.com/upx/upx/releases/download/v3.96/upx-3.96-${PLATFORM_ARCH_STR}_linux.tar.xz -O ${CMAKE_CURRENT_SOURCE_DIR}/upx.tar.xz && tar -xvJf ${CMAKE_CURRENT_SOURCE_DIR}/upx.tar.xz -C ${CMAKE_CURRENT_SOURCE_DIR} --strip-components 1 > /dev/null && ${CMAKE_CURRENT_SOURCE_DIR}/upx taosadapter || : + COMMAND wget -c https://github.com/upx/upx/releases/download/v3.96/upx-3.96-${PLATFORM_ARCH_STR}_linux.tar.xz -O $ENV{HOME}/upx.tar.xz && tar -xvJf $ENV{HOME}/upx.tar.xz -C $ENV{HOME}/ --strip-components 1 > /dev/null && $ENV{HOME}/upx taosadapter || : COMMAND cmake -E copy taosadapter ${CMAKE_BINARY_DIR}/build/bin COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/test/cfg/ COMMAND cmake -E copy ./example/config/taosadapter.toml ${CMAKE_BINARY_DIR}/test/cfg/ From eccc590d073011d2e73f563c9ed1d54e32571105 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Sat, 30 Jul 2022 16:12:02 +0800 Subject: [PATCH 46/63] fix index bugf --- source/dnode/vnode/src/meta/metaTable.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/dnode/vnode/src/meta/metaTable.c b/source/dnode/vnode/src/meta/metaTable.c index 2dca796ea6..702c7fb505 100644 --- a/source/dnode/vnode/src/meta/metaTable.c +++ 
b/source/dnode/vnode/src/meta/metaTable.c @@ -1132,7 +1132,7 @@ static int metaUpdateTagIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry) { pCtbEntry->uid, &pTagIdxKey, &nTagIdxKey) < 0) { return -1; } - tdbTbInsert(pMeta->pTagIdx, pTagIdxKey, nTagIdxKey, NULL, 0, &pMeta->txn); + tdbTbUpsert(pMeta->pTagIdx, pTagIdxKey, nTagIdxKey, NULL, 0, &pMeta->txn); metaDestroyTagIdxKey(pTagIdxKey); tDecoderClear(&dc); tdbFree(pData); From 6d67b171138e4cf0a90c4301a73bbe7b9672dd2f Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Sat, 30 Jul 2022 16:04:06 +0800 Subject: [PATCH 47/63] enh(wal): add lock to guarantee read behaviour --- source/dnode/vnode/src/tq/tq.c | 3 +++ source/libs/function/src/builtins.c | 5 +++-- source/libs/wal/src/walRead.c | 11 +++++++++++ source/libs/wal/src/walWrite.c | 2 +- 4 files changed, 18 insertions(+), 3 deletions(-) diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index 364ecbab61..86b016b8be 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -684,6 +684,9 @@ int32_t tqProcessTaskDeployReq(STQ* pTq, char* msg, int32_t msgLen) { taosHashPut(pTq->pStreamTasks, &pTask->taskId, sizeof(int32_t), &pTask, sizeof(void*)); + /*SMeta* pMeta = pTq->pVnode->pMeta;*/ + /*tdbTbUpsert(pMeta->pTaskIdx, &pTask->taskId, sizeof(int32_t), msg, msgLen, &pMeta->txn);*/ + return 0; FAIL: if (pTask->inputQueue) streamQueueClose(pTask->inputQueue); diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index c9f462bae1..83ac7438fe 100644 --- a/source/libs/function/src/builtins.c +++ b/source/libs/function/src/builtins.c @@ -2453,13 +2453,14 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = "diff", .type = FUNCTION_TYPE_DIFF, - .classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_FORBID_STREAM_FUNC, + .classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_CUMULATIVE_FUNC, .translateFunc = translateDiff, .getEnvFunc = getDiffFuncEnv, .initFunc = diffFunctionSetup, .processFunc = diffFunction, .sprocessFunc = diffScalarFunction, - .finalizeFunc = functionFinalize + .finalizeFunc = functionFinalize, + .estimateReturnRowsFunc = diffEstReturnRows, }, { .name = "statecount", diff --git a/source/libs/wal/src/walRead.c b/source/libs/wal/src/walRead.c index 787c9af317..9be648b518 100644 --- a/source/libs/wal/src/walRead.c +++ b/source/libs/wal/src/walRead.c @@ -441,9 +441,12 @@ int32_t walReadVer(SWalReader *pReader, int64_t ver) { return -1; } + taosThreadMutexLock(&pReader->mutex); + if (pReader->curInvalid || pReader->curVersion != ver) { if (walReadSeekVer(pReader, ver) < 0) { wError("vgId:%d, unexpected wal log, index:%" PRId64 ", since %s", pReader->pWal->cfg.vgId, ver, terrstr()); + taosThreadMutexUnlock(&pReader->mutex); return -1; } seeked = true; @@ -464,6 +467,7 @@ int32_t walReadVer(SWalReader *pReader, int64_t ver) { terrno = TSDB_CODE_WAL_FILE_CORRUPTED; } ASSERT(0); + taosThreadMutexUnlock(&pReader->mutex); return -1; } } @@ -473,6 +477,7 @@ int32_t walReadVer(SWalReader *pReader, int64_t ver) { wError("vgId:%d, unexpected wal log, index:%" PRId64 ", since head checksum not passed", pReader->pWal->cfg.vgId, ver); terrno = TSDB_CODE_WAL_FILE_CORRUPTED; + taosThreadMutexUnlock(&pReader->mutex); return -1; } @@ -480,6 +485,7 @@ int32_t walReadVer(SWalReader *pReader, int64_t ver) { void *ptr = taosMemoryRealloc(pReader->pHead, sizeof(SWalCkHead) + 
pReader->pHead->head.bodyLen); if (ptr == NULL) { terrno = TSDB_CODE_WAL_OUT_OF_MEMORY; + taosThreadMutexUnlock(&pReader->mutex); return -1; } pReader->pHead = ptr; @@ -494,6 +500,7 @@ int32_t walReadVer(SWalReader *pReader, int64_t ver) { terrno = TSDB_CODE_WAL_FILE_CORRUPTED; ASSERT(0); } + taosThreadMutexUnlock(&pReader->mutex); return -1; } @@ -503,6 +510,7 @@ int32_t walReadVer(SWalReader *pReader, int64_t ver) { pReader->curInvalid = 1; terrno = TSDB_CODE_WAL_FILE_CORRUPTED; ASSERT(0); + taosThreadMutexUnlock(&pReader->mutex); return -1; } @@ -516,9 +524,12 @@ int32_t walReadVer(SWalReader *pReader, int64_t ver) { pReader->curInvalid = 1; terrno = TSDB_CODE_WAL_FILE_CORRUPTED; ASSERT(0); + taosThreadMutexUnlock(&pReader->mutex); return -1; } pReader->curVersion++; + taosThreadMutexUnlock(&pReader->mutex); + return 0; } diff --git a/source/libs/wal/src/walWrite.c b/source/libs/wal/src/walWrite.c index 4eadc92f70..68d8c54e28 100644 --- a/source/libs/wal/src/walWrite.c +++ b/source/libs/wal/src/walWrite.c @@ -408,7 +408,7 @@ static FORCE_INLINE int32_t walWriteImpl(SWal *pWal, int64_t index, tmsg_t msgTy pWal->writeHead.head.version = index; pWal->writeHead.head.bodyLen = bodyLen; pWal->writeHead.head.msgType = msgType; - pWal->writeHead.head.ingestTs = taosGetTimestampMs(); + pWal->writeHead.head.ingestTs = 0; // sync info for sync module pWal->writeHead.head.syncMeta = syncMeta; From 4b6b1de91cace2ba12c1e029fa05f97dda73b017 Mon Sep 17 00:00:00 2001 From: Minghao Li Date: Sat, 30 Jul 2022 17:30:10 +0800 Subject: [PATCH 48/63] refactor(sync): restart hb timer when replicate get error term --- source/libs/sync/inc/syncInt.h | 2 ++ source/libs/sync/src/syncMain.c | 17 +++++++++++++++++ source/libs/sync/src/syncReplication.c | 1 + 3 files changed, 20 insertions(+) diff --git a/source/libs/sync/inc/syncInt.h b/source/libs/sync/inc/syncInt.h index 586cfc0f15..bc3275a971 100644 --- a/source/libs/sync/inc/syncInt.h +++ b/source/libs/sync/inc/syncInt.h @@ -191,8 +191,10 @@ int32_t syncNodeStopElectTimer(SSyncNode* pSyncNode); int32_t syncNodeRestartElectTimer(SSyncNode* pSyncNode, int32_t ms); int32_t syncNodeResetElectTimer(SSyncNode* pSyncNode); int32_t syncNodeStartHeartbeatTimer(SSyncNode* pSyncNode); +int32_t syncNodeStartNowHeartbeatTimer(SSyncNode* pSyncNode); int32_t syncNodeStopHeartbeatTimer(SSyncNode* pSyncNode); int32_t syncNodeRestartHeartbeatTimer(SSyncNode* pSyncNode); +int32_t syncNodeRestartNowHeartbeatTimer(SSyncNode* pSyncNode); // utils -------------- int32_t syncNodeSendMsgById(const SRaftId* destRaftId, SSyncNode* pSyncNode, SRpcMsg* pMsg); diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c index 52cbcd0059..9a56bb3fb8 100644 --- a/source/libs/sync/src/syncMain.c +++ b/source/libs/sync/src/syncMain.c @@ -1312,6 +1312,17 @@ int32_t syncNodeStartHeartbeatTimer(SSyncNode* pSyncNode) { return ret; } +int32_t syncNodeStartNowHeartbeatTimer(SSyncNode* pSyncNode) { + int32_t ret = 0; + if (syncEnvIsStart()) { + taosTmrReset(pSyncNode->FpHeartbeatTimerCB, 1, pSyncNode, gSyncEnv->pTimerManager, &pSyncNode->pHeartbeatTimer); + atomic_store_64(&pSyncNode->heartbeatTimerLogicClock, pSyncNode->heartbeatTimerLogicClockUser); + } else { + sError("vgId:%d, start heartbeat timer error, sync env is stop", pSyncNode->vgId); + } + return ret; +} + int32_t syncNodeStopHeartbeatTimer(SSyncNode* pSyncNode) { int32_t ret = 0; atomic_add_fetch_64(&pSyncNode->heartbeatTimerLogicClockUser, 1); @@ -1326,6 +1337,12 @@ int32_t syncNodeRestartHeartbeatTimer(SSyncNode* 
pSyncNode) { return 0; } +int32_t syncNodeRestartNowHeartbeatTimer(SSyncNode* pSyncNode) { + syncNodeStopHeartbeatTimer(pSyncNode); + syncNodeStartNowHeartbeatTimer(pSyncNode); + return 0; +} + // utils -------------- int32_t syncNodeSendMsgById(const SRaftId* destRaftId, SSyncNode* pSyncNode, SRpcMsg* pMsg) { SEpSet epSet; diff --git a/source/libs/sync/src/syncReplication.c b/source/libs/sync/src/syncReplication.c index 1a2a083677..dc7d8c4f52 100644 --- a/source/libs/sync/src/syncReplication.c +++ b/source/libs/sync/src/syncReplication.c @@ -140,6 +140,7 @@ int32_t syncNodeAppendEntriesPeersSnapshot2(SSyncNode* pSyncNode) { ", match-index:%d, raftid:%" PRId64, pSyncNode->vgId, nextIndex, newNextIndex, SYNC_INDEX_INVALID, pDestId->addr); + syncNodeRestartNowHeartbeatTimer(pSyncNode); return -1; } From b4a9dcb29860243707522ef9bdb7c86c17f9160d Mon Sep 17 00:00:00 2001 From: "wenzhouwww@live.cn" Date: Sat, 30 Jul 2022 17:35:04 +0800 Subject: [PATCH 49/63] update case --- ...mnode_basic_replica1_insertdatas_querys.py | 2 +- ...nsertdatas_querys_loop_restart_follower.py | 7 +++--- ..._insertdatas_querys_loop_restart_leader.py | 5 ++-- ...ca3_insertdatas_stop_leader_forece_stop.py | 4 ++-- ...asic_replica3_mnode3_insertdatas_querys.py | 8 +++---- ...basic_replica3_querydatas_stop_follower.py | 2 +- ...e_basic_replica3_querydatas_stop_leader.py | 4 ++-- ...lica3_querydatas_stop_leader_force_stop.py | 2 +- tests/system-test/fulltest.sh | 24 +++++++++++++++++++ 9 files changed, 42 insertions(+), 16 deletions(-) diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas_querys.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas_querys.py index dafd4d7745..02d944b08f 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas_querys.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas_querys.py @@ -30,7 +30,7 @@ class TDTestCase: self.vgroups = 1 self.tb_nums = 10 self.row_nums = 1000 - self.query_times = 1000 + self.query_times = 500 def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_follower.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_follower.py index 19f9179e1e..fdd5ec7d46 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_follower.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_follower.py @@ -29,9 +29,10 @@ class TDTestCase: self.replica = 3 self.vgroups = 10 self.tb_nums = 10 - self.row_nums = 1000 + self.row_nums = 100 self.max_restart_time = 20 - self.restart_server_times = 10 + self.restart_server_times = 5 + self.query_times = 100 def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -300,7 +301,7 @@ class TDTestCase: restart_servers = threading.Thread(target = self.loop_restart_follower_constantly, args = (self.restart_server_times ,self.db_name)) restart_servers.start() - reading = threading.Thread(target = self.loop_query_constantly, args=(1000,self.db_name , self.tb_nums , self.row_nums)) + reading = threading.Thread(target = self.loop_query_constantly, args=(self.query_times,self.db_name , self.tb_nums , self.row_nums)) reading.start() writing.join() diff --git 
a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_leader.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_leader.py index f7d80766c7..cbb007d961 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_leader.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_leader.py @@ -29,9 +29,10 @@ class TDTestCase: self.replica = 3 self.vgroups = 10 self.tb_nums = 10 - self.row_nums = 1000 + self.row_nums = 100 self.max_restart_time = 20 self.restart_server_times = 10 + self.query_times = 100 def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -353,7 +354,7 @@ class TDTestCase: restart_servers = threading.Thread(target = self.loop_restart_follower_constantly, args = (self.restart_server_times ,self.db_name)) restart_servers.start() - reading = threading.Thread(target = self.loop_query_constantly, args=(1000,self.db_name , self.tb_nums , self.row_nums)) + reading = threading.Thread(target = self.loop_query_constantly, args=(self.query_times,self.db_name , self.tb_nums , self.row_nums)) reading.start() writing.join() diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader_forece_stop.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader_forece_stop.py index 918a628c6b..38aad78812 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader_forece_stop.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader_forece_stop.py @@ -33,10 +33,10 @@ class TDTestCase: self.tb_nums = 10 self.row_nums = 100 self.stop_dnode_id = None - self.loop_restart_times = 10 + self.loop_restart_times = 5 self.current_thread = None self.max_restart_time = 5 - + self.try_check_times = 10 def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) if ("community" in selfPath): diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_mnode3_insertdatas_querys.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_mnode3_insertdatas_querys.py index a0d8f21cff..ed20a51595 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_mnode3_insertdatas_querys.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_mnode3_insertdatas_querys.py @@ -29,7 +29,7 @@ class TDTestCase: self.replica = 3 self.vgroups = 1 self.tb_nums = 10 - self.row_nums = 2000 + self.row_nums = 1000 self.query_times = 100 def getBuildPath(self): @@ -170,10 +170,10 @@ class TDTestCase: # tdSql.checkRows(tb_nums) def loop_query_constantly(self, times , db_name, tb_nums ,row_nums): - + newTdSql=tdCom.newTdSql() for loop_time in range(times): tdLog.debug(" === query is going ,this is {}_th query === ".format(loop_time)) - self.check_insert_status( db_name, tb_nums , row_nums) + self.check_insert_status( newTdSql ,db_name, tb_nums , row_nums) def loop_create_databases(self, times , tb_nums , row_nums): newTdSql=tdCom.newTdSql() @@ -203,7 +203,7 @@ class TDTestCase: reading = threading.Thread(target = self.loop_query_constantly, args=(self.query_times,self.db_name , self.tb_nums , self.row_nums)) reading.start() - create_db = threading.Thread(target = self.loop_create_databases, args=(10, 10 , 10)) + create_db = threading.Thread(target = self.loop_create_databases, args=(5, 10 , 10)) create_db.start() 
writing.join() diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower.py index a8d6a2d4d1..f9c36a8977 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower.py @@ -392,7 +392,7 @@ class TDTestCase: self.create_stable_insert_datas(dbname = self.db_name , stablename = "stb1" , tb_nums= self.tb_nums ,row_nums= self.row_nums) # let query task start - self.thread_list = self.multi_thread_query_task(10 ,self.db_name ,'stb1' ) + self.thread_list = self.multi_thread_query_task(5 ,self.db_name ,'stb1' ) # force stop follower for loop in range(self.loop_restart_times): diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader.py index f72dfb40eb..dd7c7d275d 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader.py @@ -425,7 +425,7 @@ class TDTestCase: self.create_stable_insert_datas(dbname = self.db_name , stablename = "stb1" , tb_nums= self.tb_nums ,row_nums= self.row_nums) # let query task start - self.thread_list = self.multi_thread_query_task(10 ,self.db_name ,'stb1' ) + self.thread_list = self.multi_thread_query_task(2 ,self.db_name ,'stb1' ) newTdSql=tdCom.newTdSql() # force stop follower @@ -435,7 +435,7 @@ class TDTestCase: # get leader info before stop before_leader_infos = self.get_leader_infos(newTdSql , self.db_name) - self.stop_dnode_id = self._get_stop_dnode_id(self.db_name) + self.stop_dnode_id = self._get_stop_dnode_id(self.db_name,"leader") tdDnodes[self.stop_dnode_id-1].stoptaosd() start = time.time() diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader_force_stop.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader_force_stop.py index 191b8eb104..0ba0bf5ae6 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader_force_stop.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader_force_stop.py @@ -426,7 +426,7 @@ class TDTestCase: self.create_stable_insert_datas(dbname = self.db_name , stablename = "stb1" , tb_nums= self.tb_nums ,row_nums= self.row_nums) # let query task start - self.thread_list = self.multi_thread_query_task(10 ,self.db_name ,'stb1' ) + self.thread_list = self.multi_thread_query_task(5 ,self.db_name ,'stb1' ) # force stop follower for loop in range(self.loop_restart_times): diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh index 7a3ad1070c..86b1cfc030 100755 --- a/tests/system-test/fulltest.sh +++ b/tests/system-test/fulltest.sh @@ -193,6 +193,30 @@ python3 ./test.py -f 6-cluster/5dnode3mnodeRecreateMnode.py -N 5 -M 3 python3 ./test.py -f 6-cluster/5dnode3mnodeStopFollowerLeader.py -N 5 -M 3 python3 ./test.py -f 6-cluster/5dnode3mnodeStop2Follower.py -N 5 -M 3 +# vnode case +python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_createDb_replica1.py -N 4 -M 1 +python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas.py -N 4 -M 1 +python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas_querys.py -N 4 -M 1 
+python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_force_stop_all_dnodes.py -N 4 -M 1 +python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas.py -N 4 -M 1 +python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_all_vnode.py -N 4 -M 1 +python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_follower.py -N 4 -M 1 +python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_leader.py -N 4 -M 1 +python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys.py -N 4 -M 1 +python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_all_dnodes.py -N 4 -M 1 +python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_sync.py -N 4 -M 1 +python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync_force_stop.py -N 4 -M 1 +python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync.py -N 4 -M 1 +python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader_forece_stop.py -N 4 -M 1 +python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader.py -N 4 -M 1 +python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_mnode3_insertdatas_querys.py -N 4 -M 1 +python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower_force_stop.py -N 4 -M 1 +python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower.py -N 4 -M 1 +python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader_force_stop.py -N 4 -M 1 +python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader.py -N 4 -M 1 +python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups.py -N 4 -M 1 +python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups_stopOne.py -N 4 -M 1 + python3 ./test.py -f 7-tmq/dropDbR3ConflictTransaction.py -N 3 python3 ./test.py -f 7-tmq/basic5.py From 8263f8224980ccb4faf6c02ab4e8db65202d7d86 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao <36554565+glzhao89@users.noreply.github.com> Date: Sat, 30 Jul 2022 17:40:03 +0800 Subject: [PATCH 50/63] Update 10-function.md --- docs/zh/12-taos-sql/10-function.md | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/docs/zh/12-taos-sql/10-function.md b/docs/zh/12-taos-sql/10-function.md index 7e4f3420b8..ace80ecaca 100644 --- a/docs/zh/12-taos-sql/10-function.md +++ b/docs/zh/12-taos-sql/10-function.md @@ -401,7 +401,7 @@ SELECT CAST(expression AS type_name) FROM { tb_name | stb_name } [WHERE clause] **返回结果类型**:CAST 中指定的类型(type_name)。 -**适用数据类型**:输入参数 expression 的类型可以是BLOB、MEDIUMBLOB和JSON外的所有类型。 +**适用数据类型**:输入参数 expression 的类型可以是除JSON外的所有类型。 **嵌套子查询支持**:适用于内层查询和外层查询。 @@ -410,10 +410,10 @@ SELECT CAST(expression AS type_name) FROM { tb_name | stb_name } [WHERE clause] **使用说明**: - 对于不能支持的类型转换会直接报错。 -- 对于类型支持但某些值无法正确转换的情况对应的转换后的值以转换函数输出为准。目前可能遇到的几种情况: +- 对于类型支持但某些值无法正确转换的情况,对应的转换后的值以转换函数输出为准。目前可能遇到的几种情况: 1)字符串类型转换数值类型时可能出现的无效字符情况,例如"a"可能转为0,但不会报错。 2)转换到数值类型时,数值大于type_name可表示的范围时,则会溢出,但不会报错。 - 3)转换到字符串类型时,如果转换后长度超过type_name的长度,则会截断,但不会报错。 + 3)转换到字符串类型时,如果转换后长度超过type_name中指定的长度,则会截断,但不会报错。 #### TO_ISO8601 @@ -421,7 +421,7 @@ SELECT CAST(expression AS type_name) FROM { tb_name | stb_name } [WHERE clause] SELECT TO_ISO8601(ts[, timezone]) FROM { tb_name | stb_name } [WHERE clause]; ``` -**功能说明**:将 UNIX 
时间戳转换成为 ISO8601 标准的日期时间格式,并附加时区信息。timezone 参数允许用户为输出结果指定附带任意时区信息。如果 timezone 参数省略,输出结果附带当前客户端的系统时区信息。 +**功能说明**:将 UNIX 时间戳转换成为 ISO8601 标准的日期时间格式,并附加时区信息。timezone 参数允许用户为输出结果指定附带任意时区信息。如果 timezone 参数省略,输出结果则附带当前客户端的系统时区信息。 **返回结果数据类型**:VARCHAR 类型。 @@ -435,7 +435,7 @@ SELECT TO_ISO8601(ts[, timezone]) FROM { tb_name | stb_name } [WHERE clause]; - timezone 参数允许输入的时区格式为: [z/Z, +/-hhmm, +/-hh, +/-hh:mm]。例如,TO_ISO8601(1, "+00:00")。 - 如果输入是表示 UNIX 时间戳的整形,返回格式精度由时间戳的位数决定; -- 如果输入是 TIMSTAMP 类型的列,返回格式的时间戳精度与当前 DATABASE 设置的时间精度一致。 +- 如果输入是 TIMESTAMP 类型的列,返回格式的时间戳精度与当前 DATABASE 设置的时间精度一致。 #### TO_JSON @@ -516,7 +516,7 @@ SELECT TIMEDIFF(ts | datetime_string1, ts | datetime_string2 [, time_unit]) FROM **功能说明**:计算两个时间戳之间的差值,并近似到时间单位 time_unit 指定的精度。 -**返回结果数据类型**:BIGINT。输入包含不符合时间日期格式字符串则返回 NULL。 +**返回结果数据类型**:BIGINT。 **应用字段**:表示 UNIX 时间戳的 BIGINT, TIMESTAMP 类型,或符合日期时间格式的 VARCHAR, NCHAR 类型。 @@ -528,6 +528,7 @@ SELECT TIMEDIFF(ts | datetime_string1, ts | datetime_string2 [, time_unit]) FROM - 支持的时间单位 time_unit 如下: 1b(纳秒), 1u(微秒),1a(毫秒),1s(秒),1m(分),1h(小时),1d(天), 1w(周)。 - 如果时间单位 time_unit 未指定, 返回的时间差值精度与当前 DATABASE 设置的时间精度一致。 +- 输入包含不符合时间日期格式的字符串则返回 NULL。 #### TIMETRUNCATE @@ -548,6 +549,7 @@ SELECT TIMETRUNCATE(ts | datetime_string , time_unit) FROM { tb_name | stb_name - 支持的时间单位 time_unit 如下: 1b(纳秒), 1u(微秒),1a(毫秒),1s(秒),1m(分),1h(小时),1d(天), 1w(周)。 - 返回的时间戳精度与当前 DATABASE 设置的时间精度一致。 +- 输入包含不符合时间日期格式的字符串则返回 NULL。 #### TIMEZONE From 8595267cbc9ad621100015cdd06988c77027b2f0 Mon Sep 17 00:00:00 2001 From: "wenzhouwww@live.cn" Date: Sat, 30 Jul 2022 17:46:28 +0800 Subject: [PATCH 51/63] update case --- tests/system-test/fulltest.sh | 44 +++++++++++++++++------------------ 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh index 86b1cfc030..75e3664bbd 100755 --- a/tests/system-test/fulltest.sh +++ b/tests/system-test/fulltest.sh @@ -194,28 +194,28 @@ python3 ./test.py -f 6-cluster/5dnode3mnodeStopFollowerLeader.py -N 5 -M 3 python3 ./test.py -f 6-cluster/5dnode3mnodeStop2Follower.py -N 5 -M 3 # vnode case -python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_createDb_replica1.py -N 4 -M 1 -python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas.py -N 4 -M 1 -python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas_querys.py -N 4 -M 1 -python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_force_stop_all_dnodes.py -N 4 -M 1 -python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas.py -N 4 -M 1 -python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_all_vnode.py -N 4 -M 1 -python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_follower.py -N 4 -M 1 -python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_leader.py -N 4 -M 1 -python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys.py -N 4 -M 1 -python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_all_dnodes.py -N 4 -M 1 -python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_sync.py -N 4 -M 1 -python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync_force_stop.py -N 4 -M 1 -python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync.py -N 4 -M 1 -python3 test.py -f 
6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader_forece_stop.py -N 4 -M 1 -python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader.py -N 4 -M 1 -python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_mnode3_insertdatas_querys.py -N 4 -M 1 -python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower_force_stop.py -N 4 -M 1 -python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower.py -N 4 -M 1 -python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader_force_stop.py -N 4 -M 1 -python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader.py -N 4 -M 1 -python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups.py -N 4 -M 1 -python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups_stopOne.py -N 4 -M 1 +# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_createDb_replica1.py -N 4 -M 1 +# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas.py -N 4 -M 1 +# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas_querys.py -N 4 -M 1 +# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_force_stop_all_dnodes.py -N 4 -M 1 +# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas.py -N 4 -M 1 +# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_all_vnode.py -N 4 -M 1 +# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_follower.py -N 4 -M 1 +# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_leader.py -N 4 -M 1 +# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys.py -N 4 -M 1 +# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_all_dnodes.py -N 4 -M 1 +# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_sync.py -N 4 -M 1 +# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync_force_stop.py -N 4 -M 1 +# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync.py -N 4 -M 1 +# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader_forece_stop.py -N 4 -M 1 +# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader.py -N 4 -M 1 +# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_mnode3_insertdatas_querys.py -N 4 -M 1 +# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower_force_stop.py -N 4 -M 1 +# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower.py -N 4 -M 1 +# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader_force_stop.py -N 4 -M 1 +# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader.py -N 4 -M 1 +# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups.py -N 4 -M 1 +# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups_stopOne.py -N 4 -M 1 python3 ./test.py -f 7-tmq/dropDbR3ConflictTransaction.py -N 3 From b8c982b305b09597a0e34dc60433a982d4740a92 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao <36554565+glzhao89@users.noreply.github.com> Date: Sat, 30 Jul 2022 17:48:37 +0800 Subject: [PATCH 52/63] Update 10-function.md --- docs/zh/12-taos-sql/10-function.md | 56 
++++++++++++++---------------- 1 file changed, 27 insertions(+), 29 deletions(-) diff --git a/docs/zh/12-taos-sql/10-function.md b/docs/zh/12-taos-sql/10-function.md index ace80ecaca..ce2df06e55 100644 --- a/docs/zh/12-taos-sql/10-function.md +++ b/docs/zh/12-taos-sql/10-function.md @@ -661,8 +661,6 @@ SELECT ELAPSED(ts_primary_key [, time_unit]) FROM { tb_name | stb_name } [WHERE **适用数据类型**:TIMESTAMP。 -**支持的版本**:2.6.0.0 及以后的版本。 - **适用于**: 表,超级表,嵌套查询的外层查询 **说明**: @@ -775,11 +773,11 @@ SELECT HISTOGRAM(field_name,bin_type, bin_description, normalized) FROM tb_nam 用户指定 bin 的具体数值。 - "linear_bin": "{"start": 0.0, "width": 5.0, "count": 5, "infinity": true}" - "start" 表示数据起始点,"width" 表示每次 bin 偏移量, "count" 为 bin 的总数,"infinity" 表示是否添加(-inf, inf)作为区间起点跟终点, + "start" 表示数据起始点,"width" 表示每次 bin 偏移量, "count" 为 bin 的总数,"infinity" 表示是否添加(-inf, inf)作为区间起点和终点, 生成区间为[-inf, 0.0, 5.0, 10.0, 15.0, 20.0, +inf]。 - "log_bin": "{"start":1.0, "factor": 2.0, "count": 5, "infinity": true}" - "start" 表示数据起始点,"factor" 表示按指数递增的因子,"count" 为 bin 的总数,"infinity" 表示是否添加(-inf, inf)作为区间起点跟终点, + "start" 表示数据起始点,"factor" 表示按指数递增的因子,"count" 为 bin 的总数,"infinity" 表示是否添加(-inf, inf)作为区间起点和终点, 生成区间为[-inf, 1.0, 2.0, 4.0, 8.0, 16.0, +inf]。 3. normalized 是否将返回结果归一化到 0~1 之间 。有效输入为 0 和 1。 @@ -960,20 +958,20 @@ SELECT MODE(field_name) FROM tb_name [WHERE clause]; SELECT SAMPLE(field_name, K) FROM { tb_name | stb_name } [WHERE clause] ``` - **功能说明**: 获取数据的 k 个采样值。参数 k 的合法输入范围是 1≤ k ≤ 1000。 +**功能说明**: 获取数据的 k 个采样值。参数 k 的合法输入范围是 1≤ k ≤ 1000。 - **返回结果类型**: 同原始数据类型, 返回结果中带有该行记录的时间戳。 +**返回结果类型**: 同原始数据类型, 返回结果中带有该行记录的时间戳。 - **适用数据类型**: 在超级表查询中使用时,不能应用在标签之上。 +**适用数据类型**: 在超级表查询中使用时,不能应用在标签之上。 - **嵌套子查询支持**: 适用于内层查询和外层查询。 +**嵌套子查询支持**: 适用于内层查询和外层查询。 - **适用于**:表和超级表。 +**适用于**:表和超级表。 - **使用说明**: - - - 不能参与表达式计算;该函数可以应用在普通表和超级表上; - - 使用在超级表上的时候,需要搭配 PARTITION by tbname 使用,将结果强制规约到单个时间线。 +**使用说明**: + +- 不能参与表达式计算;该函数可以应用在普通表和超级表上; +- 使用在超级表上的时候,需要搭配 PARTITION by tbname 使用,将结果强制规约到单个时间线。 ### TAIL @@ -1050,9 +1048,9 @@ SELECT CSUM(field_name) FROM { tb_name | stb_name } [WHERE clause] **使用说明**: - - 不支持 +、-、*、/ 运算,如 csum(col1) + csum(col2)。 - - 只能与聚合(Aggregation)函数一起使用。 该函数可以应用在普通表和超级表上。 - - 使用在超级表上的时候,需要搭配 PARTITION BY tbname使用,将结果强制规约到单个时间线。 +- 不支持 +、-、*、/ 运算,如 csum(col1) + csum(col2)。 +- 只能与聚合(Aggregation)函数一起使用。 该函数可以应用在普通表和超级表上。 +- 使用在超级表上的时候,需要搭配 PARTITION BY tbname使用,将结果强制规约到单个时间线。 ### DERIVATIVE @@ -1071,8 +1069,8 @@ SELECT DERIVATIVE(field_name, time_interval, ignore_negative) FROM tb_name [WHER **使用说明**: - - DERIVATIVE 函数可以在由 PARTITION BY 划分出单独时间线的情况下用于超级表(也即 PARTITION BY tbname)。 - - 可以与选择相关联的列一起使用。 例如: select \_rowts, DERIVATIVE() from。 +- DERIVATIVE 函数可以在由 PARTITION BY 划分出单独时间线的情况下用于超级表(也即 PARTITION BY tbname)。 +- 可以与选择相关联的列一起使用。 例如: select \_rowts, DERIVATIVE() from。 ### DIFF @@ -1090,8 +1088,8 @@ SELECT {DIFF(field_name, ignore_negative) | DIFF(field_name)} FROM tb_name [WHER **使用说明**: - - 输出结果行数是范围内总行数减一,第一行没有结果输出。 - - 可以与选择相关联的列一起使用。 例如: select \_rowts, DIFF() from。 +- 输出结果行数是范围内总行数减一,第一行没有结果输出。 +- 可以与选择相关联的列一起使用。 例如: select \_rowts, DIFF() from。 ### IRATE @@ -1115,21 +1113,21 @@ SELECT IRATE(field_name) FROM tb_name WHERE clause; SELECT MAVG(field_name, K) FROM { tb_name | stb_name } [WHERE clause] ``` - **功能说明**: 计算连续 k 个值的移动平均数(moving average)。如果输入行数小于 k,则无结果输出。参数 k 的合法输入范围是 1≤ k ≤ 1000。 +**功能说明**: 计算连续 k 个值的移动平均数(moving average)。如果输入行数小于 k,则无结果输出。参数 k 的合法输入范围是 1≤ k ≤ 1000。 - **返回结果类型**: DOUBLE。 +**返回结果类型**: DOUBLE。 - **适用数据类型**: 数值类型。 +**适用数据类型**: 数值类型。 - **嵌套子查询支持**: 适用于内层查询和外层查询。 +**嵌套子查询支持**: 适用于内层查询和外层查询。 - **适用于**:表和超级表。 
+**适用于**:表和超级表。 - **使用说明**: +**使用说明**: - - 不支持 +、-、*、/ 运算,如 mavg(col1, k1) + mavg(col2, k1); - - 只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用; - - 使用在超级表上的时候,需要搭配 PARTITION BY tbname使用,将结果强制规约到单个时间线。 +- 不支持 +、-、*、/ 运算,如 mavg(col1, k1) + mavg(col2, k1); +- 只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用; +- 使用在超级表上的时候,需要搭配 PARTITION BY tbname使用,将结果强制规约到单个时间线。 ### STATECOUNT From ed557fdc2a0e646f4290da2db3ef080857ef326e Mon Sep 17 00:00:00 2001 From: wenzhouwww Date: Sat, 30 Jul 2022 17:48:39 +0800 Subject: [PATCH 53/63] Update metaSnapshot.c --- source/dnode/vnode/src/meta/metaSnapshot.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/source/dnode/vnode/src/meta/metaSnapshot.c b/source/dnode/vnode/src/meta/metaSnapshot.c index 459bed464b..7f69c7a638 100644 --- a/source/dnode/vnode/src/meta/metaSnapshot.c +++ b/source/dnode/vnode/src/meta/metaSnapshot.c @@ -109,7 +109,7 @@ int32_t metaSnapRead(SMetaSnapReader* pReader, uint8_t** ppData) { } SSnapDataHdr* pHdr = (SSnapDataHdr*)(*ppData); - pHdr->type = 0; // TODO: use macro + pHdr->type = SNAP_DATA_META; pHdr->size = nData; memcpy(pHdr->data, pData, nData); @@ -144,8 +144,6 @@ int32_t metaSnapWriterOpen(SMeta* pMeta, int64_t sver, int64_t ever, SMetaSnapWr pWriter->pMeta = pMeta; pWriter->sver = sver; pWriter->ever = ever; - - metaBegin(pMeta); metaBegin(pMeta); @@ -196,4 +194,4 @@ int32_t metaSnapWrite(SMetaSnapWriter* pWriter, uint8_t* pData, uint32_t nData) _err: metaError("vgId:%d vnode snapshot meta write failed since %s", TD_VID(pMeta->pVnode), tstrerror(code)); return code; -} \ No newline at end of file +} From c691ecd107cc54dc2e44428ed59826bcf9903c27 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao <36554565+glzhao89@users.noreply.github.com> Date: Sat, 30 Jul 2022 17:51:54 +0800 Subject: [PATCH 54/63] Update 10-function.md --- docs/zh/12-taos-sql/10-function.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/zh/12-taos-sql/10-function.md b/docs/zh/12-taos-sql/10-function.md index ce2df06e55..bbf6b52eb9 100644 --- a/docs/zh/12-taos-sql/10-function.md +++ b/docs/zh/12-taos-sql/10-function.md @@ -767,8 +767,8 @@ SELECT HISTOGRAM(field_name,bin_type, bin_description, normalized) FROM tb_nam **适用于**: 表和超级表。 **详细说明**: -1. bin_type 用户指定的分桶类型, 有效输入类型为"user_input“, ”linear_bin", "log_bin"。 -2. bin_description 描述如何生成分桶区间,针对三种桶类型,分别为以下描述格式(均为 JSON 格式字符串): +- bin_type 用户指定的分桶类型, 有效输入类型为"user_input“, ”linear_bin", "log_bin"。 +- bin_description 描述如何生成分桶区间,针对三种桶类型,分别为以下描述格式(均为 JSON 格式字符串): - "user_input": "[1, 3, 5, 7]" 用户指定 bin 的具体数值。 @@ -779,7 +779,7 @@ SELECT HISTOGRAM(field_name,bin_type, bin_description, normalized) FROM tb_nam - "log_bin": "{"start":1.0, "factor": 2.0, "count": 5, "infinity": true}" "start" 表示数据起始点,"factor" 表示按指数递增的因子,"count" 为 bin 的总数,"infinity" 表示是否添加(-inf, inf)作为区间起点和终点, 生成区间为[-inf, 1.0, 2.0, 4.0, 8.0, 16.0, +inf]。 -3. 
normalized 是否将返回结果归一化到 0~1 之间 。有效输入为 0 和 1。 +- normalized 是否将返回结果归一化到 0~1 之间 。有效输入为 0 和 1。 ### PERCENTILE From 4c6de616d56cf9efcd648df430052b1bc0c26760 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao <36554565+glzhao89@users.noreply.github.com> Date: Sat, 30 Jul 2022 18:03:57 +0800 Subject: [PATCH 55/63] Update 10-function.md --- docs/en/12-taos-sql/10-function.md | 58 +++++++++++++++--------------- 1 file changed, 30 insertions(+), 28 deletions(-) diff --git a/docs/en/12-taos-sql/10-function.md b/docs/en/12-taos-sql/10-function.md index c5cbf5a70b..6375422b07 100644 --- a/docs/en/12-taos-sql/10-function.md +++ b/docs/en/12-taos-sql/10-function.md @@ -273,7 +273,7 @@ SELECT CONCAT(str1|column1, str2|column2, ...) FROM { tb_name | stb_name } [WHER **Return value type**: If all input strings are VARCHAR type, the result is VARCHAR type too. If any one of input strings is NCHAR type, then the result is NCHAR. If input strings contain NULL value, the result is NULL. -**Applicable data types**: VARCHAR, NCHAR. At least 2 input strings are requird, and at most 8 input strings are allowed. +**Applicable data types**: VARCHAR, NCHAR. At least 2 input strings are required, and at most 8 input strings are allowed. **Applicable table types**: table, STable. @@ -290,7 +290,7 @@ SELECT CONCAT_WS(separator, str1|column1, str2|column2, ...) FROM { tb_name | st **Return value type**: If all input strings are VARCHAR type, the result is VARCHAR type too. If any one of input strings is NCHAR type, then the result is NCHAR. If input strings contain NULL value, the result is NULL. -**Applicable data types**: VARCHAR, NCHAR. At least 3 input strings are requird, and at most 9 input strings are allowed. +**Applicable data types**: VARCHAR, NCHAR. At least 3 input strings are required, and at most 9 input strings are allowed. **Applicable table types**: table, STable. @@ -527,6 +527,7 @@ SELECT TIMEDIFF(ts1 | datetime_string1, ts2 | datetime_string2 [, time_unit]) FR - Time unit specified by `time_unit` can be: 1b(nanosecond), 1u(microsecond),1a(millisecond),1s(second),1m(minute),1h(hour),1d(day),1w(week). - If `time_unit` parameter is not specified, the precision of the returned time duration is same as the precision set for the current database in use. +- If input date-time string cannot be converted to UNIX timestamp, NULL value is returned. #### TIMETRUNCATE @@ -547,6 +548,7 @@ SELECT TIMETRUNCATE(ts_val | datetime_string | ts_col, time_unit) FROM { tb_name - Time unit specified by `time_unit` can be: 1b(nanosecond),1u(microsecond),1a(millisecond),1s(second),1m(minute),1h(hour),1d(day),1w(week). - The precision of the returned timestamp is same as the precision set for the current database in use. +- If input date-time string cannot be converted to UNIX timestamp, NULL value is returned. #### TIMEZONE @@ -599,7 +601,7 @@ FROM { tb_name | stb_name } [WHERE clause] **Return value type**: DOUBLE. -**Applicable column types**: Numeric types. +**Applicable data types**: Numeric types. **Applicable table types**: table, STable. @@ -654,7 +656,7 @@ SELECT ELAPSED(field_name[, time_unit]) FROM { tb_name | stb_name } [WHERE claus **Return value type**:DOUBLE. -**Applicable Column type**:TIMESTAMP. +**Applicable data type**:TIMESTAMP. **Applicable tables**: table, STable, outter in nested query. @@ -693,7 +695,7 @@ SELECT SPREAD(field_name) FROM { tb_name | stb_name } [WHERE clause]; **Return value type**: DOUBLE. -**Applicable column types**: Numeric types. +**Applicable data types**: Numeric types. 
**Applicable table types**: table, STable. @@ -709,7 +711,7 @@ SELECT STDDEV(field_name) FROM tb_name [WHERE clause]; **Return value type**: DOUBLE. -**Applicable column types**: Numeric types. +**Applicable data types**: Numeric types. **Applicable table types**: table, STable. @@ -723,7 +725,7 @@ SELECT SUM(field_name) FROM tb_name [WHERE clause]; **Return value type**: DOUBLE. -**Applicable column types**: Numeric types. +**Applicable data types**: Numeric types. **Applicable table types**: table, STable. @@ -737,7 +739,7 @@ SELECT HYPERLOGLOG(field_name) FROM { tb_name | stb_name } [WHERE clause]; **Return value type**: INTEGER. -**Applicable column types**: All data types. +**Applicable data types**: All data types. **More explanations**: The benefit of using hyperloglog algorithm is that the memory usage is under control when the data volume is huge. However, when the data volume is very small, the result may be not accurate, it's recommented to use `select count(data) from (select unique(col) as data from table)` in this case. @@ -751,14 +753,14 @@ SELECT HISTOGRAM(field_name,bin_type, bin_description, normalized) FROM tb_nam **Return value type**:DOUBLE or BIGINT, depends on normalized parameter settings. -**Applicable column type**:Numerical types. +**Applicable data type**:Numerical types. **Applicable table types**: table, STable. **Explanations**: -1. bin_type: parameter to indicate the bucket type, valid inputs are: "user_input", "linear_bin", "log_bin"。 -2. bin_description: parameter to describe the rule to generate buckets,can be in the following JSON formats for each bin_type respectively: +- bin_type: parameter to indicate the bucket type, valid inputs are: "user_input", "linear_bin", "log_bin"。 +- bin_description: parameter to describe the rule to generate buckets,can be in the following JSON formats for each bin_type respectively: - "user_input": "[1, 3, 5, 7]": User specified bin values. @@ -776,7 +778,7 @@ SELECT HISTOGRAM(field_name,bin_type, bin_description, normalized) FROM tb_nam "infinity" - whether to add(-inf, inf)as start/end point in generated range of bins. The above "log_bin" descriptor generates a set of bins:[-inf, 1.0, 2.0, 4.0, 8.0, 16.0, +inf]. -3. normalized: setting to 1/0 to turn on/off result normalization. +- normalized: setting to 1/0 to turn on/off result normalization. ### PERCENTILE @@ -788,7 +790,7 @@ SELECT PERCENTILE(field_name, P) FROM { tb_name } [WHERE clause]; **Return value type**: DOUBLE. -**Applicable column types**: Numeric types. +**Applicable data types**: Numeric types. **Applicable table types**: table. @@ -808,7 +810,7 @@ SELECT BOTTOM(field_name, K) FROM { tb_name | stb_name } [WHERE clause]; **Return value type**: Same as the column being operated upon. -**Applicable column types**: Numeric types. +**Applicable data types**: Numeric types. **Applicable table types**: table, STable. @@ -828,7 +830,7 @@ SELECT FIRST(field_name) FROM { tb_name | stb_name } [WHERE clause]; **Return value type**: Same as the column being operated upon. -**Applicable column types**: All data types. +**Applicable data types**: All data types. **Applicable table types**: table, STable. @@ -848,7 +850,7 @@ SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] [ **Return value type**: Same as the column being operated upon. -**Applicable column types**: Numeric data types. +**Applicable data types**: Numeric data types. **Applicable table types**: table, STable, nested query. 
@@ -872,7 +874,7 @@ SELECT LAST(field_name) FROM { tb_name | stb_name } [WHERE clause]; **Return value type**: Same as the column being operated upon. -**Applicable column types**: All data types. +**Applicable data types**: All data types. **Applicable table types**: table, STable. @@ -892,7 +894,7 @@ SELECT LAST_ROW(field_name) FROM { tb_name | stb_name }; **Return value type**: Same as the column being operated upon. -**Applicable column types**: All data type. +**Applicable data types**: All data type. **Applicable table types**: table, STable. @@ -911,7 +913,7 @@ SELECT MAX(field_name) FROM { tb_name | stb_name } [WHERE clause]; **Return value type**: Same as the data type of the column being operated upon. -**Applicable column types**: Numeric types. +**Applicable data types**: Numeric types. **Applicable table types**: table, STable. @@ -925,7 +927,7 @@ SELECT MIN(field_name) FROM {tb_name | stb_name} [WHERE clause]; **Return value type**: Same as the data type of the column being operated upon. -**Applicable column types**: Numeric types. +**Applicable data types**: Numeric types. **Applicable table types**: table, STable. @@ -939,7 +941,7 @@ SELECT MODE(field_name) FROM tb_name [WHERE clause]; **Return value type**:Same as the data type of the column being operated upon. -**Applicable column types**: All data types. +**Applicable data types**: All data types. **More explanations**:Considering the number of returned result set is unpredictable, it's suggested to limit the number of unique values to 100,000, otherwise error will be returned. @@ -976,7 +978,7 @@ SELECT TAIL(field_name, k, offset_val) FROM {tb_name | stb_name} [WHERE clause]; **Return value type**: Same as the column being operated upon. -**Applicable column types**: All data types. +**Applicable data types**: All data types. ### TOP @@ -988,7 +990,7 @@ SELECT TOP(field_name, K) FROM { tb_name | stb_name } [WHERE clause]; **Return value type**: Same as the column being operated upon. -**Applicable column types**: Numeric types. +**Applicable data types**: Numeric types. **Applicable table types**: table, STable. @@ -1008,7 +1010,7 @@ SELECT UNIQUE(field_name) FROM {tb_name | stb_name} [WHERE clause]; **Return value type**: Same as the column or tag being operated upon. -**Applicable column types**: All data types. +**Applicable data types**: All data types. **More explanations**: @@ -1050,7 +1052,7 @@ SELECT DERIVATIVE(field_name, time_interval, ignore_negative) FROM tb_name [WHER **Return value type**: DOUBLE. -**Applicable column types**: Numeric types. +**Applicable data types**: Numeric types. **Applicable table types**: table, STable. @@ -1070,7 +1072,7 @@ SELECT {DIFF(field_name, ignore_negative) | DIFF(field_name)} FROM tb_name [WHER **Return value type**: Same as the column being operated upon. -**Applicable column types**: Numeric types. +**Applicable data types**: Numeric types. **Applicable table types**: table, STable. @@ -1090,7 +1092,7 @@ SELECT IRATE(field_name) FROM tb_name WHERE clause; **Return value type**: DOUBLE. -**Applicable column types**: Numeric types. +**Applicable data types**: Numeric types. **Applicable table types**: table, STable. @@ -1183,7 +1185,7 @@ SELECT TWA(field_name) FROM tb_name WHERE clause; **Return value type**: DOUBLE. -**Applicable column types**: Numeric types. +**Applicable data types**: Numeric types. **Applicable table types**: table, STable. 
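Editorial note on the HISTOGRAM descriptors documented in the patch above: the arithmetic behind the "linear_bin" and "log_bin" examples is easy to check by hand. The sketch below is an editorial illustration only (plain C with hypothetical helper names such as linearBinEdges/logBinEdges), not code from TDengine; it assumes the field meanings shown in the worked examples above and reproduces their finite edge values, with -inf/+inf added as the outermost edges when "infinity" is true.

    #include <math.h>
    #include <stdio.h>

    /* Hypothetical helpers: emit the finite bin edges implied by a
     * "linear_bin" / "log_bin" descriptor, as illustrated in the docs above. */
    static void linearBinEdges(double start, double width, int count, double *edges) {
      for (int i = 0; i < count; ++i) {
        edges[i] = start + i * width;        /* start=0, width=5, count=5 -> 0 5 10 15 20 */
      }
    }

    static void logBinEdges(double start, double factor, int count, double *edges) {
      for (int i = 0; i < count; ++i) {
        edges[i] = start * pow(factor, i);   /* start=1, factor=2, count=5 -> 1 2 4 8 16 */
      }
    }

    int main(void) {
      double e[5];
      linearBinEdges(0.0, 5.0, 5, e);
      for (int i = 0; i < 5; ++i) printf("%g ", e[i]);
      printf("\n");
      logBinEdges(1.0, 2.0, 5, e);
      for (int i = 0; i < 5; ++i) printf("%g ", e[i]);
      printf("\n");
      return 0;
    }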
From 7bc773b8cdfefe96578611f712739f7c2d3787e6 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 30 Jul 2022 18:11:44 +0800 Subject: [PATCH 56/63] fix(query): remove invalid time window close ops. --- source/libs/executor/inc/executorimpl.h | 3 +- source/libs/executor/src/executorimpl.c | 1 + source/libs/executor/src/timewindowoperator.c | 98 ++++++++++++------- 3 files changed, 65 insertions(+), 37 deletions(-) diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h index a70e7cd1dd..d587e201ef 100644 --- a/source/libs/executor/inc/executorimpl.h +++ b/source/libs/executor/inc/executorimpl.h @@ -549,7 +549,8 @@ typedef struct SMergeAlignedIntervalAggOperatorInfo { SIntervalAggOperatorInfo *intervalAggOperatorInfo; bool hasGroupId; - uint64_t groupId; + uint64_t groupId; // current groupId + int64_t curTs; // current ts SSDataBlock* prefetchedBlock; bool inputBlocksFinished; diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index 084fbbea7c..d031494057 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -1445,6 +1445,7 @@ static void doUpdateNumOfRows(SResultRow* pRow, int32_t numOfExprs, const int32_ } } +// todo extract method with copytoSSDataBlock int32_t finalizeResultRowIntoResultDataBlock(SDiskbasedBuf* pBuf, SResultRowPosition* resultRowPosition, SqlFunctionCtx* pCtx, SExprInfo* pExprInfo, int32_t numOfExprs, const int32_t* rowCellOffset, SSDataBlock* pBlock, diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index fb1cb8dff5..31fd9a4c5f 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -4589,11 +4589,10 @@ void destroyMergeAlignedIntervalOperatorInfo(void* param, int32_t numOfOutput) { static int32_t outputMergeAlignedIntervalResult(SOperatorInfo* pOperatorInfo, uint64_t tableGroupId, SSDataBlock* pResultBlock, TSKEY wstartTs) { SMergeAlignedIntervalAggOperatorInfo* miaInfo = pOperatorInfo->info; - SIntervalAggOperatorInfo* iaInfo = miaInfo->intervalAggOperatorInfo; - SExecTaskInfo* pTaskInfo = pOperatorInfo->pTaskInfo; - SExprSupp* pSup = &pOperatorInfo->exprSupp; - bool ascScan = (iaInfo->inputOrder == TSDB_ORDER_ASC); + SIntervalAggOperatorInfo* iaInfo = miaInfo->intervalAggOperatorInfo; + SExecTaskInfo* pTaskInfo = pOperatorInfo->pTaskInfo; + SExprSupp* pSup = &pOperatorInfo->exprSupp; SET_RES_WINDOW_KEY(iaInfo->aggSup.keyBuf, &wstartTs, TSDB_KEYSIZE, tableGroupId); SResultRowPosition* p1 = (SResultRowPosition*)taosHashGet(iaInfo->aggSup.pResultRowHashTable, iaInfo->aggSup.keyBuf, @@ -4603,8 +4602,9 @@ static int32_t outputMergeAlignedIntervalResult(SOperatorInfo* pOperatorInfo, ui finalizeResultRowIntoResultDataBlock(iaInfo->aggSup.pResultBuf, p1, pSup->pCtx, pSup->pExprInfo, pSup->numOfExprs, pSup->rowEntryInfoOffset, pResultBlock, pTaskInfo); taosHashRemove(iaInfo->aggSup.pResultRowHashTable, iaInfo->aggSup.keyBuf, GET_RES_WINDOW_KEY_LEN(TSDB_KEYSIZE)); + ASSERT(taosHashGetSize(iaInfo->aggSup.pResultRowHashTable) == 0); - return 0; + return TSDB_CODE_SUCCESS; } static void doMergeAlignedIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResultRowInfo, @@ -4619,11 +4619,21 @@ static void doMergeAlignedIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultR int32_t numOfOutput = pSup->numOfExprs; int64_t* tsCols = extractTsCol(pBlock, iaInfo); uint64_t tableGroupId = pBlock->info.groupId; - TSKEY blockStartTs = 
getStartTsKey(&pBlock->info.window, tsCols); + TSKEY currTs = getStartTsKey(&pBlock->info.window, tsCols); SResultRow* pResult = NULL; - STimeWindow win; - win.skey = blockStartTs; + // there is an result exists + if (miaInfo->curTs != INT64_MIN) { + ASSERT(taosHashGetSize(iaInfo->aggSup.pResultRowHashTable) == 1); + if (currTs != miaInfo->curTs) { + outputMergeAlignedIntervalResult(pOperatorInfo, tableGroupId, pResultBlock, miaInfo->curTs); + } + + miaInfo->curTs = INT64_MIN; + } + + STimeWindow win = {0}; + win.skey = currTs; win.ekey = taosTimeAdd(win.skey, iaInfo->interval.interval, iaInfo->interval.intervalUnit, iaInfo->interval.precision) - 1; @@ -4634,41 +4644,44 @@ static void doMergeAlignedIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultR longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } - TSKEY currTs = blockStartTs; - TSKEY currPos = startPos; + int32_t currPos = startPos; + STimeWindow currWin = win; - while (1) { - ++currPos; - if (currPos >= pBlock->info.rows) { - break; - } + while (++currPos < pBlock->info.rows) { if (tsCols[currPos] == currTs) { continue; - } else { - updateTimeWindowInfo(&iaInfo->twAggSup.timeWindowData, &currWin, true); - doApplyFunctions(pTaskInfo, pSup->pCtx, &currWin, &iaInfo->twAggSup.timeWindowData, startPos, currPos - startPos, - tsCols, pBlock->info.rows, numOfOutput, iaInfo->inputOrder); + } - outputMergeAlignedIntervalResult(pOperatorInfo, tableGroupId, pResultBlock, currTs); + miaInfo->curTs = currWin.skey; + updateTimeWindowInfo(&iaInfo->twAggSup.timeWindowData, &currWin, true); + doApplyFunctions(pTaskInfo, pSup->pCtx, &currWin, &iaInfo->twAggSup.timeWindowData, startPos, currPos - startPos, + tsCols, pBlock->info.rows, numOfOutput, iaInfo->inputOrder); - currTs = tsCols[currPos]; - currWin.skey = currTs; - currWin.ekey = taosTimeAdd(currWin.skey, iaInfo->interval.interval, iaInfo->interval.intervalUnit, - iaInfo->interval.precision) - - 1; - startPos = currPos; - ret = setTimeWindowOutputBuf(pResultRowInfo, &currWin, (scanFlag == MAIN_SCAN), &pResult, tableGroupId, - pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset, &iaInfo->aggSup, pTaskInfo); - if (ret != TSDB_CODE_SUCCESS || pResult == NULL) { - longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); - } + outputMergeAlignedIntervalResult(pOperatorInfo, tableGroupId, pResultBlock, currTs); + + currTs = tsCols[currPos]; + currWin.skey = currTs; + currWin.ekey = taosTimeAdd(currWin.skey, iaInfo->interval.interval, iaInfo->interval.intervalUnit, + iaInfo->interval.precision) - 1; + + startPos = currPos; + ret = setTimeWindowOutputBuf(pResultRowInfo, &currWin, (scanFlag == MAIN_SCAN), &pResult, tableGroupId, pSup->pCtx, + numOfOutput, pSup->rowEntryInfoOffset, &iaInfo->aggSup, pTaskInfo); + if (ret != TSDB_CODE_SUCCESS || pResult == NULL) { + longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } } + updateTimeWindowInfo(&iaInfo->twAggSup.timeWindowData, &currWin, true); doApplyFunctions(pTaskInfo, pSup->pCtx, &currWin, &iaInfo->twAggSup.timeWindowData, startPos, currPos - startPos, tsCols, pBlock->info.rows, numOfOutput, iaInfo->inputOrder); - outputMergeAlignedIntervalResult(pOperatorInfo, tableGroupId, pResultBlock, currTs); + if (currPos >= pBlock->info.rows) { + // we need to see next block if exists + } else { + outputMergeAlignedIntervalResult(pOperatorInfo, tableGroupId, pResultBlock, currTs); + miaInfo->curTs = INT64_MIN; + } } static SSDataBlock* doMergeAlignedIntervalAgg(SOperatorInfo* pOperator) { @@ -4682,12 +4695,14 @@ static SSDataBlock* 
doMergeAlignedIntervalAgg(SOperatorInfo* pOperator) { SExprSupp* pSup = &pOperator->exprSupp; SSDataBlock* pRes = iaInfo->binfo.pRes; + blockDataCleanup(pRes); blockDataEnsureCapacity(pRes, pOperator->resultInfo.capacity); if (!miaInfo->inputBlocksFinished) { SOperatorInfo* downstream = pOperator->pDownstream[0]; int32_t scanFlag = MAIN_SCAN; + while (1) { SSDataBlock* pBlock = NULL; if (miaInfo->prefetchedBlock == NULL) { @@ -4699,6 +4714,14 @@ static SSDataBlock* doMergeAlignedIntervalAgg(SOperatorInfo* pOperator) { } if (pBlock == NULL) { + // close last unfinalized time window + if (miaInfo->curTs != INT64_MIN) { + ASSERT(taosHashGetSize(iaInfo->aggSup.pResultRowHashTable) == 1); + outputMergeAlignedIntervalResult(pOperator, miaInfo->groupId, pRes, miaInfo->curTs); + miaInfo->curTs = INT64_MIN; + } + + doSetOperatorCompleted(pOperator); miaInfo->inputBlocksFinished = true; break; } @@ -4707,7 +4730,11 @@ static SSDataBlock* doMergeAlignedIntervalAgg(SOperatorInfo* pOperator) { miaInfo->hasGroupId = true; miaInfo->groupId = pBlock->info.groupId; } else if (miaInfo->groupId != pBlock->info.groupId) { + // if there are unclosed time window, close it firstly. + ASSERT(miaInfo->curTs != INT64_MIN); + outputMergeAlignedIntervalResult(pOperator, miaInfo->groupId, pRes, miaInfo->curTs); miaInfo->prefetchedBlock = pBlock; + miaInfo->curTs = INT64_MIN; break; } @@ -4722,11 +4749,8 @@ static SSDataBlock* doMergeAlignedIntervalAgg(SOperatorInfo* pOperator) { pRes->info.groupId = miaInfo->groupId; } - miaInfo->hasGroupId = false; - if (miaInfo->inputBlocksFinished) { - doSetOperatorCompleted(pOperator); - } + miaInfo->hasGroupId = false; size_t rows = pRes->info.rows; pOperator->resultInfo.totalRows += rows; @@ -4752,6 +4776,8 @@ SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream, SExprSupp* pSup = &pOperator->exprSupp; miaInfo->pCondition = pCondition; + miaInfo->curTs = INT64_MIN; + iaInfo->win = pTaskInfo->window; iaInfo->inputOrder = TSDB_ORDER_ASC; iaInfo->interval = *pInterval; From ad3695cee6fb946a8d4622be2f3626063fc3abe7 Mon Sep 17 00:00:00 2001 From: Minghao Li Date: Sat, 30 Jul 2022 18:15:15 +0800 Subject: [PATCH 57/63] refactor(sync): return when snapshot start > end --- source/libs/sync/src/syncAppendEntriesReply.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/source/libs/sync/src/syncAppendEntriesReply.c b/source/libs/sync/src/syncAppendEntriesReply.c index bfaa785d0f..e0cc45bd1d 100644 --- a/source/libs/sync/src/syncAppendEntriesReply.c +++ b/source/libs/sync/src/syncAppendEntriesReply.c @@ -104,6 +104,16 @@ int32_t syncNodeOnAppendEntriesReplyCb(SSyncNode* ths, SyncAppendEntriesReply* p // only start once static void syncNodeStartSnapshotOnce(SSyncNode* ths, SyncIndex beginIndex, SyncIndex endIndex, SyncTerm lastApplyTerm, SyncAppendEntriesReply* pMsg) { + if (beginIndex > endIndex) { + do { + char logBuf[128]; + snprintf(logBuf, sizeof(logBuf), "snapshot param error, start:%ld, end:%ld", beginIndex, endIndex); + syncNodeErrorLog(ths, logBuf); + } while (0); + + return; + } + // get sender SSyncSnapshotSender* pSender = syncNodeGetSnapshotSender(ths, &(pMsg->srcId)); ASSERT(pSender != NULL); From f8be6486267d5a9ddb2892b81e5a47586043d014 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Sat, 30 Jul 2022 18:17:17 +0800 Subject: [PATCH 58/63] fix: new alter keep[012], walLevel, walFsyncPeriod, cacheLast --- source/dnode/vnode/src/vnd/vnodeSvr.c | 57 +++++++++++++++++++++++---- 1 file changed, 50 insertions(+), 7 deletions(-) diff --git 
a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index c1ce90d4ed..e399b785f7 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -297,8 +297,8 @@ int32_t vnodeProcessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg) { int32_t vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) { vTrace("message in fetch queue is processing"); - if ((pMsg->msgType == TDMT_SCH_FETCH || pMsg->msgType == TDMT_VND_TABLE_META || - pMsg->msgType == TDMT_VND_TABLE_CFG || pMsg->msgType == TDMT_VND_BATCH_META) && + if ((pMsg->msgType == TDMT_SCH_FETCH || pMsg->msgType == TDMT_VND_TABLE_META || pMsg->msgType == TDMT_VND_TABLE_CFG || + pMsg->msgType == TDMT_VND_BATCH_META) && !vnodeIsLeader(pVnode)) { vnodeRedirectRpcMsg(pVnode, pMsg); return 0; @@ -486,7 +486,7 @@ static int32_t vnodeProcessCreateTbReq(SVnode *pVnode, int64_t version, void *pR rcode = -1; goto _exit; } - + // validate hash sprintf(tbName, "%s.%s", pVnode->config.dbname, pCreateReq->name); if (vnodeValidateTableHash(pVnode, tbName) < 0) { @@ -516,7 +516,7 @@ static int32_t vnodeProcessCreateTbReq(SVnode *pVnode, int64_t version, void *pR tdUidStoreFree(pStore); // prepare rsp - int32_t ret = 0; + int32_t ret = 0; tEncodeSize(tEncodeSVCreateTbBatchRsp, &rsp, pRsp->contLen, ret); pRsp->pCont = rpcMallocCont(pRsp->contLen); if (pRsp->pCont == NULL) { @@ -977,6 +977,8 @@ static int32_t vnodeProcessAlterHashRangeReq(SVnode *pVnode, int64_t version, vo static int32_t vnodeProcessAlterConfigReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp) { SAlterVnodeReq alterReq = {0}; + bool walChanged = false; + if (tDeserializeSAlterVnodeReq(pReq, len, &alterReq) != 0) { terrno = TSDB_CODE_INVALID_MSG; return TSDB_CODE_INVALID_MSG; @@ -986,9 +988,50 @@ static int32_t vnodeProcessAlterConfigReq(SVnode *pVnode, int64_t version, void alterReq.cacheLastSize); if (pVnode->config.cacheLastSize != alterReq.cacheLastSize) { pVnode->config.cacheLastSize = alterReq.cacheLastSize; - // TODO: save config tsdbCacheSetCapacity(pVnode, (size_t)pVnode->config.cacheLastSize * 1024 * 1024); } + + if (pVnode->config.cacheLast != alterReq.cacheLast) { + pVnode->config.cacheLast = alterReq.cacheLast; + } + + if (pVnode->config.walCfg.fsyncPeriod != alterReq.walFsyncPeriod) { + pVnode->config.walCfg.fsyncPeriod = alterReq.walFsyncPeriod; + + walChanged = true; + } + + if (pVnode->config.walCfg.level != alterReq.walLevel) { + pVnode->config.walCfg.level = alterReq.walLevel; + + walChanged = true; + } + + if (pVnode->config.tsdbCfg.keep0 != alterReq.daysToKeep0) { + pVnode->config.tsdbCfg.keep0 != alterReq.daysToKeep0; + if (!VND_IS_RSMA(pVnode)) { + pVnode->pTsdb->keepCfg.keep0 = alterReq.daysToKeep0; + } + } + + if (pVnode->config.tsdbCfg.keep1 != alterReq.daysToKeep1) { + pVnode->config.tsdbCfg.keep1 != alterReq.daysToKeep1; + if (!VND_IS_RSMA(pVnode)) { + pVnode->pTsdb->keepCfg.keep1 = alterReq.daysToKeep1; + } + } + + if (pVnode->config.tsdbCfg.keep2 != alterReq.daysToKeep2) { + pVnode->config.tsdbCfg.keep2 != alterReq.daysToKeep2; + if (!VND_IS_RSMA(pVnode)) { + pVnode->pTsdb->keepCfg.keep2 = alterReq.daysToKeep2; + } + } + + if (walChanged) { + walAlter(pVnode->pWal, pVnode->config.walCfg); + } + return 0; } @@ -1021,10 +1064,10 @@ static int32_t vnodeProcessDeleteReq(SVnode *pVnode, int64_t version, void *pReq taosArrayDestroy(pRes->uidList); SVDeleteRsp rsp = {.affectedRows = pRes->affectedRows}; - int32_t ret = 0; + int32_t ret = 0; tEncodeSize(tEncodeSVDeleteRsp, &rsp, 
pRsp->contLen, ret); pRsp->pCont = rpcMallocCont(pRsp->contLen); - SEncoder ec = {0}; + SEncoder ec = {0}; tEncoderInit(&ec, pRsp->pCont, pRsp->contLen); tEncodeSVDeleteRsp(&ec, &rsp); tEncoderClear(&ec); From 97c238875e18b4977455331a8790704fc58108a8 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 30 Jul 2022 18:19:14 +0800 Subject: [PATCH 59/63] refactor: do some internal refactor. --- source/libs/executor/src/timewindowoperator.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 31fd9a4c5f..dd0adb8da6 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -4627,9 +4627,8 @@ static void doMergeAlignedIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultR ASSERT(taosHashGetSize(iaInfo->aggSup.pResultRowHashTable) == 1); if (currTs != miaInfo->curTs) { outputMergeAlignedIntervalResult(pOperatorInfo, tableGroupId, pResultBlock, miaInfo->curTs); + miaInfo->curTs = INT64_MIN; } - - miaInfo->curTs = INT64_MIN; } STimeWindow win = {0}; @@ -4644,6 +4643,7 @@ static void doMergeAlignedIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultR longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } + miaInfo->curTs = win.skey; int32_t currPos = startPos; STimeWindow currWin = win; @@ -4652,12 +4652,12 @@ static void doMergeAlignedIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultR continue; } - miaInfo->curTs = currWin.skey; updateTimeWindowInfo(&iaInfo->twAggSup.timeWindowData, &currWin, true); doApplyFunctions(pTaskInfo, pSup->pCtx, &currWin, &iaInfo->twAggSup.timeWindowData, startPos, currPos - startPos, tsCols, pBlock->info.rows, numOfOutput, iaInfo->inputOrder); outputMergeAlignedIntervalResult(pOperatorInfo, tableGroupId, pResultBlock, currTs); + miaInfo->curTs = INT64_MIN; currTs = tsCols[currPos]; currWin.skey = currTs; @@ -4670,6 +4670,8 @@ static void doMergeAlignedIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultR if (ret != TSDB_CODE_SUCCESS || pResult == NULL) { longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } + + miaInfo->curTs = currWin.skey; } updateTimeWindowInfo(&iaInfo->twAggSup.timeWindowData, &currWin, true); From 71b5cf5a3994544795465f31ee9c125ce83a339f Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 30 Jul 2022 18:23:04 +0800 Subject: [PATCH 60/63] refactor: do some internal refactor. 
--- source/libs/executor/src/timewindowoperator.c | 1 + 1 file changed, 1 insertion(+) diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index dd0adb8da6..63143875a3 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -4681,6 +4681,7 @@ static void doMergeAlignedIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultR if (currPos >= pBlock->info.rows) { // we need to see next block if exists } else { + ASSERT(0); outputMergeAlignedIntervalResult(pOperatorInfo, tableGroupId, pResultBlock, currTs); miaInfo->curTs = INT64_MIN; } From bd473f7eb0fe772a65db177385215b6912ed2c3d Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Sat, 30 Jul 2022 19:26:10 +0800 Subject: [PATCH 61/63] fix: dismiss compiling issues --- source/dnode/vnode/src/inc/vnodeInt.h | 1 + source/dnode/vnode/src/tsdb/tsdbOpen.c | 9 +++------ source/dnode/vnode/src/vnd/vnodeSvr.c | 13 +++++++++---- 3 files changed, 13 insertions(+), 10 deletions(-) diff --git a/source/dnode/vnode/src/inc/vnodeInt.h b/source/dnode/vnode/src/inc/vnodeInt.h index b1da5a7883..ce83b335d7 100644 --- a/source/dnode/vnode/src/inc/vnodeInt.h +++ b/source/dnode/vnode/src/inc/vnodeInt.h @@ -135,6 +135,7 @@ int32_t tsdbInsertTableData(STsdb* pTsdb, int64_t version, SSubmitMsgIter* p int32_t tsdbDeleteTableData(STsdb* pTsdb, int64_t version, tb_uid_t suid, tb_uid_t uid, TSKEY sKey, TSKEY eKey); STsdbReader tsdbQueryCacheLastT(STsdb* tsdb, SQueryTableDataCond* pCond, STableListInfo* tableList, uint64_t qId, void* pMemRef); +int32_t tsdbSetKeepCfg(STsdb* pTsdb, STsdbCfg* pCfg); // tq int tqInit(); diff --git a/source/dnode/vnode/src/tsdb/tsdbOpen.c b/source/dnode/vnode/src/tsdb/tsdbOpen.c index 0b355d91b4..3c832e9c2a 100644 --- a/source/dnode/vnode/src/tsdb/tsdbOpen.c +++ b/source/dnode/vnode/src/tsdb/tsdbOpen.c @@ -15,11 +15,8 @@ #include "tsdb.h" -static int tsdbSetKeepCfg(STsdbKeepCfg *pKeepCfg, STsdbCfg *pCfg); - -// implementation - -static int tsdbSetKeepCfg(STsdbKeepCfg *pKeepCfg, STsdbCfg *pCfg) { +int32_t tsdbSetKeepCfg(STsdb *pTsdb, STsdbCfg *pCfg) { + STsdbKeepCfg *pKeepCfg = &pTsdb->keepCfg; pKeepCfg->precision = pCfg->precision; pKeepCfg->days = pCfg->days; pKeepCfg->keep0 = pCfg->keep0; @@ -56,7 +53,7 @@ int tsdbOpen(SVnode *pVnode, STsdb **ppTsdb, const char *dir, STsdbKeepCfg *pKee pTsdb->pVnode = pVnode; taosThreadRwlockInit(&pTsdb->rwLock, NULL); if (!pKeepCfg) { - tsdbSetKeepCfg(&pTsdb->keepCfg, &pVnode->config.tsdbCfg); + tsdbSetKeepCfg(pTsdb, &pVnode->config.tsdbCfg); } else { memcpy(&pTsdb->keepCfg, pKeepCfg, sizeof(STsdbKeepCfg)); } diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index e399b785f7..b230994a2f 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -978,6 +978,7 @@ static int32_t vnodeProcessAlterHashRangeReq(SVnode *pVnode, int64_t version, vo static int32_t vnodeProcessAlterConfigReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp) { SAlterVnodeReq alterReq = {0}; bool walChanged = false; + bool tsdbChanged = false; if (tDeserializeSAlterVnodeReq(pReq, len, &alterReq) != 0) { terrno = TSDB_CODE_INVALID_MSG; @@ -1010,26 +1011,30 @@ static int32_t vnodeProcessAlterConfigReq(SVnode *pVnode, int64_t version, void if (pVnode->config.tsdbCfg.keep0 != alterReq.daysToKeep0) { pVnode->config.tsdbCfg.keep0 != alterReq.daysToKeep0; if (!VND_IS_RSMA(pVnode)) { - pVnode->pTsdb->keepCfg.keep0 = 
alterReq.daysToKeep0; + tsdbChanged = true; } } if (pVnode->config.tsdbCfg.keep1 != alterReq.daysToKeep1) { pVnode->config.tsdbCfg.keep1 != alterReq.daysToKeep1; if (!VND_IS_RSMA(pVnode)) { - pVnode->pTsdb->keepCfg.keep1 = alterReq.daysToKeep1; + tsdbChanged = true; } } if (pVnode->config.tsdbCfg.keep2 != alterReq.daysToKeep2) { pVnode->config.tsdbCfg.keep2 != alterReq.daysToKeep2; if (!VND_IS_RSMA(pVnode)) { - pVnode->pTsdb->keepCfg.keep2 = alterReq.daysToKeep2; + tsdbChanged = true; } } if (walChanged) { - walAlter(pVnode->pWal, pVnode->config.walCfg); + walAlter(pVnode->pWal, &pVnode->config.walCfg); + } + + if (tsdbChanged) { + tsdbSetKeepCfg(pVnode->pTsdb, &pVnode->config.tsdbCfg); } return 0; From 2d8afab93bebc85b53627911a62acbcf9d24bc15 Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Sat, 30 Jul 2022 19:29:10 +0800 Subject: [PATCH 62/63] enh: support limit in explain --- source/libs/command/inc/commandInt.h | 19 +++++++ source/libs/command/src/explain.c | 80 +++++++++++++++++++++------- tests/script/tsim/query/explain.sim | 1 + 3 files changed, 81 insertions(+), 19 deletions(-) diff --git a/source/libs/command/inc/commandInt.h b/source/libs/command/inc/commandInt.h index 104c007756..2ae4666ff6 100644 --- a/source/libs/command/inc/commandInt.h +++ b/source/libs/command/inc/commandInt.h @@ -63,6 +63,8 @@ extern "C" { #define EXPLAIN_EXEC_TIME_FORMAT "Execution Time: %.3f ms" //append area +#define EXPLAIN_LIMIT_FORMAT "limit=%" PRId64 +#define EXPLAIN_SLIMIT_FORMAT "slimit=%" PRId64 #define EXPLAIN_LEFT_PARENTHESIS_FORMAT " (" #define EXPLAIN_RIGHT_PARENTHESIS_FORMAT ")" #define EXPLAIN_BLANK_FORMAT " " @@ -81,6 +83,8 @@ extern "C" { #define EXPLAIN_STRING_TYPE_FORMAT "%s" #define EXPLAIN_INPUT_ORDER_FORMAT "input_order=%s" #define EXPLAIN_OUTPUT_ORDER_TYPE_FORMAT "output_order=%s" +#define EXPLAIN_OFFSET_FORMAT "offset=%d" +#define EXPLAIN_SOFFSET_FORMAT "soffset=%d" #define COMMAND_RESET_LOG "resetLog" #define COMMAND_SCHEDULE_POLICY "schedulePolicy" @@ -147,6 +151,21 @@ typedef struct SExplainCtx { #define EXPLAIN_SUM_ROW_NEW(...) tlen = snprintf(tbuf + VARSTR_HEADER_SIZE, TSDB_EXPLAIN_RESULT_ROW_SIZE - VARSTR_HEADER_SIZE, __VA_ARGS__) #define EXPLAIN_SUM_ROW_END() do { varDataSetLen(tbuf, tlen); tlen += VARSTR_HEADER_SIZE; } while (0) +#define EXPLAIN_ROW_APPEND_LIMIT_IMPL(_pLimit, sl) do { \ + if (_pLimit) { \ + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); \ + SLimitNode* pLimit = (SLimitNode*)_pLimit; \ + EXPLAIN_ROW_APPEND(((sl) ? EXPLAIN_SLIMIT_FORMAT : EXPLAIN_LIMIT_FORMAT), pLimit->limit); \ + if (pLimit->offset) { \ + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); \ + EXPLAIN_ROW_APPEND(((sl) ? 
EXPLAIN_SOFFSET_FORMAT : EXPLAIN_OFFSET_FORMAT), pLimit->offset);\ + } \ + } \ +} while (0) + +#define EXPLAIN_ROW_APPEND_LIMIT(_pLimit) EXPLAIN_ROW_APPEND_LIMIT_IMPL(_pLimit, false) +#define EXPLAIN_ROW_APPEND_SLIMIT(_pLimit) EXPLAIN_ROW_APPEND_LIMIT_IMPL(_pLimit, true) + #ifdef __cplusplus } #endif diff --git a/source/libs/command/src/explain.c b/source/libs/command/src/explain.c index 66a94f7e28..c080b666cc 100644 --- a/source/libs/command/src/explain.c +++ b/source/libs/command/src/explain.c @@ -417,6 +417,8 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i nodesGetOutputNumFromSlotList(pTagScanNode->node.pOutputDataBlockDesc->pSlots)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pTagScanNode->node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_APPEND_LIMIT(pTagScanNode->node.pLimit); + EXPLAIN_ROW_APPEND_SLIMIT(pTagScanNode->node.pSlimit); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); @@ -523,6 +525,8 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i nodesGetOutputNumFromSlotList(pTblScanNode->scan.node.pOutputDataBlockDesc->pSlots)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pTblScanNode->scan.node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_APPEND_LIMIT(pTblScanNode->scan.node.pLimit); + EXPLAIN_ROW_APPEND_SLIMIT(pTblScanNode->scan.node.pSlimit); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); @@ -567,6 +571,8 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i nodesGetOutputNumFromSlotList(pSTblScanNode->scan.node.pOutputDataBlockDesc->pSlots)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pSTblScanNode->scan.node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_APPEND_LIMIT(pSTblScanNode->scan.node.pLimit); + EXPLAIN_ROW_APPEND_SLIMIT(pSTblScanNode->scan.node.pSlimit); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); @@ -576,7 +582,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen)); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); - } + } } break; } @@ -601,6 +607,8 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i nodesGetOutputNumFromSlotList(pPrjNode->node.pOutputDataBlockDesc->pSlots)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pPrjNode->node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_APPEND_LIMIT(pPrjNode->node.pLimit); + EXPLAIN_ROW_APPEND_SLIMIT(pPrjNode->node.pSlimit); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); @@ -610,7 +618,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen)); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); - } + } } break; } @@ -635,6 +643,8 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i nodesGetOutputNumFromSlotList(pJoinNode->node.pOutputDataBlockDesc->pSlots)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pJoinNode->node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_APPEND_LIMIT(pJoinNode->node.pLimit); + EXPLAIN_ROW_APPEND_SLIMIT(pJoinNode->node.pSlimit); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, 
tbuf, tlen, level + 1)); @@ -650,7 +660,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i QRY_ERR_RET( nodesNodeToSQL(pJoinNode->pOnConditions, tbuf + VARSTR_HEADER_SIZE, TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen)); EXPLAIN_ROW_END(); - QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); } break; } @@ -681,6 +691,8 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i nodesGetOutputNumFromSlotList(pAggNode->node.pOutputDataBlockDesc->pSlots)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pAggNode->node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_APPEND_LIMIT(pAggNode->node.pLimit); + EXPLAIN_ROW_APPEND_SLIMIT(pAggNode->node.pSlimit); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); @@ -690,7 +702,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen)); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); - } + } } break; } @@ -717,6 +729,8 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i nodesGetOutputNumFromSlotList(pIndefNode->node.pOutputDataBlockDesc->pSlots)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pIndefNode->node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_APPEND_LIMIT(pIndefNode->node.pLimit); + EXPLAIN_ROW_APPEND_SLIMIT(pIndefNode->node.pSlimit); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); @@ -726,7 +740,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen)); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); - } + } } break; } @@ -755,6 +769,8 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i nodesGetOutputNumFromSlotList(pExchNode->node.pOutputDataBlockDesc->pSlots)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pExchNode->node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_APPEND_LIMIT(pExchNode->node.pLimit); + EXPLAIN_ROW_APPEND_SLIMIT(pExchNode->node.pSlimit); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); @@ -764,7 +780,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen)); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); - } + } } QRY_ERR_RET(qExplainAppendGroupResRows(ctx, pExchNode->srcGroupId, level + 1)); @@ -826,6 +842,8 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i nodesGetOutputNumFromSlotList(pSortNode->node.pOutputDataBlockDesc->pSlots)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pSortNode->node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_APPEND_LIMIT(pSortNode->node.pLimit); + EXPLAIN_ROW_APPEND_SLIMIT(pSortNode->node.pSlimit); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); @@ -835,7 +853,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen)); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); - } + } } break; } @@ -864,6 +882,8 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx 
*ctx, i nodesGetOutputNumFromSlotList(pIntNode->window.node.pOutputDataBlockDesc->pSlots)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pIntNode->window.node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_APPEND_LIMIT(pIntNode->window.node.pLimit); + EXPLAIN_ROW_APPEND_SLIMIT(pIntNode->window.node.pSlimit); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); uint8_t precision = getIntervalPrecision(pIntNode); @@ -881,7 +901,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen)); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); - } + } } break; } @@ -906,6 +926,8 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i nodesGetOutputNumFromSlotList(pIntNode->window.node.pOutputDataBlockDesc->pSlots)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pIntNode->window.node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_APPEND_LIMIT(pIntNode->window.node.pLimit); + EXPLAIN_ROW_APPEND_SLIMIT(pIntNode->window.node.pSlimit); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); uint8_t precision = getIntervalPrecision(pIntNode); @@ -923,7 +945,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen)); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); - } + } } break; } @@ -948,6 +970,8 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i nodesGetOutputNumFromSlotList(pFillNode->node.pOutputDataBlockDesc->pSlots)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pFillNode->node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_APPEND_LIMIT(pFillNode->node.pLimit); + EXPLAIN_ROW_APPEND_SLIMIT(pFillNode->node.pSlimit); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); if (pFillNode->pValues) { @@ -980,7 +1004,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen)); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); - } + } } break; } @@ -1005,6 +1029,8 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i nodesGetOutputNumFromSlotList(pSessNode->window.node.pOutputDataBlockDesc->pSlots)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pSessNode->window.node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_APPEND_LIMIT(pSessNode->window.node.pLimit); + EXPLAIN_ROW_APPEND_SLIMIT(pSessNode->window.node.pSlimit); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); @@ -1018,7 +1044,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen)); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); - } + } } break; } @@ -1046,6 +1072,8 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i nodesGetOutputNumFromSlotList(pStateNode->window.node.pOutputDataBlockDesc->pSlots)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pStateNode->window.node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_APPEND_LIMIT(pStateNode->window.node.pLimit); + EXPLAIN_ROW_APPEND_SLIMIT(pStateNode->window.node.pSlimit); 
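/* ------------------------------------------------------------------------
 * Editorial aside, not part of this patch: a self-contained sketch of what
 * the EXPLAIN_ROW_APPEND_LIMIT / EXPLAIN_ROW_APPEND_SLIMIT macros added in
 * commandInt.h boil down to -- when a (s)limit node is attached to the plan
 * node, append " limit=<n>" (or " slimit=<n>") and, for a non-zero offset,
 * " offset=<m>" (or " soffset=<m>") to the explain row buffer. All names
 * below are illustrative stand-ins, not the real TDengine structures.
 * ------------------------------------------------------------------------ */
#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct { int64_t limit; int64_t offset; } DemoLimitNode;

static size_t demoAppendLimit(char *buf, size_t cap, size_t len,
                              const DemoLimitNode *pLimit, bool slimit) {
  if (pLimit == NULL) return len;                        /* nothing to print */
  len += snprintf(buf + len, cap - len, " %s=%" PRId64,
                  slimit ? "slimit" : "limit", pLimit->limit);
  if (pLimit->offset) {                                  /* offset only when non-zero */
    len += snprintf(buf + len, cap - len, " %s=%" PRId64,
                    slimit ? "soffset" : "offset", pLimit->offset);
  }
  return len;
}

/* Usage sketch:
 *   char row[64]; DemoLimitNode l = {.limit = 2, .offset = 1};
 *   size_t n = demoAppendLimit(row, sizeof(row), 0, &l, false);
 *   -> row now holds " limit=2 offset=1", mirroring the verbose explain output. */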
EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); @@ -1058,7 +1086,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen)); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); - } + } } break; } @@ -1084,6 +1112,8 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i nodesGetOutputNumFromSlotList(pPartNode->node.pOutputDataBlockDesc->pSlots)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pPartNode->node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_APPEND_LIMIT(pPartNode->node.pLimit); + EXPLAIN_ROW_APPEND_SLIMIT(pPartNode->node.pSlimit); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); @@ -1093,7 +1123,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen)); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); - } + } } break; } @@ -1153,6 +1183,8 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i nodesGetOutputNumFromSlotList(pMergeNode->node.pOutputDataBlockDesc->pSlots)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pMergeNode->node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_APPEND_LIMIT(pMergeNode->node.pLimit); + EXPLAIN_ROW_APPEND_SLIMIT(pMergeNode->node.pSlimit); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); @@ -1172,7 +1204,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen)); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); - } + } } break; } @@ -1202,6 +1234,8 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i nodesGetOutputNumFromSlotList(pDistScanNode->node.pOutputDataBlockDesc->pSlots)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pDistScanNode->node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_APPEND_LIMIT(pDistScanNode->node.pLimit); + EXPLAIN_ROW_APPEND_SLIMIT(pDistScanNode->node.pSlimit); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); @@ -1211,7 +1245,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen)); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); - } + } } break; } @@ -1241,6 +1275,8 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i nodesGetOutputNumFromSlotList(pLastRowNode->scan.node.pOutputDataBlockDesc->pSlots)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pLastRowNode->scan.node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_APPEND_LIMIT(pLastRowNode->scan.node.pLimit); + EXPLAIN_ROW_APPEND_SLIMIT(pLastRowNode->scan.node.pSlimit); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); @@ -1250,7 +1286,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen)); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); - } + } } break; } @@ -1310,6 +1346,8 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i nodesGetOutputNumFromSlotList(pSortNode->node.pOutputDataBlockDesc->pSlots)); 
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pSortNode->node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_APPEND_LIMIT(pSortNode->node.pLimit); + EXPLAIN_ROW_APPEND_SLIMIT(pSortNode->node.pSlimit); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); @@ -1319,7 +1357,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen)); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); - } + } } break; } @@ -1342,6 +1380,8 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i nodesGetOutputNumFromSlotList(pIntNode->window.node.pOutputDataBlockDesc->pSlots)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pIntNode->window.node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_APPEND_LIMIT(pIntNode->window.node.pLimit); + EXPLAIN_ROW_APPEND_SLIMIT(pIntNode->window.node.pSlimit); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); uint8_t precision = getIntervalPrecision(pIntNode); @@ -1359,7 +1399,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen)); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); - } + } } break; } @@ -1389,6 +1429,8 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i nodesGetOutputNumFromSlotList(pInterpNode->node.pOutputDataBlockDesc->pSlots)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pInterpNode->node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_APPEND_LIMIT(pInterpNode->node.pLimit); + EXPLAIN_ROW_APPEND_SLIMIT(pInterpNode->node.pSlimit); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); if (pInterpNode->pFillValues) { @@ -1424,7 +1466,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen)); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); - } + } } break; } diff --git a/tests/script/tsim/query/explain.sim b/tests/script/tsim/query/explain.sim index 44f5eb74e7..fcf91c45da 100644 --- a/tests/script/tsim/query/explain.sim +++ b/tests/script/tsim/query/explain.sim @@ -45,6 +45,7 @@ print ======== step3 sql explain verbose true select * from st1 where -2; sql explain verbose true select ts from tb1 where f1 > 0; sql explain verbose true select * from st1 where f1 > 0 and ts > '2020-10-31 00:00:00' and ts < '2021-10-31 00:00:00'; +sql explain verbose true select count(*) from st1 partition by tbname slimit 1 soffset 2 limit 2 offset 1; sql explain verbose true select * from information_schema.user_stables where db_name='db2'; print ======== step4 From 734ffb583e72ce89198795d01746d220e04af9c2 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Sat, 30 Jul 2022 20:00:39 +0800 Subject: [PATCH 63/63] fix: fix assignment typos --- source/dnode/vnode/src/vnd/vnodeSvr.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index b230994a2f..8aa3a7d296 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -1009,21 +1009,21 @@ static int32_t vnodeProcessAlterConfigReq(SVnode *pVnode, int64_t version, void } if (pVnode->config.tsdbCfg.keep0 != alterReq.daysToKeep0) { - 
pVnode->config.tsdbCfg.keep0 != alterReq.daysToKeep0; + pVnode->config.tsdbCfg.keep0 = alterReq.daysToKeep0; if (!VND_IS_RSMA(pVnode)) { tsdbChanged = true; } } if (pVnode->config.tsdbCfg.keep1 != alterReq.daysToKeep1) { - pVnode->config.tsdbCfg.keep1 != alterReq.daysToKeep1; + pVnode->config.tsdbCfg.keep1 = alterReq.daysToKeep1; if (!VND_IS_RSMA(pVnode)) { tsdbChanged = true; } } if (pVnode->config.tsdbCfg.keep2 != alterReq.daysToKeep2) { - pVnode->config.tsdbCfg.keep2 != alterReq.daysToKeep2; + pVnode->config.tsdbCfg.keep2 = alterReq.daysToKeep2; if (!VND_IS_RSMA(pVnode)) { tsdbChanged = true; }
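/* ------------------------------------------------------------------------
 * Editorial aside, not part of this patch: the removed lines above were
 * expression statements -- `cfg.keep0 != req.daysToKeep0;` evaluates the
 * comparison and discards the result, so the new retention value was never
 * stored. The fix keeps the comparison as the change detector and uses `=`
 * to actually apply the value. A minimal sketch of that detect-then-apply
 * pattern follows (illustrative names only, not the real vnode structures).
 * ------------------------------------------------------------------------ */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { int32_t keep0, keep1, keep2; } DemoKeepCfg;

/* Returns true when any keep value changed, so the caller can push the new
 * retention config to the storage layer exactly once. */
static bool demoApplyKeep(DemoKeepCfg *cfg, int32_t k0, int32_t k1, int32_t k2) {
  bool changed = false;
  if (cfg->keep0 != k0) { cfg->keep0 = k0; changed = true; }   /* compare, then assign */
  if (cfg->keep1 != k1) { cfg->keep1 = k1; changed = true; }
  if (cfg->keep2 != k2) { cfg->keep2 = k2; changed = true; }
  return changed;
}

int main(void) {
  DemoKeepCfg cfg = {3650, 3650, 3650};
  if (demoApplyKeep(&cfg, 365, 3650, 3650)) {
    printf("keep changed -> %d/%d/%d\n", cfg.keep0, cfg.keep1, cfg.keep2);
  }
  return 0;
}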