From 1ac52513d9d6065f19af8cea960c9b243a85a5fe Mon Sep 17 00:00:00 2001 From: cpwu Date: Tue, 16 Aug 2022 14:31:59 +0800 Subject: [PATCH 01/72] add max to CI --- tests/system-test/2-query/max.py | 94 ++++++-------- tests/system-test/2-query/max_partition.py | 144 ++++++++++----------- tests/system-test/fulltest.sh | 7 +- 3 files changed, 119 insertions(+), 126 deletions(-) diff --git a/tests/system-test/2-query/max.py b/tests/system-test/2-query/max.py index 34442a3725..1cb08aeee8 100644 --- a/tests/system-test/2-query/max.py +++ b/tests/system-test/2-query/max.py @@ -5,10 +5,7 @@ import numpy as np class TDTestCase: - updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , - "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143, - "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143, - "maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 } + updatecfgDict = {"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 } def init(self, conn, logSql): tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor()) @@ -17,60 +14,57 @@ class TDTestCase: self.ts = 1537146000000 self.binary_str = 'taosdata' self.nchar_str = '涛思数据' - def max_check_stb_and_tb_base(self): + def max_check_stb_and_tb_base(self, dbname="db"): tdSql.prepare() intData = [] floatData = [] - tdSql.execute('''create table stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 tinyint unsigned, col6 smallint unsigned, + tdSql.execute(f'''create table {dbname}.stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 tinyint unsigned, col6 smallint unsigned, col7 int unsigned, col8 bigint unsigned, col9 float, col10 double, col11 bool, col12 binary(20), col13 nchar(20)) tags(loc nchar(20))''') - tdSql.execute("create table stb_1 using stb 
tags('beijing')") + tdSql.execute(f"create table {dbname}.stb_1 using {dbname}.stb tags('beijing')") for i in range(self.rowNum): - tdSql.execute(f"insert into stb_1 values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')" + tdSql.execute(f"insert into {dbname}.stb_1 values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')" % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1)) intData.append(i + 1) floatData.append(i + 0.1) for i in ['ts','col11','col12','col13']: - for j in ['db.stb','stb','db.stb_1','stb_1']: - tdSql.error(f'select max({i} from {j} )') + for j in ['stb','stb_1']: + tdSql.error(f'select max({i} from {dbname}.{j} )') for i in range(1,11): - for j in ['db.stb','stb','db.stb_1','stb_1']: - tdSql.query(f"select max(col{i}) from {j}") + for j in ['stb', 'stb_1']: + tdSql.query(f"select max(col{i}) from {dbname}.{j}") if i<9: tdSql.checkData(0, 0, np.max(intData)) elif i>=9: tdSql.checkData(0, 0, np.max(floatData)) - tdSql.query("select max(col1) from stb_1 where col2<=5") + tdSql.query(f"select max(col1) from {dbname}.stb_1 where col2<=5") tdSql.checkData(0,0,5) - tdSql.query("select max(col1) from stb where col2<=5") + tdSql.query(f"select max(col1) from {dbname}.stb where col2<=5") tdSql.checkData(0,0,5) - tdSql.execute('drop database db') - def max_check_ntb_base(self): + def max_check_ntb_base(self, dbname="db"): tdSql.prepare() intData = [] floatData = [] - tdSql.execute('''create table ntb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 tinyint unsigned, col6 smallint unsigned, + tdSql.execute(f'''create table {dbname}.ntb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 tinyint unsigned, col6 smallint unsigned, col7 int unsigned, col8 bigint unsigned, col9 float, col10 double, col11 bool, col12 binary(20), col13 nchar(20))''') for i in range(self.rowNum): - 
tdSql.execute(f"insert into ntb values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')" + tdSql.execute(f"insert into {dbname}.ntb values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')" % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1)) intData.append(i + 1) floatData.append(i + 0.1) for i in ['ts','col11','col12','col13']: - for j in ['db.ntb','ntb']: - tdSql.error(f'select max({i} from {j} )') + for j in ['ntb']: + tdSql.error(f'select max({i} from {dbname}.{j} )') for i in range(1,11): - for j in ['db.ntb','ntb']: - tdSql.query(f"select max(col{i}) from {j}") + for j in ['ntb']: + tdSql.query(f"select max(col{i}) from {dbname}.{j}") if i<9: tdSql.checkData(0, 0, np.max(intData)) elif i>=9: tdSql.checkData(0, 0, np.max(floatData)) - tdSql.query("select max(col1) from ntb where col2<=5") + tdSql.query(f"select max(col1) from {dbname}.ntb where col2<=5") tdSql.checkData(0,0,5) - tdSql.execute('drop database db') - def check_max_functions(self, tbname , col_name): @@ -90,55 +84,55 @@ class TDTestCase: tdLog.info(" max function work as expected, sql : %s "% max_sql) - def support_distributed_aggregate(self): + def support_distributed_aggregate(self, dbname="testdb"): # prepate datas for 20 tables distributed at different vgroups - tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5") - tdSql.execute(" use testdb ") + tdSql.execute(f"create database if not exists {dbname} keep 3650 duration 1000 vgroups 5") + tdSql.execute(f"use {dbname} ") tdSql.execute( - '''create table stb1 + f'''create table {dbname}.stb1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32)) ''' ) tdSql.execute( - ''' 
- create table t1 + f''' + create table {dbname}.t1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) ''' ) for i in range(20): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') + tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') for i in range(9): tdSql.execute( - f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" ) tdSql.execute( - f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" ) for i in range(1,21): if i ==1 or i == 4: continue else: - tbname = "ct"+f'{i}' + tbname = f"{dbname}.ct{i}" for j in range(9): tdSql.execute( f"insert into {tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )" ) - tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") - tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', 
now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") tdSql.execute( - f'''insert into t1 values + f'''insert into {dbname}.t1 values ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a ) ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a ) @@ -157,7 +151,7 @@ class TDTestCase: tdLog.info(" prepare data for distributed_aggregate done! 
") # get vgroup_ids of all - tdSql.query("show vgroups ") + tdSql.query(f"show {dbname}.vgroups ") vgroups = tdSql.queryResult vnode_tables={} @@ -167,7 +161,7 @@ class TDTestCase: # check sub_table of per vnode ,make sure sub_table has been distributed - tdSql.query("select * from information_schema.ins_tables where db_name = 'testdb' and table_name like 'ct%'") + tdSql.query(f"select * from information_schema.ins_tables where db_name = '{dbname}' and table_name like 'ct%'") table_names = tdSql.queryResult tablenames = [] for table_name in table_names: @@ -182,13 +176,13 @@ class TDTestCase: # check max function work status - tdSql.query("show tables like 'ct%'") + tdSql.query(f"show {dbname}.tables like 'ct%'") table_names = tdSql.queryResult tablenames = [] for table_name in table_names: tablenames.append(table_name[0]) - tdSql.query("desc stb1") + tdSql.query(f"desc {dbname}.stb1") col_names = tdSql.queryResult colnames = [] @@ -198,11 +192,7 @@ class TDTestCase: for tablename in tablenames: for colname in colnames: - self.check_max_functions(tablename,colname) - - # max function with basic filter - print(vnode_tables) - + self.check_max_functions(f"{dbname}.{tablename}", colname) def run(self): diff --git a/tests/system-test/2-query/max_partition.py b/tests/system-test/2-query/max_partition.py index 4b9996d9c3..01c2677242 100644 --- a/tests/system-test/2-query/max_partition.py +++ b/tests/system-test/2-query/max_partition.py @@ -12,16 +12,15 @@ class TDTestCase: self.tb_nums = 10 self.ts = 1537146000000 - def prepare_datas(self, stb_name , tb_nums , row_nums ): - tdSql.execute(" use db ") - tdSql.execute(f" create stable {stb_name} (ts timestamp , c1 int , c2 bigint , c3 float , c4 double , c5 smallint , c6 tinyint , c7 bool , c8 binary(36) , c9 nchar(36) , uc1 int unsigned,\ + def prepare_datas(self, stb_name , tb_nums , row_nums, dbname="db" ): + tdSql.execute(f" create stable {dbname}.{stb_name} (ts timestamp , c1 int , c2 bigint , c3 float , c4 double , c5 
smallint , c6 tinyint , c7 bool , c8 binary(36) , c9 nchar(36) , uc1 int unsigned,\ uc2 bigint unsigned ,uc3 smallint unsigned , uc4 tinyint unsigned ) tags(t1 timestamp , t2 int , t3 bigint , t4 float , t5 double , t6 smallint , t7 tinyint , t8 bool , t9 binary(36)\ , t10 nchar(36) , t11 int unsigned , t12 bigint unsigned ,t13 smallint unsigned , t14 tinyint unsigned ) ") for i in range(tb_nums): - tbname = f"sub_{stb_name}_{i}" + tbname = f"{dbname}.sub_{stb_name}_{i}" ts = self.ts + i*10000 - tdSql.execute(f"create table {tbname} using {stb_name} tags ({ts} , {i} , {i}*10 ,{i}*1.0,{i}*1.0 , 1 , 2, 'true', 'binary_{i}' ,'nchar_{i}',{i},{i},10,20 )") + tdSql.execute(f"create table {tbname} using {dbname}.{stb_name} tags ({ts} , {i} , {i}*10 ,{i}*1.0,{i}*1.0 , 1 , 2, 'true', 'binary_{i}' ,'nchar_{i}',{i},{i},10,20 )") for row in range(row_nums): ts = self.ts + row*1000 @@ -31,191 +30,192 @@ class TDTestCase: ts = self.ts + row_nums*1000 + null*1000 tdSql.execute(f"insert into {tbname} values({ts} , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL )") - def basic_query(self): - tdSql.query("select count(*) from stb") + def basic_query(self, dbname="db"): + tdSql.query(f"select count(*) from {dbname}.stb") tdSql.checkData(0,0,(self.row_nums + 5 )*self.tb_nums) - tdSql.query("select max(c1) from stb") + tdSql.query(f"select max(c1) from {dbname}.stb") tdSql.checkData(0,0,(self.row_nums -1)) - tdSql.query(" select tbname , max(c1) from stb partition by tbname ") + tdSql.query(f"select tbname , max(c1) from {dbname}.stb partition by tbname ") tdSql.checkRows(self.tb_nums) - tdSql.query(" select max(c1) from stb group by t1 order by t1 ") + tdSql.query(f"select max(c1) from {dbname}.stb group by t1 order by t1 ") tdSql.checkRows(self.tb_nums) - tdSql.query(" select max(c1) from stb group by c1 order by t1 ") - tdSql.query(" select max(t2) from stb group by c1 order by t1 ") - tdSql.query(" select max(c1) from stb group by tbname 
order by tbname ") + tdSql.query(f"select max(c1) from {dbname}.stb group by c1 order by t1 ") + tdSql.query(f"select max(t2) from {dbname}.stb group by c1 order by t1 ") + tdSql.query(f"select max(c1) from {dbname}.stb group by tbname order by tbname ") tdSql.checkRows(self.tb_nums) # bug need fix - tdSql.query(" select max(t2) from stb group by t2 order by t2 ") + tdSql.query(f"select max(t2) from {dbname}.stb group by t2 order by t2 ") tdSql.checkRows(self.tb_nums) - tdSql.query(" select max(c1) from stb group by c1 order by c1 ") + tdSql.query(f"select max(c1) from {dbname}.stb group by c1 order by c1 ") tdSql.checkRows(self.row_nums+1) - tdSql.query(" select c1 , max(c1) from stb group by c1 order by c1 ") + tdSql.query(f"select c1 , max(c1) from {dbname}.stb group by c1 order by c1 ") tdSql.checkRows(self.row_nums+1) # support selective functions - tdSql.query(" select c1 ,c2 ,c3 , max(c1) ,c4 ,c5 ,t11 from stb group by c1 order by c1 desc ") + tdSql.query(f"select c1 ,c2 ,c3 , max(c1) ,c4 ,c5 ,t11 from {dbname}.stb group by c1 order by c1 desc ") tdSql.checkRows(self.row_nums+1) - tdSql.query(" select c1, tbname , max(c1) ,c4 ,c5 ,t11 from stb group by c1 order by c1 desc ") + tdSql.query(f"select c1, tbname , max(c1) ,c4 ,c5 ,t11 from {dbname}.stb group by c1 order by c1 desc ") tdSql.checkRows(self.row_nums+1) # bug need fix - # tdSql.query(" select tbname , max(c1) from sub_stb_1 where c1 is null group by c1 order by c1 desc ") - # tdSql.checkRows(1) - # tdSql.checkData(0,0,"sub_stb_1") + tdSql.query(f"select tbname , max(c1) from {dbname}.sub_stb_1 where c1 is null group by c1 order by c1 desc ") + tdSql.checkRows(1) + tdSql.checkData(0,0,"sub_stb_1") - tdSql.query("select max(c1) ,c2 ,t2,tbname from stb group by abs(c1) order by abs(c1)") + tdSql.query(f"select max(c1) ,c2 ,t2,tbname from {dbname}.stb group by abs(c1) order by abs(c1)") tdSql.checkRows(self.row_nums+1) - tdSql.query("select abs(c1+c3), count(c1+c3) ,max(c1+t2) from stb group by 
abs(c1+c3) order by abs(c1+c3)") + tdSql.query(f"select abs(c1+c3), count(c1+c3) ,max(c1+t2) from {dbname}.stb group by abs(c1+c3) order by abs(c1+c3)") tdSql.checkRows(self.row_nums+1) - tdSql.query("select max(c1+c3)+min(c2) ,abs(c1) from stb group by abs(c1) order by abs(c1)") + tdSql.query(f"select max(c1+c3)+min(c2) ,abs(c1) from {dbname}.stb group by abs(c1) order by abs(c1)") tdSql.checkRows(self.row_nums+1) - tdSql.error("select count(c1+c3)+max(c2) ,abs(c1) ,abs(t1) from stb group by abs(c1) order by abs(t1)+c2") - tdSql.error("select count(c1+c3)+max(c2) ,abs(c1) from stb group by abs(c1) order by abs(c1)+c2") - tdSql.query("select abs(c1+c3)+abs(c2) , count(c1+c3)+max(c2) from stb group by abs(c1+c3)+abs(c2) order by abs(c1+c3)+abs(c2)") + tdSql.error(f"select count(c1+c3)+max(c2) ,abs(c1) ,abs(t1) from {dbname}.stb group by abs(c1) order by abs(t1)+c2") + tdSql.error(f"select count(c1+c3)+max(c2) ,abs(c1) from {dbname}.stb group by abs(c1) order by abs(c1)+c2") + tdSql.query(f"select abs(c1+c3)+abs(c2) , count(c1+c3)+max(c2) from {dbname}.stb group by abs(c1+c3)+abs(c2) order by abs(c1+c3)+abs(c2)") tdSql.checkRows(self.row_nums+1) - tdSql.query(" select max(c1) , max(t2) from stb where abs(c1+t2)=1 partition by tbname ") + tdSql.query(f"select max(c1) , max(t2) from {dbname}.stb where abs(c1+t2)=1 partition by tbname ") tdSql.checkRows(2) - tdSql.query(" select max(c1) from stb where abs(c1+t2)=1 partition by tbname ") + tdSql.query(f"select max(c1) from {dbname}.stb where abs(c1+t2)=1 partition by tbname ") tdSql.checkRows(2) - tdSql.query(" select tbname , max(c1) from stb partition by tbname order by tbname ") + tdSql.query(f"select tbname , max(c1) from {dbname}.stb partition by tbname order by tbname ") tdSql.checkRows(self.tb_nums) tdSql.checkData(0,1,self.row_nums-1) - tdSql.query("select tbname , max(c2) from stb partition by t1 order by t1") - tdSql.query("select tbname , max(t2) from stb partition by t1 order by t1") - tdSql.query("select 
tbname , max(t2) from stb partition by t2 order by t2") + tdSql.query(f"select tbname , max(c2) from {dbname}.stb partition by t1 order by t1") + tdSql.query(f"select tbname , max(t2) from {dbname}.stb partition by t1 order by t1") + tdSql.query(f"select tbname , max(t2) from {dbname}.stb partition by t2 order by t2") # # bug need fix - tdSql.query("select t2 , max(t2) from stb partition by t2 order by t2") + tdSql.query(f"select t2 , max(t2) from {dbname}.stb partition by t2 order by t2") tdSql.checkRows(self.tb_nums) - tdSql.query("select tbname , max(c1) from stb partition by tbname order by tbname") + tdSql.query(f"select tbname , max(c1) from {dbname}.stb partition by tbname order by tbname") tdSql.checkRows(self.tb_nums) tdSql.checkData(0,1,self.row_nums-1) - tdSql.query("select tbname , max(c1) from stb partition by t2 order by t2") + tdSql.query(f"select tbname , max(c1) from {dbname}.stb partition by t2 order by t2") - tdSql.query("select c2, max(c1) from stb partition by c2 order by c2 desc") + tdSql.query(f"select c2, max(c1) from {dbname}.stb partition by c2 order by c2 desc") tdSql.checkRows(self.tb_nums+1) tdSql.checkData(0,1,self.row_nums-1) - tdSql.query("select tbname , max(c1) from stb partition by c1 order by c2") + tdSql.query(f"select tbname , max(c1) from {dbname}.stb partition by c1 order by c2") - tdSql.query("select tbname , abs(t2) from stb partition by c2 order by t2") + tdSql.query(f"select tbname , abs(t2) from {dbname}.stb partition by c2 order by t2") tdSql.checkRows(self.tb_nums*(self.row_nums+5)) - tdSql.query("select max(c1) , count(t2) from stb partition by c2 ") + tdSql.query(f"select max(c1) , count(t2) from {dbname}.stb partition by c2 ") tdSql.checkRows(self.row_nums+1) tdSql.checkData(0,1,self.row_nums) - tdSql.query("select count(c1) , max(t2) ,c2 from stb partition by c2 order by c2") + tdSql.query(f"select count(c1) , max(t2) ,c2 from {dbname}.stb partition by c2 order by c2") tdSql.checkRows(self.row_nums+1) - 
tdSql.query("select count(c1) , count(t1) ,max(c2) ,tbname from stb partition by tbname order by tbname") + tdSql.query(f"select count(c1) , count(t1) ,max(c2) ,tbname from {dbname}.stb partition by tbname order by tbname") tdSql.checkRows(self.tb_nums) tdSql.checkCols(4) - tdSql.query("select count(c1) , max(t2) ,t1 from stb partition by t1 order by t1") + tdSql.query(f"select count(c1) , max(t2) ,t1 from {dbname}.stb partition by t1 order by t1") tdSql.checkRows(self.tb_nums) tdSql.checkData(0,0,self.row_nums) # bug need fix - tdSql.query("select count(c1) , max(t2) ,abs(c1) from stb partition by abs(c1) order by abs(c1)") + tdSql.query(f"select count(c1) , max(t2) ,abs(c1) from {dbname}.stb partition by abs(c1) order by abs(c1)") tdSql.checkRows(self.row_nums+1) - tdSql.query("select max(ceil(c2)) , max(floor(t2)) ,max(floor(c2)) from stb partition by abs(c2) order by abs(c2)") + tdSql.query(f"select max(ceil(c2)) , max(floor(t2)) ,max(floor(c2)) from {dbname}.stb partition by abs(c2) order by abs(c2)") tdSql.checkRows(self.row_nums+1) - tdSql.query("select max(ceil(c1-2)) , max(floor(t2+1)) ,max(c2-c1) from stb partition by abs(floor(c1)) order by abs(floor(c1))") + tdSql.query(f"select max(ceil(c1-2)) , max(floor(t2+1)) ,max(c2-c1) from {dbname}.stb partition by abs(floor(c1)) order by abs(floor(c1))") tdSql.checkRows(self.row_nums+1) - tdSql.query("select tbname , max(c1) ,c1 from stb partition by tbname order by tbname") + tdSql.query(f"select tbname , max(c1) ,c1 from {dbname}.stb partition by tbname order by tbname") tdSql.checkRows(self.tb_nums) tdSql.checkData(0,0,'sub_stb_0') tdSql.checkData(0,1,9) tdSql.checkData(0,2,9) - tdSql.query("select tbname ,top(c1,1) ,c1 from stb partition by tbname order by tbname") + tdSql.query(f"select tbname ,top(c1,1) ,c1 from {dbname}.stb partition by tbname order by tbname") tdSql.checkRows(self.tb_nums) - tdSql.query(" select c1 , sample(c1,2) from stb partition by tbname order by tbname ") + tdSql.query(f"select c1 , 
sample(c1,2) from {dbname}.stb partition by tbname order by tbname ") tdSql.checkRows(self.tb_nums*2) # interval - tdSql.query("select max(c1) from stb interval(2s) sliding(1s)") + tdSql.query(f"select max(c1) from {dbname}.stb interval(2s) sliding(1s)") # bug need fix - tdSql.query('select max(c1) from stb where ts>="2022-07-06 16:00:00.000 " and ts < "2022-07-06 17:00:00.000 " interval(50s) sliding(30s) fill(NULL)') + tdSql.query(f'select max(c1) from {dbname}.stb where ts>="2022-07-06 16:00:00.000 " and ts < "2022-07-06 17:00:00.000 " interval(50s) sliding(30s) fill(NULL)') - tdSql.query(" select tbname , count(c1) from stb partition by tbname interval(10s) slimit 5 soffset 1 ") + tdSql.query(f"select tbname , count(c1) from {dbname}.stb partition by tbname interval(10s) slimit 5 soffset 1 ") - tdSql.query("select tbname , max(c1) from stb partition by tbname interval(10s)") + tdSql.query(f"select tbname , max(c1) from {dbname}.stb partition by tbname interval(10s)") tdSql.checkRows(self.row_nums*2) - tdSql.query("select unique(c1) from stb partition by tbname order by tbname") + tdSql.query(f"select unique(c1) from {dbname}.stb partition by tbname order by tbname") - tdSql.query("select tbname , count(c1) from sub_stb_1 partition by tbname interval(10s)") + tdSql.query(f"select tbname , count(c1) from {dbname}.sub_stb_1 partition by tbname interval(10s)") tdSql.checkData(0,0,'sub_stb_1') tdSql.checkData(0,1,self.row_nums) - tdSql.query("select c1 , mavg(c1 ,2 ) from stb partition by c1") + tdSql.query(f"select c1 , mavg(c1 ,2 ) from {dbname}.stb partition by c1") tdSql.checkRows(90) - tdSql.query("select c1 , diff(c1 , 0) from stb partition by c1") + tdSql.query(f"select c1 , diff(c1 , 0) from {dbname}.stb partition by c1") tdSql.checkRows(90) - tdSql.query("select c1 , csum(c1) from stb partition by c1") + tdSql.query(f"select c1 , csum(c1) from {dbname}.stb partition by c1") tdSql.checkRows(100) - tdSql.query("select c1 , sample(c1,2) from stb partition by c1 
order by c1") + tdSql.query(f"select c1 , sample(c1,2) from {dbname}.stb partition by c1 order by c1") tdSql.checkRows(21) # bug need fix - # tdSql.checkData(0,1,None) + tdSql.checkData(0,1,None) - tdSql.query("select c1 , twa(c1) from stb partition by c1 order by c1") + tdSql.query(f"select c1 , twa(c1) from {dbname}.stb partition by c1 order by c1") tdSql.checkRows(11) tdSql.checkData(0,1,None) - tdSql.query("select c1 , irate(c1) from stb partition by c1 order by c1") + tdSql.query(f"select c1 , irate(c1) from {dbname}.stb partition by c1 order by c1") tdSql.checkRows(11) tdSql.checkData(0,1,None) - tdSql.query("select c1 , DERIVATIVE(c1,2,1) from stb partition by c1 order by c1") + tdSql.query(f"select c1 , DERIVATIVE(c1,2,1) from {dbname}.stb partition by c1 order by c1") tdSql.checkRows(90) # bug need fix tdSql.checkData(0,1,None) - tdSql.query(" select tbname , max(c1) from stb partition by tbname order by tbname slimit 5 soffset 0 ") + tdSql.query(f"select tbname , max(c1) from {dbname}.stb partition by tbname order by tbname slimit 5 soffset 0 ") tdSql.checkRows(10) - tdSql.query(" select tbname , max(c1) from sub_stb_1 partition by tbname interval(10s) sliding(5s) ") + tdSql.query(f"select tbname , max(c1) from {dbname}.sub_stb_1 partition by tbname interval(10s) sliding(5s) ") - tdSql.query(f'select max(c1) from stb where ts>={self.ts} and ts < {self.ts}+1000 interval(50s) sliding(30s)') - tdSql.query(f'select tbname , max(c1) from stb where ts>={self.ts} and ts < {self.ts}+1000 interval(50s) sliding(30s)') + tdSql.query(f'select max(c1) from {dbname}.stb where ts>={self.ts} and ts < {self.ts}+1000 interval(50s) sliding(30s)') + tdSql.query(f'select tbname , max(c1) from {dbname}.stb where ts>={self.ts} and ts < {self.ts}+1000 interval(50s) sliding(30s)') def run(self): + dbname = "db" tdSql.prepare() self.prepare_datas("stb",self.tb_nums,self.row_nums) self.basic_query() # # coverage case for taosd crash about bug fix - tdSql.query(" select sum(c1) from 
stb where t2+10 >1 ") - tdSql.query(" select count(c1),count(t1) from stb where -t2<1 ") - tdSql.query(" select tbname ,max(ceil(c1)) from stb group by tbname ") - tdSql.query(" select avg(abs(c1)) , tbname from stb group by tbname ") - tdSql.query(" select t1,c1 from stb where abs(t2+c1)=1 ") + tdSql.query(f"select sum(c1) from {dbname}.stb where t2+10 >1 ") + tdSql.query(f"select count(c1),count(t1) from {dbname}.stb where -t2<1 ") + tdSql.query(f"select tbname ,max(ceil(c1)) from {dbname}.stb group by tbname ") + tdSql.query(f"select avg(abs(c1)) , tbname from {dbname}.stb group by tbname ") + tdSql.query(f"select t1,c1 from {dbname}.stb where abs(t2+c1)=1 ") def stop(self): diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh index e9fbba86f9..55755ecf3b 100755 --- a/tests/system-test/fulltest.sh +++ b/tests/system-test/fulltest.sh @@ -124,6 +124,11 @@ python3 ./test.py -f 2-query/leastsquares.py python3 ./test.py -f 2-query/leastsquares.py -R python3 ./test.py -f 2-query/length.py python3 ./test.py -f 2-query/length.py -R +python3 ./test.py -f 2-query/max_partition.py +python3 ./test.py -f 2-query/max_partition.py -R +python3 ./test.py -f 2-query/max.py +python3 ./test.py -f 2-query/max.py -R + python3 ./test.py -f 1-insert/update_data.py @@ -145,7 +150,6 @@ python3 ./test.py -f 2-query/spread.py python3 ./test.py -f 2-query/timezone.py python3 ./test.py -f 2-query/Now.py python3 ./test.py -f 2-query/Today.py -python3 ./test.py -f 2-query/max.py python3 ./test.py -f 2-query/min.py python3 ./test.py -f 2-query/To_iso8601.py python3 ./test.py -f 2-query/To_unixtimestamp.py @@ -179,7 +183,6 @@ python3 ./test.py -f 2-query/tail.py python3 ./test.py -f 2-query/ttl_comment.py python3 ./test.py -f 2-query/twa.py python3 ./test.py -f 2-query/queryQnode.py -python3 ./test.py -f 2-query/max_partition.py python3 ./test.py -f 2-query/tsbsQuery.py python3 ./test.py -f 6-cluster/5dnode1mnode.py From 056f943790065fb59d4e2fd07d06b4e3e8e20809 Mon Sep 17 
00:00:00 2001 From: cpwu Date: Tue, 16 Aug 2022 16:16:56 +0800 Subject: [PATCH 02/72] fix case --- tests/system-test/2-query/ltrim.py | 36 +++--- tests/system-test/2-query/min.py | 198 +++++++++-------------------- tests/system-test/fulltest.sh | 6 +- 3 files changed, 85 insertions(+), 155 deletions(-) diff --git a/tests/system-test/2-query/ltrim.py b/tests/system-test/2-query/ltrim.py index 15f40a09c3..55c33b68eb 100644 --- a/tests/system-test/2-query/ltrim.py +++ b/tests/system-test/2-query/ltrim.py @@ -23,6 +23,7 @@ CHAR_COL = [ BINARY_COL, NCHAR_COL, ] BOOLEAN_COL = [ BOOL_COL, ] TS_TYPE_COL = [ TS_COL, ] +DBNAME = "db" class TDTestCase: @@ -120,16 +121,16 @@ class TDTestCase: return sqls - def __test_current(self): # sourcery skip: use-itertools-product + def __test_current(self, dbname=DBNAME): # sourcery skip: use-itertools-product tdLog.printNoPrefix("==========current sql condition check , must return query ok==========") - tbname = ["ct1", "ct2", "ct4", "t1", "stb1"] + tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.t1", f"{dbname}.stb1"] for tb in tbname: self.__ltrim_check(tb) tdLog.printNoPrefix(f"==========current sql condition check in {tb} over==========") - def __test_error(self): + def __test_error(self, dbname=DBNAME): tdLog.printNoPrefix("==========err sql condition check , must return error==========") - tbname = ["ct1", "ct2", "ct4", "t1", "stb1"] + tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.t1", f"{dbname}.stb1"] for tb in tbname: for errsql in self.__ltrim_err_check(tb): @@ -142,17 +143,16 @@ class TDTestCase: self.__test_error() - def __create_tb(self): - tdSql.prepare() + def __create_tb(self, dbname=DBNAME): tdLog.printNoPrefix("==========step1:create table") - create_stb_sql = f'''create table stb1( + create_stb_sql = f'''create table {dbname}.stb1( ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, {FLOAT_COL} float, {DOUBLE_COL} double, 
{BOOL_COL} bool, {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp ) tags (t1 int) ''' - create_ntb_sql = f'''create table t1( + create_ntb_sql = f'''create table {dbname}.t1( ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp @@ -162,29 +162,29 @@ class TDTestCase: tdSql.execute(create_ntb_sql) for i in range(4): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )') - def __insert_data(self, rows): + def __insert_data(self, rows, dbname=DBNAME): now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000) for i in range(rows): tdSql.execute( - f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + f"insert into {dbname}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" ) tdSql.execute( - f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + f"insert into {dbname}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" ) tdSql.execute( - f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + f"insert into {dbname}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * 
i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" ) tdSql.execute( - f'''insert into ct1 values + f'''insert into {dbname}.ct1 values ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } ) ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } ) ''' ) tdSql.execute( - f'''insert into ct4 values + f'''insert into {dbname}.ct4 values ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) @@ -200,7 +200,7 @@ class TDTestCase: ) tdSql.execute( - f'''insert into ct2 values + f'''insert into {dbname}.ct2 values ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) @@ -216,13 +216,13 @@ class TDTestCase: ) for i in range(rows): - insert_data = f'''insert into t1 values + insert_data = f'''insert into {dbname}.t1 values ( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2}, "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } ) ''' tdSql.execute(insert_data) tdSql.execute( - f'''insert into t1 values + f'''insert into {dbname}.t1 values ( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) diff --git a/tests/system-test/2-query/min.py 
b/tests/system-test/2-query/min.py index c27e9926ff..c47fa49237 100644 --- a/tests/system-test/2-query/min.py +++ b/tests/system-test/2-query/min.py @@ -14,196 +14,124 @@ class TDTestCase: self.ts = 1537146000000 def run(self): + dbname = "db" tdSql.prepare() intData = [] floatData = [] - tdSql.execute('''create table stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, + tdSql.execute(f'''create table {dbname}.stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''') - tdSql.execute("create table stb_1 using stb tags('beijing')") - tdSql.execute('''create table ntb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, + tdSql.execute(f"create table {dbname}.stb_1 using {dbname}.stb tags('beijing')") + tdSql.execute(f'''create table {dbname}.ntb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned)''') for i in range(self.rowNum): - tdSql.execute("insert into ntb values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + tdSql.execute(f"insert into {dbname}.ntb values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) intData.append(i + 1) floatData.append(i + 0.1) for i in range(self.rowNum): - tdSql.execute("insert into stb_1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + tdSql.execute(f"insert into {dbname}.stb_1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" % (self.ts + i, i + 1, i + 1, i + 1, i + 
1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) intData.append(i + 1) floatData.append(i + 0.1) # max verifacation - tdSql.error("select min(ts) from stb_1") - tdSql.error("select min(ts) from db.stb_1") - tdSql.error("select min(col7) from stb_1") - tdSql.error("select min(col7) from db.stb_1") - tdSql.error("select min(col8) from stb_1") - tdSql.error("select min(col8) from db.stb_1") - tdSql.error("select min(col9) from stb_1") - tdSql.error("select min(col9) from db.stb_1") - # tdSql.error("select min(a) from stb_1") - # tdSql.error("select min(1) from stb_1") - tdSql.error("select min(now()) from stb_1") - tdSql.error("select min(count(c1),count(c2)) from stb_1") + tdSql.error(f"select min(ts) from {dbname}.stb_1") + tdSql.error(f"select min(col7) from {dbname}.stb_1") + tdSql.error(f"select min(col8) from {dbname}.stb_1") + tdSql.error(f"select min(col9) from {dbname}.stb_1") + tdSql.error(f"select min(a) from {dbname}.stb_1") + tdSql.query(f"select min(1) from {dbname}.stb_1") + tdSql.error(f"select min(now()) from {dbname}.stb_1") + tdSql.error(f"select min(count(c1),count(c2)) from {dbname}.stb_1") - tdSql.query("select min(col1) from stb_1") + tdSql.query(f"select min(col1) from {dbname}.stb_1") tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col1) from db.stb_1") + tdSql.query(f"select min(col2) from {dbname}.stb_1") tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col2) from stb_1") + tdSql.query(f"select min(col3) from {dbname}.stb_1") tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col2) from db.stb_1") + tdSql.query(f"select min(col4) from {dbname}.stb_1") tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col3) from stb_1") + tdSql.query(f"select min(col11) from {dbname}.stb_1") tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col3) from db.stb_1") + tdSql.query(f"select min(col12) from {dbname}.stb_1") tdSql.checkData(0, 0, np.min(intData)) - 
tdSql.query("select min(col4) from stb_1") + tdSql.query(f"select min(col13) from {dbname}.stb_1") tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col4) from db.stb_1") + tdSql.query(f"select min(col14) from {dbname}.stb_1") tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col11) from stb_1") - tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col11) from db.stb_1") - tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col12) from stb_1") - tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col12) from db.stb_1") - tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col13) from stb_1") - tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col13) from db.stb_1") - tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col14) from stb_1") - tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col14) from db.stb_1") - tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col5) from stb_1") + tdSql.query(f"select min(col5) from {dbname}.stb_1") tdSql.checkData(0, 0, np.min(floatData)) - tdSql.query("select min(col5) from db.stb_1") + tdSql.query(f"select min(col6) from {dbname}.stb_1") tdSql.checkData(0, 0, np.min(floatData)) - tdSql.query("select min(col6) from stb_1") - tdSql.checkData(0, 0, np.min(floatData)) - tdSql.query("select min(col6) from db.stb_1") - tdSql.checkData(0, 0, np.min(floatData)) - tdSql.query("select min(col1) from stb_1 where col2>=5") + tdSql.query(f"select min(col1) from {dbname}.stb_1 where col2>=5") tdSql.checkData(0,0,5) - tdSql.error("select min(ts) from stb_1") - tdSql.error("select min(ts) from db.stb_1") - tdSql.error("select min(col7) from stb_1") - tdSql.error("select min(col7) from db.stb_1") - tdSql.error("select min(col8) from stb_1") - tdSql.error("select min(col8) from db.stb_1") - tdSql.error("select min(col9) from stb_1") - tdSql.error("select min(col9) from db.stb_1") - # tdSql.error("select 
min(a) from stb_1") - # tdSql.error("select min(1) from stb_1") - tdSql.error("select min(now()) from stb_1") - tdSql.error("select min(count(c1),count(c2)) from stb_1") + tdSql.error(f"select min(ts) from {dbname}.stb_1") + tdSql.error(f"select min(col7) from {dbname}.stb_1") + tdSql.error(f"select min(col8) from {dbname}.stb_1") + tdSql.error(f"select min(col9) from {dbname}.stb_1") + tdSql.error(f"select min(a) from {dbname}.stb_1") + tdSql.query(f"select min(1) from {dbname}.stb_1") + tdSql.error(f"select min(now()) from {dbname}.stb_1") + tdSql.error(f"select min(count(c1),count(c2)) from {dbname}.stb_1") - tdSql.query("select min(col1) from stb") + tdSql.query(f"select min(col1) from {dbname}.stb") tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col1) from db.stb") + tdSql.query(f"select min(col2) from {dbname}.stb") tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col2) from stb") + tdSql.query(f"select min(col3) from {dbname}.stb") tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col2) from db.stb") + tdSql.query(f"select min(col4) from {dbname}.stb") tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col3) from stb") + tdSql.query(f"select min(col11) from {dbname}.stb") tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col3) from db.stb") + tdSql.query(f"select min(col12) from {dbname}.stb") tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col4) from stb") + tdSql.query(f"select min(col13) from {dbname}.stb") tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col4) from db.stb") + tdSql.query(f"select min(col14) from {dbname}.stb") tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col11) from stb") - tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col11) from db.stb") - tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col12) from stb") - tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select 
min(col12) from db.stb") - tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col13) from stb") - tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col13) from db.stb") - tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col14) from stb") - tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col14) from db.stb") - tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col5) from stb") + tdSql.query(f"select min(col5) from {dbname}.stb") tdSql.checkData(0, 0, np.min(floatData)) - tdSql.query("select min(col5) from db.stb") + tdSql.query(f"select min(col6) from {dbname}.stb") tdSql.checkData(0, 0, np.min(floatData)) - tdSql.query("select min(col6) from stb") - tdSql.checkData(0, 0, np.min(floatData)) - tdSql.query("select min(col6) from db.stb") - tdSql.checkData(0, 0, np.min(floatData)) - tdSql.query("select min(col1) from stb where col2>=5") + tdSql.query(f"select min(col1) from {dbname}.stb where col2>=5") tdSql.checkData(0,0,5) + tdSql.error(f"select min(ts) from {dbname}.ntb") + tdSql.error(f"select min(col7) from {dbname}.ntb") + tdSql.error(f"select min(col8) from {dbname}.ntb") + tdSql.error(f"select min(col9) from {dbname}.ntb") + tdSql.error(f"select min(a) from {dbname}.ntb") + tdSql.query(f"select min(1) from {dbname}.ntb") + tdSql.error(f"select min(now()) from {dbname}.ntb") + tdSql.error(f"select min(count(c1),count(c2)) from {dbname}.ntb") - tdSql.error("select min(ts) from ntb") - tdSql.error("select min(ts) from db.ntb") - tdSql.error("select min(col7) from ntb") - tdSql.error("select min(col7) from db.ntb") - tdSql.error("select min(col8) from ntb") - tdSql.error("select min(col8) from db.ntb") - tdSql.error("select min(col9) from ntb") - tdSql.error("select min(col9) from db.ntb") - # tdSql.error("select min(a) from stb_1") - # tdSql.error("select min(1) from stb_1") - tdSql.error("select min(now()) from ntb") - tdSql.error("select min(count(c1),count(c2)) from ntb") - - 
tdSql.query("select min(col1) from ntb") + tdSql.query(f"select min(col1) from {dbname}.ntb") tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col1) from db.ntb") + tdSql.query(f"select min(col2) from {dbname}.ntb") tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col2) from ntb") + tdSql.query(f"select min(col3) from {dbname}.ntb") tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col2) from db.ntb") + tdSql.query(f"select min(col4) from {dbname}.ntb") tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col3) from ntb") + tdSql.query(f"select min(col11) from {dbname}.ntb") tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col3) from db.ntb") + tdSql.query(f"select min(col12) from {dbname}.ntb") tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col4) from ntb") + tdSql.query(f"select min(col13) from {dbname}.ntb") tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col4) from db.ntb") + tdSql.query(f"select min(col14) from {dbname}.ntb") tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col11) from ntb") - tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col11) from db.ntb") - tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col12) from ntb") - tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col12) from db.ntb") - tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col13) from ntb") - tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col13) from db.ntb") - tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col14) from ntb") - tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col14) from db.ntb") - tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col5) from ntb") + tdSql.query(f"select min(col5) from {dbname}.ntb") tdSql.checkData(0, 0, np.min(floatData)) - tdSql.query("select min(col5) from db.ntb") + tdSql.query(f"select 
min(col6) from {dbname}.ntb") tdSql.checkData(0, 0, np.min(floatData)) - tdSql.query("select min(col6) from ntb") - tdSql.checkData(0, 0, np.min(floatData)) - tdSql.query("select min(col6) from db.ntb") - tdSql.checkData(0, 0, np.min(floatData)) - tdSql.query("select min(col1) from ntb where col2>=5") + tdSql.query(f"select min(col1) from {dbname}.ntb where col2>=5") tdSql.checkData(0,0,5) diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh index 55755ecf3b..7b782cd971 100755 --- a/tests/system-test/fulltest.sh +++ b/tests/system-test/fulltest.sh @@ -124,10 +124,14 @@ python3 ./test.py -f 2-query/leastsquares.py python3 ./test.py -f 2-query/leastsquares.py -R python3 ./test.py -f 2-query/length.py python3 ./test.py -f 2-query/length.py -R +python3 ./test.py -f 2-query/ltrim.py +# python3 ./test.py -f 2-query/ltrim.py -R python3 ./test.py -f 2-query/max_partition.py python3 ./test.py -f 2-query/max_partition.py -R python3 ./test.py -f 2-query/max.py python3 ./test.py -f 2-query/max.py -R +python3 ./test.py -f 2-query/min.py +python3 ./test.py -f 2-query/min.py -R python3 ./test.py -f 1-insert/update_data.py @@ -135,7 +139,6 @@ python3 ./test.py -f 1-insert/update_data.py python3 ./test.py -f 1-insert/delete_data.py python3 ./test.py -f 2-query/varchar.py -python3 ./test.py -f 2-query/ltrim.py python3 ./test.py -f 2-query/rtrim.py python3 ./test.py -f 2-query/upper.py python3 ./test.py -f 2-query/lower.py @@ -150,7 +153,6 @@ python3 ./test.py -f 2-query/spread.py python3 ./test.py -f 2-query/timezone.py python3 ./test.py -f 2-query/Now.py python3 ./test.py -f 2-query/Today.py -python3 ./test.py -f 2-query/min.py python3 ./test.py -f 2-query/To_iso8601.py python3 ./test.py -f 2-query/To_unixtimestamp.py python3 ./test.py -f 2-query/timetruncate.py From 09982db5d379f6c59ff0a1315efad8617bc16268 Mon Sep 17 00:00:00 2001 From: cpwu Date: Tue, 16 Aug 2022 19:24:04 +0800 Subject: [PATCH 03/72] fix case --- tests/system-test/2-query/pow.py | 475 
+++++++++++++++---------------- tests/system-test/fulltest.sh | 9 +- 2 files changed, 239 insertions(+), 245 deletions(-) diff --git a/tests/system-test/2-query/pow.py b/tests/system-test/2-query/pow.py index 1af8bd3839..32e4b78ff3 100644 --- a/tests/system-test/2-query/pow.py +++ b/tests/system-test/2-query/pow.py @@ -9,48 +9,46 @@ from util.cases import * class TDTestCase: - updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , - "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143, - "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143} + def init(self, conn, powSql): tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor()) - - def prepare_datas(self): + + def prepare_datas(self, dbname="db"): tdSql.execute( - '''create table stb1 + f'''create table {dbname}.stb1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) tags (t1 int) ''' ) - + tdSql.execute( - ''' - create table t1 + f''' + create table {dbname}.t1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) ''' ) for i in range(4): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )') for i in range(9): tdSql.execute( - f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" ) tdSql.execute( - f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 
'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" ) - tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") - tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") tdSql.execute( - f'''insert into t1 values + f'''insert into {dbname}.t1 
values ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a ) ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a ) @@ -65,14 +63,14 @@ class TDTestCase: ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ''' ) - + def check_result_auto_pow2(self ,origin_query , pow_query): pow_result = tdSql.getResult(pow_query) origin_result = tdSql.getResult(origin_query) auto_result =[] - + for row in origin_result: row_check = [] for elem in row: @@ -87,10 +85,11 @@ class TDTestCase: for row_index , row in enumerate(pow_result): for col_index , elem in enumerate(row): - if auto_result[row_index][col_index] == None and not (auto_result[row_index][col_index] == None and elem == None): + if auto_result[row_index][col_index] is None and elem : + check_status = False + elif auto_result[row_index][col_index] is not None and (auto_result[row_index][col_index] - elem > 0.001): + print(auto_result[row_index][col_index],", elem: ", elem ) check_status = False - elif auto_result[row_index][col_index] != None and (auto_result[row_index][col_index] - elem > 0.00000001): - check_status = False else: pass if not check_status: @@ -104,7 +103,7 @@ class TDTestCase: origin_result = tdSql.getResult(origin_query) auto_result =[] - + for row in origin_result: row_check = [] for elem in row: @@ -121,7 +120,8 @@ class TDTestCase: if auto_result[row_index][col_index] == None and not (auto_result[row_index][col_index] == None and elem == None): check_status = False elif auto_result[row_index][col_index] != None and (auto_result[row_index][col_index] - elem > 0.00000001): - check_status = False + print(auto_result[row_index][col_index],", elem: ", elem ) + check_status = False else: pass if not check_status: @@ -135,7 +135,7 @@ class TDTestCase: origin_result = tdSql.getResult(origin_query) 
auto_result =[] - + for row in origin_result: row_check = [] for elem in row: @@ -154,7 +154,8 @@ class TDTestCase: if auto_result[row_index][col_index] == None and not (auto_result[row_index][col_index] == None and elem == None): check_status = False elif auto_result[row_index][col_index] != None and (auto_result[row_index][col_index] - elem > 0.00000001): - check_status = False + print(auto_result[row_index][col_index],", elem: ", elem ) + check_status = False else: pass if not check_status: @@ -162,160 +163,160 @@ class TDTestCase: sys.exit(1) else: tdLog.info("pow value check pass , it work as expected ,sql is \"%s\" "%pow_query ) - - def test_errors(self): + + def test_errors(self, dbname="db"): error_sql_lists = [ - "select pow from t1", - # "select pow(-+--+c1 ,2) from t1", - # "select +-pow(c1,2) from t1", - # "select ++-pow(c1,2) from t1", - # "select ++--pow(c1,2) from t1", - # "select - -pow(c1,2)*0 from t1", - # "select pow(tbname+1,2) from t1 ", - "select pow(123--123,2)==1 from t1", - "select pow(c1,2) as 'd1' from t1", - "select pow(c1 ,c2 ,2) from t1", - "select pow(c1 ,NULL ,2) from t1", - "select pow(, 2) from t1;", - "select pow(pow(c1, 2) ab from t1)", - "select pow(c1 ,2 ) as int from t1", - "select pow from stb1", - # "select pow(-+--+c1) from stb1", - # "select +-pow(c1) from stb1", - # "select ++-pow(c1) from stb1", - # "select ++--pow(c1) from stb1", - # "select - -pow(c1)*0 from stb1", - # "select pow(tbname+1) from stb1 ", - "select pow(123--123 ,2)==1 from stb1", - "select pow(c1 ,2) as 'd1' from stb1", - "select pow(c1 ,c2 ,2 ) from stb1", - "select pow(c1 ,NULL,2) from stb1", - "select pow(,) from stb1;", - "select pow(pow(c1 , 2) ab from stb1)", - "select pow(c1 , 2) as int from stb1" + f"select pow from {dbname}.t1", + # f"select pow(-+--+c1 ,2) from {dbname}.t1", + # f"select +-pow(c1,2) from {dbname}.t1", + # f"select ++-pow(c1,2) from {dbname}.t1", + # f"select ++--pow(c1,2) from {dbname}.t1", + # f"select - -pow(c1,2)*0 from 
{dbname}.t1", + # f"select pow(tbname+1,2) from {dbname}.t1 ", + f"select pow(123--123,2)==1 from {dbname}.t1", + f"select pow(c1,2) as 'd1' from {dbname}.t1", + f"select pow(c1 ,c2 ,2) from {dbname}.t1", + f"select pow(c1 ,NULL ,2) from {dbname}.t1", + f"select pow(, 2) from {dbname}.t1;", + f"select pow(pow(c1, 2) ab from {dbname}.t1)", + f"select pow(c1 ,2 ) as int from {dbname}.t1", + f"select pow from {dbname}.stb1", + # f"select pow(-+--+c1) from {dbname}.stb1", + # f"select +-pow(c1) from {dbname}.stb1", + # f"select ++-pow(c1) from {dbname}.stb1", + # f"select ++--pow(c1) from {dbname}.stb1", + # f"select - -pow(c1)*0 from {dbname}.stb1", + # f"select pow(tbname+1) from {dbname}.stb1 ", + f"select pow(123--123 ,2)==1 from {dbname}.stb1", + f"select pow(c1 ,2) as 'd1' from {dbname}.stb1", + f"select pow(c1 ,c2 ,2 ) from {dbname}.stb1", + f"select pow(c1 ,NULL,2) from {dbname}.stb1", + f"select pow(,) from {dbname}.stb1;", + f"select pow(pow(c1 , 2) ab from {dbname}.stb1)", + f"select pow(c1 , 2) as int from {dbname}.stb1" ] for error_sql in error_sql_lists: tdSql.error(error_sql) - - def support_types(self): + + def support_types(self, dbname="db"): type_error_sql_lists = [ - "select pow(ts ,2 ) from t1" , - "select pow(c7,c1 ) from t1", - "select pow(c8,c2) from t1", - "select pow(c9,c3 ) from t1", - "select pow(ts,c4 ) from ct1" , - "select pow(c7,c5 ) from ct1", - "select pow(c8,c6 ) from ct1", - "select pow(c9,c8 ) from ct1", - "select pow(ts,2 ) from ct3" , - "select pow(c7,2 ) from ct3", - "select pow(c8,2 ) from ct3", - "select pow(c9,2 ) from ct3", - "select pow(ts,2 ) from ct4" , - "select pow(c7,2 ) from ct4", - "select pow(c8,2 ) from ct4", - "select pow(c9,2 ) from ct4", - "select pow(ts,2 ) from stb1" , - "select pow(c7,2 ) from stb1", - "select pow(c8,2 ) from stb1", - "select pow(c9,2 ) from stb1" , + f"select pow(ts ,2 ) from {dbname}.t1" , + f"select pow(c7,c1 ) from {dbname}.t1", + f"select pow(c8,c2) from {dbname}.t1", + f"select pow(c9,c3 
) from {dbname}.t1", + f"select pow(ts,c4 ) from {dbname}.ct1" , + f"select pow(c7,c5 ) from {dbname}.ct1", + f"select pow(c8,c6 ) from {dbname}.ct1", + f"select pow(c9,c8 ) from {dbname}.ct1", + f"select pow(ts,2 ) from {dbname}.ct3" , + f"select pow(c7,2 ) from {dbname}.ct3", + f"select pow(c8,2 ) from {dbname}.ct3", + f"select pow(c9,2 ) from {dbname}.ct3", + f"select pow(ts,2 ) from {dbname}.ct4" , + f"select pow(c7,2 ) from {dbname}.ct4", + f"select pow(c8,2 ) from {dbname}.ct4", + f"select pow(c9,2 ) from {dbname}.ct4", + f"select pow(ts,2 ) from {dbname}.stb1" , + f"select pow(c7,2 ) from {dbname}.stb1", + f"select pow(c8,2 ) from {dbname}.stb1", + f"select pow(c9,2 ) from {dbname}.stb1" , - "select pow(ts,2 ) from stbbb1" , - "select pow(c7,2 ) from stbbb1", + f"select pow(ts,2 ) from {dbname}.stbbb1" , + f"select pow(c7,2 ) from {dbname}.stbbb1", - "select pow(ts,2 ) from tbname", - "select pow(c9,2 ) from tbname" + f"select pow(ts,2 ) from {dbname}.tbname", + f"select pow(c9,2 ) from {dbname}.tbname" ] - + for type_sql in type_error_sql_lists: tdSql.error(type_sql) - - + + type_sql_lists = [ - "select pow(c1,2 ) from t1", - "select pow(c2,2 ) from t1", - "select pow(c3,2 ) from t1", - "select pow(c4,2 ) from t1", - "select pow(c5,2 ) from t1", - "select pow(c6,2 ) from t1", + f"select pow(c1,2 ) from {dbname}.t1", + f"select pow(c2,2 ) from {dbname}.t1", + f"select pow(c3,2 ) from {dbname}.t1", + f"select pow(c4,2 ) from {dbname}.t1", + f"select pow(c5,2 ) from {dbname}.t1", + f"select pow(c6,2 ) from {dbname}.t1", - "select pow(c1,2 ) from ct1", - "select pow(c2,2 ) from ct1", - "select pow(c3,2 ) from ct1", - "select pow(c4,2 ) from ct1", - "select pow(c5,2 ) from ct1", - "select pow(c6,2 ) from ct1", + f"select pow(c1,2 ) from {dbname}.ct1", + f"select pow(c2,2 ) from {dbname}.ct1", + f"select pow(c3,2 ) from {dbname}.ct1", + f"select pow(c4,2 ) from {dbname}.ct1", + f"select pow(c5,2 ) from {dbname}.ct1", + f"select pow(c6,2 ) from {dbname}.ct1", - 
"select pow(c1,2 ) from ct3", - "select pow(c2,2 ) from ct3", - "select pow(c3,2 ) from ct3", - "select pow(c4,2 ) from ct3", - "select pow(c5,2 ) from ct3", - "select pow(c6,2 ) from ct3", + f"select pow(c1,2 ) from {dbname}.ct3", + f"select pow(c2,2 ) from {dbname}.ct3", + f"select pow(c3,2 ) from {dbname}.ct3", + f"select pow(c4,2 ) from {dbname}.ct3", + f"select pow(c5,2 ) from {dbname}.ct3", + f"select pow(c6,2 ) from {dbname}.ct3", - "select pow(c1,2 ) from stb1", - "select pow(c2,2 ) from stb1", - "select pow(c3,2 ) from stb1", - "select pow(c4,2 ) from stb1", - "select pow(c5,2 ) from stb1", - "select pow(c6,2 ) from stb1", + f"select pow(c1,2 ) from {dbname}.stb1", + f"select pow(c2,2 ) from {dbname}.stb1", + f"select pow(c3,2 ) from {dbname}.stb1", + f"select pow(c4,2 ) from {dbname}.stb1", + f"select pow(c5,2 ) from {dbname}.stb1", + f"select pow(c6,2 ) from {dbname}.stb1", - "select pow(c6,2) as alisb from stb1", - "select pow(c6,2) alisb from stb1", + f"select pow(c6,2) as alisb from {dbname}.stb1", + f"select pow(c6,2) alisb from {dbname}.stb1", ] for type_sql in type_sql_lists: tdSql.query(type_sql) - - def basic_pow_function(self): - # basic query - tdSql.query("select c1 from ct3") + def basic_pow_function(self, dbname="db"): + + # basic query + tdSql.query(f"select c1 from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select c1 from t1") + tdSql.query(f"select c1 from {dbname}.t1") tdSql.checkRows(12) - tdSql.query("select c1 from stb1") + tdSql.query(f"select c1 from {dbname}.stb1") tdSql.checkRows(25) # used for empty table , ct3 is empty - tdSql.query("select pow(c1 ,2) from ct3") + tdSql.query(f"select pow(c1 ,2) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select pow(c2 ,2) from ct3") + tdSql.query(f"select pow(c2 ,2) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select pow(c3 ,2) from ct3") + tdSql.query(f"select pow(c3 ,2) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select pow(c4 ,2) from ct3") + 
tdSql.query(f"select pow(c4 ,2) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select pow(c5 ,2) from ct3") + tdSql.query(f"select pow(c5 ,2) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select pow(c6 ,2) from ct3") + tdSql.query(f"select pow(c6 ,2) from {dbname}.ct3") tdSql.checkRows(0) # # used for regular table - tdSql.query("select pow(c1 ,2) from t1") + tdSql.query(f"select pow(c1 ,2) from {dbname}.t1") tdSql.checkData(0, 0, None) tdSql.checkData(1 , 0, 1.000000000) tdSql.checkData(3 , 0, 9.000000000) tdSql.checkData(5 , 0, None) - tdSql.query("select c1, c2, c3 , c4, c5 from t1") + tdSql.query(f"select c1, c2, c3 , c4, c5 from {dbname}.t1") tdSql.checkData(1, 4, 1.11000) tdSql.checkData(3, 3, 33) tdSql.checkData(5, 4, None) - tdSql.query("select ts,c1, c2, c3 , c4, c5 from t1") + tdSql.query(f"select ts,c1, c2, c3 , c4, c5 from {dbname}.t1") tdSql.checkData(1, 5, 1.11000) tdSql.checkData(3, 4, 33) tdSql.checkData(5, 5, None) - self.check_result_auto_pow2( "select c1, c2, c3 , c4, c5 from t1", "select pow(c1 ,2), pow(c2 ,2) ,pow(c3, 2), pow(c4 ,2), pow(c5 ,2) from t1") - self.check_result_auto_pow1( "select c1, c2, c3 , c4, c5 from t1", "select pow(c1 ,1), pow(c2 ,1) ,pow(c3, 1), pow(c4 ,1), pow(c5 ,1) from t1") - self.check_result_auto_pow__10( "select c1, c2, c3 , c4, c5 from t1", "select pow(c1 ,-10), pow(c2 ,-10) ,pow(c3, -10), pow(c4 ,-10), pow(c5 ,-10) from t1") - + self.check_result_auto_pow2( f"select c1, c2, c3 , c4, c5 from {dbname}.t1", f"select pow(c1 ,2), pow(c2 ,2) ,pow(c3, 2), pow(c4 ,2), pow(c5 ,2) from {dbname}.t1") + self.check_result_auto_pow1( f"select c1, c2, c3 , c4, c5 from {dbname}.t1", f"select pow(c1 ,1), pow(c2 ,1) ,pow(c3, 1), pow(c4 ,1), pow(c5 ,1) from {dbname}.t1") + self.check_result_auto_pow__10( f"select c1, c2, c3 , c4, c5 from {dbname}.t1", f"select pow(c1 ,-10), pow(c2 ,-10) ,pow(c3, -10), pow(c4 ,-10), pow(c5 ,-10) from {dbname}.t1") + # used for sub table - tdSql.query("select c1 ,pow(c1 ,2) from ct1") + 
tdSql.query(f"select c1 ,pow(c1 ,2) from {dbname}.ct1") tdSql.checkData(0, 1, 64.000000000) tdSql.checkData(1 , 1, 49.000000000) tdSql.checkData(3 , 1, 25.000000000) @@ -323,7 +324,7 @@ class TDTestCase: # # test bug fix for pow(c1,c2) - tdSql.query("select c1, c5 ,pow(c1,c5) from ct4") + tdSql.query(f"select c1, c5 ,pow(c1,c5) from {dbname}.ct4") tdSql.checkData(0 , 2, None) tdSql.checkData(1 , 2, 104577724.506799981) tdSql.checkData(2 , 2, 3684781.623933245) @@ -331,11 +332,11 @@ class TDTestCase: tdSql.checkData(4 , 2, 7573.273783071) - self.check_result_auto_pow2( "select c1, c2, c3 , c4, c5 from ct1", "select pow(c1,2), pow(c2,2) ,pow(c3,2), pow(c4,2), pow(c5,2) from ct1") - self.check_result_auto_pow__10( "select c1, c2, c3 , c4, c5 from ct1", "select pow(c1,-10), pow(c2,-10) ,pow(c3,-10), pow(c4,-10), pow(c5,-10) from ct1") + self.check_result_auto_pow2( f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select pow(c1,2), pow(c2,2) ,pow(c3,2), pow(c4,2), pow(c5,2) from {dbname}.ct1") + self.check_result_auto_pow__10( f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select pow(c1,-10), pow(c2,-10) ,pow(c3,-10), pow(c4,-10), pow(c5,-10) from {dbname}.ct1") # nest query for pow functions - tdSql.query("select c1 , pow(c1,2) ,pow(pow(c1,2),2) , pow(pow(pow(c1,2),2),2) from ct1;") + tdSql.query(f"select c1 , pow(c1,2) ,pow(pow(c1,2),2) , pow(pow(pow(c1,2),2),2) from {dbname}.ct1;") tdSql.checkData(0 , 0 , 8) tdSql.checkData(0 , 1 , 64.000000000) tdSql.checkData(0 , 2 , 4096.000000000) @@ -351,24 +352,24 @@ class TDTestCase: tdSql.checkData(4 , 2 , 0.000000000) tdSql.checkData(4 , 3 , 0.000000000) - # # used for stable table - - tdSql.query("select pow(c1, 2) from stb1") + # # used for stable table + + tdSql.query(f"select pow(c1, 2) from {dbname}.stb1") tdSql.checkRows(25) - + # used for not exists table - tdSql.error("select pow(c1, 2) from stbbb1") - tdSql.error("select pow(c1, 2) from tbname") - tdSql.error("select pow(c1, 2) from ct5") + 
tdSql.error(f"select pow(c1, 2) from {dbname}.stbbb1") + tdSql.error(f"select pow(c1, 2) from {dbname}.tbname") + tdSql.error(f"select pow(c1, 2) from {dbname}.ct5") - # mix with common col - tdSql.query("select c1, pow(c1 ,2) from ct1") + # mix with common col + tdSql.query(f"select c1, pow(c1 ,2) from {dbname}.ct1") tdSql.checkData(0 , 0 ,8) tdSql.checkData(0 , 1 ,64.000000000) tdSql.checkData(4 , 0 ,0) tdSql.checkData(4 , 1 ,0.000000000) - tdSql.query("select c1, pow(c1,2) from ct4") + tdSql.query(f"select c1, pow(c1,2) from {dbname}.ct4") tdSql.checkData(0 , 0 , None) tdSql.checkData(0 , 1 ,None) tdSql.checkData(4 , 0 ,5) @@ -377,45 +378,45 @@ class TDTestCase: tdSql.checkData(5 , 1 ,None) # mix with common functions - tdSql.query("select c1, pow(c1 ,2),pow(c1,2), log(pow(c1,2) ,2) from ct4 ") + tdSql.query(f"select c1, pow(c1 ,2),pow(c1,2), log(pow(c1,2) ,2) from {dbname}.ct4 ") tdSql.checkData(0 , 0 ,None) tdSql.checkData(0 , 1 ,None) tdSql.checkData(0 , 2 ,None) tdSql.checkData(0 , 3 ,None) - + tdSql.checkData(3 , 0 , 6) tdSql.checkData(3 , 1 ,36.000000000) tdSql.checkData(3 , 2 ,36.000000000) tdSql.checkData(3 , 3 ,5.169925001) - tdSql.query("select c1, pow(c1,1),c5, floor(c5 ) from stb1 ") + tdSql.query(f"select c1, pow(c1,1),c5, floor(c5 ) from {dbname}.stb1 ") # # mix with agg functions , not support - tdSql.error("select c1, pow(c1 ,2),c5, count(c5) from stb1 ") - tdSql.error("select c1, pow(c1 ,2),c5, count(c5) from ct1 ") - tdSql.error("select pow(c1 ,2), count(c5) from stb1 ") - tdSql.error("select pow(c1 ,2), count(c5) from ct1 ") - tdSql.error("select c1, count(c5) from ct1 ") - tdSql.error("select c1, count(c5) from stb1 ") + tdSql.error(f"select c1, pow(c1 ,2),c5, count(c5) from {dbname}.stb1 ") + tdSql.error(f"select c1, pow(c1 ,2),c5, count(c5) from {dbname}.ct1 ") + tdSql.error(f"select pow(c1 ,2), count(c5) from {dbname}.stb1 ") + tdSql.error(f"select pow(c1 ,2), count(c5) from {dbname}.ct1 ") + tdSql.error(f"select c1, count(c5) from 
{dbname}.ct1 ") + tdSql.error(f"select c1, count(c5) from {dbname}.stb1 ") # agg functions mix with agg functions - tdSql.query("select max(c5), count(c5) from stb1") - tdSql.query("select max(c5), count(c5) from ct1") + tdSql.query(f"select max(c5), count(c5) from {dbname}.stb1") + tdSql.query(f"select max(c5), count(c5) from {dbname}.ct1") + - # bug fix for count - tdSql.query("select count(c1) from ct4 ") + tdSql.query(f"select count(c1) from {dbname}.ct4 ") tdSql.checkData(0,0,9) - tdSql.query("select count(*) from ct4 ") + tdSql.query(f"select count(*) from {dbname}.ct4 ") tdSql.checkData(0,0,12) - tdSql.query("select count(c1) from stb1 ") + tdSql.query(f"select count(c1) from {dbname}.stb1 ") tdSql.checkData(0,0,22) - tdSql.query("select count(*) from stb1 ") + tdSql.query(f"select count(*) from {dbname}.stb1 ") tdSql.checkData(0,0,25) - # # bug fix for compute - tdSql.query("select c1, pow(c1 ,2) -0 ,pow(c1-4 ,2)-0 from ct4 ") + # # bug fix for compute + tdSql.query(f"select c1, pow(c1 ,2) -0 ,pow(c1-4 ,2)-0 from {dbname}.ct4 ") tdSql.checkData(0, 0, None) tdSql.checkData(0, 1, None) tdSql.checkData(0, 2, None) @@ -423,7 +424,7 @@ class TDTestCase: tdSql.checkData(1, 1, 64.000000000) tdSql.checkData(1, 2, 16.000000000) - tdSql.query(" select c1, pow(c1 ,2) -0 ,pow(c1-0.1 ,2)-0.1 from ct4") + tdSql.query(f"select c1, pow(c1 ,2) -0 ,pow(c1-0.1 ,2)-0.1 from {dbname}.ct4") tdSql.checkData(0, 0, None) tdSql.checkData(0, 1, None) tdSql.checkData(0, 2, None) @@ -431,87 +432,86 @@ class TDTestCase: tdSql.checkData(1, 1, 64.000000000) tdSql.checkData(1, 2, 62.310000000) - tdSql.query("select c1, pow(c1, -10), c2, pow(c2, -10), c3, pow(c3, -10) from ct1") + tdSql.query(f"select c1, pow(c1, -10), c2, pow(c2, -10), c3, pow(c3, -10) from {dbname}.ct1") - def test_big_number(self): + def test_big_number(self, dbname="db"): - tdSql.query("select c1, pow(c1, 100000000) from ct1") # bigint to double data overflow + tdSql.query(f"select c1, pow(c1, 100000000) from 
{dbname}.ct1") # bigint to double data overflow tdSql.checkData(0, 1, None) tdSql.checkData(1, 1, None) tdSql.checkData(4, 1, 0.000000000) - tdSql.query("select c1, pow(c1, 10000000000000) from ct1") # bigint to double data overflow + tdSql.query(f"select c1, pow(c1, 10000000000000) from {dbname}.ct1") # bigint to double data overflow tdSql.checkData(0, 1, None) tdSql.checkData(1, 1, None) tdSql.checkData(4, 1, 0.000000000) - tdSql.query("select c1, pow(c1, 10000000000000000000000000) from ct1") # bigint to double data overflow - tdSql.query("select c1, pow(c1, 10000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value + tdSql.query(f"select c1, pow(c1, 10000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow + tdSql.query(f"select c1, pow(c1, 10000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value tdSql.checkData(0, 1, None) tdSql.checkData(1, 1, None) tdSql.checkData(4, 1, 0.000000000) - tdSql.query("select c1, pow(c1, 10000000000000000000000000000000000) from ct1") # bigint to double data overflow - tdSql.query("select c1, pow(c1, 10000000000000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value + tdSql.query(f"select c1, pow(c1, 10000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow + tdSql.query(f"select c1, pow(c1, 10000000000000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value tdSql.checkData(0, 1, None) tdSql.checkData(1, 1, None) tdSql.checkData(4, 1, 0.000000000) - tdSql.query("select c1, pow(c1, 10000000000000000000000000000000000000000) from ct1") # bigint to double data overflow - tdSql.query("select c1, pow(c1, 10000000000000000000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value + tdSql.query(f"select c1, pow(c1, 10000000000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow + 
tdSql.query(f"select c1, pow(c1, 10000000000000000000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value tdSql.checkData(0, 1, None) tdSql.checkData(1, 1, None) tdSql.checkData(4, 1, 0.000000000) - tdSql.query("select c1, pow(c1, 10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) from ct1") # bigint to double data overflow + tdSql.query(f"select c1, pow(c1, 10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow - def pow_base_test(self): + def pow_base_test(self, dbname="db"): # base is an regular number ,int or double - tdSql.query("select c1, pow(c1, 2) from ct1") + tdSql.query(f"select c1, pow(c1, 2) from {dbname}.ct1") tdSql.checkData(0, 1,64.000000000) - tdSql.query("select c1, pow(c1, 2.0) from ct1") + tdSql.query(f"select c1, pow(c1, 2.0) from {dbname}.ct1") tdSql.checkData(0, 1, 64.000000000) - tdSql.query("select c1, pow(1, 2.0) from ct1") + tdSql.query(f"select c1, pow(1, 2.0) from {dbname}.ct1") tdSql.checkData(0, 1, 1.000000000) tdSql.checkRows(13) # # bug for compute in functions - # tdSql.query("select c1, abs(1/0) from ct1") + # tdSql.query(f"select c1, abs(1/0) from {dbname}.ct1") # tdSql.checkData(0, 0, 8) # tdSql.checkData(0, 1, 1) - tdSql.query("select c1, pow(1, 2.0) from ct1") + tdSql.query(f"select c1, pow(1, 2.0) from {dbname}.ct1") tdSql.checkData(0, 1, 1.000000000) tdSql.checkRows(13) # two cols start pow(x,y) - tdSql.query("select c1,c2, pow(c1,c2) from ct1") + tdSql.query(f"select c1,c2, pow(c1,c2) from {dbname}.ct1") tdSql.checkData(0, 2, None) tdSql.checkData(1, 2, None) tdSql.checkData(4, 2, 1.000000000) - tdSql.query("select c1,c2, pow(c2,c1) from ct1") + tdSql.query(f"select c1,c2, pow(c2,c1) from {dbname}.ct1") tdSql.checkData(0, 2, 3897131646727578700481513520437089271808.000000000) tdSql.checkData(1, 2, 
17217033054561120738612297152331776.000000000) tdSql.checkData(4, 2, 1.000000000) - tdSql.query("select c1, pow(2.0 , c1) from ct1") + tdSql.query(f"select c1, pow(2.0 , c1) from {dbname}.ct1") tdSql.checkData(0, 1, 256.000000000) tdSql.checkData(1, 1, 128.000000000) tdSql.checkData(4, 1, 1.000000000) - tdSql.query("select c1, pow(2.0 , c1) from ct1") + tdSql.query(f"select c1, pow(2.0 , c1) from {dbname}.ct1") tdSql.checkData(0, 1, 256.000000000) tdSql.checkData(1, 1, 128.000000000) tdSql.checkData(4, 1, 1.000000000) - def abs_func_filter(self): - tdSql.execute("use db") - tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(pow(c1,2)-0.5) from ct4 where c1>5 ") + def abs_func_filter(self, dbname="db"): + tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(pow(c1,2)-0.5) from {dbname}.ct4 where c1>5 ") tdSql.checkRows(3) tdSql.checkData(0,0,8) tdSql.checkData(0,1,8.000000000) @@ -519,7 +519,7 @@ class TDTestCase: tdSql.checkData(0,3,7.900000000) tdSql.checkData(0,4,64.000000000) - tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(pow(c1,2)-0.5) from ct4 where c1=5 ") + tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(pow(c1,2)-0.5) from {dbname}.ct4 where c1=5 ") tdSql.checkRows(1) tdSql.checkData(0,0,5) tdSql.checkData(0,1,5.000000000) @@ -527,7 +527,7 @@ class TDTestCase: tdSql.checkData(0,3,4.900000000) tdSql.checkData(0,4,25.000000000) - tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(pow(c1,2)-0.5) from ct4 where c1=5 ") + tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(pow(c1,2)-0.5) from {dbname}.ct4 where c1=5 ") tdSql.checkRows(1) tdSql.checkData(0,0,5) tdSql.checkData(0,1,5.000000000) @@ -535,7 +535,7 @@ class TDTestCase: tdSql.checkData(0,3,4.900000000) tdSql.checkData(0,4,25.000000000) - tdSql.query("select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(pow(c1,2)-0.5) from ct4 where 
c1 0 order by tbname " , "select pow(c5,2) from stb1 where c1 > 0 order by tbname" ) - self.check_result_auto_pow2( " select c5 from stb1 where c1 > 0 order by tbname " , "select pow(c5,2) from stb1 where c1 > 0 order by tbname" ) - self.check_result_auto_pow2( " select t1,c5 from stb1 order by ts " , "select pow(t1,2), pow(c5,2) from stb1 order by ts" ) - self.check_result_auto_pow2( " select t1,c5 from stb1 order by tbname " , "select pow(t1,2) ,pow(c5,2) from stb1 order by tbname" ) - self.check_result_auto_pow2( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select pow(t1,2) ,pow(c5,2) from stb1 where c1 > 0 order by tbname" ) - self.check_result_auto_pow2( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select pow(t1,2) , pow(c5,2) from stb1 where c1 > 0 order by tbname" ) - pass - - + def support_super_table_test(self, dbname="db"): + self.check_result_auto_pow2( f"select c5 from {dbname}.stb1 order by ts " , f"select pow(c5,2) from {dbname}.stb1 order by ts" ) + self.check_result_auto_pow2( f"select c5 from {dbname}.stb1 order by tbname " , f"select pow(c5,2) from {dbname}.stb1 order by tbname" ) + self.check_result_auto_pow2( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select pow(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" ) + self.check_result_auto_pow2( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select pow(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" ) + self.check_result_auto_pow2( f"select t1,c5 from {dbname}.stb1 order by ts " , f"select pow(t1,2), pow(c5,2) from {dbname}.stb1 order by ts" ) + self.check_result_auto_pow2( f"select t1,c5 from {dbname}.stb1 order by tbname " , f"select pow(t1,2) ,pow(c5,2) from {dbname}.stb1 order by tbname" ) + self.check_result_auto_pow2( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select pow(t1,2) ,pow(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" ) + self.check_result_auto_pow2( f"select t1,c5 
from {dbname}.stb1 where c1 > 0 order by tbname " , f"select pow(t1,2) , pow(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" ) def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring tdSql.prepare() tdLog.printNoPrefix("==========step1:create table ==============") - + self.prepare_datas() - tdLog.printNoPrefix("==========step2:test errors ==============") + tdLog.printNoPrefix("==========step2:test errors ==============") self.test_errors() - - tdLog.printNoPrefix("==========step3:support types ============") + + tdLog.printNoPrefix("==========step3:support types ============") self.support_types() - tdLog.printNoPrefix("==========step4: pow basic query ============") + tdLog.printNoPrefix("==========step4: pow basic query ============") self.basic_pow_function() - tdLog.printNoPrefix("==========step5: big number pow query ============") + tdLog.printNoPrefix("==========step5: big number pow query ============") self.test_big_number() - tdLog.printNoPrefix("==========step6: base number for pow query ============") + tdLog.printNoPrefix("==========step6: base number for pow query ============") self.pow_base_test() - tdLog.printNoPrefix("==========step7: pow boundary query ============") + tdLog.printNoPrefix("==========step7: pow boundary query ============") self.check_boundary_values() - tdLog.printNoPrefix("==========step8: pow filter query ============") + tdLog.printNoPrefix("==========step8: pow filter query ============") self.abs_func_filter() tdLog.printNoPrefix("==========step9: check pow result of stable query ============") - self.support_super_table_test() + self.support_super_table_test() def stop(self): tdSql.close() diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh index 7b782cd971..65cfcd674e 100755 --- a/tests/system-test/fulltest.sh +++ b/tests/system-test/fulltest.sh @@ -132,6 +132,12 @@ python3 ./test.py -f 2-query/max.py python3 ./test.py -f 2-query/max.py -R python3 ./test.py -f 
2-query/min.py python3 ./test.py -f 2-query/min.py -R +python3 ./test.py -f 2-query/Now.py +python3 ./test.py -f 2-query/Now.py -R +python3 ./test.py -f 2-query/percentile.py +python3 ./test.py -f 2-query/percentile.py -R +python3 ./test.py -f 2-query/pow.py +python3 ./test.py -f 2-query/pow.py -R python3 ./test.py -f 1-insert/update_data.py @@ -151,7 +157,6 @@ python3 ./test.py -f 2-query/spread.py python3 ./test.py -f 2-query/timezone.py -python3 ./test.py -f 2-query/Now.py python3 ./test.py -f 2-query/Today.py python3 ./test.py -f 2-query/To_iso8601.py python3 ./test.py -f 2-query/To_unixtimestamp.py @@ -160,10 +165,8 @@ python3 ./test.py -f 2-query/Timediff.py python3 ./test.py -f 2-query/json_tag.py python3 ./test.py -f 2-query/top.py -python3 ./test.py -f 2-query/percentile.py python3 ./test.py -f 2-query/round.py python3 ./test.py -f 2-query/log.py -python3 ./test.py -f 2-query/pow.py python3 ./test.py -f 2-query/sqrt.py python3 ./test.py -f 2-query/sin.py python3 ./test.py -f 2-query/tan.py From 3a5f3203a021be79255cedb4bbf6b41c63d90f69 Mon Sep 17 00:00:00 2001 From: cpwu Date: Wed, 17 Aug 2022 16:11:37 +0800 Subject: [PATCH 04/72] fix case --- tests/pytest/util/common.py | 12 +- tests/system-test/2-query/qnodeCluster.py | 48 +-- .../2-query/query_cols_tags_and_or.py | 21 +- tests/system-test/2-query/round.py | 343 +++++++++--------- tests/system-test/2-query/rtrim.py | 39 +- tests/system-test/2-query/sample.py | 323 ++++++++--------- tests/system-test/fulltest.sh | 12 +- 7 files changed, 391 insertions(+), 407 deletions(-) diff --git a/tests/pytest/util/common.py b/tests/pytest/util/common.py index 33ef92bf73..132c1f029c 100644 --- a/tests/pytest/util/common.py +++ b/tests/pytest/util/common.py @@ -199,22 +199,22 @@ class TDCom: res = requests.post(url, sql.encode("utf-8"), headers = self.preDefine()[0]) return res - def cleanTb(self, type="taosc"): + def cleanTb(self, type="taosc", dbname="db"): ''' type is taosc or restful ''' - query_sql = "show stables" 
+ query_sql = f"show {dbname}.stables" res_row_list = tdSql.query(query_sql, True) stb_list = map(lambda x: x[0], res_row_list) for stb in stb_list: if type == "taosc": - tdSql.execute(f'drop table if exists `{stb}`') + tdSql.execute(f'drop table if exists `{dbname}.{stb}`') if not stb[0].isdigit(): - tdSql.execute(f'drop table if exists {stb}') + tdSql.execute(f'drop table if exists {dbname}.{stb}') elif type == "restful": - self.restApiPost(f"drop table if exists `{stb}`") + self.restApiPost(f"drop table if exists `{dbname}.{stb}`") if not stb[0].isdigit(): - self.restApiPost(f"drop table if exists {stb}") + self.restApiPost(f"drop table if exists {dbname}.{stb}") def dateToTs(self, datetime_input): return int(time.mktime(time.strptime(datetime_input, "%Y-%m-%d %H:%M:%S.%f"))) diff --git a/tests/system-test/2-query/qnodeCluster.py b/tests/system-test/2-query/qnodeCluster.py index f68eb58a7a..9e49bff938 100644 --- a/tests/system-test/2-query/qnodeCluster.py +++ b/tests/system-test/2-query/qnodeCluster.py @@ -13,9 +13,9 @@ from util.common import * sys.path.append("./6-cluster/") from clusterCommonCreate import * -from clusterCommonCheck import clusterComCheck +from clusterCommonCheck import clusterComCheck -import threading +import threading class TDTestCase: @@ -28,7 +28,7 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), True) + tdSql.init(conn.cursor(), False) def create_ctable(self,tsql=None, dbName='dbx',stbName='stb',ctbPrefix='ctb',ctbNum=1): tsql.execute("use %s" %dbName) @@ -47,7 +47,7 @@ class TDTestCase: sql = pre_create if sql != pre_create: tsql.execute(sql) - + tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName)) return @@ -55,7 +55,7 @@ class TDTestCase: dbname="db_tsbs" stabname1="readings" stabname2="diagnostics" - ctbnamePre1="rct" + ctbnamePre1="rct" ctbnamePre2="dct" ctbNums=40 self.ctbNums=ctbNums @@ -73,7 +73,7 @@ class TDTestCase: 
self.create_ctable(tsql=tdSql,dbName=dbname,stbName=stabname2,ctbPrefix=ctbnamePre2,ctbNum=ctbNums) - for j in range(ctbNums): + for j in range(ctbNums): for i in range(rowNUms): tdSql.execute( f"insert into rct{j} values ( {ts+i*60000}, {80+i}, {90+i}, {85+i}, {30+i*10}, {1.2*i}, {221+i*2}, {20+i*0.2}, {1500+i*20}, {150+i*2},{5+i} )" @@ -109,19 +109,19 @@ class TDTestCase: def tsbsIotQuery(self,tdSql): tdSql.execute("use db_tsbs") - + # test interval and partition tdSql.query(" SELECT avg(velocity) as mean_velocity ,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet; ") # print(tdSql.queryResult) parRows=tdSql.queryRows tdSql.query(" SELECT avg(velocity) as mean_velocity ,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet interval(10m); ") tdSql.checkRows(parRows) - - - # # test insert into + + + # # test insert into # tdSql.execute("create table testsnode (ts timestamp, c1 float,c2 binary(30),c3 binary(30),c4 binary(30)) ;") # tdSql.query("insert into testsnode SELECT ts,avg(velocity) as mean_velocity,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet,ts interval(10m);") - + # tdSql.query("insert into testsnode(ts,c1,c2,c3,c4) SELECT ts,avg(velocity) as mean_velocity,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet,ts interval(10m);") @@ -141,7 +141,7 @@ class TDTestCase: tdSql.query("SELECT ts,name,driver,current_load,load_capacity FROM (SELECT last(ts) as ts,name,driver, current_load,load_capacity FROM diagnostics WHERE fleet = 'South' partition by name,driver) WHERE current_load>= (0.9 * load_capacity) partition by name ORDER BY name ;") - # 2 stationary-trucks + # 2 stationary-trucks tdSql.query("select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity FROM readings WHERE ts > 
'2016-01-01T15:07:21Z' AND ts <= '2016-01-01T16:17:21Z' partition BY name,driver,fleet interval(10m) LIMIT 1)") tdSql.query("select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity FROM readings WHERE ts > '2016-01-01T15:07:21Z' AND ts <= '2016-01-01T16:17:21Z' partition BY name,driver,fleet interval(10m) LIMIT 1) WHERE fleet = 'West' AND mean_velocity < 1000 partition BY name") @@ -156,7 +156,7 @@ class TDTestCase: tdSql.query("select _wstart as ts,fleet,name,driver,count(mv)/6 as hours_driven from ( select _wstart as ts,fleet,name,driver,avg(velocity) as mv from readings where ts > '2016-01-01T00:00:00Z' and ts < '2016-01-05T00:00:01Z' partition by fleet,name,driver interval(10m)) where ts > '2016-01-01T00:00:00Z' and ts < '2016-01-05T00:00:01Z' partition by fleet,name,driver interval(1d) ;") - # # 6. avg-daily-driving-session + # # 6. avg-daily-driving-session # #taosc core dumped # tdSql.execute("create table random_measure2_1 (ts timestamp,ela float, name binary(40))") # tdSql.query("SELECT ts,diff(mv) AS difka FROM (SELECT ts,name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv FROM readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name,ts interval(10m) fill(value,0)) GROUP BY name,ts;") @@ -166,7 +166,7 @@ class TDTestCase: # 7. avg-load tdSql.query("SELECT fleet, model,avg(ml) AS mean_load_percentage FROM (SELECT fleet, model,current_load/load_capacity AS ml FROM diagnostics partition BY name, fleet, model) partition BY fleet, model order by fleet ;") - # 8. daily-activity + # 8. 
daily-activity tdSql.query(" SELECT model,ms1 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) fill(value,0)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;") tdSql.query(" SELECT model,ms1 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) ) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;") @@ -184,7 +184,7 @@ class TDTestCase: tdSql.query(" SELECT model,state_changed,count(state_changed) FROM (SELECT model,diff(broken_down) AS state_changed FROM (SELECT _wstart,model,cast(cast(floor(2*(sum(nzs)/count(nzs))) as bool) as int) AS broken_down FROM (SELECT ts,model, cast(cast(status as bool) as int) AS nzs FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' ) WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition BY model interval(10m)) partition BY model) where state_changed =1 partition BY model,state_changed ;") - + #it's already supported: # last-loc tdSql.query("SELECT last_row(ts),latitude,longitude,name,driver FROM readings WHERE fleet='South' and name IS NOT NULL partition BY name,driver order by name ;") @@ -192,7 +192,7 @@ class TDTestCase: #2. low-fuel tdSql.query("SELECT last_row(ts),name,driver,fuel_state,driver FROM diagnostics WHERE fuel_state <= 0.1 AND fleet = 'South' and name IS NOT NULL GROUP BY name,driver order by name;") - + # 3. 
avg-vs-projected-fuel-consumption tdSql.query("select avg(fuel_consumption) as avg_fuel_consumption,avg(nominal_fuel_consumption) as nominal_fuel_consumption from readings where velocity > 1 group by fleet") @@ -213,16 +213,16 @@ class TDTestCase: 'ctbPrefix': 'ctb', 'ctbNum': 1, } - + dnodeNumbers=int(dnodeNumbers) mnodeNums=int(mnodeNums) vnodeNumbers = int(dnodeNumbers-mnodeNums) - + tdSql.query("select * from information_schema.ins_dnodes;") tdLog.debug(tdSql.queryResult) clusterComCheck.checkDnodes(dnodeNumbers) - tdLog.info("create database and stable") + tdLog.info("create database and stable") tdDnodes=cluster.dnodes stopcount =0 threads=[] @@ -234,7 +234,7 @@ class TDTestCase: for tr in threads: tr.start() - tdLog.info("Take turns stopping %s "%stopRole) + tdLog.info("Take turns stopping %s "%stopRole) while stopcount < restartNumbers: tdLog.info(" restart loop: %d"%stopcount ) if stopRole == "mnode": @@ -242,7 +242,7 @@ class TDTestCase: tdDnodes[i].stoptaosd() # sleep(10) tdDnodes[i].starttaosd() - # sleep(10) + # sleep(10) elif stopRole == "vnode": for i in range(vnodeNumbers): tdDnodes[i+mnodeNums].stoptaosd() @@ -254,7 +254,7 @@ class TDTestCase: tdDnodes[i].stoptaosd() # sleep(10) tdDnodes[i].starttaosd() - # sleep(10) + # sleep(10) # dnodeNumbers don't include database of schema if clusterComCheck.checkDnodes(dnodeNumbers): @@ -265,12 +265,12 @@ class TDTestCase: tdLog.exit("one or more of dnodes failed to start ") # self.check3mnode() stopcount+=1 - + for tr in threads: tr.join() - def run(self): + def run(self): tdLog.printNoPrefix("==========step1:create database and table,insert data ==============") self.createCluster() self.prepareData() diff --git a/tests/system-test/2-query/query_cols_tags_and_or.py b/tests/system-test/2-query/query_cols_tags_and_or.py index e0fb986d79..af3fbb83c0 100644 --- a/tests/system-test/2-query/query_cols_tags_and_or.py +++ b/tests/system-test/2-query/query_cols_tags_and_or.py @@ -19,7 +19,7 @@ class TDTestCase: def 
init(self, conn, logSql): ## add for TD-6672 tdLog.debug("start to execute %s" % __file__) - tdSql.init(conn.cursor(), logSql) + tdSql.init(conn.cursor(), False) def insertData(self, tb_name): insert_sql_list = [f'insert into {tb_name} values ("2021-01-01 12:00:00", 1, 1, 1, 3, 1.1, 1.1, "binary", "nchar", true, 1, 2, 3, 4)', @@ -37,17 +37,17 @@ class TDTestCase: for sql in insert_sql_list: tdSql.execute(sql) - def initTb(self): - tdCom.cleanTb() - tb_name = tdCom.getLongName(8, "letters") + def initTb(self, dbname="db"): + tdCom.cleanTb(dbname) + tb_name = f'{dbname}.{tdCom.getLongName(8, "letters")}' tdSql.execute( f"CREATE TABLE {tb_name} (ts timestamp, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 float, c6 double, c7 binary(100), c8 nchar(200), c9 bool, c10 tinyint unsigned, c11 smallint unsigned, c12 int unsigned, c13 bigint unsigned)") self.insertData(tb_name) return tb_name - def initStb(self, count=5): - tdCom.cleanTb() - tb_name = tdCom.getLongName(8, "letters") + def initStb(self, count=5, dbname="db"): + tdCom.cleanTb(dbname) + tb_name = f'{dbname}.{tdCom.getLongName(8, "letters")}' tdSql.execute( f"CREATE TABLE {tb_name} (ts timestamp, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 float, c6 double, c7 binary(100), c8 nchar(200), c9 bool, c10 tinyint unsigned, c11 smallint unsigned, c12 int unsigned, c13 bigint unsigned) tags (t1 tinyint, t2 smallint, t3 int, t4 bigint, t5 float, t6 double, t7 binary(100), t8 nchar(200), t9 bool, t10 tinyint unsigned, t11 smallint unsigned, t12 int unsigned, t13 bigint unsigned)") for i in range(1, count+1): @@ -56,9 +56,10 @@ class TDTestCase: self.insertData(f'{tb_name}_sub_{i}') return tb_name - def initTwoStb(self): - tdCom.cleanTb() - tb_name = tdCom.getLongName(8, "letters") + def initTwoStb(self, dbname="db"): + tdCom.cleanTb(dbname) + tb_name = f'{dbname}.{tdCom.getLongName(8, "letters")}' + # tb_name = tdCom.getLongName(8, "letters") tb_name1 = f'{tb_name}1' tb_name2 = f'{tb_name}2' tdSql.execute( diff --git 
a/tests/system-test/2-query/round.py b/tests/system-test/2-query/round.py index 551e225a4d..1d69d3c9af 100644 --- a/tests/system-test/2-query/round.py +++ b/tests/system-test/2-query/round.py @@ -8,49 +8,46 @@ from util.sql import * from util.cases import * class TDTestCase: - updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , - "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143, - "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143} def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor()) - def prepare_datas(self): + def prepare_datas(self, dbname="db"): tdSql.execute( - '''create table stb1 + f'''create table {dbname}.stb1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) tags (t1 int) ''' ) tdSql.execute( - ''' - create table t1 + f''' + create table {dbname}.t1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) ''' ) for i in range(4): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )') for i in range(9): tdSql.execute( - f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" ) tdSql.execute( - f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, 
{1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" ) - tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") - tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") tdSql.execute( - f'''insert into t1 values + f'''insert into {dbname}.t1 values ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( '2020-10-21 
01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a ) ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a ) @@ -94,68 +91,68 @@ class TDTestCase: else: tdLog.info("round value check pass , it work as expected ,sql is \"%s\" "%round_query ) - def test_errors(self): + def test_errors(self, dbname="db"): error_sql_lists = [ - "select round from t1", - # "select round(-+--+c1) from t1", - # "select +-round(c1) from t1", - # "select ++-round(c1) from t1", - # "select ++--round(c1) from t1", - # "select - -round(c1)*0 from t1", - # "select round(tbname+1) from t1 ", - "select round(123--123)==1 from t1", - "select round(c1) as 'd1' from t1", - "select round(c1 ,c2 ) from t1", - "select round(c1 ,NULL) from t1", - "select round(,) from t1;", - "select round(round(c1) ab from t1)", - "select round(c1) as int from t1", - "select round from stb1", - # "select round(-+--+c1) from stb1", - # "select +-round(c1) from stb1", - # "select ++-round(c1) from stb1", - # "select ++--round(c1) from stb1", - # "select - -round(c1)*0 from stb1", - # "select round(tbname+1) from stb1 ", - "select round(123--123)==1 from stb1", - "select round(c1) as 'd1' from stb1", - "select round(c1 ,c2 ) from stb1", - "select round(c1 ,NULL) from stb1", - "select round(,) from stb1;", - "select round(round(c1) ab from stb1)", - "select round(c1) as int from stb1" + f"select round from {dbname}.t1", + # f"select round(-+--+c1) from {dbname}.t1", + # f"select +-round(c1) from {dbname}.t1", + # f"select ++-round(c1) from {dbname}.t1", + # f"select ++--round(c1) from {dbname}.t1", + # f"select - -round(c1)*0 from {dbname}.t1", + # f"select round(tbname+1) from {dbname}.t1 ", + f"select round(123--123)==1 from {dbname}.t1", + f"select round(c1) as 'd1' from {dbname}.t1", + f"select round(c1 ,c2 ) from {dbname}.t1", + f"select round(c1 ,NULL) from {dbname}.t1", + f"select round(,) from {dbname}.t1;", + f"select round(round(c1) ab from 
{dbname}.t1)", + f"select round(c1) as int from {dbname}.t1", + f"select round from {dbname}.stb1", + # f"select round(-+--+c1) from {dbname}.stb1", + # f"select +-round(c1) from {dbname}.stb1", + # f"select ++-round(c1) from {dbname}.stb1", + # f"select ++--round(c1) from {dbname}.stb1", + # f"select - -round(c1)*0 from {dbname}.stb1", + # f"select round(tbname+1) from {dbname}.stb1 ", + f"select round(123--123)==1 from {dbname}.stb1", + f"select round(c1) as 'd1' from {dbname}.stb1", + f"select round(c1 ,c2 ) from {dbname}.stb1", + f"select round(c1 ,NULL) from {dbname}.stb1", + f"select round(,) from {dbname}.stb1;", + f"select round(round(c1) ab from {dbname}.stb1)", + f"select round(c1) as int from {dbname}.stb1" ] for error_sql in error_sql_lists: tdSql.error(error_sql) - def support_types(self): + def support_types(self, dbname="db"): type_error_sql_lists = [ - "select round(ts) from t1" , - "select round(c7) from t1", - "select round(c8) from t1", - "select round(c9) from t1", - "select round(ts) from ct1" , - "select round(c7) from ct1", - "select round(c8) from ct1", - "select round(c9) from ct1", - "select round(ts) from ct3" , - "select round(c7) from ct3", - "select round(c8) from ct3", - "select round(c9) from ct3", - "select round(ts) from ct4" , - "select round(c7) from ct4", - "select round(c8) from ct4", - "select round(c9) from ct4", - "select round(ts) from stb1" , - "select round(c7) from stb1", - "select round(c8) from stb1", - "select round(c9) from stb1" , + f"select round(ts) from {dbname}.t1" , + f"select round(c7) from {dbname}.t1", + f"select round(c8) from {dbname}.t1", + f"select round(c9) from {dbname}.t1", + f"select round(ts) from {dbname}.ct1" , + f"select round(c7) from {dbname}.ct1", + f"select round(c8) from {dbname}.ct1", + f"select round(c9) from {dbname}.ct1", + f"select round(ts) from {dbname}.ct3" , + f"select round(c7) from {dbname}.ct3", + f"select round(c8) from {dbname}.ct3", + f"select round(c9) from {dbname}.ct3", + 
f"select round(ts) from {dbname}.ct4" , + f"select round(c7) from {dbname}.ct4", + f"select round(c8) from {dbname}.ct4", + f"select round(c9) from {dbname}.ct4", + f"select round(ts) from {dbname}.stb1" , + f"select round(c7) from {dbname}.stb1", + f"select round(c8) from {dbname}.stb1", + f"select round(c9) from {dbname}.stb1" , - "select round(ts) from stbbb1" , - "select round(c7) from stbbb1", + f"select round(ts) from {dbname}.stbbb1" , + f"select round(c7) from {dbname}.stbbb1", - "select round(ts) from tbname", - "select round(c9) from tbname" + f"select round(ts) from {dbname}.tbname", + f"select round(c9) from {dbname}.tbname" ] @@ -164,127 +161,127 @@ class TDTestCase: type_sql_lists = [ - "select round(c1) from t1", - "select round(c2) from t1", - "select round(c3) from t1", - "select round(c4) from t1", - "select round(c5) from t1", - "select round(c6) from t1", + f"select round(c1) from {dbname}.t1", + f"select round(c2) from {dbname}.t1", + f"select round(c3) from {dbname}.t1", + f"select round(c4) from {dbname}.t1", + f"select round(c5) from {dbname}.t1", + f"select round(c6) from {dbname}.t1", - "select round(c1) from ct1", - "select round(c2) from ct1", - "select round(c3) from ct1", - "select round(c4) from ct1", - "select round(c5) from ct1", - "select round(c6) from ct1", + f"select round(c1) from {dbname}.ct1", + f"select round(c2) from {dbname}.ct1", + f"select round(c3) from {dbname}.ct1", + f"select round(c4) from {dbname}.ct1", + f"select round(c5) from {dbname}.ct1", + f"select round(c6) from {dbname}.ct1", - "select round(c1) from ct3", - "select round(c2) from ct3", - "select round(c3) from ct3", - "select round(c4) from ct3", - "select round(c5) from ct3", - "select round(c6) from ct3", + f"select round(c1) from {dbname}.ct3", + f"select round(c2) from {dbname}.ct3", + f"select round(c3) from {dbname}.ct3", + f"select round(c4) from {dbname}.ct3", + f"select round(c5) from {dbname}.ct3", + f"select round(c6) from {dbname}.ct3", - 
"select round(c1) from stb1", - "select round(c2) from stb1", - "select round(c3) from stb1", - "select round(c4) from stb1", - "select round(c5) from stb1", - "select round(c6) from stb1", + f"select round(c1) from {dbname}.stb1", + f"select round(c2) from {dbname}.stb1", + f"select round(c3) from {dbname}.stb1", + f"select round(c4) from {dbname}.stb1", + f"select round(c5) from {dbname}.stb1", + f"select round(c6) from {dbname}.stb1", - "select round(c6) as alisb from stb1", - "select round(c6) alisb from stb1", + f"select round(c6) as alisb from {dbname}.stb1", + f"select round(c6) alisb from {dbname}.stb1", ] for type_sql in type_sql_lists: tdSql.query(type_sql) - def basic_round_function(self): + def basic_round_function(self, dbname="db"): # basic query - tdSql.query("select c1 from ct3") + tdSql.query(f"select c1 from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select c1 from t1") + tdSql.query(f"select c1 from {dbname}.t1") tdSql.checkRows(12) - tdSql.query("select c1 from stb1") + tdSql.query(f"select c1 from {dbname}.stb1") tdSql.checkRows(25) # used for empty table , ct3 is empty - tdSql.query("select round(c1) from ct3") + tdSql.query(f"select round(c1) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select round(c2) from ct3") + tdSql.query(f"select round(c2) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select round(c3) from ct3") + tdSql.query(f"select round(c3) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select round(c4) from ct3") + tdSql.query(f"select round(c4) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select round(c5) from ct3") + tdSql.query(f"select round(c5) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select round(c6) from ct3") + tdSql.query(f"select round(c6) from {dbname}.ct3") # used for regular table - tdSql.query("select round(c1) from t1") + tdSql.query(f"select round(c1) from {dbname}.t1") tdSql.checkData(0, 0, None) tdSql.checkData(1 , 0, 1) tdSql.checkData(3 , 0, 3) 
tdSql.checkData(5 , 0, None) - tdSql.query("select c1, c2, c3 , c4, c5 from t1") + tdSql.query(f"select c1, c2, c3 , c4, c5 from {dbname}.t1") tdSql.checkData(1, 4, 1.11000) tdSql.checkData(3, 3, 33) tdSql.checkData(5, 4, None) - tdSql.query("select ts,c1, c2, c3 , c4, c5 from t1") + tdSql.query(f"select ts,c1, c2, c3 , c4, c5 from {dbname}.t1") tdSql.checkData(1, 5, 1.11000) tdSql.checkData(3, 4, 33) tdSql.checkData(5, 5, None) - self.check_result_auto( "select c1, c2, c3 , c4, c5 from t1", "select (c1), round(c2) ,round(c3), round(c4), round(c5) from t1") + self.check_result_auto( f"select c1, c2, c3 , c4, c5 from {dbname}.t1", f"select (c1), round(c2) ,round(c3), round(c4), round(c5) from {dbname}.t1") # used for sub table - tdSql.query("select round(c1) from ct1") + tdSql.query(f"select round(c1) from {dbname}.ct1") tdSql.checkData(0, 0, 8) tdSql.checkData(1 , 0, 7) tdSql.checkData(3 , 0, 5) tdSql.checkData(5 , 0, 4) - tdSql.query("select round(c1) from ct1") - self.check_result_auto( "select c1, c2, c3 , c4, c5 from ct1", "select (c1), round(c2) ,round(c3), round(c4), round(c5) from ct1") - self.check_result_auto("select round(round(round(round(round(round(round(round(round(round(c1)))))))))) nest_col_func from ct1;","select c1 from ct1" ) + tdSql.query(f"select round(c1) from {dbname}.ct1") + self.check_result_auto( f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select (c1), round(c2) ,round(c3), round(c4), round(c5) from {dbname}.ct1") + self.check_result_auto(f"select round(round(round(round(round(round(round(round(round(round(c1)))))))))) nest_col_func from {dbname}.ct1;",f"select c1 from {dbname}.ct1" ) # used for stable table - tdSql.query("select round(c1) from stb1") + tdSql.query(f"select round(c1) from {dbname}.stb1") tdSql.checkRows(25) - self.check_result_auto( "select c1, c2, c3 , c4, c5 from ct4 ", "select (c1), round(c2) ,round(c3), round(c4), round(c5) from ct4") - self.check_result_auto("select 
round(round(round(round(round(round(round(round(round(round(c1)))))))))) nest_col_func from ct4;" , "select c1 from ct4" ) + self.check_result_auto( f"select c1, c2, c3 , c4, c5 from {dbname}.ct4 ", f"select (c1), round(c2) ,round(c3), round(c4), round(c5) from {dbname}.ct4") + self.check_result_auto(f"select round(round(round(round(round(round(round(round(round(round(c1)))))))))) nest_col_func from {dbname}.ct4;" , f"select c1 from {dbname}.ct4" ) # used for not exists table - tdSql.error("select round(c1) from stbbb1") - tdSql.error("select round(c1) from tbname") - tdSql.error("select round(c1) from ct5") + tdSql.error(f"select round(c1) from {dbname}.stbbb1") + tdSql.error(f"select round(c1) from {dbname}.tbname") + tdSql.error(f"select round(c1) from {dbname}.ct5") # mix with common col - tdSql.query("select c1, round(c1) from ct1") + tdSql.query(f"select c1, round(c1) from {dbname}.ct1") tdSql.checkData(0 , 0 ,8) tdSql.checkData(0 , 1 ,8) tdSql.checkData(4 , 0 ,0) tdSql.checkData(4 , 1 ,0) - tdSql.query("select c1, round(c1) from ct4") + tdSql.query(f"select c1, round(c1) from {dbname}.ct4") tdSql.checkData(0 , 0 , None) tdSql.checkData(0 , 1 ,None) tdSql.checkData(4 , 0 ,5) tdSql.checkData(4 , 1 ,5) tdSql.checkData(5 , 0 ,None) tdSql.checkData(5 , 1 ,None) - tdSql.query("select c1, round(c1) from ct4 ") + tdSql.query(f"select c1, round(c1) from {dbname}.ct4 ") tdSql.checkData(0 , 0 ,None) tdSql.checkData(0 , 1 ,None) tdSql.checkData(4 , 0 ,5) tdSql.checkData(4 , 1 ,5) # mix with common functions - tdSql.query("select c1, round(c1),c5, round(c5) from ct4 ") + tdSql.query(f"select c1, round(c1),c5, round(c5) from {dbname}.ct4 ") tdSql.checkData(0 , 0 ,None) tdSql.checkData(0 , 1 ,None) tdSql.checkData(0 , 2 ,None) @@ -300,34 +297,34 @@ class TDTestCase: tdSql.checkData(6 , 2 ,4.44000) tdSql.checkData(6 , 3 ,4.00000) - tdSql.query("select c1, round(c1),c5, round(c5) from stb1 ") + tdSql.query(f"select c1, round(c1),c5, round(c5) from {dbname}.stb1 ") # mix with 
agg functions , not support - tdSql.error("select c1, round(c1),c5, count(c5) from stb1 ") - tdSql.error("select c1, round(c1),c5, count(c5) from ct1 ") - tdSql.error("select round(c1), count(c5) from stb1 ") - tdSql.error("select round(c1), count(c5) from ct1 ") - tdSql.error("select c1, count(c5) from ct1 ") - tdSql.error("select c1, count(c5) from stb1 ") + tdSql.error(f"select c1, round(c1),c5, count(c5) from {dbname}.stb1 ") + tdSql.error(f"select c1, round(c1),c5, count(c5) from {dbname}.ct1 ") + tdSql.error(f"select round(c1), count(c5) from {dbname}.stb1 ") + tdSql.error(f"select round(c1), count(c5) from {dbname}.ct1 ") + tdSql.error(f"select c1, count(c5) from {dbname}.ct1 ") + tdSql.error(f"select c1, count(c5) from {dbname}.stb1 ") # agg functions mix with agg functions - tdSql.query("select max(c5), count(c5) from stb1") - tdSql.query("select max(c5), count(c5) from ct1") + tdSql.query(f"select max(c5), count(c5) from {dbname}.stb1") + tdSql.query(f"select max(c5), count(c5) from {dbname}.ct1") # bug fix for count - tdSql.query("select count(c1) from ct4 ") + tdSql.query(f"select count(c1) from {dbname}.ct4 ") tdSql.checkData(0,0,9) - tdSql.query("select count(*) from ct4 ") + tdSql.query(f"select count(*) from {dbname}.ct4 ") tdSql.checkData(0,0,12) - tdSql.query("select count(c1) from stb1 ") + tdSql.query(f"select count(c1) from {dbname}.stb1 ") tdSql.checkData(0,0,22) - tdSql.query("select count(*) from stb1 ") + tdSql.query(f"select count(*) from {dbname}.stb1 ") tdSql.checkData(0,0,25) # bug fix for compute - tdSql.query("select c1, abs(c1) -0 ,round(c1)-0 from ct4 ") + tdSql.query(f"select c1, abs(c1) -0 ,round(c1)-0 from {dbname}.ct4 ") tdSql.checkData(0, 0, None) tdSql.checkData(0, 1, None) tdSql.checkData(0, 2, None) @@ -335,7 +332,7 @@ class TDTestCase: tdSql.checkData(1, 1, 8.000000000) tdSql.checkData(1, 2, 8.000000000) - tdSql.query(" select c1, abs(c1) -0 ,round(c1-0.1)-0.1 from ct4") + tdSql.query(f"select c1, abs(c1) -0 
,round(c1-0.1)-0.1 from {dbname}.ct4") tdSql.checkData(0, 0, None) tdSql.checkData(0, 1, None) tdSql.checkData(0, 2, None) @@ -343,9 +340,8 @@ class TDTestCase: tdSql.checkData(1, 1, 8.000000000) tdSql.checkData(1, 2, 7.900000000) - def abs_func_filter(self): - tdSql.execute("use db") - tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from ct4 where c1>5 ") + def abs_func_filter(self, dbname="db"): + tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from {dbname}.ct4 where c1>5 ") tdSql.checkRows(3) tdSql.checkData(0,0,8) tdSql.checkData(0,1,8.000000000) @@ -353,7 +349,7 @@ class TDTestCase: tdSql.checkData(0,3,7.900000000) tdSql.checkData(0,4,3.000000000) - tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from ct4 where c1=5 ") + tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from {dbname}.ct4 where c1=5 ") tdSql.checkRows(1) tdSql.checkData(0,0,5) tdSql.checkData(0,1,5.000000000) @@ -361,7 +357,7 @@ class TDTestCase: tdSql.checkData(0,3,4.900000000) tdSql.checkData(0,4,2.000000000) - tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from ct4 where c1=5 ") + tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from {dbname}.ct4 where c1=5 ") tdSql.checkRows(1) tdSql.checkData(0,0,5) tdSql.checkData(0,1,5.000000000) @@ -369,7 +365,7 @@ class TDTestCase: tdSql.checkData(0,3,4.900000000) tdSql.checkData(0,4,2.000000000) - tdSql.query("select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) , round(abs(c1))-0.5 from ct4 where c1>log(c1,2) limit 1 ") + tdSql.query(f"select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) , round(abs(c1))-0.5 from {dbname}.ct4 where c1>log(c1,2) limit 1 ") tdSql.checkRows(1) tdSql.checkData(0,0,8) tdSql.checkData(0,1,88888) @@ -382,44 
+378,42 @@ class TDTestCase: def round_Arithmetic(self): pass - def check_boundary_values(self): + def check_boundary_values(self, dbname="bound_test"): - tdSql.execute("drop database if exists bound_test") - tdSql.execute("create database if not exists bound_test") - time.sleep(3) - tdSql.execute("use bound_test") + tdSql.execute(f"drop database if exists {dbname}") + tdSql.execute(f"create database if not exists {dbname}") tdSql.execute( - "create table stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);" + f"create table {dbname}.stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);" ) - tdSql.execute(f'create table sub1_bound using stb_bound tags ( 1 )') + tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )') tdSql.execute( - f"insert into sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.execute( - f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.execute( - f"insert into sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.execute( - f"insert into 
sub1_bound values ( now(), 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now(), 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.execute( - f"insert into sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.error( - f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) - self.check_result_auto( "select c1, c2, c3 , c4, c5 ,c6 from sub1_bound ", "select round(c1), round(c2) ,round(c3), round(c4), round(c5) ,round(c6) from sub1_bound") - self.check_result_auto( "select c1, c2, c3 , c3, c2 ,c1 from sub1_bound ", "select round(c1), round(c2) ,round(c3), round(c3), round(c2) ,round(c1) from sub1_bound") - self.check_result_auto("select round(round(round(round(round(round(round(round(round(round(c1)))))))))) nest_col_func from sub1_bound;" , "select round(c1) from sub1_bound" ) + self.check_result_auto( f"select c1, c2, c3 , c4, c5 ,c6 from {dbname}.sub1_bound ", f"select round(c1), round(c2) ,round(c3), round(c4), round(c5) ,round(c6) from {dbname}.sub1_bound") + self.check_result_auto( f"select c1, c2, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ", f"select round(c1), round(c2) ,round(c3), round(c3), round(c2) ,round(c1) from {dbname}.sub1_bound") + self.check_result_auto(f"select round(round(round(round(round(round(round(round(round(round(c1)))))))))) nest_col_func from 
{dbname}.sub1_bound;" , f"select round(c1) from {dbname}.sub1_bound" ) # check basic elem for table per row - tdSql.query("select round(c1+0.2) ,round(c2) , round(c3+0.3) , round(c4-0.3), round(c5/2), round(c6/2) from sub1_bound ") + tdSql.query(f"select round(c1+0.2) ,round(c2) , round(c3+0.3) , round(c4-0.3), round(c5/2), round(c6/2) from {dbname}.sub1_bound ") tdSql.checkData(0, 0, 2147483647.000000000) tdSql.checkData(0, 2, 32767.000000000) tdSql.checkData(0, 3, 127.000000000) @@ -430,19 +424,18 @@ class TDTestCase: tdSql.checkData(4, 3, -123.000000000) tdSql.checkData(4, 4, -169499995645668991474575059260979281920.000000000) - self.check_result_auto("select c1+1 ,c2 , c3*1 , c4/2, c5/2, c6 from sub1_bound" ,"select round(c1+1) ,round(c2) , round(c3*1) , round(c4/2), round(c5)/2, round(c6) from sub1_bound ") + self.check_result_auto(f"select c1+1 ,c2 , c3*1 , c4/2, c5/2, c6 from {dbname}.sub1_bound" ,f"select round(c1+1) ,round(c2) , round(c3*1) , round(c4/2), round(c5)/2, round(c6) from {dbname}.sub1_bound ") - def support_super_table_test(self): - tdSql.execute(" use db ") - self.check_result_auto( " select c5 from stb1 order by ts " , "select round(c5) from stb1 order by ts" ) - self.check_result_auto( " select c5 from stb1 order by tbname " , "select round(c5) from stb1 order by tbname" ) - self.check_result_auto( " select c5 from stb1 where c1 > 0 order by tbname " , "select round(c5) from stb1 where c1 > 0 order by tbname" ) - self.check_result_auto( " select c5 from stb1 where c1 > 0 order by tbname " , "select round(c5) from stb1 where c1 > 0 order by tbname" ) + def support_super_table_test(self, dbname="db"): + self.check_result_auto( f"select c5 from {dbname}.stb1 order by ts " , f"select round(c5) from {dbname}.stb1 order by ts" ) + self.check_result_auto( f"select c5 from {dbname}.stb1 order by tbname " , f"select round(c5) from {dbname}.stb1 order by tbname" ) + self.check_result_auto( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " 
, f"select round(c5) from {dbname}.stb1 where c1 > 0 order by tbname" ) + self.check_result_auto( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select round(c5) from {dbname}.stb1 where c1 > 0 order by tbname" ) - self.check_result_auto( " select t1,c5 from stb1 order by ts " , "select round(t1), round(c5) from stb1 order by ts" ) - self.check_result_auto( " select t1,c5 from stb1 order by tbname " , "select round(t1) ,round(c5) from stb1 order by tbname" ) - self.check_result_auto( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select round(t1) ,round(c5) from stb1 where c1 > 0 order by tbname" ) - self.check_result_auto( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select round(t1) , round(c5) from stb1 where c1 > 0 order by tbname" ) + self.check_result_auto( f"select t1,c5 from {dbname}.stb1 order by ts " , f"select round(t1), round(c5) from {dbname}.stb1 order by ts" ) + self.check_result_auto( f"select t1,c5 from {dbname}.stb1 order by tbname " , f"select round(t1) ,round(c5) from {dbname}.stb1 order by tbname" ) + self.check_result_auto( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select round(t1) ,round(c5) from {dbname}.stb1 where c1 > 0 order by tbname" ) + self.check_result_auto( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select round(t1) , round(c5) from {dbname}.stb1 where c1 > 0 order by tbname" ) pass diff --git a/tests/system-test/2-query/rtrim.py b/tests/system-test/2-query/rtrim.py index 30624792cc..80307e8534 100644 --- a/tests/system-test/2-query/rtrim.py +++ b/tests/system-test/2-query/rtrim.py @@ -120,16 +120,16 @@ class TDTestCase: return sqls - def __test_current(self): # sourcery skip: use-itertools-product + def __test_current(self, dbname="db"): # sourcery skip: use-itertools-product tdLog.printNoPrefix("==========current sql condition check , must return query ok==========") - tbname = ["ct1", "ct2", "ct4", "t1", "stb1"] + tbname = 
[f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.t1", f"{dbname}.stb1"] for tb in tbname: self.__rtrim_check(tb) tdLog.printNoPrefix(f"==========current sql condition check in {tb} over==========") - def __test_error(self): + def __test_error(self, dbname="db"): tdLog.printNoPrefix("==========err sql condition check , must return error==========") - tbname = ["ct1", "ct2", "ct4", "t1", "stb1"] + tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.t1", f"{dbname}.stb1"] for tb in tbname: for errsql in self.__rtrim_err_check(tb): @@ -142,17 +142,15 @@ class TDTestCase: self.__test_error() - def __create_tb(self): - tdSql.prepare() - + def __create_tb(self, dbname="db"): tdLog.printNoPrefix("==========step1:create table") - create_stb_sql = f'''create table stb1( + create_stb_sql = f'''create table {dbname}.stb1( ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp ) tags (t1 int) ''' - create_ntb_sql = f'''create table t1( + create_ntb_sql = f'''create table {dbname}.t1( ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp @@ -162,29 +160,29 @@ class TDTestCase: tdSql.execute(create_ntb_sql) for i in range(4): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )') - def __insert_data(self, rows): + def __insert_data(self, rows, dbname="db"): now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000) for i in range(rows): tdSql.execute( - f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { 
now_time + 1 * i } )" + f"insert into {dbname}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" ) tdSql.execute( - f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + f"insert into {dbname}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" ) tdSql.execute( - f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + f"insert into {dbname}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" ) tdSql.execute( - f'''insert into ct1 values + f'''insert into {dbname}.ct1 values ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } ) ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } ) ''' ) tdSql.execute( - f'''insert into ct4 values + f'''insert into {dbname}.ct4 values ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) @@ -200,7 +198,7 @@ class TDTestCase: ) tdSql.execute( - f'''insert into ct2 values + f'''insert into {dbname}.ct2 values ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - rows * 
3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) @@ -216,13 +214,13 @@ class TDTestCase: ) for i in range(rows): - insert_data = f'''insert into t1 values + insert_data = f'''insert into {dbname}.t1 values ( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2}, "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } ) ''' tdSql.execute(insert_data) tdSql.execute( - f'''insert into t1 values + f'''insert into {dbname}.t1 values ( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) @@ -251,8 +249,7 @@ class TDTestCase: tdLog.printNoPrefix("==========step3:all check") self.all_test() - tdDnodes.stop(1) - tdDnodes.start(1) + tdSql.execute("flush database db") tdSql.execute("use db") diff --git a/tests/system-test/2-query/sample.py b/tests/system-test/2-query/sample.py index 46d2062341..bc1c0482ea 100644 --- a/tests/system-test/2-query/sample.py +++ b/tests/system-test/2-query/sample.py @@ -11,21 +11,17 @@ # -*- coding: utf-8 -*- -from pstats import Stats import sys -import subprocess import random -import math -import numpy as np -import inspect import re -import taos from util.log import * from util.cases import * from util.sql import * from util.dnodes import * +DBNAME = "db" + class TDTestCase: def init(self, conn, logSql): @@ -33,11 +29,11 @@ class TDTestCase: tdSql.init(conn.cursor()) self.ts = 1537146000000 - def sample_query_form(self, sel="select", func="sample(", col="c1", m_comm =",", k=1,r_comm=")", alias="", fr="from",table_expr="t1", condition=""): + def sample_query_form(self, sel=f"select", func="sample(", col="c1", m_comm =",", 
k=1,r_comm=")", alias="", fr="from",table_expr="t1", condition=""): ''' sample function: - :param sel: string, must be "select", required parameters; + :param sel: string, must be f"select", required parameters; :param func: string, in this case must be "sample(", otherwise return other function, required parameters; :param col: string, column name, required parameters; :param m_comm: string, comma between col and k , required parameters; @@ -47,12 +43,12 @@ class TDTestCase: :param fr: string, must be "from", required parameters; :param table_expr: string or expression, data source(eg,table/stable name, result set), required parameters; :param condition: expression; - :return: sample query statement,default: select sample(c1, 1) from t1 + :return: sample query statement,default: select sample(c1, 1) from {dbname}.t1 ''' return f"{sel} {func} {col} {m_comm} {k} {r_comm} {alias} {fr} {table_expr} {condition}" - def checksample(self,sel="select", func="sample(", col="c1", m_comm =",", k=1,r_comm=")", alias="", fr="from",table_expr="t1", condition=""): + def checksample(self,sel=f"select", func="sample(", col="c1", m_comm =",", k=1,r_comm=")", alias="", fr="from",table_expr=f"{DBNAME}.t1", condition=""): # print(self.sample_query_form(sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, # table_expr=table_expr, condition=condition)) line = sys._getframe().f_back.f_lineno @@ -65,7 +61,7 @@ class TDTestCase: )) - sql = "select * from t1" + sql = f"select * from {table_expr}" collist = tdSql.getColNameList(sql) if not isinstance(col, str): @@ -125,7 +121,7 @@ class TDTestCase: # table_expr=table_expr, condition=condition # )) - if any( [func != "sample(" , r_comm != ")" , fr != "from", sel != "select"]): + if any( [func != "sample(" , r_comm != ")" , fr != "from", sel != f"select"]): print(f"case in {line}: ", end='') return tdSql.error(self.sample_query_form( sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, 
fr=fr, @@ -286,14 +282,14 @@ class TDTestCase: return else: - if "where" in condition: - condition = re.sub('where', f"where {col} is not null and ", condition) - else: - condition = f"where {col} is not null" + condition - print(f"select ts, {col} {alias} from {table_expr} {re.sub('limit [0-9]*|offset [0-9]*','',condition)}") - tdSql.query(f"select ts, {col} {alias} from {table_expr} {re.sub('limit [0-9]*|offset [0-9]*','',condition)}") + # if "where" in condition: + # condition = re.sub('where', f"where {col} is not null and ", condition) + # else: + # condition = f"where {col} is not null" + condition + # print(f"select ts, {col} {alias} from {table_expr} {re.sub('limit [0-9]*|offset [0-9]*','',condition)}") + # tdSql.query(f"select _c0, {col} {alias} from {table_expr} {re.sub('limit [0-9]*|offset [0-9]*','',condition)}") # offset_val = condition.split("offset")[1].split(" ")[1] if "offset" in condition else 0 - pre_sample = tdSql.queryResult + # pre_sample = tdSql.queryResult # pre_len = tdSql.queryRows # for i in range(sample_len): # if sample_result[pre_row:pre_row + step][i] not in pre_sample: @@ -301,7 +297,7 @@ class TDTestCase: # else: # tdLog.info(f"case in {line} is success: sample data is in {group_name}") - pass + pass def sample_current_query(self) : @@ -322,24 +318,24 @@ class TDTestCase: self.checksample(**case6) # # case7~8: nested query - # case7 = {"table_expr": "(select c1 from stb1)"} - # self.checksample(**case7) - # case8 = {"table_expr": "(select sample(c1, 1) c1 from stb1 group by tbname)"} - # self.checksample(**case8) + case7 = {"table_expr": f"(select c1 from {DBNAME}.stb1)"} + self.checksample(**case7) + case8 = {"table_expr": f"(select sample(c1, 1) c1 from {DBNAME}.stb1 group by tbname)"} + self.checksample(**case8) # case9~10: mix with tbname/ts/tag/col - # case9 = {"alias": ", tbname"} - # self.checksample(**case9) - # case10 = {"alias": ", _c0"} - # self.checksample(**case10) - # case11 = {"alias": ", st1"} + case9 = {"alias": ", 
tbname"} + self.checksample(**case9) + case10 = {"alias": ", _c0"} + self.checksample(**case10) + case11 = {"alias": ", st1"} # self.checksample(**case11) - tdSql.query("select sample( c1 , 1 ) , st1 from t1") + tdSql.query(f"select sample( c1 , 1 ) , st1 from {DBNAME}.t1") - # case12 = {"alias": ", c1"} + case12 = {"alias": ", c1"} # self.checksample(**case12) - tdSql.query("select sample( c1 , 1 ) , c1 from t1") + tdSql.query(f"select sample( c1 , 1 ) , c1 from {DBNAME}.t1") # case13~15: with single condition case13 = {"condition": "where c1 <= 10"} @@ -353,32 +349,31 @@ class TDTestCase: case16 = {"condition": "where c6=1 or c6 =0"} self.checksample(**case16) - # # case17: only support normal table join - # case17 = { - # "col": "t1.c1", - # "table_expr": "t1, t2", - # "condition": "where t1.ts=t2.ts" - # } - # self.checksample(**case17) - # # case18~19: with group by - # case19 = { - # "table_expr": "stb1", - # "condition": "partition by tbname" - # } + # case17: only support normal table join + case17 = { + "col": "t1.c1", + "table_expr": f"{DBNAME}.t1 t1 join {DBNAME}.t2 t2 on t1.ts = t2.ts", + } + self.checksample(**case17) + # case18~19: with group by + case19 = { + "table_expr": f"{DBNAME}.stb1", + "condition": "partition by tbname" + } # self.checksample(**case19) - # # case20~21: with order by - # case20 = {"condition": "order by ts"} + # case20~21: with order by + case20 = {"condition": "order by ts"} # self.checksample(**case20) - # case21 = { - # "table_expr": "stb1", - # "condition": "partition by tbname order by tbname" - # } + case21 = { + "table_expr": f"{DBNAME}.stb1", + "condition": "partition by tbname order by tbname" + } # self.checksample(**case21) # case22: with union case22 = { - "condition": "union all select sample( c1 , 1 ) from t2" + "condition": f"union all select sample( c1 , 1 ) from {DBNAME}.t2" } self.checksample(**case22) @@ -396,12 +391,12 @@ class TDTestCase: case26 = {"k": 1000} self.checksample(**case26) case27 = { - 
"table_expr": "stb1", + "table_expr": f"{DBNAME}.stb1", "condition": "group by tbname slimit 1 " } self.checksample(**case27) # with slimit case28 = { - "table_expr": "stb1", + "table_expr": f"{DBNAME}.stb1", "condition": "group by tbname slimit 1 soffset 1" } self.checksample(**case28) # with soffset @@ -431,7 +426,7 @@ class TDTestCase: # err9 = {"col": "st1"} # self.checksample(**err9) # col: tag - tdSql.query(" select sample(st1 ,1) from t1 ") + tdSql.query(f"select sample(st1 ,1) from {DBNAME}.t1 ") # err10 = {"col": 1} # self.checksample(**err10) # col: value # err11 = {"col": "NULL"} @@ -494,13 +489,13 @@ class TDTestCase: self.checksample(**err39) # mix with calculation function 2 # err40 = {"alias": "+ 2"} # self.checksample(**err40) # mix with arithmetic 1 - # tdSql.query(" select sample(c1 , 1) + 2 from t1 ") + # tdSql.query(f"select sample(c1 , 1) + 2 from {dbname}.t1 ") err41 = {"alias": "+ avg(c1)"} # self.checksample(**err41) # mix with arithmetic 2 # err42 = {"alias": ", c1"} # self.checksample(**err42) - tdSql.query("select sample( c1 , 1 ) , c1 from t1") + tdSql.query(f"select sample( c1 , 1 ) , c1 from {DBNAME}.t1") # mix with other col # err43 = {"table_expr": "stb1"} # self.checksample(**err43) # select stb directly @@ -510,14 +505,14 @@ class TDTestCase: # "condition": "where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts" # } # self.checksample(**err44) # stb join - tdSql.query("select sample( stb1.c1 , 1 ) from stb1, stb2 where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts") + tdSql.query(f"select sample( stb1.c1 , 1 ) from {DBNAME}.stb1 stb1, {DBNAME}.stb2 stb2 where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts") # err45 = { # "condition": "where ts>0 and ts < now interval(1h) fill(next)" # } # self.checksample(**err45) # interval - tdSql.error("select sample( c1 , 1 ) from t1 where ts>0 and ts < now interval(1h) fill(next)") + tdSql.error(f"select sample( c1 , 1 ) from {DBNAME}.t1 where ts>0 and ts < now 
interval(1h) fill(next)") err46 = { - "table_expr": "t1", + "table_expr": f"{DBNAME}.t1", "condition": "group by c6" } # self.checksample(**err46) # group by normal col @@ -563,49 +558,45 @@ class TDTestCase: pass - def sample_test_data(self, tbnum:int, data_row:int, basetime:int) -> None : + def sample_test_data(self, tbnum:int, data_row:int, basetime:int, dbname="db") -> None : for i in range(tbnum): for j in range(data_row): tdSql.execute( - f"insert into t{i} values (" + f"insert into {dbname}.t{i} values (" f"{basetime + (j+1)*10}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, " f"'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, " f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )" ) tdSql.execute( - f"insert into t{i} values (" + f"insert into {dbname}.t{i} values (" f"{basetime - (j+1) * 10}, {random.randint(1, 200)}, {random.uniform(1, 200)}, {basetime - random.randint(1, 200)}, " f"'binary_{j}_1', {random.uniform(1, 200)}, {random.choice([0, 1])}, {random.randint(1,200)}, " f"{random.randint(1,200)}, {random.randint(1,127)}, 'nchar_{j}_1' )" ) tdSql.execute( - f"insert into tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )" + f"insert into {dbname}.tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )" ) pass - def sample_test_table(self,tbnum: int) -> None : - tdSql.execute("drop database if exists db") - tdSql.execute("create database if not exists db keep 3650") - tdSql.execute("use db") + def sample_test_table(self,tbnum: int, dbname="db") -> None : + tdSql.execute(f"drop database if exists {dbname}") + tdSql.execute(f"create database if not exists {dbname} keep 3650") tdSql.execute( - "create stable db.stb1 (\ + f"create stable {dbname}.stb1 (\ ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool, \ c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)\ ) \ tags(st1 int)" ) tdSql.execute( - 
"create stable db.stb2 (ts timestamp, c1 int) tags(st2 int)" + f"create stable {dbname}.stb2 (ts timestamp, c1 int) tags(st2 int)" ) for i in range(tbnum): - tdSql.execute(f"create table t{i} using stb1 tags({i})") - tdSql.execute(f"create table tt{i} using stb2 tags({i})") - - pass - + tdSql.execute(f"create table {dbname}.t{i} using {dbname}.stb1 tags({i})") + tdSql.execute(f"create table {dbname}.tt{i} using {dbname}.stb2 tags({i})") def check_sample(self , sample_query , origin_query ): @@ -626,45 +617,43 @@ class TDTestCase: else: tdLog.exit(" sample data is not in datas groups ,failed sql is : %s" % sample_query ) - - def basic_sample_query(self): - tdSql.execute(" drop database if exists db ") - tdSql.execute(" create database if not exists db duration 300d ") - tdSql.execute(" use db ") + def basic_sample_query(self, dbname="db"): + tdSql.execute(f" drop database if exists {dbname} ") + tdSql.execute(f" create database if not exists {dbname} duration 300d ") tdSql.execute( - '''create table stb1 + f'''create table {dbname}.stb1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) tags (t1 int) ''' ) tdSql.execute( - ''' - create table t1 + f''' + create table {dbname}.t1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) ''' ) for i in range(4): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )') for i in range(9): tdSql.execute( - f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" ) tdSql.execute( - f"insert into ct4 values ( now()-{i*90}d, {1*i}, 
{11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" ) - tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") - tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") 
tdSql.execute( - f'''insert into t1 values + f'''insert into {dbname}.t1 values ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a ) ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a ) @@ -683,116 +672,116 @@ class TDTestCase: # basic query for sample # params test for all - tdSql.error(" select sample(c1,c1) from t1 ") - tdSql.error(" select sample(c1,now) from t1 ") - tdSql.error(" select sample(c1,tbname) from t1 ") - tdSql.error(" select sample(c1,ts) from t1 ") - tdSql.error(" select sample(c1,false) from t1 ") - tdSql.query(" select sample(123,1) from t1 ") + tdSql.error(f"select sample(c1,c1) from {dbname}.t1 ") + tdSql.error(f"select sample(c1,now) from {dbname}.t1 ") + tdSql.error(f"select sample(c1,tbname) from {dbname}.t1 ") + tdSql.error(f"select sample(c1,ts) from {dbname}.t1 ") + tdSql.error(f"select sample(c1,false) from {dbname}.t1 ") + tdSql.query(f"select sample(123,1) from {dbname}.t1 ") - tdSql.query(" select sample(c1,2) from t1 ") + tdSql.query(f"select sample(c1,2) from {dbname}.t1 ") tdSql.checkRows(2) - tdSql.query(" select sample(c1,10) from t1 ") + tdSql.query(f"select sample(c1,10) from {dbname}.t1 ") tdSql.checkRows(9) - tdSql.query(" select sample(c8,10) from t1 ") + tdSql.query(f"select sample(c8,10) from {dbname}.t1 ") tdSql.checkRows(9) - tdSql.query(" select sample(c1,999) from t1 ") + tdSql.query(f"select sample(c1,999) from {dbname}.t1 ") tdSql.checkRows(9) - tdSql.query(" select sample(c1,1000) from t1 ") + tdSql.query(f"select sample(c1,1000) from {dbname}.t1 ") tdSql.checkRows(9) - tdSql.query(" select sample(c8,1000) from t1 ") + tdSql.query(f"select sample(c8,1000) from {dbname}.t1 ") tdSql.checkRows(9) - tdSql.error(" select sample(c1,-1) from t1 ") + tdSql.error(f"select sample(c1,-1) from {dbname}.t1 ") # bug need fix - # tdSql.query("select 
sample(c1 ,2) , 123 from stb1;") + # tdSql.query(f"select sample(c1 ,2) , 123 from {dbname}.stb1;") # all type support - tdSql.query(" select sample(c1 , 20 ) from ct4 ") + tdSql.query(f"select sample(c1 , 20 ) from {dbname}.ct4 ") tdSql.checkRows(9) - tdSql.query(" select sample(c2 , 20 ) from ct4 ") + tdSql.query(f"select sample(c2 , 20 ) from {dbname}.ct4 ") tdSql.checkRows(9) - tdSql.query(" select sample(c3 , 20 ) from ct4 ") + tdSql.query(f"select sample(c3 , 20 ) from {dbname}.ct4 ") tdSql.checkRows(9) - tdSql.query(" select sample(c4 , 20 ) from ct4 ") + tdSql.query(f"select sample(c4 , 20 ) from {dbname}.ct4 ") tdSql.checkRows(9) - tdSql.query(" select sample(c5 , 20 ) from ct4 ") + tdSql.query(f"select sample(c5 , 20 ) from {dbname}.ct4 ") tdSql.checkRows(9) - tdSql.query(" select sample(c6 , 20 ) from ct4 ") + tdSql.query(f"select sample(c6 , 20 ) from {dbname}.ct4 ") tdSql.checkRows(9) - tdSql.query(" select sample(c7 , 20 ) from ct4 ") + tdSql.query(f"select sample(c7 , 20 ) from {dbname}.ct4 ") tdSql.checkRows(9) - tdSql.query(" select sample(c8 , 20 ) from ct4 ") + tdSql.query(f"select sample(c8 , 20 ) from {dbname}.ct4 ") tdSql.checkRows(9) - tdSql.query(" select sample(c9 , 20 ) from ct4 ") + tdSql.query(f"select sample(c9 , 20 ) from {dbname}.ct4 ") tdSql.checkRows(9) - tdSql.query(" select sample(c10 , 20 ) from ct4 ") + tdSql.query(f"select sample(c10 , 20 ) from {dbname}.ct4 ") tdSql.checkRows(9) - # tdSql.query(" select sample(t1 , 20 ) from ct1 ") + # tdSql.query(f"select sample(t1 , 20 ) from {dbname}.ct1 ") # tdSql.checkRows(13) # filter data - tdSql.query(" select sample(c1, 20 ) from t1 where c1 is null ") + tdSql.query(f"select sample(c1, 20 ) from {dbname}.t1 where c1 is null ") tdSql.checkRows(1) - tdSql.query(" select sample(c1, 20 ) from t1 where c1 =6 ") + tdSql.query(f"select sample(c1, 20 ) from {dbname}.t1 where c1 =6 ") tdSql.checkRows(1) - tdSql.query(" select sample(c1, 20 ) from t1 where c1 > 6 ") + tdSql.query(f"select 
sample(c1, 20 ) from {dbname}.t1 where c1 > 6 ") tdSql.checkRows(3) - self.check_sample("select sample(c1, 20 ) from t1 where c1 > 6" , "select c1 from t1 where c1 > 6") + self.check_sample(f"select sample(c1, 20 ) from {dbname}.t1 where c1 > 6" , f"select c1 from {dbname}.t1 where c1 > 6") - tdSql.query(" select sample( c1 , 1 ) from t1 where c1 in (0, 1,2) ") + tdSql.query(f"select sample( c1 , 1 ) from {dbname}.t1 where c1 in (0, 1,2) ") tdSql.checkRows(1) - tdSql.query("select sample( c1 ,3 ) from t1 where c1 between 1 and 10 ") + tdSql.query(f"select sample( c1 ,3 ) from {dbname}.t1 where c1 between 1 and 10 ") tdSql.checkRows(3) - self.check_sample("select sample( c1 ,3 ) from t1 where c1 between 1 and 10" ,"select c1 from t1 where c1 between 1 and 10") + self.check_sample(f"select sample( c1 ,3 ) from {dbname}.t1 where c1 between 1 and 10" ,f"select c1 from {dbname}.t1 where c1 between 1 and 10") # join - tdSql.query("select sample( ct4.c1 , 1 ) from ct1, ct4 where ct4.ts=ct1.ts") + tdSql.query(f"select sample( ct4.c1 , 1 ) from {dbname}.ct1 ct1, {dbname}.ct4 ct4 where ct4.ts=ct1.ts") # partition by tbname - tdSql.query("select sample(c1,2) from stb1 partition by tbname") + tdSql.query(f"select sample(c1,2) from {dbname}.stb1 partition by tbname") tdSql.checkRows(4) - self.check_sample("select sample(c1,2) from stb1 partition by tbname" , "select c1 from stb1 partition by tbname") + self.check_sample(f"select sample(c1,2) from {dbname}.stb1 partition by tbname" , f"select c1 from {dbname}.stb1 partition by tbname") # nest query - # tdSql.query("select sample(c1,2) from (select c1 from t1); ") + # tdSql.query(f"select sample(c1,2) from (select c1 from {dbname}.t1); ") # tdSql.checkRows(2) # union all - tdSql.query("select sample(c1,2) from t1 union all select sample(c1,3) from t1") + tdSql.query(f"select sample(c1,2) from {dbname}.t1 union all select sample(c1,3) from {dbname}.t1") tdSql.checkRows(5) # fill interval # not support mix with other function - 
tdSql.error("select top(c1,2) , sample(c1,2) from ct1") - tdSql.error("select max(c1) , sample(c1,2) from ct1") - tdSql.query("select c1 , sample(c1,2) from ct1") + tdSql.error(f"select top(c1,2) , sample(c1,2) from {dbname}.ct1") + tdSql.error(f"select max(c1) , sample(c1,2) from {dbname}.ct1") + tdSql.query(f"select c1 , sample(c1,2) from {dbname}.ct1") # bug for mix with scalar - tdSql.query("select 123 , sample(c1,100) from ct1") - tdSql.query("select sample(c1,100)+2 from ct1") - tdSql.query("select abs(sample(c1,100)) from ct1") + tdSql.query(f"select 123 , sample(c1,100) from {dbname}.ct1") + tdSql.query(f"select sample(c1,100)+2 from {dbname}.ct1") + tdSql.query(f"select abs(sample(c1,100)) from {dbname}.ct1") - def sample_test_run(self) : + def sample_test_run(self, dbname="db") : tdLog.printNoPrefix("==========support sample function==========") tbnum = 10 nowtime = int(round(time.time() * 1000)) @@ -805,28 +794,28 @@ class TDTestCase: tdLog.printNoPrefix("######## insert only NULL test:") for i in range(tbnum): - tdSql.execute(f"insert into t{i}(ts) values ({nowtime - 5})") - tdSql.execute(f"insert into t{i}(ts) values ({nowtime + 5})") + tdSql.execute(f"insert into {dbname}.t{i}(ts) values ({nowtime - 5})") + tdSql.execute(f"insert into {dbname}.t{i}(ts) values ({nowtime + 5})") self.sample_current_query() self.sample_error_query() tdLog.printNoPrefix("######## insert data in the range near the max(bigint/double):") - # self.sample_test_table(tbnum) - # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " - # f"({nowtime - (per_table_rows + 1) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})") - # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " - # f"({nowtime - (per_table_rows + 2) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})") - # self.sample_current_query() - # self.sample_error_query() + self.sample_test_table(tbnum) + tdSql.execute(f"insert into {dbname}.t1(ts, c1,c2,c5,c7) values " + f"({nowtime - (per_table_rows 
+ 1) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})") + tdSql.execute(f"insert into {dbname}.t1(ts, c1,c2,c5,c7) values " + f"({nowtime - (per_table_rows + 2) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})") + self.sample_current_query() + self.sample_error_query() tdLog.printNoPrefix("######## insert data in the range near the min(bigint/double):") - # self.sample_test_table(tbnum) - # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " - # f"({nowtime - (per_table_rows + 1) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {1-2**63})") - # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " - # f"({nowtime - (per_table_rows + 2) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {512-2**63})") - # self.sample_current_query() - # self.sample_error_query() + self.sample_test_table(tbnum) + tdSql.execute(f"insert into {dbname}.t1(ts, c1,c2,c5,c7) values " + f"({nowtime - (per_table_rows + 1) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {1-2**63})") + tdSql.execute(f"insert into {dbname}.t1(ts, c1,c2,c5,c7) values " + f"({nowtime - (per_table_rows + 2) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {512-2**63})") + self.sample_current_query() + self.sample_error_query() tdLog.printNoPrefix("######## insert data without NULL data test:") self.sample_test_table(tbnum) @@ -837,16 +826,16 @@ class TDTestCase: tdLog.printNoPrefix("######## insert data mix with NULL test:") for i in range(tbnum): - tdSql.execute(f"insert into t{i}(ts) values ({nowtime})") - tdSql.execute(f"insert into t{i}(ts) values ({nowtime-(per_table_rows+3)*10})") - tdSql.execute(f"insert into t{i}(ts) values ({nowtime+(per_table_rows+3)*10})") + tdSql.execute(f"insert into {dbname}.t{i}(ts) values ({nowtime})") + tdSql.execute(f"insert into {dbname}.t{i}(ts) values ({nowtime-(per_table_rows+3)*10})") + tdSql.execute(f"insert into {dbname}.t{i}(ts) values ({nowtime+(per_table_rows+3)*10})") self.sample_current_query() self.sample_error_query() 
tdLog.printNoPrefix("######## check after WAL test:") - tdSql.query("select * from information_schema.ins_dnodes") + tdSql.query(f"select * from information_schema.ins_dnodes") index = tdSql.getData(0, 0) tdDnodes.stop(index) tdDnodes.start(index) @@ -855,25 +844,25 @@ class TDTestCase: self.basic_sample_query() - def sample_big_data(self): - tdSql.execute("create database sample_db") + def sample_big_data(self, dbname="sample_db"): + tdSql.execute(f"create database {dbname}") tdSql.execute("use sample_db") - tdSql.execute("create stable st (ts timestamp ,c1 int ) tags(ind int)" ) - tdSql.execute("create table sub_tb using st tags(1)") + tdSql.execute(f"create stable {dbname}.st (ts timestamp ,c1 int ) tags(ind int)" ) + tdSql.execute(f"create table {dbname}.sub_tb using {dbname}.st tags(1)") for i in range(2000): ts = self.ts+i*10 - tdSql.execute(f"insert into sub_tb values({ts} ,{i})") + tdSql.execute(f"insert into {dbname}.sub_tb values({ts} ,{i})") - tdSql.query("select count(*) from st") + tdSql.query(f"select count(*) from {dbname}.st") tdSql.checkData(0,0,2000) - tdSql.query("select sample(c1 ,1000) from st") + tdSql.query(f"select sample(c1 ,1000) from {dbname}.st") tdSql.checkRows(1000) # bug need fix - tdSql.query("select c1 ,t1, sample(c1,2) from db.stb1 partition by c1 ") - tdSql.query("select sample(c1,2) from db.stb1 partition by c1 ") - # tdSql.query("select c1 ,ind, sample(c1,2) from sample_db.st partition by c1 ") + tdSql.query(f"select c1 ,t1, sample(c1,2) from db.stb1 partition by c1 ") + tdSql.query(f"select sample(c1,2) from db.stb1 partition by c1 ") + # tdSql.query(f"select c1 ,ind, sample(c1,2) from {dbname}.sample_db.st partition by c1 ") def run(self): import traceback diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh index 65cfcd674e..43e2f3be32 100755 --- a/tests/system-test/fulltest.sh +++ b/tests/system-test/fulltest.sh @@ -138,6 +138,14 @@ python3 ./test.py -f 2-query/percentile.py python3 ./test.py -f 
2-query/percentile.py -R python3 ./test.py -f 2-query/pow.py python3 ./test.py -f 2-query/pow.py -R +python3 ./test.py -f 2-query/query_cols_tags_and_or.py +python3 ./test.py -f 2-query/query_cols_tags_and_or.py -R +python3 ./test.py -f 2-query/round.py +python3 ./test.py -f 2-query/round.py -R +python3 ./test.py -f 2-query/rtrim.py +# python3 ./test.py -f 2-query/rtrim.py -R +python3 ./test.py -f 2-query/sample.py +python3 ./test.py -f 2-query/sample.py -R python3 ./test.py -f 1-insert/update_data.py @@ -145,7 +153,6 @@ python3 ./test.py -f 1-insert/update_data.py python3 ./test.py -f 1-insert/delete_data.py python3 ./test.py -f 2-query/varchar.py -python3 ./test.py -f 2-query/rtrim.py python3 ./test.py -f 2-query/upper.py python3 ./test.py -f 2-query/lower.py python3 ./test.py -f 2-query/join2.py @@ -165,12 +172,10 @@ python3 ./test.py -f 2-query/Timediff.py python3 ./test.py -f 2-query/json_tag.py python3 ./test.py -f 2-query/top.py -python3 ./test.py -f 2-query/round.py python3 ./test.py -f 2-query/log.py python3 ./test.py -f 2-query/sqrt.py python3 ./test.py -f 2-query/sin.py python3 ./test.py -f 2-query/tan.py -python3 ./test.py -f 2-query/query_cols_tags_and_or.py # python3 ./test.py -f 2-query/nestedQuery.py # TD-15983 subquery output duplicate name column. 
# Please Xiangyang Guo modify the following script @@ -179,7 +184,6 @@ python3 ./test.py -f 2-query/query_cols_tags_and_or.py python3 ./test.py -f 2-query/elapsed.py python3 ./test.py -f 2-query/csum.py #python3 ./test.py -f 2-query/mavg.py -python3 ./test.py -f 2-query/sample.py python3 ./test.py -f 2-query/function_diff.py python3 ./test.py -f 2-query/unique.py python3 ./test.py -f 2-query/stateduration.py From bbcf017e5c2fca91fa2536406107eca9b17b66ad Mon Sep 17 00:00:00 2001 From: cpwu Date: Wed, 17 Aug 2022 16:16:36 +0800 Subject: [PATCH 05/72] fix case --- tests/system-test/2-query/sample.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/tests/system-test/2-query/sample.py b/tests/system-test/2-query/sample.py index 1a3c865146..7f1d7ab8c0 100644 --- a/tests/system-test/2-query/sample.py +++ b/tests/system-test/2-query/sample.py @@ -860,15 +860,9 @@ class TDTestCase: tdSql.checkRows(1000) # bug need fix -<<<<<<< HEAD - tdSql.query(f"select c1 ,t1, sample(c1,2) from db.stb1 partition by c1 ") - tdSql.query(f"select sample(c1,2) from db.stb1 partition by c1 ") - # tdSql.query(f"select c1 ,ind, sample(c1,2) from {dbname}.sample_db.st partition by c1 ") -======= tdSql.query("select c1 ,t1, sample(c1,2) from db.stb1 partition by c1 ") tdSql.query("select sample(c1,2) from db.stb1 partition by c1 ") tdSql.query("select c1 ,ind, sample(c1,2) from sample_db.st partition by c1 ") ->>>>>>> 3.0 def run(self): import traceback From 786a7378a4ed34005cb85c62154f4de50237cffd Mon Sep 17 00:00:00 2001 From: cpwu Date: Wed, 17 Aug 2022 16:45:37 +0800 Subject: [PATCH 06/72] add sin case --- tests/system-test/2-query/sin.py | 459 +++++++++++++++---------------- tests/system-test/fulltest.sh | 12 +- 2 files changed, 229 insertions(+), 242 deletions(-) diff --git a/tests/system-test/2-query/sin.py b/tests/system-test/2-query/sin.py index 7cb559c510..ae5e070a47 100644 --- a/tests/system-test/2-query/sin.py +++ b/tests/system-test/2-query/sin.py @@ -9,48 +9,46 @@ from 
util.cases import * class TDTestCase: - updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , - "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143, - "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143} + def init(self, conn, powSql): tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor()) - - def prepare_datas(self): + + def prepare_datas(self, dbname="db"): tdSql.execute( - '''create table stb1 + f'''create table {dbname}.stb1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) tags (t1 int) ''' ) - + tdSql.execute( - ''' - create table t1 + f''' + create table {dbname}.t1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) ''' ) for i in range(4): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )') for i in range(9): tdSql.execute( - f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" ) tdSql.execute( - f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" ) - tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") - tdSql.execute("insert into ct1 values (now()+10s, 9, 
-99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") tdSql.execute( - f'''insert into t1 values + f'''insert into {dbname}.t1 values ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a ) ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a ) @@ -65,14 +63,14 @@ class TDTestCase: ( '2023-02-21 
01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ''' ) - + def check_result_auto_sin(self ,origin_query , pow_query): pow_result = tdSql.getResult(pow_query) origin_result = tdSql.getResult(origin_query) auto_result =[] - + for row in origin_result: row_check = [] for elem in row: @@ -87,10 +85,11 @@ class TDTestCase: for row_index , row in enumerate(pow_result): for col_index , elem in enumerate(row): - if auto_result[row_index][col_index] == None and not (auto_result[row_index][col_index] == None and elem == None): + if auto_result[row_index][col_index] is None and elem: + check_status = False + elif auto_result[row_index][col_index] is not None and (auto_result[row_index][col_index] - elem > 0.00000001): + print("====,auto_result[row_index][col_index]:",auto_result[row_index][col_index], "elem:", elem) check_status = False - elif auto_result[row_index][col_index] != None and (auto_result[row_index][col_index] - elem > 0.00000001): - check_status = False else: pass if not check_status: @@ -98,174 +97,174 @@ class TDTestCase: sys.exit(1) else: tdLog.info("sin value check pass , it work as expected ,sql is \"%s\" "%pow_query ) - - def test_errors(self): + + def test_errors(self, dbname="db"): error_sql_lists = [ - "select sin from t1", - # "select sin(-+--+c1 ) from t1", - # "select +-sin(c1) from t1", - # "select ++-sin(c1) from t1", - # "select ++--sin(c1) from t1", - # "select - -sin(c1)*0 from t1", - # "select sin(tbname+1) from t1 ", - "select sin(123--123)==1 from t1", - "select sin(c1) as 'd1' from t1", - "select sin(c1 ,c2) from t1", - "select sin(c1 ,NULL ) from t1", - "select sin(,) from t1;", - "select sin(sin(c1) ab from t1)", - "select sin(c1 ) as int from t1", - "select sin from stb1", - # "select sin(-+--+c1) from stb1", - # "select +-sin(c1) from stb1", - # "select ++-sin(c1) from stb1", - # "select ++--sin(c1) from stb1", - # "select - -sin(c1)*0 from stb1", - # "select sin(tbname+1) from stb1 ", - "select 
sin(123--123)==1 from stb1", - "select sin(c1) as 'd1' from stb1", - "select sin(c1 ,c2 ) from stb1", - "select sin(c1 ,NULL) from stb1", - "select sin(,) from stb1;", - "select sin(sin(c1) ab from stb1)", - "select sin(c1) as int from stb1" + f"select sin from {dbname}.t1", + # f"select sin(-+--+c1 ) from {dbname}.t1", + # f"select +-sin(c1) from {dbname}.t1", + # f"select ++-sin(c1) from {dbname}.t1", + # f"select ++--sin(c1) from {dbname}.t1", + # f"select - -sin(c1)*0 from {dbname}.t1", + # f"select sin(tbname+1) from {dbname}.t1 ", + f"select sin(123--123)==1 from {dbname}.t1", + f"select sin(c1) as 'd1' from {dbname}.t1", + f"select sin(c1 ,c2) from {dbname}.t1", + f"select sin(c1 ,NULL ) from {dbname}.t1", + f"select sin(,) from {dbname}.t1;", + f"select sin(sin(c1) ab from {dbname}.t1)", + f"select sin(c1 ) as int from {dbname}.t1", + f"select sin from {dbname}.stb1", + # f"select sin(-+--+c1) from {dbname}.stb1", + # f"select +-sin(c1) from {dbname}.stb1", + # f"select ++-sin(c1) from {dbname}.stb1", + # f"select ++--sin(c1) from {dbname}.stb1", + # f"select - -sin(c1)*0 from {dbname}.stb1", + # f"select sin(tbname+1) from {dbname}.stb1 ", + f"select sin(123--123)==1 from {dbname}.stb1", + f"select sin(c1) as 'd1' from {dbname}.stb1", + f"select sin(c1 ,c2 ) from {dbname}.stb1", + f"select sin(c1 ,NULL) from {dbname}.stb1", + f"select sin(,) from {dbname}.stb1;", + f"select sin(sin(c1) ab from {dbname}.stb1)", + f"select sin(c1) as int from {dbname}.stb1" ] for error_sql in error_sql_lists: tdSql.error(error_sql) - - def support_types(self): + + def support_types(self, dbname="db"): type_error_sql_lists = [ - "select sin(ts) from t1" , - "select sin(c7) from t1", - "select sin(c8) from t1", - "select sin(c9) from t1", - "select sin(ts) from ct1" , - "select sin(c7) from ct1", - "select sin(c8) from ct1", - "select sin(c9) from ct1", - "select sin(ts) from ct3" , - "select sin(c7) from ct3", - "select sin(c8) from ct3", - "select sin(c9) from ct3", - 
"select sin(ts) from ct4" , - "select sin(c7) from ct4", - "select sin(c8) from ct4", - "select sin(c9) from ct4", - "select sin(ts) from stb1" , - "select sin(c7) from stb1", - "select sin(c8) from stb1", - "select sin(c9) from stb1" , + f"select sin(ts) from {dbname}.t1" , + f"select sin(c7) from {dbname}.t1", + f"select sin(c8) from {dbname}.t1", + f"select sin(c9) from {dbname}.t1", + f"select sin(ts) from {dbname}.ct1" , + f"select sin(c7) from {dbname}.ct1", + f"select sin(c8) from {dbname}.ct1", + f"select sin(c9) from {dbname}.ct1", + f"select sin(ts) from {dbname}.ct3" , + f"select sin(c7) from {dbname}.ct3", + f"select sin(c8) from {dbname}.ct3", + f"select sin(c9) from {dbname}.ct3", + f"select sin(ts) from {dbname}.ct4" , + f"select sin(c7) from {dbname}.ct4", + f"select sin(c8) from {dbname}.ct4", + f"select sin(c9) from {dbname}.ct4", + f"select sin(ts) from {dbname}.stb1" , + f"select sin(c7) from {dbname}.stb1", + f"select sin(c8) from {dbname}.stb1", + f"select sin(c9) from {dbname}.stb1" , - "select sin(ts) from stbbb1" , - "select sin(c7) from stbbb1", + f"select sin(ts) from {dbname}.stbbb1" , + f"select sin(c7) from {dbname}.stbbb1", - "select sin(ts) from tbname", - "select sin(c9) from tbname" + f"select sin(ts) from {dbname}.tbname", + f"select sin(c9) from {dbname}.tbname" ] - + for type_sql in type_error_sql_lists: tdSql.error(type_sql) - - + + type_sql_lists = [ - "select sin(c1) from t1", - "select sin(c2) from t1", - "select sin(c3) from t1", - "select sin(c4) from t1", - "select sin(c5) from t1", - "select sin(c6) from t1", + f"select sin(c1) from {dbname}.t1", + f"select sin(c2) from {dbname}.t1", + f"select sin(c3) from {dbname}.t1", + f"select sin(c4) from {dbname}.t1", + f"select sin(c5) from {dbname}.t1", + f"select sin(c6) from {dbname}.t1", - "select sin(c1) from ct1", - "select sin(c2) from ct1", - "select sin(c3) from ct1", - "select sin(c4) from ct1", - "select sin(c5) from ct1", - "select sin(c6) from ct1", + f"select 
sin(c1) from {dbname}.ct1", + f"select sin(c2) from {dbname}.ct1", + f"select sin(c3) from {dbname}.ct1", + f"select sin(c4) from {dbname}.ct1", + f"select sin(c5) from {dbname}.ct1", + f"select sin(c6) from {dbname}.ct1", - "select sin(c1) from ct3", - "select sin(c2) from ct3", - "select sin(c3) from ct3", - "select sin(c4) from ct3", - "select sin(c5) from ct3", - "select sin(c6) from ct3", + f"select sin(c1) from {dbname}.ct3", + f"select sin(c2) from {dbname}.ct3", + f"select sin(c3) from {dbname}.ct3", + f"select sin(c4) from {dbname}.ct3", + f"select sin(c5) from {dbname}.ct3", + f"select sin(c6) from {dbname}.ct3", - "select sin(c1) from stb1", - "select sin(c2) from stb1", - "select sin(c3) from stb1", - "select sin(c4) from stb1", - "select sin(c5) from stb1", - "select sin(c6) from stb1", + f"select sin(c1) from {dbname}.stb1", + f"select sin(c2) from {dbname}.stb1", + f"select sin(c3) from {dbname}.stb1", + f"select sin(c4) from {dbname}.stb1", + f"select sin(c5) from {dbname}.stb1", + f"select sin(c6) from {dbname}.stb1", - "select sin(c6) as alisb from stb1", - "select sin(c6) alisb from stb1", + f"select sin(c6) as alisb from {dbname}.stb1", + f"select sin(c6) alisb from {dbname}.stb1", ] for type_sql in type_sql_lists: tdSql.query(type_sql) - - def basic_sin_function(self): - # basic query - tdSql.query("select c1 from ct3") + def basic_sin_function(self, dbname="db"): + + # basic query + tdSql.query(f"select c1 from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select c1 from t1") + tdSql.query(f"select c1 from {dbname}.t1") tdSql.checkRows(12) - tdSql.query("select c1 from stb1") + tdSql.query(f"select c1 from {dbname}.stb1") tdSql.checkRows(25) # used for empty table , ct3 is empty - tdSql.query("select sin(c1) from ct3") + tdSql.query(f"select sin(c1) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select sin(c2) from ct3") + tdSql.query(f"select sin(c2) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select sin(c3) from ct3") + 
tdSql.query(f"select sin(c3) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select sin(c4) from ct3") + tdSql.query(f"select sin(c4) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select sin(c5) from ct3") + tdSql.query(f"select sin(c5) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select sin(c6) from ct3") + tdSql.query(f"select sin(c6) from {dbname}.ct3") tdSql.checkRows(0) # # used for regular table - tdSql.query("select sin(c1) from t1") + tdSql.query(f"select sin(c1) from {dbname}.t1") tdSql.checkData(0, 0, None) tdSql.checkData(1 , 0, 0.841470985) tdSql.checkData(3 , 0, 0.141120008) tdSql.checkData(5 , 0, None) - tdSql.query("select c1, c2, c3 , c4, c5 from t1") + tdSql.query(f"select c1, c2, c3 , c4, c5 from {dbname}.t1") tdSql.checkData(1, 4, 1.11000) tdSql.checkData(3, 3, 33) tdSql.checkData(5, 4, None) - tdSql.query("select ts,c1, c2, c3 , c4, c5 from t1") + tdSql.query(f"select ts,c1, c2, c3 , c4, c5 from {dbname}.t1") tdSql.checkData(1, 5, 1.11000) tdSql.checkData(3, 4, 33) tdSql.checkData(5, 5, None) - self.check_result_auto_sin( "select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from t1", "select sin(abs(c1)), sin(abs(c2)) ,sin(abs(c3)), sin(abs(c4)), sin(abs(c5)) from t1") - + self.check_result_auto_sin( f"select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from {dbname}.t1", f"select sin(abs(c1)), sin(abs(c2)) ,sin(abs(c3)), sin(abs(c4)), sin(abs(c5)) from {dbname}.t1") + # used for sub table - tdSql.query("select c2 ,sin(c2) from ct1") + tdSql.query(f"select c2 ,sin(c2) from {dbname}.ct1") tdSql.checkData(0, 1, -0.220708349) tdSql.checkData(1 , 1, -0.556921845) tdSql.checkData(3 , 1, -0.798311364) tdSql.checkData(4 , 1, 0.000000000) - tdSql.query("select c1, c5 ,sin(c5) from ct4") + tdSql.query(f"select c1, c5 ,sin(c5) from {dbname}.ct4") tdSql.checkData(0 , 2, None) tdSql.checkData(1 , 2, 0.518228108) tdSql.checkData(2 , 2, 0.996475613) tdSql.checkData(3 , 2, 0.367960369) tdSql.checkData(5 , 2, None) - 
self.check_result_auto_sin( "select c1, c2, c3 , c4, c5 from ct1", "select sin(c1), sin(c2) ,sin(c3), sin(c4), sin(c5) from ct1") - + self.check_result_auto_sin( f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select sin(c1), sin(c2) ,sin(c3), sin(c4), sin(c5) from {dbname}.ct1") + # nest query for sin functions - tdSql.query("select c4 , sin(c4) ,sin(sin(c4)) , sin(sin(sin(c4))) from ct1;") + tdSql.query(f"select c4 , sin(c4) ,sin(sin(c4)) , sin(sin(sin(c4))) from {dbname}.ct1;") tdSql.checkData(0 , 0 , 88) tdSql.checkData(0 , 1 , 0.035398303) tdSql.checkData(0 , 2 , 0.035390911) @@ -281,52 +280,52 @@ class TDTestCase: tdSql.checkData(11 , 2 , 0.841042171) tdSql.checkData(11 , 3 , 0.745338326) - # used for stable table - - tdSql.query("select sin(c1) from stb1") + # used for stable table + + tdSql.query(f"select sin(c1) from {dbname}.stb1") tdSql.checkRows(25) - + # used for not exists table - tdSql.error("select sin(c1) from stbbb1") - tdSql.error("select sin(c1) from tbname") - tdSql.error("select sin(c1) from ct5") + tdSql.error(f"select sin(c1) from {dbname}.stbbb1") + tdSql.error(f"select sin(c1) from {dbname}.tbname") + tdSql.error(f"select sin(c1) from {dbname}.ct5") + + # mix with common col + tdSql.query(f"select c1, sin(c1) from {dbname}.ct1") + tdSql.query(f"select c2, sin(c2) from {dbname}.ct4") - # mix with common col - tdSql.query("select c1, sin(c1) from ct1") - tdSql.query("select c2, sin(c2) from ct4") - # mix with common functions - tdSql.query("select c1, sin(c1),sin(c1), sin(sin(c1)) from ct4 ") + tdSql.query(f"select c1, sin(c1),sin(c1), sin(sin(c1)) from {dbname}.ct4 ") tdSql.checkData(0 , 0 ,None) tdSql.checkData(0 , 1 ,None) tdSql.checkData(0 , 2 ,None) tdSql.checkData(0 , 3 ,None) - + tdSql.checkData(3 , 0 , 6) tdSql.checkData(3 , 1 ,-0.279415498) tdSql.checkData(3 , 2 ,-0.279415498) tdSql.checkData(3 , 3 ,-0.275793863) - tdSql.query("select c1, sin(c1),c5, floor(c5) from stb1 ") + tdSql.query(f"select c1, sin(c1),c5, floor(c5) from 
{dbname}.stb1 ") # # mix with agg functions , not support - tdSql.error("select c1, sin(c1),c5, count(c5) from stb1 ") - tdSql.error("select c1, sin(c1),c5, count(c5) from ct1 ") - tdSql.error("select sin(c1), count(c5) from stb1 ") - tdSql.error("select sin(c1), count(c5) from ct1 ") - tdSql.error("select c1, count(c5) from ct1 ") - tdSql.error("select c1, count(c5) from stb1 ") + tdSql.error(f"select c1, sin(c1),c5, count(c5) from {dbname}.stb1 ") + tdSql.error(f"select c1, sin(c1),c5, count(c5) from {dbname}.ct1 ") + tdSql.error(f"select sin(c1), count(c5) from {dbname}.stb1 ") + tdSql.error(f"select sin(c1), count(c5) from {dbname}.ct1 ") + tdSql.error(f"select c1, count(c5) from {dbname}.ct1 ") + tdSql.error(f"select c1, count(c5) from {dbname}.stb1 ") # agg functions mix with agg functions - tdSql.query("select max(c5), count(c5) from stb1") - tdSql.query("select max(c5), count(c5) from ct1") + tdSql.query(f"select max(c5), count(c5) from {dbname}.stb1") + tdSql.query(f"select max(c5), count(c5) from {dbname}.ct1") - - # # bug fix for compute - tdSql.query("select c1, sin(c1) -0 ,sin(c1-4)-0 from ct4 ") + + # # bug fix for compute + tdSql.query(f"select c1, sin(c1) -0 ,sin(c1-4)-0 from {dbname}.ct4 ") tdSql.checkData(0, 0, None) tdSql.checkData(0, 1, None) tdSql.checkData(0, 2, None) @@ -334,7 +333,7 @@ class TDTestCase: tdSql.checkData(1, 1, 0.989358247) tdSql.checkData(1, 2, -0.756802495) - tdSql.query(" select c1, sin(c1) -0 ,sin(c1-0.1)-0.1 from ct4") + tdSql.query(f"select c1, sin(c1) -0 ,sin(c1-0.1)-0.1 from {dbname}.ct4") tdSql.checkData(0, 0, None) tdSql.checkData(0, 1, None) tdSql.checkData(0, 2, None) @@ -342,35 +341,34 @@ class TDTestCase: tdSql.checkData(1, 1, 0.989358247) tdSql.checkData(1, 2, 0.898941342) - tdSql.query("select c1, sin(c1), c2, sin(c2), c3, sin(c3) from ct1") + tdSql.query(f"select c1, sin(c1), c2, sin(c2), c3, sin(c3) from {dbname}.ct1") - def test_big_number(self): + def test_big_number(self, dbname="db"): - tdSql.query("select 
c1, sin(100000000) from ct1") # bigint to double data overflow + tdSql.query(f"select c1, sin(100000000) from {dbname}.ct1") # bigint to double data overflow tdSql.checkData(4, 1, math.sin(100000000)) - tdSql.query("select c1, sin(10000000000000) from ct1") # bigint to double data overflow + tdSql.query(f"select c1, sin(10000000000000) from {dbname}.ct1") # bigint to double data overflow tdSql.checkData(4, 1, math.sin(10000000000000)) - tdSql.query("select c1, sin(10000000000000000000000000) from ct1") # bigint to double data overflow - tdSql.query("select c1, sin(10000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value + tdSql.query(f"select c1, sin(10000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow + tdSql.query(f"select c1, sin(10000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value tdSql.checkData(1, 1, math.sin(10000000000000000000000000.0)) - tdSql.query("select c1, sin(10000000000000000000000000000000000) from ct1") # bigint to double data overflow - tdSql.query("select c1, sin(10000000000000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value + tdSql.query(f"select c1, sin(10000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow + tdSql.query(f"select c1, sin(10000000000000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value tdSql.checkData(4, 1, math.sin(10000000000000000000000000000000000.0)) - tdSql.query("select c1, sin(10000000000000000000000000000000000000000) from ct1") # bigint to double data overflow - tdSql.query("select c1, sin(10000000000000000000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value + tdSql.query(f"select c1, sin(10000000000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow + tdSql.query(f"select c1, sin(10000000000000000000000000000000000000000.0) from 
{dbname}.ct1") # 10000000000000000000000000.0 is a double value tdSql.checkData(4, 1, math.sin(10000000000000000000000000000000000000000.0)) - tdSql.query("select c1, sin(10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) from ct1") # bigint to double data overflow + tdSql.query(f"select c1, sin(10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow - def abs_func_filter(self): - tdSql.execute("use db") - tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sin(c1)-0.5) from ct4 where c1>5 ") + def abs_func_filter(self, dbname="db"): + tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sin(c1)-0.5) from {dbname}.ct4 where c1>5 ") tdSql.checkRows(3) tdSql.checkData(0,0,8) tdSql.checkData(0,1,8.000000000) @@ -378,7 +376,7 @@ class TDTestCase: tdSql.checkData(0,3,7.900000000) tdSql.checkData(0,4,1.000000000) - tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sin(c1)-0.5) from ct4 where c1=5 ") + tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sin(c1)-0.5) from {dbname}.ct4 where c1=5 ") tdSql.checkRows(1) tdSql.checkData(0,0,5) tdSql.checkData(0,1,5.000000000) @@ -386,7 +384,7 @@ class TDTestCase: tdSql.checkData(0,3,4.900000000) tdSql.checkData(0,4,-1.000000000) - tdSql.query("select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sin(c1)-0.5) from ct4 where c1=sin(c1) limit 1 ") + tdSql.query(f"select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sin(c1)-0.5) from {dbname}.ct4 where c1=sin(c1) limit 1 ") tdSql.checkRows(1) tdSql.checkData(0,0,0) tdSql.checkData(0,1,0) @@ -394,45 +392,40 @@ class TDTestCase: tdSql.checkData(0,3,0.000000000) tdSql.checkData(0,4,-0.100000000) tdSql.checkData(0,5,0.000000000) - - def pow_Arithmetic(self): - pass - - def check_boundary_values(self): + + def 
check_boundary_values(self, dbname="db"): PI=3.1415926 - tdSql.execute("drop database if exists bound_test") - tdSql.execute("create database if not exists bound_test") - time.sleep(3) - tdSql.execute("use bound_test") + tdSql.execute(f"drop database if exists {dbname}") + tdSql.execute(f"create database if not exists {dbname}") tdSql.execute( - "create table stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);" + f"create table {dbname}.stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);" ) - tdSql.execute(f'create table sub1_bound using stb_bound tags ( 1 )') + tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )') tdSql.execute( - f"insert into sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.execute( - f"insert into sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.execute( - f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.execute( - f"insert into sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, 
True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.error( - f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) - self.check_result_auto_sin( "select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from sub1_bound ", "select sin(abs(c1)), sin(abs(c2)) ,sin(abs(c3)), sin(abs(c4)), sin(abs(c5)) from sub1_bound") - - self.check_result_auto_sin( "select c1, c2, c3 , c3, c2 ,c1 from sub1_bound ", "select sin(c1), sin(c2) ,sin(c3), sin(c3), sin(c2) ,sin(c1) from sub1_bound") + self.check_result_auto_sin( f"select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from {dbname}.sub1_bound ", f"select sin(abs(c1)), sin(abs(c2)) ,sin(abs(c3)), sin(abs(c4)), sin(abs(c5)) from {dbname}.sub1_bound") + + self.check_result_auto_sin( f"select c1, c2, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ", f"select sin(c1), sin(c2) ,sin(c3), sin(c3), sin(c2) ,sin(c1) from {dbname}.sub1_bound") + + self.check_result_auto_sin(f"select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from {dbname}.sub1_bound" , f"select sin(abs(c1)) from {dbname}.sub1_bound" ) - self.check_result_auto_sin("select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from sub1_bound" , "select sin(abs(c1)) from sub1_bound" ) - # check basic elem for table per row - tdSql.query("select sin(abs(c1)) ,sin(abs(c2)) , sin(abs(c3)) , sin(abs(c4)), sin(abs(c5)), sin(abs(c6)) from sub1_bound ") + tdSql.query(f"select sin(abs(c1)) ,sin(abs(c2)) , sin(abs(c3)) , sin(abs(c4)), sin(abs(c5)), sin(abs(c6)) from {dbname}.sub1_bound ") tdSql.checkData(0,0,math.sin(2147483647)) 
tdSql.checkData(0,1,math.sin(9223372036854775807)) tdSql.checkData(0,2,math.sin(32767)) @@ -450,75 +443,73 @@ class TDTestCase: tdSql.checkData(3,4,math.sin(339999995214436424907732413799364296704.00000)) # check + - * / in functions - tdSql.query("select sin(abs(c1+1)) ,sin(abs(c2)) , sin(abs(c3*1)) , sin(abs(c4/2)), sin(abs(c5))/2, sin(abs(c6)) from sub1_bound ") + tdSql.query(f"select sin(abs(c1+1)) ,sin(abs(c2)) , sin(abs(c3*1)) , sin(abs(c4/2)), sin(abs(c5))/2, sin(abs(c6)) from {dbname}.sub1_bound ") tdSql.checkData(0,0,math.sin(2147483648.000000000)) tdSql.checkData(0,1,math.sin(9223372036854775807)) tdSql.checkData(0,2,math.sin(32767.000000000)) tdSql.checkData(0,3,math.sin(63.500000000)) - tdSql.execute("create stable st (ts timestamp, num1 float, num2 double) tags (t1 int);") - tdSql.execute(f'create table tb1 using st tags (1)') - tdSql.execute(f'create table tb2 using st tags (2)') - tdSql.execute(f'create table tb3 using st tags (3)') - tdSql.execute('insert into tb1 values (now()-40s, {}, {})'.format(PI/2 ,PI/2 )) - tdSql.execute('insert into tb1 values (now()-30s, {}, {})'.format(PI ,PI )) - tdSql.execute('insert into tb1 values (now()-20s, {}, {})'.format(PI*1.5 ,PI*1.5)) - tdSql.execute('insert into tb1 values (now()-10s, {}, {})'.format(PI*2 ,PI*2)) - tdSql.execute('insert into tb1 values (now(), {}, {})'.format(PI*2.5 ,PI*2.5)) + tdSql.execute("create stable {dbname}.st (ts timestamp, num1 float, num2 double) tags (t1 int);") + tdSql.execute(f'create table {dbname}.tb1 using {dbname}.st tags (1)') + tdSql.execute(f'create table {dbname}.tb2 using {dbname}.st tags (2)') + tdSql.execute(f'create table {dbname}.tb3 using {dbname}.st tags (3)') + tdSql.execute('insert into {dbname}.tb1 values (now()-40s, {}, {})'.format(PI/2 ,PI/2 )) + tdSql.execute('insert into {dbname}.tb1 values (now()-30s, {}, {})'.format(PI ,PI )) + tdSql.execute('insert into {dbname}.tb1 values (now()-20s, {}, {})'.format(PI*1.5 ,PI*1.5)) + tdSql.execute('insert into 
{dbname}.tb1 values (now()-10s, {}, {})'.format(PI*2 ,PI*2)) + tdSql.execute('insert into {dbname}.tb1 values (now(), {}, {})'.format(PI*2.5 ,PI*2.5)) - tdSql.execute('insert into tb2 values (now()-40s, {}, {})'.format(PI/2 ,PI/2 )) - tdSql.execute('insert into tb2 values (now()-30s, {}, {})'.format(PI ,PI )) - tdSql.execute('insert into tb2 values (now()-20s, {}, {})'.format(PI*1.5 ,PI*1.5)) - tdSql.execute('insert into tb2 values (now()-10s, {}, {})'.format(PI*2 ,PI*2)) - tdSql.execute('insert into tb2 values (now(), {}, {})'.format(PI*2.5 ,PI*2.5)) + tdSql.execute('insert into {dbname}.tb2 values (now()-40s, {}, {})'.format(PI/2 ,PI/2 )) + tdSql.execute('insert into {dbname}.tb2 values (now()-30s, {}, {})'.format(PI ,PI )) + tdSql.execute('insert into {dbname}.tb2 values (now()-20s, {}, {})'.format(PI*1.5 ,PI*1.5)) + tdSql.execute('insert into {dbname}.tb2 values (now()-10s, {}, {})'.format(PI*2 ,PI*2)) + tdSql.execute('insert into {dbname}.tb2 values (now(), {}, {})'.format(PI*2.5 ,PI*2.5)) for i in range(100): - tdSql.execute('insert into tb3 values (now()+{}s, {}, {})'.format(i,PI*(5+i)/2 ,PI*(5+i)/2)) + tdSql.execute('insert into {dbname}.tb3 values (now()+{}s, {}, {})'.format(i,PI*(5+i)/2 ,PI*(5+i)/2)) - self.check_result_auto_sin("select num1,num2 from tb3;" , "select sin(num1),sin(num2) from tb3") + self.check_result_auto_sin(f"select num1,num2 from {dbname}.tb3;" , f"select sin(num1),sin(num2) from {dbname}.tb3") - def support_super_table_test(self): - tdSql.execute(" use db ") - self.check_result_auto_sin( " select c5 from stb1 order by ts " , "select sin(c5) from stb1 order by ts" ) - self.check_result_auto_sin( " select c5 from stb1 order by tbname " , "select sin(c5) from stb1 order by tbname" ) - self.check_result_auto_sin( " select c5 from stb1 where c1 > 0 order by tbname " , "select sin(c5) from stb1 where c1 > 0 order by tbname" ) - self.check_result_auto_sin( " select c5 from stb1 where c1 > 0 order by tbname " , "select sin(c5) from stb1 where 
c1 > 0 order by tbname" ) + def support_super_table_test(self, dbname="db"): + self.check_result_auto_sin( f"select c5 from {dbname}.stb1 order by ts " , f"select sin(c5) from {dbname}.stb1 order by ts" ) + self.check_result_auto_sin( f"select c5 from {dbname}.stb1 order by tbname " , f"select sin(c5) from {dbname}.stb1 order by tbname" ) + self.check_result_auto_sin( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select sin(c5) from {dbname}.stb1 where c1 > 0 order by tbname" ) + self.check_result_auto_sin( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select sin(c5) from {dbname}.stb1 where c1 > 0 order by tbname" ) + + self.check_result_auto_sin( f"select t1,c5 from {dbname}.stb1 order by ts " , f"select sin(t1), sin(c5) from {dbname}.stb1 order by ts" ) + self.check_result_auto_sin( f"select t1,c5 from {dbname}.stb1 order by tbname " , f"select sin(t1) ,sin(c5) from {dbname}.stb1 order by tbname" ) + self.check_result_auto_sin( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select sin(t1) ,sin(c5) from {dbname}.stb1 where c1 > 0 order by tbname" ) + self.check_result_auto_sin( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select sin(t1) , sin(c5) from {dbname}.stb1 where c1 > 0 order by tbname" ) - self.check_result_auto_sin( " select t1,c5 from stb1 order by ts " , "select sin(t1), sin(c5) from stb1 order by ts" ) - self.check_result_auto_sin( " select t1,c5 from stb1 order by tbname " , "select sin(t1) ,sin(c5) from stb1 order by tbname" ) - self.check_result_auto_sin( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select sin(t1) ,sin(c5) from stb1 where c1 > 0 order by tbname" ) - self.check_result_auto_sin( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select sin(t1) , sin(c5) from stb1 where c1 > 0 order by tbname" ) - pass - def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring tdSql.prepare() 
tdLog.printNoPrefix("==========step1:create table ==============") - + self.prepare_datas() - tdLog.printNoPrefix("==========step2:test errors ==============") + tdLog.printNoPrefix("==========step2:test errors ==============") self.test_errors() - - tdLog.printNoPrefix("==========step3:support types ============") + + tdLog.printNoPrefix("==========step3:support types ============") self.support_types() - tdLog.printNoPrefix("==========step4: sin basic query ============") + tdLog.printNoPrefix("==========step4: sin basic query ============") self.basic_sin_function() - tdLog.printNoPrefix("==========step5: big number sin query ============") + tdLog.printNoPrefix("==========step5: big number sin query ============") self.test_big_number() - tdLog.printNoPrefix("==========step6: sin boundary query ============") + tdLog.printNoPrefix("==========step6: sin boundary query ============") self.check_boundary_values() - tdLog.printNoPrefix("==========step7: sin filter query ============") + tdLog.printNoPrefix("==========step7: sin filter query ============") self.abs_func_filter() @@ -526,7 +517,7 @@ class TDTestCase: self.support_super_table_test() - + def stop(self): tdSql.close() diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh index faaa255aca..e89970b6ff 100755 --- a/tests/system-test/fulltest.sh +++ b/tests/system-test/fulltest.sh @@ -146,6 +146,8 @@ python3 ./test.py -f 2-query/rtrim.py # python3 ./test.py -f 2-query/rtrim.py -R python3 ./test.py -f 2-query/sample.py python3 ./test.py -f 2-query/sample.py -R +python3 ./test.py -f 2-query/sin.py +# python3 ./test.py -f 2-query/sin.py -R python3 ./test.py -f 1-insert/update_data.py @@ -174,7 +176,6 @@ python3 ./test.py -f 2-query/json_tag.py python3 ./test.py -f 2-query/top.py python3 ./test.py -f 2-query/log.py python3 ./test.py -f 2-query/sqrt.py -python3 ./test.py -f 2-query/sin.py python3 ./test.py -f 2-query/tan.py # python3 ./test.py -f 2-query/nestedQuery.py # TD-15983 subquery 
output duplicate name column. @@ -183,12 +184,7 @@ python3 ./test.py -f 2-query/tan.py python3 ./test.py -f 2-query/elapsed.py python3 ./test.py -f 2-query/csum.py -<<<<<<< HEAD -#python3 ./test.py -f 2-query/mavg.py -======= python3 ./test.py -f 2-query/mavg.py -python3 ./test.py -f 2-query/sample.py ->>>>>>> 3.0 python3 ./test.py -f 2-query/function_diff.py python3 ./test.py -f 2-query/unique.py python3 ./test.py -f 2-query/stateduration.py @@ -375,7 +371,7 @@ python3 ./test.py -f 2-query/interp.py -Q 2 python3 ./test.py -f 2-query/avg.py -Q 2 # python3 ./test.py -f 2-query/elapsed.py -Q 2 python3 ./test.py -f 2-query/csum.py -Q 2 -python3 ./test.py -f 2-query/mavg.py -Q 2 +#python3 ./test.py -f 2-query/mavg.py -Q 2 python3 ./test.py -f 2-query/sample.py -Q 2 python3 ./test.py -f 2-query/function_diff.py -Q 2 python3 ./test.py -f 2-query/unique.py -Q 2 @@ -462,7 +458,7 @@ python3 ./test.py -f 2-query/query_cols_tags_and_or.py -Q 3 # python3 ./test.py -f 2-query/avg.py -Q 3 # python3 ./test.py -f 2-query/elapsed.py -Q 3 python3 ./test.py -f 2-query/csum.py -Q 3 -python3 ./test.py -f 2-query/mavg.py -Q 3 +#python3 ./test.py -f 2-query/mavg.py -Q 3 python3 ./test.py -f 2-query/sample.py -Q 3 python3 ./test.py -f 2-query/function_diff.py -Q 3 python3 ./test.py -f 2-query/unique.py -Q 3 From 40e2dc1f8d6195cf150550d791dba4fd9b4f3d6a Mon Sep 17 00:00:00 2001 From: "wenzhouwww@live.cn" Date: Wed, 17 Aug 2022 18:08:25 +0800 Subject: [PATCH 07/72] test: update pow function check method --- tests/system-test/2-query/pow.py | 122 ++++++------------------------- 1 file changed, 22 insertions(+), 100 deletions(-) diff --git a/tests/system-test/2-query/pow.py b/tests/system-test/2-query/pow.py index 32e4b78ff3..0702d05c0b 100644 --- a/tests/system-test/2-query/pow.py +++ b/tests/system-test/2-query/pow.py @@ -64,41 +64,7 @@ class TDTestCase: ''' ) - def check_result_auto_pow2(self ,origin_query , pow_query): - - pow_result = tdSql.getResult(pow_query) - origin_result = 
tdSql.getResult(origin_query) - - auto_result =[] - - for row in origin_result: - row_check = [] - for elem in row: - if elem == None: - elem = None - else: - elem = math.pow(elem,2) - row_check.append(elem) - auto_result.append(row_check) - - check_status = True - - for row_index , row in enumerate(pow_result): - for col_index , elem in enumerate(row): - if auto_result[row_index][col_index] is None and elem : - check_status = False - elif auto_result[row_index][col_index] is not None and (auto_result[row_index][col_index] - elem > 0.001): - print(auto_result[row_index][col_index],", elem: ", elem ) - check_status = False - else: - pass - if not check_status: - tdLog.notice("pow function value has not as expected , sql is \"%s\" "%pow_query ) - sys.exit(1) - else: - tdLog.info("pow value check pass , it work as expected ,sql is \"%s\" "%pow_query ) - - def check_result_auto_pow1(self ,origin_query , pow_query): + def check_result_auto_pow(self ,base , origin_query , pow_query): pow_result = tdSql.getResult(pow_query) origin_result = tdSql.getResult(origin_query) @@ -110,59 +76,15 @@ class TDTestCase: if elem == None: elem = None else : - elem = pow(elem ,1) + elem = float(pow(elem ,base)) row_check.append(elem) auto_result.append(row_check) - check_status = True + tdSql.query(pow_query) for row_index , row in enumerate(pow_result): for col_index , elem in enumerate(row): - if auto_result[row_index][col_index] == None and not (auto_result[row_index][col_index] == None and elem == None): - check_status = False - elif auto_result[row_index][col_index] != None and (auto_result[row_index][col_index] - elem > 0.00000001): - print(auto_result[row_index][col_index],", elem: ", elem ) - check_status = False - else: - pass - if not check_status: - tdLog.notice("pow function value has not as expected , sql is \"%s\" "%pow_query ) - sys.exit(1) - else: - tdLog.info("pow value check pass , it work as expected ,sql is \"%s\" "%pow_query ) - - def check_result_auto_pow__10(self 
,origin_query , pow_query): - pow_result = tdSql.getResult(pow_query) - origin_result = tdSql.getResult(origin_query) - - auto_result =[] - - for row in origin_result: - row_check = [] - for elem in row: - if elem == None: - elem = None - elif elem == 0: - elem = None - else: - elem = pow(elem ,-10) - row_check.append(elem) - auto_result.append(row_check) - - check_status = True - for row_index , row in enumerate(pow_result): - for col_index , elem in enumerate(row): - if auto_result[row_index][col_index] == None and not (auto_result[row_index][col_index] == None and elem == None): - check_status = False - elif auto_result[row_index][col_index] != None and (auto_result[row_index][col_index] - elem > 0.00000001): - print(auto_result[row_index][col_index],", elem: ", elem ) - check_status = False - else: - pass - if not check_status: - tdLog.notice("pow function value has not as expected , sql is \"%s\" "%pow_query ) - sys.exit(1) - else: - tdLog.info("pow value check pass , it work as expected ,sql is \"%s\" "%pow_query ) + tdSql.checkData(row_index,col_index ,auto_result[row_index][col_index]) + def test_errors(self, dbname="db"): error_sql_lists = [ @@ -311,9 +233,9 @@ class TDTestCase: tdSql.checkData(3, 4, 33) tdSql.checkData(5, 5, None) - self.check_result_auto_pow2( f"select c1, c2, c3 , c4, c5 from {dbname}.t1", f"select pow(c1 ,2), pow(c2 ,2) ,pow(c3, 2), pow(c4 ,2), pow(c5 ,2) from {dbname}.t1") - self.check_result_auto_pow1( f"select c1, c2, c3 , c4, c5 from {dbname}.t1", f"select pow(c1 ,1), pow(c2 ,1) ,pow(c3, 1), pow(c4 ,1), pow(c5 ,1) from {dbname}.t1") - self.check_result_auto_pow__10( f"select c1, c2, c3 , c4, c5 from {dbname}.t1", f"select pow(c1 ,-10), pow(c2 ,-10) ,pow(c3, -10), pow(c4 ,-10), pow(c5 ,-10) from {dbname}.t1") + self.check_result_auto_pow( 2, f"select c1, c3 , c4, c5 from {dbname}.t1", f"select pow(c1 ,2) , pow(c3, 2), pow(c4 ,2), pow(c5 ,2) from {dbname}.t1") + self.check_result_auto_pow( 1,f"select c1, c3 , c4, c5 from 
{dbname}.t1", f"select pow(c1 ,1) , pow(c3, 1), pow(c4 ,1), pow(c5 ,1) from {dbname}.t1") + self.check_result_auto_pow( 10,f"select c1, c3 , c4, c5 from {dbname}.t1", f"select pow(c1 ,10) ,pow(c3, 10), pow(c4 ,10), pow(c5 ,10) from {dbname}.t1") # used for sub table tdSql.query(f"select c1 ,pow(c1 ,2) from {dbname}.ct1") @@ -332,8 +254,8 @@ class TDTestCase: tdSql.checkData(4 , 2, 7573.273783071) - self.check_result_auto_pow2( f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select pow(c1,2), pow(c2,2) ,pow(c3,2), pow(c4,2), pow(c5,2) from {dbname}.ct1") - self.check_result_auto_pow__10( f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select pow(c1,-10), pow(c2,-10) ,pow(c3,-10), pow(c4,-10), pow(c5,-10) from {dbname}.ct1") + self.check_result_auto_pow( 2, f"select c1, c3 , c4, c5 from {dbname}.ct1", f"select pow(c1,2), pow(c3,2), pow(c4,2), pow(c5,2) from {dbname}.ct1") + self.check_result_auto_pow( 10, f"select c1, c3 , c4, c5 from {dbname}.ct1", f"select pow(c1,10), pow(c3,10), pow(c4,10), pow(c5,10) from {dbname}.ct1") # nest query for pow functions tdSql.query(f"select c1 , pow(c1,2) ,pow(pow(c1,2),2) , pow(pow(pow(c1,2),2),2) from {dbname}.ct1;") @@ -568,13 +490,13 @@ class TDTestCase: tdSql.error( f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) - self.check_result_auto_pow2( f"select c1, c2, c3 , c4, c5 from {dbname}.sub1_bound ", f"select pow(c1,2), pow(c2,2) ,pow(c3,2), pow(c4,2), pow(c5,2) from {dbname}.sub1_bound") - self.check_result_auto_pow__10( f"select c1, c2, c3 , c4, c5 from {dbname}.sub1_bound ", f"select pow(c1,-10), pow(c2,-10) ,pow(c3,-10), pow(c4,-10), pow(c5,-10) from {dbname}.sub1_bound") + self.check_result_auto_pow(2, f"select c1, c3 , c4, c5 from {dbname}.sub1_bound ", f"select pow(c1,2), pow(c3,2), pow(c4,2), pow(c5,2) from {dbname}.sub1_bound") + self.check_result_auto_pow(3, f"select c1, c3 , c4, c5 from 
{dbname}.sub1_bound ", f"select pow(c1,3), pow(c3,3), pow(c4,3), pow(c5,3) from {dbname}.sub1_bound") - self.check_result_auto_pow2( f"select c1, c2, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ", f"select pow(c1,2), pow(c2,2) ,pow(c3,2), pow(c3,2), pow(c2,2) ,pow(c1,2) from {dbname}.sub1_bound") + self.check_result_auto_pow(2, f"select c1, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ", f"select pow(c1,2), pow(c3,2), pow(c3,2), pow(c2,2) ,pow(c1,2) from {dbname}.sub1_bound") - self.check_result_auto_pow2(f"select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from {dbname}.sub1_bound" , f"select pow(abs(c1) ,2) from {dbname}.sub1_bound" ) + self.check_result_auto_pow(2, f"select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from {dbname}.sub1_bound" , f"select pow(abs(c1) ,2) from {dbname}.sub1_bound" ) # check basic elem for table per row tdSql.query(f"select pow(abs(c1),2) ,pow(abs(c2),2) , pow(abs(c3),2) , pow(abs(c4),2), pow(abs(c5),2), pow(abs(c6),2) from {dbname}.sub1_bound ") @@ -604,15 +526,15 @@ class TDTestCase: def support_super_table_test(self, dbname="db"): - self.check_result_auto_pow2( f"select c5 from {dbname}.stb1 order by ts " , f"select pow(c5,2) from {dbname}.stb1 order by ts" ) - self.check_result_auto_pow2( f"select c5 from {dbname}.stb1 order by tbname " , f"select pow(c5,2) from {dbname}.stb1 order by tbname" ) - self.check_result_auto_pow2( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select pow(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" ) - self.check_result_auto_pow2( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select pow(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" ) + self.check_result_auto_pow(2, f"select c5 from {dbname}.stb1 order by ts " , f"select pow(c5,2) from {dbname}.stb1 order by ts" ) + self.check_result_auto_pow(2, f"select c5 from {dbname}.stb1 order by tbname " , f"select pow(c5,2) from {dbname}.stb1 order by tbname" ) + 
self.check_result_auto_pow(2, f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select pow(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" ) + self.check_result_auto_pow(2, f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select pow(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" ) - self.check_result_auto_pow2( f"select t1,c5 from {dbname}.stb1 order by ts " , f"select pow(t1,2), pow(c5,2) from {dbname}.stb1 order by ts" ) - self.check_result_auto_pow2( f"select t1,c5 from {dbname}.stb1 order by tbname " , f"select pow(t1,2) ,pow(c5,2) from {dbname}.stb1 order by tbname" ) - self.check_result_auto_pow2( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select pow(t1,2) ,pow(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" ) - self.check_result_auto_pow2( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select pow(t1,2) , pow(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" ) + self.check_result_auto_pow(2, f"select t1,c5 from {dbname}.stb1 order by ts " , f"select pow(t1,2), pow(c5,2) from {dbname}.stb1 order by ts" ) + self.check_result_auto_pow(2, f"select t1,c5 from {dbname}.stb1 order by tbname " , f"select pow(t1,2) ,pow(c5,2) from {dbname}.stb1 order by tbname" ) + self.check_result_auto_pow(2, f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select pow(t1,2) ,pow(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" ) + self.check_result_auto_pow(2, f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select pow(t1,2) , pow(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" ) def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring tdSql.prepare() From 1c4f86210d6b89038a943abb042fe7b8274c0023 Mon Sep 17 00:00:00 2001 From: cpwu Date: Wed, 17 Aug 2022 18:18:48 +0800 Subject: [PATCH 08/72] fix case --- tests/system-test/2-query/ltrim.py | 3 +- tests/system-test/2-query/smaTest.py | 37 ++- 
tests/system-test/2-query/sml.py | 24 +- tests/system-test/2-query/spread.py | 96 +++---- tests/system-test/2-query/sqrt.py | 361 +++++++++++++-------------- tests/system-test/fulltest.sh | 14 +- 6 files changed, 264 insertions(+), 271 deletions(-) diff --git a/tests/system-test/2-query/ltrim.py b/tests/system-test/2-query/ltrim.py index 55c33b68eb..330f688990 100644 --- a/tests/system-test/2-query/ltrim.py +++ b/tests/system-test/2-query/ltrim.py @@ -251,8 +251,7 @@ class TDTestCase: tdLog.printNoPrefix("==========step3:all check") self.all_test() - tdDnodes.stop(1) - tdDnodes.start(1) + tdSql.execute("flush database db") tdSql.execute("use db") diff --git a/tests/system-test/2-query/smaTest.py b/tests/system-test/2-query/smaTest.py index 67824cc3a3..0217b6c28c 100644 --- a/tests/system-test/2-query/smaTest.py +++ b/tests/system-test/2-query/smaTest.py @@ -30,14 +30,6 @@ class TDTestCase: # updatecfgDict = {'debugFlag': 135} # updatecfgDict = {'fqdn': 135} - def caseDescription(self): - ''' - limit and offset keyword function test cases; - case1: limit offset base function test - case2: offset return valid - ''' - return - # init def init(self, conn, logSql): tdLog.debug("start to execute %s" % __file__) @@ -47,11 +39,12 @@ class TDTestCase: self.ts = 1500000000000 - # run case + # run case def run(self): # insert data - self.insert_data1("t1", self.ts, 1000*10000) - self.insert_data1("t4", self.ts, 1000*10000) + dbname = "db" + self.insert_data1(f"{dbname}.t1", self.ts, 1000*10000) + self.insert_data1(f"{dbname}.t4", self.ts, 1000*10000) # test base case # self.test_case1() tdLog.debug(" LIMIT test_case1 ............ [OK]") @@ -60,7 +53,7 @@ class TDTestCase: tdLog.debug(" LIMIT test_case2 ............ 
[OK]") - # stop + # stop def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) @@ -70,16 +63,16 @@ class TDTestCase: # # create table - def create_tables(self): + def create_tables(self, dbname="db"): # super table - tdSql.execute("create table st(ts timestamp, i1 int,i2 int) tags(area int)"); + tdSql.execute(f"create table {dbname}.st(ts timestamp, i1 int,i2 int) tags(area int)") # child table - tdSql.execute("create table t1 using st tags(1)"); + tdSql.execute(f"create table {dbname}.t1 using {dbname}.st tags(1)") - tdSql.execute("create table st1(ts timestamp, i1 int ,i2 int) tags(area int) sma(i2) "); - tdSql.execute("create table t4 using st1 tags(1)"); + tdSql.execute(f"create table {dbname}.st1(ts timestamp, i1 int ,i2 int) tags(area int) sma(i2) ") + tdSql.execute(f"create table {dbname}.t4 using {dbname}.st1 tags(1)") - return + return # insert data1 def insert_data(self, tbname, ts_start, count): @@ -91,7 +84,7 @@ class TDTestCase: if i >0 and i%30000 == 0: tdSql.execute(sql) sql = pre_insert - # end sql + # end sql if sql != pre_insert: tdSql.execute(sql) @@ -107,16 +100,16 @@ class TDTestCase: if i >0 and i%30000 == 0: tdSql.execute(sql) sql = pre_insert - # end sql + # end sql if sql != pre_insert: tdSql.execute(sql) tdLog.debug("INSERT TABLE DATA ............ 
[OK]") return - # test case1 base + # test case1 base # def test_case1(self): - # # + # # # # limit base function # # # # base no where diff --git a/tests/system-test/2-query/sml.py b/tests/system-test/2-query/sml.py index 6cfb9a1dad..4c95f70a4d 100644 --- a/tests/system-test/2-query/sml.py +++ b/tests/system-test/2-query/sml.py @@ -20,7 +20,7 @@ class TDTestCase: tdSql.init(conn.cursor()) #tdSql.init(conn.cursor(), logSql) # output sql.txt file - def checkFileContent(self): + def checkFileContent(self, dbname="sml_db"): buildPath = tdCom.getBuildPath() cmdStr = '%s/build/bin/sml_test'%(buildPath) tdLog.info(cmdStr) @@ -28,8 +28,8 @@ class TDTestCase: if ret != 0: tdLog.exit("sml_test failed") - tdSql.execute('use sml_db') - tdSql.query("select * from t_b7d815c9222ca64cdf2614c61de8f211") + # tdSql.execute('use sml_db') + tdSql.query(f"select * from {dbname}.t_b7d815c9222ca64cdf2614c61de8f211") tdSql.checkRows(1) tdSql.checkData(0, 0, '2016-01-01 08:00:07.000') @@ -44,35 +44,35 @@ class TDTestCase: tdSql.checkData(0, 9, 0) tdSql.checkData(0, 10, 25) - tdSql.query("select * from readings") + tdSql.query(f"select * from {dbname}.readings") tdSql.checkRows(9) - tdSql.query("select distinct tbname from readings") + tdSql.query(f"select distinct tbname from {dbname}.readings") tdSql.checkRows(4) - tdSql.query("select * from t_0799064f5487946e5d22164a822acfc8 order by _ts") + tdSql.query(f"select * from {dbname}.t_0799064f5487946e5d22164a822acfc8 order by _ts") tdSql.checkRows(2) tdSql.checkData(0, 3, "kk") tdSql.checkData(1, 3, None) - tdSql.query("select distinct tbname from `sys.if.bytes.out`") + tdSql.query(f"select distinct tbname from {dbname}.`sys.if.bytes.out`") tdSql.checkRows(2) - tdSql.query("select * from t_fc70dec6677d4277c5d9799c4da806da order by _ts") + tdSql.query(f"select * from {dbname}.t_fc70dec6677d4277c5d9799c4da806da order by _ts") tdSql.checkRows(2) tdSql.checkData(0, 1, 1.300000000) tdSql.checkData(1, 1,13.000000000) - tdSql.query("select * from 
`sys.procs.running`") + tdSql.query(f"select * from {dbname}.`sys.procs.running`") tdSql.checkRows(1) tdSql.checkData(0, 1, 42.000000000) tdSql.checkData(0, 2, "web01") - tdSql.query("select distinct tbname from `sys.cpu.nice`") + tdSql.query(f"select distinct tbname from {dbname}.`sys.cpu.nice`") tdSql.checkRows(2) - tdSql.query("select * from `sys.cpu.nice` order by _ts") + tdSql.query(f"select * from {dbname}.`sys.cpu.nice` order by _ts") tdSql.checkRows(2) tdSql.checkData(0, 1, 9.000000000) tdSql.checkData(0, 2, "lga") @@ -83,7 +83,7 @@ class TDTestCase: tdSql.checkData(1, 3, "web01") tdSql.checkData(1, 4, "t1") - tdSql.query("select * from macylr") + tdSql.query(f"select * from {dbname}.macylr") tdSql.checkRows(2) return diff --git a/tests/system-test/2-query/spread.py b/tests/system-test/2-query/spread.py index 51c569e565..ffe86ff363 100644 --- a/tests/system-test/2-query/spread.py +++ b/tests/system-test/2-query/spread.py @@ -26,6 +26,8 @@ TS_TYPE_COL = [ TS_COL, ] ALL_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, BOOL_COL, BINARY_COL, NCHAR_COL, TS_COL ] +DBNAME = "db" + class TDTestCase: def init(self, conn, logSql): @@ -88,6 +90,7 @@ class TDTestCase: return join_condition def __where_condition(self, col=None, tbname=None, query_conditon=None): + # tbname = tbname.split(".")[-1] if tbname else None if query_conditon and isinstance(query_conditon, str): if query_conditon.startswith("count"): query_conditon = query_conditon[6:-1] @@ -129,32 +132,33 @@ class TDTestCase: return f"select spread({select_clause}) from {from_clause} {where_condition} {group_condition}" @property - def __tb_list(self): + def __tb_list(self, dbname=DBNAME): return [ - "ct1", - "ct4", - "t1", - "ct2", - "stb1", + f"{dbname}.ct1", + f"{dbname}.ct4", + f"{dbname}.t1", + f"{dbname}.ct2", + f"{dbname}.stb1", ] def sql_list(self): sqls = [] __no_join_tblist = self.__tb_list for tb in __no_join_tblist: - select_claus_list = self.__query_condition(tb) - for 
select_claus in select_claus_list: - group_claus = self.__group_condition(col=select_claus) - where_claus = self.__where_condition(query_conditon=select_claus) - having_claus = self.__group_condition(col=select_claus, having=f"{select_claus} is not null") - sqls.extend( - ( - self.__single_sql(select_claus, tb, where_claus, having_claus), - self.__single_sql(select_claus, tb,), - self.__single_sql(select_claus, tb, where_condition=where_claus), - self.__single_sql(select_claus, tb, group_condition=group_claus), - ) + tbname = tb.split(".")[-1] + select_claus_list = self.__query_condition(tbname) + for select_claus in select_claus_list: + group_claus = self.__group_condition(col=select_claus) + where_claus = self.__where_condition(query_conditon=select_claus) + having_claus = self.__group_condition(col=select_claus, having=f"{select_claus} is not null") + sqls.extend( + ( + self.__single_sql(select_claus, tb, where_claus, having_claus), + self.__single_sql(select_claus, tb,), + self.__single_sql(select_claus, tb, where_condition=where_claus), + self.__single_sql(select_claus, tb, group_condition=group_claus), ) + ) # return filter(None, sqls) return list(filter(None, sqls)) @@ -166,28 +170,28 @@ class TDTestCase: tdLog.info(f"sql: {sqls[i]}") tdSql.query(sqls[i]) - def __test_current(self): - tdSql.query("select spread(ts) from ct1") + def __test_current(self, dbname=DBNAME): + tdSql.query(f"select spread(ts) from {dbname}.ct1") tdSql.checkRows(1) - tdSql.query("select spread(c1) from ct2") + tdSql.query(f"select spread(c1) from {dbname}.ct2") tdSql.checkRows(1) - tdSql.query("select spread(c1) from ct4 group by c1") + tdSql.query(f"select spread(c1) from {dbname}.ct4 group by c1") tdSql.checkRows(self.rows + 3) - tdSql.query("select spread(c1) from ct4 group by c7") + tdSql.query(f"select spread(c1) from {dbname}.ct4 group by c7") tdSql.checkRows(3) - tdSql.query("select spread(ct2.c1) from ct4 join ct2 on ct4.ts=ct2.ts") + tdSql.query(f"select spread(ct2.c1) from 
{dbname}.ct4 ct4 join {dbname}.ct2 ct2 on ct4.ts=ct2.ts") tdSql.checkRows(1) self.spread_check() - def __test_error(self): + def __test_error(self, dbname=DBNAME): tdLog.printNoPrefix("===step 0: err case, must return err") - tdSql.error( "select spread() from ct1" ) - tdSql.error( "select spread(1, 2) from ct2" ) - tdSql.error( f"select spread({NUM_COL[0]}, {NUM_COL[1]}) from ct4" ) - tdSql.error( f"select spread({BOOLEAN_COL[0]}) from t1" ) - tdSql.error( f"select spread({CHAR_COL[0]}) from stb1" ) + tdSql.error( f"select spread() from {dbname}.ct1" ) + tdSql.error( f"select spread(1, 2) from {dbname}.ct2" ) + tdSql.error( f"select spread({NUM_COL[0]}, {NUM_COL[1]}) from {dbname}.ct4" ) + tdSql.error( f"select spread({BOOLEAN_COL[0]}) from {dbname}.t1" ) + tdSql.error( f"select spread({CHAR_COL[0]}) from {dbname}.stb1" ) # tdSql.error( ''' select spread(['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10']) # from ct1 @@ -196,20 +200,20 @@ class TDTestCase: # having ['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10'] is not null ''' ) # tdSql.error( "select c1 from ct1 union select c1 from ct2 union select c1 from ct4 ") - def all_test(self): - self.__test_error() - self.__test_current() + def all_test(self, dbname=DBNAME): + self.__test_error(dbname) + self.__test_current(dbname) - def __create_tb(self): + def __create_tb(self, dbname=DBNAME): tdLog.printNoPrefix("==========step1:create table") - create_stb_sql = f'''create table stb1( + create_stb_sql = f'''create table {dbname}.stb1( ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp ) tags (t1 int) ''' - create_ntb_sql = f'''create table t1( + create_ntb_sql = f'''create table {dbname}.t1( ts timestamp, {INT_COL} int, {BINT_COL} 
bigint, {SINT_COL} smallint, {TINT_COL} tinyint, {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp @@ -219,30 +223,30 @@ class TDTestCase: tdSql.execute(create_ntb_sql) for i in range(4): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )') { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2} - def __insert_data(self, rows): + def __insert_data(self, rows, dbname=DBNAME): now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000) for i in range(rows): tdSql.execute( - f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + f"insert into {dbname}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" ) tdSql.execute( - f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + f"insert into {dbname}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" ) tdSql.execute( - f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + f"insert into {dbname}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" ) tdSql.execute( - f'''insert into ct1 values + f'''insert into 
{dbname}.ct1 values ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } ) ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } ) ''' ) tdSql.execute( - f'''insert into ct4 values + f'''insert into {dbname}.ct4 values ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) @@ -258,7 +262,7 @@ class TDTestCase: ) tdSql.execute( - f'''insert into ct2 values + f'''insert into {dbname}.ct2 values ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) @@ -274,13 +278,13 @@ class TDTestCase: ) for i in range(rows): - insert_data = f'''insert into t1 values + insert_data = f'''insert into {dbname}.t1 values ( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2}, "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } ) ''' tdSql.execute(insert_data) tdSql.execute( - f'''insert into t1 values + f'''insert into {dbname}.t1 values ( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) diff --git a/tests/system-test/2-query/sqrt.py b/tests/system-test/2-query/sqrt.py index 425d59f118..4401a23dbf 100644 --- a/tests/system-test/2-query/sqrt.py +++ b/tests/system-test/2-query/sqrt.py @@ -9,48 +9,46 @@ from util.cases 
import * class TDTestCase: - updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , - "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143, - "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143} + def init(self, conn, powSql): tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor()) - def prepare_datas(self): + def prepare_datas(self, dbname="db"): tdSql.execute( - '''create table stb1 + f'''create table {dbname}.stb1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) tags (t1 int) ''' ) tdSql.execute( - ''' - create table t1 + f''' + create table {dbname}.t1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) ''' ) for i in range(4): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )') for i in range(9): tdSql.execute( - f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" ) tdSql.execute( - f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" ) - tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") - tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, 
-9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") tdSql.execute( - f'''insert into t1 values + f'''insert into {dbname}.t1 values ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a ) ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a ) @@ -101,68 +99,68 @@ class TDTestCase: else: tdLog.info("sqrt value check pass , 
it work as expected ,sql is \"%s\" "%pow_query ) - def test_errors(self): + def test_errors(self, dbname="db"): error_sql_lists = [ - "select sqrt from t1", - # "select sqrt(-+--+c1 ) from t1", - # "select +-sqrt(c1) from t1", - # "select ++-sqrt(c1) from t1", - # "select ++--sqrt(c1) from t1", - # "select - -sqrt(c1)*0 from t1", - # "select sqrt(tbname+1) from t1 ", - "select sqrt(123--123)==1 from t1", - "select sqrt(c1) as 'd1' from t1", - "select sqrt(c1 ,c2) from t1", - "select sqrt(c1 ,NULL ) from t1", - "select sqrt(,) from t1;", - "select sqrt(sqrt(c1) ab from t1)", - "select sqrt(c1 ) as int from t1", - "select sqrt from stb1", - # "select sqrt(-+--+c1) from stb1", - # "select +-sqrt(c1) from stb1", - # "select ++-sqrt(c1) from stb1", - # "select ++--sqrt(c1) from stb1", - # "select - -sqrt(c1)*0 from stb1", - # "select sqrt(tbname+1) from stb1 ", - "select sqrt(123--123)==1 from stb1", - "select sqrt(c1) as 'd1' from stb1", - "select sqrt(c1 ,c2 ) from stb1", - "select sqrt(c1 ,NULL) from stb1", - "select sqrt(,) from stb1;", - "select sqrt(sqrt(c1) ab from stb1)", - "select sqrt(c1) as int from stb1" + f"select sqrt from {dbname}.t1", + # f"select sqrt(-+--+c1 ) from {dbname}.t1", + # f"select +-sqrt(c1) from {dbname}.t1", + # f"select ++-sqrt(c1) from {dbname}.t1", + # f"select ++--sqrt(c1) from {dbname}.t1", + # f"select - -sqrt(c1)*0 from {dbname}.t1", + # f"select sqrt(tbname+1) from {dbname}.t1 ", + f"select sqrt(123--123)==1 from {dbname}.t1", + f"select sqrt(c1) as 'd1' from {dbname}.t1", + f"select sqrt(c1 ,c2) from {dbname}.t1", + f"select sqrt(c1 ,NULL ) from {dbname}.t1", + f"select sqrt(,) from {dbname}.t1;", + f"select sqrt(sqrt(c1) ab from {dbname}.t1)", + f"select sqrt(c1 ) as int from {dbname}.t1", + f"select sqrt from {dbname}.stb1", + # f"select sqrt(-+--+c1) from {dbname}.stb1", + # f"select +-sqrt(c1) from {dbname}.stb1", + # f"select ++-sqrt(c1) from {dbname}.stb1", + # f"select ++--sqrt(c1) from {dbname}.stb1", + # f"select - 
-sqrt(c1)*0 from {dbname}.stb1", + # f"select sqrt(tbname+1) from {dbname}.stb1 ", + f"select sqrt(123--123)==1 from {dbname}.stb1", + f"select sqrt(c1) as 'd1' from {dbname}.stb1", + f"select sqrt(c1 ,c2 ) from {dbname}.stb1", + f"select sqrt(c1 ,NULL) from {dbname}.stb1", + f"select sqrt(,) from {dbname}.stb1;", + f"select sqrt(sqrt(c1) ab from {dbname}.stb1)", + f"select sqrt(c1) as int from {dbname}.stb1" ] for error_sql in error_sql_lists: tdSql.error(error_sql) - def support_types(self): + def support_types(self, dbname="db"): type_error_sql_lists = [ - "select sqrt(ts) from t1" , - "select sqrt(c7) from t1", - "select sqrt(c8) from t1", - "select sqrt(c9) from t1", - "select sqrt(ts) from ct1" , - "select sqrt(c7) from ct1", - "select sqrt(c8) from ct1", - "select sqrt(c9) from ct1", - "select sqrt(ts) from ct3" , - "select sqrt(c7) from ct3", - "select sqrt(c8) from ct3", - "select sqrt(c9) from ct3", - "select sqrt(ts) from ct4" , - "select sqrt(c7) from ct4", - "select sqrt(c8) from ct4", - "select sqrt(c9) from ct4", - "select sqrt(ts) from stb1" , - "select sqrt(c7) from stb1", - "select sqrt(c8) from stb1", - "select sqrt(c9) from stb1" , + f"select sqrt(ts) from {dbname}.t1" , + f"select sqrt(c7) from {dbname}.t1", + f"select sqrt(c8) from {dbname}.t1", + f"select sqrt(c9) from {dbname}.t1", + f"select sqrt(ts) from {dbname}.ct1" , + f"select sqrt(c7) from {dbname}.ct1", + f"select sqrt(c8) from {dbname}.ct1", + f"select sqrt(c9) from {dbname}.ct1", + f"select sqrt(ts) from {dbname}.ct3" , + f"select sqrt(c7) from {dbname}.ct3", + f"select sqrt(c8) from {dbname}.ct3", + f"select sqrt(c9) from {dbname}.ct3", + f"select sqrt(ts) from {dbname}.ct4" , + f"select sqrt(c7) from {dbname}.ct4", + f"select sqrt(c8) from {dbname}.ct4", + f"select sqrt(c9) from {dbname}.ct4", + f"select sqrt(ts) from {dbname}.stb1" , + f"select sqrt(c7) from {dbname}.stb1", + f"select sqrt(c8) from {dbname}.stb1", + f"select sqrt(c9) from {dbname}.stb1" , - "select sqrt(ts) from 
stbbb1" , - "select sqrt(c7) from stbbb1", + f"select sqrt(ts) from {dbname}.stbbb1" , + f"select sqrt(c7) from {dbname}.stbbb1", - "select sqrt(ts) from tbname", - "select sqrt(c9) from tbname" + f"select sqrt(ts) from {dbname}.tbname", + f"select sqrt(c9) from {dbname}.tbname" ] @@ -171,103 +169,103 @@ class TDTestCase: type_sql_lists = [ - "select sqrt(c1) from t1", - "select sqrt(c2) from t1", - "select sqrt(c3) from t1", - "select sqrt(c4) from t1", - "select sqrt(c5) from t1", - "select sqrt(c6) from t1", + f"select sqrt(c1) from {dbname}.t1", + f"select sqrt(c2) from {dbname}.t1", + f"select sqrt(c3) from {dbname}.t1", + f"select sqrt(c4) from {dbname}.t1", + f"select sqrt(c5) from {dbname}.t1", + f"select sqrt(c6) from {dbname}.t1", - "select sqrt(c1) from ct1", - "select sqrt(c2) from ct1", - "select sqrt(c3) from ct1", - "select sqrt(c4) from ct1", - "select sqrt(c5) from ct1", - "select sqrt(c6) from ct1", + f"select sqrt(c1) from {dbname}.ct1", + f"select sqrt(c2) from {dbname}.ct1", + f"select sqrt(c3) from {dbname}.ct1", + f"select sqrt(c4) from {dbname}.ct1", + f"select sqrt(c5) from {dbname}.ct1", + f"select sqrt(c6) from {dbname}.ct1", - "select sqrt(c1) from ct3", - "select sqrt(c2) from ct3", - "select sqrt(c3) from ct3", - "select sqrt(c4) from ct3", - "select sqrt(c5) from ct3", - "select sqrt(c6) from ct3", + f"select sqrt(c1) from {dbname}.ct3", + f"select sqrt(c2) from {dbname}.ct3", + f"select sqrt(c3) from {dbname}.ct3", + f"select sqrt(c4) from {dbname}.ct3", + f"select sqrt(c5) from {dbname}.ct3", + f"select sqrt(c6) from {dbname}.ct3", - "select sqrt(c1) from stb1", - "select sqrt(c2) from stb1", - "select sqrt(c3) from stb1", - "select sqrt(c4) from stb1", - "select sqrt(c5) from stb1", - "select sqrt(c6) from stb1", + f"select sqrt(c1) from {dbname}.stb1", + f"select sqrt(c2) from {dbname}.stb1", + f"select sqrt(c3) from {dbname}.stb1", + f"select sqrt(c4) from {dbname}.stb1", + f"select sqrt(c5) from {dbname}.stb1", + f"select 
sqrt(c6) from {dbname}.stb1", - "select sqrt(c6) as alisb from stb1", - "select sqrt(c6) alisb from stb1", + f"select sqrt(c6) as alisb from {dbname}.stb1", + f"select sqrt(c6) alisb from {dbname}.stb1", ] for type_sql in type_sql_lists: tdSql.query(type_sql) - def basic_sqrt_function(self): + def basic_sqrt_function(self, dbname="db"): # basic query - tdSql.query("select c1 from ct3") + tdSql.query(f"select c1 from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select c1 from t1") + tdSql.query(f"select c1 from {dbname}.t1") tdSql.checkRows(12) - tdSql.query("select c1 from stb1") + tdSql.query(f"select c1 from {dbname}.stb1") tdSql.checkRows(25) # used for empty table , ct3 is empty - tdSql.query("select sqrt(c1) from ct3") + tdSql.query(f"select sqrt(c1) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select sqrt(c2) from ct3") + tdSql.query(f"select sqrt(c2) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select sqrt(c3) from ct3") + tdSql.query(f"select sqrt(c3) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select sqrt(c4) from ct3") + tdSql.query(f"select sqrt(c4) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select sqrt(c5) from ct3") + tdSql.query(f"select sqrt(c5) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select sqrt(c6) from ct3") + tdSql.query(f"select sqrt(c6) from {dbname}.ct3") tdSql.checkRows(0) # # used for regular table - tdSql.query("select sqrt(c1) from t1") + tdSql.query(f"select sqrt(c1) from {dbname}.t1") tdSql.checkData(0, 0, None) tdSql.checkData(1 , 0, 1.000000000) tdSql.checkData(3 , 0, 1.732050808) tdSql.checkData(5 , 0, None) - tdSql.query("select c1, c2, c3 , c4, c5 from t1") + tdSql.query(f"select c1, c2, c3 , c4, c5 from {dbname}.t1") tdSql.checkData(1, 4, 1.11000) tdSql.checkData(3, 3, 33) tdSql.checkData(5, 4, None) - tdSql.query("select ts,c1, c2, c3 , c4, c5 from t1") + tdSql.query(f"select ts,c1, c2, c3 , c4, c5 from {dbname}.t1") tdSql.checkData(1, 5, 1.11000) tdSql.checkData(3, 4, 33) 
tdSql.checkData(5, 5, None) - self.check_result_auto_sqrt( "select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from t1", "select sqrt(abs(c1)), sqrt(abs(c2)) ,sqrt(abs(c3)), sqrt(abs(c4)), sqrt(abs(c5)) from t1") + self.check_result_auto_sqrt( f"select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from {dbname}.t1", f"select sqrt(abs(c1)), sqrt(abs(c2)) ,sqrt(abs(c3)), sqrt(abs(c4)), sqrt(abs(c5)) from {dbname}.t1") # used for sub table - tdSql.query("select c2 ,sqrt(c2) from ct1") + tdSql.query(f"select c2 ,sqrt(c2) from {dbname}.ct1") tdSql.checkData(0, 1, 298.140906284) tdSql.checkData(1 , 1, 278.885281074) tdSql.checkData(3 , 1, 235.701081881) tdSql.checkData(4 , 1, 0.000000000) - tdSql.query("select c1, c5 ,sqrt(c5) from ct4") + tdSql.query(f"select c1, c5 ,sqrt(c5) from {dbname}.ct4") tdSql.checkData(0 , 2, None) tdSql.checkData(1 , 2, 2.979932904) tdSql.checkData(2 , 2, 2.787471970) tdSql.checkData(3 , 2, 2.580697551) tdSql.checkData(5 , 2, None) - self.check_result_auto_sqrt( "select c1, c2, c3 , c4, c5 from ct1", "select sqrt(c1), sqrt(c2) ,sqrt(c3), sqrt(c4), sqrt(c5) from ct1") + self.check_result_auto_sqrt( f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select sqrt(c1), sqrt(c2) ,sqrt(c3), sqrt(c4), sqrt(c5) from {dbname}.ct1") # nest query for sqrt functions - tdSql.query("select c4 , sqrt(c4) ,sqrt(sqrt(c4)) , sqrt(sqrt(sqrt(c4))) from ct1;") + tdSql.query(f"select c4 , sqrt(c4) ,sqrt(sqrt(c4)) , sqrt(sqrt(sqrt(c4))) from {dbname}.ct1;") tdSql.checkData(0 , 0 , 88) tdSql.checkData(0 , 1 , 9.380831520) tdSql.checkData(0 , 2 , 3.062814314) @@ -285,22 +283,22 @@ class TDTestCase: # used for stable table - tdSql.query("select sqrt(c1) from stb1") + tdSql.query(f"select sqrt(c1) from {dbname}.stb1") tdSql.checkRows(25) # used for not exists table - tdSql.error("select sqrt(c1) from stbbb1") - tdSql.error("select sqrt(c1) from tbname") - tdSql.error("select sqrt(c1) from ct5") + tdSql.error(f"select sqrt(c1) from {dbname}.stbbb1") + tdSql.error(f"select 
sqrt(c1) from {dbname}.tbname") + tdSql.error(f"select sqrt(c1) from {dbname}.ct5") # mix with common col - tdSql.query("select c1, sqrt(c1) from ct1") + tdSql.query(f"select c1, sqrt(c1) from {dbname}.ct1") tdSql.checkData(0 , 0 ,8) tdSql.checkData(0 , 1 ,2.828427125) tdSql.checkData(4 , 0 ,0) tdSql.checkData(4 , 1 ,0.000000000) - tdSql.query("select c2, sqrt(c2) from ct4") + tdSql.query(f"select c2, sqrt(c2) from {dbname}.ct4") tdSql.checkData(0 , 0 , None) tdSql.checkData(0 , 1 ,None) tdSql.checkData(4 , 0 ,55555) @@ -309,7 +307,7 @@ class TDTestCase: tdSql.checkData(5 , 1 ,None) # mix with common functions - tdSql.query("select c1, sqrt(c1),sqrt(c1), sqrt(sqrt(c1)) from ct4 ") + tdSql.query(f"select c1, sqrt(c1),sqrt(c1), sqrt(sqrt(c1)) from {dbname}.ct4 ") tdSql.checkData(0 , 0 ,None) tdSql.checkData(0 , 1 ,None) tdSql.checkData(0 , 2 ,None) @@ -320,34 +318,34 @@ class TDTestCase: tdSql.checkData(3 , 2 ,2.449489743) tdSql.checkData(3 , 3 ,1.565084580) - tdSql.query("select c1, sqrt(c1),c5, floor(c5) from stb1 ") + tdSql.query(f"select c1, sqrt(c1),c5, floor(c5) from {dbname}.stb1 ") # # mix with agg functions , not support - tdSql.error("select c1, sqrt(c1),c5, count(c5) from stb1 ") - tdSql.error("select c1, sqrt(c1),c5, count(c5) from ct1 ") - tdSql.error("select sqrt(c1), count(c5) from stb1 ") - tdSql.error("select sqrt(c1), count(c5) from ct1 ") - tdSql.error("select c1, count(c5) from ct1 ") - tdSql.error("select c1, count(c5) from stb1 ") + tdSql.error(f"select c1, sqrt(c1),c5, count(c5) from {dbname}.stb1 ") + tdSql.error(f"select c1, sqrt(c1),c5, count(c5) from {dbname}.ct1 ") + tdSql.error(f"select sqrt(c1), count(c5) from {dbname}.stb1 ") + tdSql.error(f"select sqrt(c1), count(c5) from {dbname}.ct1 ") + tdSql.error(f"select c1, count(c5) from {dbname}.ct1 ") + tdSql.error(f"select c1, count(c5) from {dbname}.stb1 ") # agg functions mix with agg functions - tdSql.query("select max(c5), count(c5) from stb1") - tdSql.query("select max(c5), count(c5) 
from ct1") + tdSql.query(f"select max(c5), count(c5) from {dbname}.stb1") + tdSql.query(f"select max(c5), count(c5) from {dbname}.ct1") # bug fix for count - tdSql.query("select count(c1) from ct4 ") + tdSql.query(f"select count(c1) from {dbname}.ct4 ") tdSql.checkData(0,0,9) - tdSql.query("select count(*) from ct4 ") + tdSql.query(f"select count(*) from {dbname}.ct4 ") tdSql.checkData(0,0,12) - tdSql.query("select count(c1) from stb1 ") + tdSql.query(f"select count(c1) from {dbname}.stb1 ") tdSql.checkData(0,0,22) - tdSql.query("select count(*) from stb1 ") + tdSql.query(f"select count(*) from {dbname}.stb1 ") tdSql.checkData(0,0,25) # # bug fix for compute - tdSql.query("select c1, sqrt(c1) -0 ,sqrt(c1-4)-0 from ct4 ") + tdSql.query(f"select c1, sqrt(c1) -0 ,sqrt(c1-4)-0 from {dbname}.ct4 ") tdSql.checkData(0, 0, None) tdSql.checkData(0, 1, None) tdSql.checkData(0, 2, None) @@ -355,7 +353,7 @@ class TDTestCase: tdSql.checkData(1, 1, 2.828427125) tdSql.checkData(1, 2, 2.000000000) - tdSql.query(" select c1, sqrt(c1) -0 ,sqrt(c1-0.1)-0.1 from ct4") + tdSql.query(f"select c1, sqrt(c1) -0 ,sqrt(c1-0.1)-0.1 from {dbname}.ct4") tdSql.checkData(0, 0, None) tdSql.checkData(0, 1, None) tdSql.checkData(0, 2, None) @@ -363,57 +361,56 @@ class TDTestCase: tdSql.checkData(1, 1, 2.828427125) tdSql.checkData(1, 2, 2.710693865) - tdSql.query("select c1, sqrt(c1), c2, sqrt(c2), c3, sqrt(c3) from ct1") + tdSql.query(f"select c1, sqrt(c1), c2, sqrt(c2), c3, sqrt(c3) from {dbname}.ct1") - def test_big_number(self): + def test_big_number(self, dbname="db"): - tdSql.query("select c1, sqrt(100000000) from ct1") # bigint to double data overflow + tdSql.query(f"select c1, sqrt(100000000) from {dbname}.ct1") # bigint to double data overflow tdSql.checkData(4, 1, 10000.000000000) - tdSql.query("select c1, sqrt(10000000000000) from ct1") # bigint to double data overflow + tdSql.query(f"select c1, sqrt(10000000000000) from {dbname}.ct1") # bigint to double data overflow tdSql.checkData(4, 1, 
3162277.660168380) - tdSql.query("select c1, sqrt(c1) + sqrt(10000000000000000000000000) from ct1") # bigint to double data overflow - tdSql.query("select c1, sqrt(c1) + sqrt(10000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value + tdSql.query(f"select c1, sqrt(c1) + sqrt(10000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow + tdSql.query(f"select c1, sqrt(c1) + sqrt(10000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value tdSql.checkData(1, 1, 3162277660171.025390625) - tdSql.query("select c1, sqrt(10000000000000000000000000000000000) from ct1") # bigint to double data overflow - tdSql.query("select c1, sqrt(10000000000000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value + tdSql.query(f"select c1, sqrt(10000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow + tdSql.query(f"select c1, sqrt(10000000000000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value tdSql.checkData(4, 1, 100000000000000000.000000000) - tdSql.query("select c1, sqrt(10000000000000000000000000000000000000000) from ct1") # bigint to double data overflow - tdSql.query("select c1, sqrt(10000000000000000000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value + tdSql.query(f"select c1, sqrt(10000000000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow + tdSql.query(f"select c1, sqrt(10000000000000000000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value tdSql.checkData(4, 1, 100000000000000000000.000000000) - tdSql.query("select c1, sqrt(10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) from ct1") # bigint to double data overflow + tdSql.query(f"select c1, 
sqrt(10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow - def pow_base_test(self): + def pow_base_test(self, dbname="db"): # base is an regular number ,int or double - tdSql.query("select c1, sqrt(c1) from ct1") + tdSql.query(f"select c1, sqrt(c1) from {dbname}.ct1") tdSql.checkData(0, 1,2.828427125) tdSql.checkRows(13) # # bug for compute in functions - # tdSql.query("select c1, abs(1/0) from ct1") + # tdSql.query(f"select c1, abs(1/0) from {dbname}.ct1") # tdSql.checkData(0, 0, 8) # tdSql.checkData(0, 1, 1) - tdSql.query("select c1, sqrt(1) from ct1") + tdSql.query(f"select c1, sqrt(1) from {dbname}.ct1") tdSql.checkData(0, 1, 1.000000000) tdSql.checkRows(13) # two cols start sqrt(x,y) - tdSql.query("select c1,c2, sqrt(c2) from ct1") + tdSql.query(f"select c1,c2, sqrt(c2) from {dbname}.ct1") tdSql.checkData(0, 2, 298.140906284) tdSql.checkData(1, 2, 278.885281074) tdSql.checkData(4, 2, 0.000000000) - def abs_func_filter(self): - tdSql.execute("use db") - tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sqrt(c1)-0.5) from ct4 where c1>5 ") + def abs_func_filter(self, dbname="db"): + tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sqrt(c1)-0.5) from {dbname}.ct4 where c1>5 ") tdSql.checkRows(3) tdSql.checkData(0,0,8) tdSql.checkData(0,1,8.000000000) @@ -421,7 +418,7 @@ class TDTestCase: tdSql.checkData(0,3,7.900000000) tdSql.checkData(0,4,3.000000000) - tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sqrt(c1)-0.5) from ct4 where c1=5 ") + tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sqrt(c1)-0.5) from {dbname}.ct4 where c1=5 ") tdSql.checkRows(1) tdSql.checkData(0,0,5) tdSql.checkData(0,1,5.000000000) @@ -429,7 +426,7 @@ class TDTestCase: tdSql.checkData(0,3,4.900000000) tdSql.checkData(0,4,2.000000000) - tdSql.query("select c1, abs(c1) -0 
,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sqrt(c1)-0.5) from ct4 where c1=5 ") + tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sqrt(c1)-0.5) from {dbname}.ct4 where c1=5 ") tdSql.checkRows(1) tdSql.checkData(0,0,5) tdSql.checkData(0,1,5.000000000) @@ -437,7 +434,7 @@ class TDTestCase: tdSql.checkData(0,3,4.900000000) tdSql.checkData(0,4,2.000000000) - tdSql.query("select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sqrt(c1)-0.5) from ct4 where c1=sqrt(c1) limit 1 ") + tdSql.query(f"select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sqrt(c1)-0.5) from {dbname}.ct4 where c1=sqrt(c1) limit 1 ") tdSql.checkRows(1) tdSql.checkData(0,0,1) tdSql.checkData(0,1,11111) @@ -446,19 +443,14 @@ class TDTestCase: tdSql.checkData(0,4,0.900000000) tdSql.checkData(0,5,1.000000000) - def pow_Arithmetic(self): - pass + def check_boundary_values(self, dbname="bound_test"): - def check_boundary_values(self): - - tdSql.execute("drop database if exists bound_test") - tdSql.execute("create database if not exists bound_test") - time.sleep(3) - tdSql.execute("use bound_test") + tdSql.execute(f"drop database if exists {dbname}") + tdSql.execute(f"create database if not exists {dbname}") tdSql.execute( - "create table stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);" + f"create table {dbname}.stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);" ) - tdSql.execute(f'create table sub1_bound using stb_bound tags ( 1 )') + tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )') tdSql.execute( f"insert into sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) @@ -474,14 +466,14 @@ class TDTestCase: tdSql.error( 
f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) - self.check_result_auto_sqrt( "select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from sub1_bound ", "select sqrt(abs(c1)), sqrt(abs(c2)) ,sqrt(abs(c3)), sqrt(abs(c4)), sqrt(abs(c5)) from sub1_bound") + self.check_result_auto_sqrt( f"select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from {dbname}.sub1_bound ", f"select sqrt(abs(c1)), sqrt(abs(c2)) ,sqrt(abs(c3)), sqrt(abs(c4)), sqrt(abs(c5)) from {dbname}.sub1_bound") - self.check_result_auto_sqrt( "select c1, c2, c3 , c3, c2 ,c1 from sub1_bound ", "select sqrt(c1), sqrt(c2) ,sqrt(c3), sqrt(c3), sqrt(c2) ,sqrt(c1) from sub1_bound") + self.check_result_auto_sqrt( f"select c1, c2, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ", f"select sqrt(c1), sqrt(c2) ,sqrt(c3), sqrt(c3), sqrt(c2) ,sqrt(c1) from {dbname}.sub1_bound") - self.check_result_auto_sqrt("select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from sub1_bound" , "select sqrt(abs(c1)) from sub1_bound" ) + self.check_result_auto_sqrt(f"select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from {dbname}.sub1_bound" , f"select sqrt(abs(c1)) from {dbname}.sub1_bound" ) # check basic elem for table per row - tdSql.query("select sqrt(abs(c1)) ,sqrt(abs(c2)) , sqrt(abs(c3)) , sqrt(abs(c4)), sqrt(abs(c5)), sqrt(abs(c6)) from sub1_bound ") + tdSql.query(f"select sqrt(abs(c1)) ,sqrt(abs(c2)) , sqrt(abs(c3)) , sqrt(abs(c4)), sqrt(abs(c5)), sqrt(abs(c6)) from {dbname}.sub1_bound ") tdSql.checkData(0,0,math.sqrt(2147483647)) tdSql.checkData(0,1,math.sqrt(9223372036854775807)) tdSql.checkData(0,2,math.sqrt(32767)) @@ -499,23 +491,22 @@ class TDTestCase: tdSql.checkData(3,4,math.sqrt(339999995214436424907732413799364296704.00000)) # check + - * / in functions - tdSql.query("select sqrt(abs(c1+1)) ,sqrt(abs(c2)) , sqrt(abs(c3*1)) , sqrt(abs(c4/2)), sqrt(abs(c5))/2, sqrt(abs(c6)) from 
sub1_bound ") + tdSql.query(f"select sqrt(abs(c1+1)) ,sqrt(abs(c2)) , sqrt(abs(c3*1)) , sqrt(abs(c4/2)), sqrt(abs(c5))/2, sqrt(abs(c6)) from {dbname}.sub1_bound ") tdSql.checkData(0,0,math.sqrt(2147483648.000000000)) tdSql.checkData(0,1,math.sqrt(9223372036854775807)) tdSql.checkData(0,2,math.sqrt(32767.000000000)) tdSql.checkData(0,3,math.sqrt(63.500000000)) - def support_super_table_test(self): - tdSql.execute(" use db ") - self.check_result_auto_sqrt( " select c5 from stb1 order by ts " , "select sqrt(c5) from stb1 order by ts" ) - self.check_result_auto_sqrt( " select c5 from stb1 order by tbname " , "select sqrt(c5) from stb1 order by tbname" ) - self.check_result_auto_sqrt( " select c5 from stb1 where c1 > 0 order by tbname " , "select sqrt(c5) from stb1 where c1 > 0 order by tbname" ) - self.check_result_auto_sqrt( " select c5 from stb1 where c1 > 0 order by tbname " , "select sqrt(c5) from stb1 where c1 > 0 order by tbname" ) + def support_super_table_test(self, dbname="db"): + self.check_result_auto_sqrt( f"select c5 from {dbname}.stb1 order by ts " , f"select sqrt(c5) from {dbname}.stb1 order by ts" ) + self.check_result_auto_sqrt( f"select c5 from {dbname}.stb1 order by tbname " , f"select sqrt(c5) from {dbname}.stb1 order by tbname" ) + self.check_result_auto_sqrt( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select sqrt(c5) from {dbname}.stb1 where c1 > 0 order by tbname" ) + self.check_result_auto_sqrt( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select sqrt(c5) from {dbname}.stb1 where c1 > 0 order by tbname" ) - self.check_result_auto_sqrt( " select t1,c5 from stb1 order by ts " , "select sqrt(t1), sqrt(c5) from stb1 order by ts" ) - self.check_result_auto_sqrt( " select t1,c5 from stb1 order by tbname " , "select sqrt(t1) ,sqrt(c5) from stb1 order by tbname" ) - self.check_result_auto_sqrt( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select sqrt(t1) ,sqrt(c5) from stb1 where c1 > 0 order by 
tbname" ) - self.check_result_auto_sqrt( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select sqrt(t1) , sqrt(c5) from stb1 where c1 > 0 order by tbname" ) + self.check_result_auto_sqrt( f"select t1,c5 from {dbname}.stb1 order by ts " , f"select sqrt(t1), sqrt(c5) from {dbname}.stb1 order by ts" ) + self.check_result_auto_sqrt( f"select t1,c5 from {dbname}.stb1 order by tbname " , f"select sqrt(t1) ,sqrt(c5) from {dbname}.stb1 order by tbname" ) + self.check_result_auto_sqrt( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select sqrt(t1) ,sqrt(c5) from {dbname}.stb1 where c1 > 0 order by tbname" ) + self.check_result_auto_sqrt( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select sqrt(t1) , sqrt(c5) from {dbname}.stb1 where c1 > 0 order by tbname" ) pass def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh index e89970b6ff..d741b6745b 100755 --- a/tests/system-test/fulltest.sh +++ b/tests/system-test/fulltest.sh @@ -125,7 +125,7 @@ python3 ./test.py -f 2-query/leastsquares.py -R python3 ./test.py -f 2-query/length.py python3 ./test.py -f 2-query/length.py -R python3 ./test.py -f 2-query/ltrim.py -# python3 ./test.py -f 2-query/ltrim.py -R +python3 ./test.py -f 2-query/ltrim.py -R python3 ./test.py -f 2-query/max_partition.py python3 ./test.py -f 2-query/max_partition.py -R python3 ./test.py -f 2-query/max.py @@ -143,11 +143,19 @@ python3 ./test.py -f 2-query/query_cols_tags_and_or.py -R python3 ./test.py -f 2-query/round.py python3 ./test.py -f 2-query/round.py -R python3 ./test.py -f 2-query/rtrim.py -# python3 ./test.py -f 2-query/rtrim.py -R +python3 ./test.py -f 2-query/rtrim.py -R python3 ./test.py -f 2-query/sample.py python3 ./test.py -f 2-query/sample.py -R python3 ./test.py -f 2-query/sin.py # python3 ./test.py -f 2-query/sin.py -R +python3 ./test.py -f 2-query/smaTest.py +python3 ./test.py -f 
2-query/smaTest.py -R +python3 ./test.py -f 2-query/sml.py +python3 ./test.py -f 2-query/sml.py -R +python3 ./test.py -f 2-query/spread.py +python3 ./test.py -f 2-query/spread.py -R +python3 ./test.py -f 2-query/sqrt.py +# python3 ./test.py -f 2-query/sqrt.py -R python3 ./test.py -f 1-insert/update_data.py @@ -162,7 +170,6 @@ python3 ./test.py -f 2-query/substr.py python3 ./test.py -f 2-query/union.py python3 ./test.py -f 2-query/union1.py python3 ./test.py -f 2-query/concat2.py -python3 ./test.py -f 2-query/spread.py python3 ./test.py -f 2-query/timezone.py @@ -175,7 +182,6 @@ python3 ./test.py -f 2-query/json_tag.py python3 ./test.py -f 2-query/top.py python3 ./test.py -f 2-query/log.py -python3 ./test.py -f 2-query/sqrt.py python3 ./test.py -f 2-query/tan.py # python3 ./test.py -f 2-query/nestedQuery.py # TD-15983 subquery output duplicate name column. From 53e3b2e5f5bf0cb362009d0ab4aec5ac49fa27a4 Mon Sep 17 00:00:00 2001 From: cpwu Date: Thu, 18 Aug 2022 11:14:05 +0800 Subject: [PATCH 09/72] fix case, add case support rest api --- tests/pytest/util/sql.py | 20 +- tests/system-test/2-query/statecount.py | 404 ++++++++++---------- tests/system-test/2-query/substr.py | 46 ++- tests/system-test/2-query/sum.py | 65 ++-- tests/system-test/2-query/tail.py | 478 ++++++++++++------------ tests/system-test/2-query/tan.py | 445 +++++++++++----------- tests/system-test/fulltest.sh | 17 +- 7 files changed, 717 insertions(+), 758 deletions(-) diff --git a/tests/pytest/util/sql.py b/tests/pytest/util/sql.py index 753c41e094..c0c86f0d85 100644 --- a/tests/pytest/util/sql.py +++ b/tests/pytest/util/sql.py @@ -102,7 +102,7 @@ class TDSql: caller = inspect.getframeinfo(inspect.stack()[1][0]) args = (caller.filename, caller.lineno, sql, repr(e)) tdLog.notice("%s(%d) failed: sql:%s, %s" % args) - raise Exception(repr(e)) + raise Exception(repr(e)) i+=1 time.sleep(1) pass @@ -254,21 +254,7 @@ class TDSql: args = (caller.filename, caller.lineno, self.sql, row, col, 
self.queryResult[row][col], data) tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args) - if data is None: - tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % - (self.sql, row, col, self.queryResult[row][col], data)) - elif isinstance(data, str): - tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % - (self.sql, row, col, self.queryResult[row][col], data)) - elif isinstance(data, datetime.date): - tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % - (self.sql, row, col, self.queryResult[row][col], data)) - elif isinstance(data, float): - tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % - (self.sql, row, col, self.queryResult[row][col], data)) - else: - tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%d" % - (self.sql, row, col, self.queryResult[row][col], data)) + tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}") def getData(self, row, col): self.checkRowCol(row, col) @@ -307,7 +293,7 @@ class TDSql: caller = inspect.getframeinfo(inspect.stack()[1][0]) args = (caller.filename, caller.lineno, sql, repr(e)) tdLog.notice("%s(%d) failed: sql:%s, %s" % args) - raise Exception(repr(e)) + raise Exception(repr(e)) i+=1 time.sleep(1) pass diff --git a/tests/system-test/2-query/statecount.py b/tests/system-test/2-query/statecount.py index a88c4aef9f..c73c955de4 100644 --- a/tests/system-test/2-query/statecount.py +++ b/tests/system-test/2-query/statecount.py @@ -11,50 +11,47 @@ from util.sql import * from util.cases import * class TDTestCase: - updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , - "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143, - "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143} def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") 
tdSql.init(conn.cursor()) self.ts = 1420041600000 # 2015-01-01 00:00:00 this is begin time for first record - def prepare_datas(self): + def prepare_datas(self, dbname="db"): tdSql.execute( - '''create table stb1 + f'''create table {dbname}.stb1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) tags (t1 int) ''' ) tdSql.execute( - ''' - create table t1 + f''' + create table {dbname}.t1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) ''' ) for i in range(4): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )') for i in range(9): tdSql.execute( - f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" ) tdSql.execute( - f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" ) - tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") - tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 
0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") tdSql.execute( - f'''insert into t1 values + f'''insert into {dbname}.t1 values ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a ) ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a ) @@ -70,68 +67,68 @@ class TDTestCase: ''' ) - def test_errors(self): + def test_errors(self, dbname="db"): error_sql_lists = [ - # "select statecount(c1,'GT',5) from t1" - "select statecount from t1", - "select statecount(123--123)==1 from t1", - "select statecount(123,123) from t1", - "select statecount(c1,ts) from t1", - "select statecount(c1,c1,ts) from t1", - "select statecount(c1 ,c2 ) from t1", - "select statecount(c1 ,NULL) from t1", - 
#"select statecount(c1 ,'NULL',1.0) from t1", - "select statecount(c1 ,'GT','1') from t1", - "select statecount(c1 ,'GT','tbname') from t1", - "select statecount(c1 ,'GT','*') from t1", - "select statecount(c1 ,'GT',ts) from t1", - "select statecount(c1 ,'GT',max(c1)) from t1", - # "select statecount(abs(c1) ,'GT',1) from t1", - # "select statecount(c1+2 ,'GT',1) from t1", - "select statecount(c1 ,'GT',1,1u) from t1", - "select statecount(c1 ,'GT',1,now) from t1", - "select statecount(c1 ,'GT','1') from t1", - "select statecount(c1 ,'GT','1',True) from t1", - "select statecount(statecount(c1) ab from t1)", - "select statecount(c1 ,'GT',1,,)int from t1", - "select statecount('c1','GT',1) from t1", - "select statecount('c1','GT' , NULL) from t1", - "select statecount('c1','GT', 1 , '') from t1", - "select statecount('c1','GT', 1 ,c%) from t1", - "select statecount(c1 ,'GT',1,t1) from t1", - "select statecount(c1 ,'GT',1,True) from t1", - "select statecount(c1 ,'GT',1) , count(c1) from t1", - "select statecount(c1 ,'GT',1) , avg(c1) from t1", - "select statecount(c1 ,'GT',1) , min(c1) from t1", - "select statecount(c1 ,'GT',1) , spread(c1) from t1", - "select statecount(c1 ,'GT',1) , diff(c1) from t1", + # f"select statecount(c1,'GT',5) from {dbname}.t1" + f"select statecount from {dbname}.t1", + f"select statecount(123--123)==1 from {dbname}.t1", + f"select statecount(123,123) from {dbname}.t1", + f"select statecount(c1,ts) from {dbname}.t1", + f"select statecount(c1,c1,ts) from {dbname}.t1", + f"select statecount(c1 ,c2 ) from {dbname}.t1", + f"select statecount(c1 ,NULL) from {dbname}.t1", + #f"select statecount(c1 ,'NULL',1.0) from {dbname}.t1", + f"select statecount(c1 ,'GT','1') from {dbname}.t1", + f"select statecount(c1 ,'GT','tbname') from {dbname}.t1", + f"select statecount(c1 ,'GT','*') from {dbname}.t1", + f"select statecount(c1 ,'GT',ts) from {dbname}.t1", + f"select statecount(c1 ,'GT',max(c1)) from {dbname}.t1", + # f"select statecount(abs(c1) ,'GT',1) 
from {dbname}.t1", + # f"select statecount(c1+2 ,'GT',1) from {dbname}.t1", + f"select statecount(c1 ,'GT',1,1u) from {dbname}.t1", + f"select statecount(c1 ,'GT',1,now) from {dbname}.t1", + f"select statecount(c1 ,'GT','1') from {dbname}.t1", + f"select statecount(c1 ,'GT','1',True) from {dbname}.t1", + f"select statecount(statecount(c1) ab from {dbname}.t1)", + f"select statecount(c1 ,'GT',1,,)int from {dbname}.t1", + f"select statecount('c1','GT',1) from {dbname}.t1", + f"select statecount('c1','GT' , NULL) from {dbname}.t1", + f"select statecount('c1','GT', 1 , '') from {dbname}.t1", + f"select statecount('c1','GT', 1 ,c%) from {dbname}.t1", + f"select statecount(c1 ,'GT',1,t1) from {dbname}.t1", + f"select statecount(c1 ,'GT',1,True) from {dbname}.t1", + f"select statecount(c1 ,'GT',1) , count(c1) from {dbname}.t1", + f"select statecount(c1 ,'GT',1) , avg(c1) from {dbname}.t1", + f"select statecount(c1 ,'GT',1) , min(c1) from {dbname}.t1", + f"select statecount(c1 ,'GT',1) , spread(c1) from {dbname}.t1", + f"select statecount(c1 ,'GT',1) , diff(c1) from {dbname}.t1", ] for error_sql in error_sql_lists: tdSql.error(error_sql) pass - def support_types(self): + def support_types(self, dbname="db"): other_no_value_types = [ - "select statecount(ts,'GT',1) from t1" , - "select statecount(c7,'GT',1) from t1", - "select statecount(c8,'GT',1) from t1", - "select statecount(c9,'GT',1) from t1", - "select statecount(ts,'GT',1) from ct1" , - "select statecount(c7,'GT',1) from ct1", - "select statecount(c8,'GT',1) from ct1", - "select statecount(c9,'GT',1) from ct1", - "select statecount(ts,'GT',1) from ct3" , - "select statecount(c7,'GT',1) from ct3", - "select statecount(c8,'GT',1) from ct3", - "select statecount(c9,'GT',1) from ct3", - "select statecount(ts,'GT',1) from ct4" , - "select statecount(c7,'GT',1) from ct4", - "select statecount(c8,'GT',1) from ct4", - "select statecount(c9,'GT',1) from ct4", - "select statecount(ts,'GT',1) from stb1 partition by tbname" , - 
"select statecount(c7,'GT',1) from stb1 partition by tbname", - "select statecount(c8,'GT',1) from stb1 partition by tbname", - "select statecount(c9,'GT',1) from stb1 partition by tbname" + f"select statecount(ts,'GT',1) from {dbname}.t1" , + f"select statecount(c7,'GT',1) from {dbname}.t1", + f"select statecount(c8,'GT',1) from {dbname}.t1", + f"select statecount(c9,'GT',1) from {dbname}.t1", + f"select statecount(ts,'GT',1) from {dbname}.ct1" , + f"select statecount(c7,'GT',1) from {dbname}.ct1", + f"select statecount(c8,'GT',1) from {dbname}.ct1", + f"select statecount(c9,'GT',1) from {dbname}.ct1", + f"select statecount(ts,'GT',1) from {dbname}.ct3" , + f"select statecount(c7,'GT',1) from {dbname}.ct3", + f"select statecount(c8,'GT',1) from {dbname}.ct3", + f"select statecount(c9,'GT',1) from {dbname}.ct3", + f"select statecount(ts,'GT',1) from {dbname}.ct4" , + f"select statecount(c7,'GT',1) from {dbname}.ct4", + f"select statecount(c8,'GT',1) from {dbname}.ct4", + f"select statecount(c9,'GT',1) from {dbname}.ct4", + f"select statecount(ts,'GT',1) from {dbname}.stb1 partition by tbname" , + f"select statecount(c7,'GT',1) from {dbname}.stb1 partition by tbname", + f"select statecount(c8,'GT',1) from {dbname}.stb1 partition by tbname", + f"select statecount(c9,'GT',1) from {dbname}.stb1 partition by tbname" ] for type_sql in other_no_value_types: @@ -139,224 +136,222 @@ class TDTestCase: tdLog.info("support type ok , sql is : %s"%type_sql) type_sql_lists = [ - "select statecount(c1,'GT',1) from t1", - "select statecount(c2,'GT',1) from t1", - "select statecount(c3,'GT',1) from t1", - "select statecount(c4,'GT',1) from t1", - "select statecount(c5,'GT',1) from t1", - "select statecount(c6,'GT',1) from t1", + f"select statecount(c1,'GT',1) from {dbname}.t1", + f"select statecount(c2,'GT',1) from {dbname}.t1", + f"select statecount(c3,'GT',1) from {dbname}.t1", + f"select statecount(c4,'GT',1) from {dbname}.t1", + f"select statecount(c5,'GT',1) from {dbname}.t1", 
+ f"select statecount(c6,'GT',1) from {dbname}.t1", - "select statecount(c1,'GT',1) from ct1", - "select statecount(c2,'GT',1) from ct1", - "select statecount(c3,'GT',1) from ct1", - "select statecount(c4,'GT',1) from ct1", - "select statecount(c5,'GT',1) from ct1", - "select statecount(c6,'GT',1) from ct1", + f"select statecount(c1,'GT',1) from {dbname}.ct1", + f"select statecount(c2,'GT',1) from {dbname}.ct1", + f"select statecount(c3,'GT',1) from {dbname}.ct1", + f"select statecount(c4,'GT',1) from {dbname}.ct1", + f"select statecount(c5,'GT',1) from {dbname}.ct1", + f"select statecount(c6,'GT',1) from {dbname}.ct1", - "select statecount(c1,'GT',1) from ct3", - "select statecount(c2,'GT',1) from ct3", - "select statecount(c3,'GT',1) from ct3", - "select statecount(c4,'GT',1) from ct3", - "select statecount(c5,'GT',1) from ct3", - "select statecount(c6,'GT',1) from ct3", + f"select statecount(c1,'GT',1) from {dbname}.ct3", + f"select statecount(c2,'GT',1) from {dbname}.ct3", + f"select statecount(c3,'GT',1) from {dbname}.ct3", + f"select statecount(c4,'GT',1) from {dbname}.ct3", + f"select statecount(c5,'GT',1) from {dbname}.ct3", + f"select statecount(c6,'GT',1) from {dbname}.ct3", - "select statecount(c1,'GT',1) from stb1 partition by tbname", - "select statecount(c2,'GT',1) from stb1 partition by tbname", - "select statecount(c3,'GT',1) from stb1 partition by tbname", - "select statecount(c4,'GT',1) from stb1 partition by tbname", - "select statecount(c5,'GT',1) from stb1 partition by tbname", - "select statecount(c6,'GT',1) from stb1 partition by tbname", + f"select statecount(c1,'GT',1) from {dbname}.stb1 partition by tbname", + f"select statecount(c2,'GT',1) from {dbname}.stb1 partition by tbname", + f"select statecount(c3,'GT',1) from {dbname}.stb1 partition by tbname", + f"select statecount(c4,'GT',1) from {dbname}.stb1 partition by tbname", + f"select statecount(c5,'GT',1) from {dbname}.stb1 partition by tbname", + f"select statecount(c6,'GT',1) from 
{dbname}.stb1 partition by tbname", - "select statecount(c6,'GT',1) as alisb from stb1 partition by tbname", - "select statecount(c6,'GT',1) alisb from stb1 partition by tbname", + f"select statecount(c6,'GT',1) as alisb from {dbname}.stb1 partition by tbname", + f"select statecount(c6,'GT',1) alisb from {dbname}.stb1 partition by tbname", ] for type_sql in type_sql_lists: tdSql.query(type_sql) - def support_opers(self): + def support_opers(self, dbname="db"): oper_lists = ['LT','lt','Lt','lT','GT','gt','Gt','gT','LE','le','Le','lE','GE','ge','Ge','gE','NE','ne','Ne','nE','EQ','eq','Eq','eQ'] oper_errors = [",","*","NULL","tbname","ts","sum","_c0"] for oper in oper_lists: - tdSql.query(f"select statecount(c1 ,'{oper}',1) as col from t1") + tdSql.query(f"select statecount(c1 ,'{oper}',1) as col from {dbname}.t1") tdSql.checkRows(12) for oper in oper_errors: - tdSql.error(f"select statecount(c1 ,'{oper}',1) as col from t1") + tdSql.error(f"select statecount(c1 ,'{oper}',1) as col from {dbname}.t1") - - def basic_statecount_function(self): + def basic_statecount_function(self, dbname="db"): # basic query - tdSql.query("select c1 from ct3") + tdSql.query(f"select c1 from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select c1 from t1") + tdSql.query(f"select c1 from {dbname}.t1") tdSql.checkRows(12) - tdSql.query("select c1 from stb1") + tdSql.query(f"select c1 from {dbname}.stb1") tdSql.checkRows(25) # used for empty table , ct3 is empty - tdSql.query("select statecount(c6,'GT',1) from ct3") + tdSql.query(f"select statecount(c6,'GT',1) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select statecount(c6,'GT',1) from ct3") + tdSql.query(f"select statecount(c6,'GT',1) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select statecount(c6,'GT',1) from ct3") + tdSql.query(f"select statecount(c6,'GT',1) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select statecount(c6,'GT',1) from ct3") + tdSql.query(f"select statecount(c6,'GT',1) from {dbname}.ct3") 
tdSql.checkRows(0) - tdSql.query("select statecount(c6,'GT',1) from ct3") + tdSql.query(f"select statecount(c6,'GT',1) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select statecount(c6,'GT',1) from ct3") + tdSql.query(f"select statecount(c6,'GT',1) from {dbname}.ct3") # will support _rowts mix with - # tdSql.query("select (c6,'GT',1),_rowts from ct3") + # tdSql.query(f"select (c6,'GT',1),_rowts from {dbname}.ct3") # auto check for t1 table # used for regular table - tdSql.query("select statecount(c6,'GT',1) from t1") + tdSql.query(f"select statecount(c6,'GT',1) from {dbname}.t1") # unique with super tags - tdSql.query("select statecount(c6,'GT',1) from ct1") + tdSql.query(f"select statecount(c6,'GT',1) from {dbname}.ct1") tdSql.checkRows(13) - tdSql.query("select statecount(c6,'GT',1) from ct4") + tdSql.query(f"select statecount(c6,'GT',1) from {dbname}.ct4") tdSql.checkRows(12) - tdSql.query("select statecount(c6,'GT',1),tbname from ct1") + tdSql.query(f"select statecount(c6,'GT',1),tbname from {dbname}.ct1") tdSql.checkRows(13) - tdSql.query("select statecount(c6,'GT',1),t1 from ct1") + tdSql.query(f"select statecount(c6,'GT',1),t1 from {dbname}.ct1") tdSql.checkRows(13) # unique with common col - tdSql.query("select statecount(c6,'GT',1) ,ts from ct1") + tdSql.query(f"select statecount(c6,'GT',1) ,ts from {dbname}.ct1") tdSql.checkRows(13) - tdSql.query("select ts, statecount(c6,'GT',1) from ct1") + tdSql.query(f"select ts, statecount(c6,'GT',1) from {dbname}.ct1") tdSql.checkRows(13) - tdSql.query("select statecount(c6,'GT',1) ,c1 from ct1") + tdSql.query(f"select statecount(c6,'GT',1) ,c1 from {dbname}.ct1") tdSql.checkRows(13) - tdSql.query("select c1, statecount(c6,'GT',1) from ct1") + tdSql.query(f"select c1, statecount(c6,'GT',1) from {dbname}.ct1") tdSql.checkRows(13) - tdSql.query("select ts, c1, c2, c3, statecount(c6,'GT',1) from ct1") + tdSql.query(f"select ts, c1, c2, c3, statecount(c6,'GT',1) from {dbname}.ct1") tdSql.checkRows(13) - 
tdSql.query("select statecount(c6,'GT',1), ts, c1, c2, c3 from ct1") + tdSql.query(f"select statecount(c6,'GT',1), ts, c1, c2, c3 from {dbname}.ct1") tdSql.checkRows(13) - tdSql.query("select ts, c1, c2, c3, statecount(c6,'GT',1), ts, c4, c5, c6 from ct1") + tdSql.query(f"select ts, c1, c2, c3, statecount(c6,'GT',1), ts, c4, c5, c6 from {dbname}.ct1") tdSql.checkRows(13) - tdSql.query("select stateduration(c6,'GT',1) ,ts from ct1") + tdSql.query(f"select stateduration(c6,'GT',1) ,ts from {dbname}.ct1") tdSql.checkRows(13) - tdSql.query("select ts, stateduration(c6,'GT',1) from ct1") + tdSql.query(f"select ts, stateduration(c6,'GT',1) from {dbname}.ct1") tdSql.checkRows(13) - tdSql.query("select stateduration(c6,'GT',1) ,c1 from ct1") + tdSql.query(f"select stateduration(c6,'GT',1) ,c1 from {dbname}.ct1") tdSql.checkRows(13) - tdSql.query("select c1, stateduration(c6,'GT',1) from ct1") + tdSql.query(f"select c1, stateduration(c6,'GT',1) from {dbname}.ct1") tdSql.checkRows(13) - tdSql.query("select ts, c1, c2, c3, stateduration(c6,'GT',1) from ct1") + tdSql.query(f"select ts, c1, c2, c3, stateduration(c6,'GT',1) from {dbname}.ct1") tdSql.checkRows(13) - tdSql.query("select stateduration(c6,'GT',1), ts, c1, c2, c3 from ct1") + tdSql.query(f"select stateduration(c6,'GT',1), ts, c1, c2, c3 from {dbname}.ct1") tdSql.checkRows(13) - tdSql.query("select ts, c1, c2, c3, stateduration(c6,'GT',1), ts, c4, c5, c6 from ct1") + tdSql.query(f"select ts, c1, c2, c3, stateduration(c6,'GT',1), ts, c4, c5, c6 from {dbname}.ct1") tdSql.checkRows(13) # unique with scalar function - tdSql.query("select statecount(c6,'GT',1) , abs(c1) from ct1") + tdSql.query(f"select statecount(c6,'GT',1) , abs(c1) from {dbname}.ct1") tdSql.checkRows(13) - tdSql.query("select statecount(c6,'GT',1) , abs(c2)+2 from ct1") + tdSql.query(f"select statecount(c6,'GT',1) , abs(c2)+2 from {dbname}.ct1") tdSql.checkRows(13) - tdSql.error("select statecount(c6,'GT',1) , unique(c2) from ct1") + 
tdSql.error(f"select statecount(c6,'GT',1) , unique(c2) from {dbname}.ct1") - tdSql.query("select stateduration(c6,'GT',1) , abs(c1) from ct1") + tdSql.query(f"select stateduration(c6,'GT',1) , abs(c1) from {dbname}.ct1") tdSql.checkRows(13) - tdSql.query("select stateduration(c6,'GT',1) , abs(c2)+2 from ct1") + tdSql.query(f"select stateduration(c6,'GT',1) , abs(c2)+2 from {dbname}.ct1") tdSql.checkRows(13) - tdSql.error("select stateduration(c6,'GT',1) , unique(c2) from ct1") + tdSql.error(f"select stateduration(c6,'GT',1) , unique(c2) from {dbname}.ct1") # unique with aggregate function - tdSql.error("select statecount(c6,'GT',1) ,sum(c1) from ct1") - tdSql.error("select statecount(c6,'GT',1) ,max(c1) from ct1") - tdSql.error("select statecount(c6,'GT',1) ,csum(c1) from ct1") - tdSql.error("select statecount(c6,'GT',1) ,count(c1) from ct1") + tdSql.error(f"select statecount(c6,'GT',1) ,sum(c1) from {dbname}.ct1") + tdSql.error(f"select statecount(c6,'GT',1) ,max(c1) from {dbname}.ct1") + tdSql.error(f"select statecount(c6,'GT',1) ,csum(c1) from {dbname}.ct1") + tdSql.error(f"select statecount(c6,'GT',1) ,count(c1) from {dbname}.ct1") # unique with filter where - tdSql.query("select statecount(c6,'GT',1) from ct4 where c1 is null") + tdSql.query(f"select statecount(c6,'GT',1) from {dbname}.ct4 where c1 is null") tdSql.checkData(0, 0, None) tdSql.checkData(1, 0, None) tdSql.checkData(2, 0, None) - tdSql.query("select statecount(c1,'GT',1) from t1 where c1 >2 ") + tdSql.query(f"select statecount(c1,'GT',1) from {dbname}.t1 where c1 >2 ") tdSql.checkData(0, 0, 1) tdSql.checkData(1, 0, 2) tdSql.checkData(2, 0, 3) tdSql.checkData(4, 0, 5) tdSql.checkData(5, 0, 6) - tdSql.query("select statecount(c2,'GT',1) from t1 where c2 between 0 and 99999") + tdSql.query(f"select statecount(c2,'GT',1) from {dbname}.t1 where c2 between 0 and 99999") tdSql.checkData(0, 0, 1) tdSql.checkData(1, 0, 2) tdSql.checkData(6, 0, -1) # unique with union all - tdSql.query("select 
statecount(c1,'GT',1) from ct4 union all select statecount(c1,'GT',1) from ct1") + tdSql.query(f"select statecount(c1,'GT',1) from {dbname}.ct4 union all select statecount(c1,'GT',1) from {dbname}.ct1") tdSql.checkRows(25) - tdSql.query("select statecount(c1,'GT',1) from ct4 union all select distinct(c1) from ct4") + tdSql.query(f"select statecount(c1,'GT',1) from {dbname}.ct4 union all select distinct(c1) from {dbname}.ct4") tdSql.checkRows(22) # unique with join # prepare join datas with same ts - tdSql.execute(" use db ") - tdSql.execute(" create stable st1 (ts timestamp , num int) tags(ind int)") - tdSql.execute(" create table tb1 using st1 tags(1)") - tdSql.execute(" create table tb2 using st1 tags(2)") + tdSql.execute(f"create stable {dbname}.st1 (ts timestamp , num int) tags(ind int)") + tdSql.execute(f"create table {dbname}.tb1 using {dbname}.st1 tags(1)") + tdSql.execute(f"create table {dbname}.tb2 using {dbname}.st1 tags(2)") - tdSql.execute(" create stable st2 (ts timestamp , num int) tags(ind int)") - tdSql.execute(" create table ttb1 using st2 tags(1)") - tdSql.execute(" create table ttb2 using st2 tags(2)") + tdSql.execute(f"create stable {dbname}.st2 (ts timestamp , num int) tags(ind int)") + tdSql.execute(f"create table {dbname}.ttb1 using {dbname}.st2 tags(1)") + tdSql.execute(f"create table {dbname}.ttb2 using {dbname}.st2 tags(2)") start_ts = 1622369635000 # 2021-05-30 18:13:55 for i in range(10): ts_value = start_ts+i*1000 - tdSql.execute(f" insert into tb1 values({ts_value} , {i})") - tdSql.execute(f" insert into tb2 values({ts_value} , {i})") + tdSql.execute(f" insert into {dbname}.tb1 values({ts_value} , {i})") + tdSql.execute(f" insert into {dbname}.tb2 values({ts_value} , {i})") - tdSql.execute(f" insert into ttb1 values({ts_value} , {i})") - tdSql.execute(f" insert into ttb2 values({ts_value} , {i})") + tdSql.execute(f" insert into {dbname}.ttb1 values({ts_value} , {i})") + tdSql.execute(f" insert into {dbname}.ttb2 values({ts_value} , 
{i})") - tdSql.query("select statecount(tb1.num,'GT',1) from tb1, tb2 where tb1.ts=tb2.ts ") + tdSql.query(f"select statecount(tb1.num,'GT',1) from {dbname}.tb1 tb1, {dbname}.tb2 tb2 where tb1.ts=tb2.ts ") tdSql.checkRows(10) tdSql.checkData(0,0,-1) tdSql.checkData(1,0,-1) tdSql.checkData(2,0,1) tdSql.checkData(9,0,8) - tdSql.query("select statecount(tb1.num,'GT',1) from tb1, tb2 where tb1.ts=tb2.ts union all select statecount(tb2.num,'GT',1) from tb1, tb2 where tb1.ts=tb2.ts ") + tdSql.query(f"select statecount(tb1.num,'GT',1) from {dbname}.tb1 tb1, {dbname}.tb2 tb2 where tb1.ts=tb2.ts union all select statecount(tb2.num,'GT',1) from {dbname}.tb1 tb1, {dbname}.tb2 tb2 where tb1.ts=tb2.ts ") tdSql.checkRows(20) # nest query - # tdSql.query("select unique(c1) from (select c1 from ct1)") - tdSql.query("select c1 from (select statecount(c1,'GT',1) c1 from t1)") + # tdSql.query(f"select unique(c1) from (select c1 from {dbname}.ct1)") + tdSql.query(f"select c1 from (select statecount(c1,'GT',1) c1 from {dbname}.t1)") tdSql.checkRows(12) tdSql.checkData(0, 0, None) tdSql.checkData(1, 0, -1) tdSql.checkData(2, 0, 1) tdSql.checkData(10, 0, 8) - tdSql.query("select sum(c1) from (select statecount(c1,'GT',1) c1 from t1)") + tdSql.query(f"select sum(c1) from (select statecount(c1,'GT',1) c1 from {dbname}.t1)") tdSql.checkRows(1) tdSql.checkData(0, 0, 35) - tdSql.query("select sum(c1) from (select distinct(c1) c1 from ct1) union all select sum(c1) from (select statecount(c1,'GT',1) c1 from ct1)") + tdSql.query(f"select sum(c1) from (select distinct(c1) c1 from {dbname}.ct1) union all select sum(c1) from (select statecount(c1,'GT',1) c1 from {dbname}.ct1)") tdSql.checkRows(2) - tdSql.query("select 1-abs(c1) from (select statecount(c1,'GT',1) c1 from t1)") + tdSql.query(f"select 1-abs(c1) from (select statecount(c1,'GT',1) c1 from {dbname}.t1)") tdSql.checkRows(12) tdSql.checkData(0, 0, None) tdSql.checkData(1, 0, 0.000000000) @@ -365,43 +360,41 @@ class TDTestCase: # bug for 
stable #partition by tbname - # tdSql.query(" select unique(c1) from stb1 partition by tbname ") + # tdSql.query(f"select unique(c1) from {dbname}.stb1 partition by tbname ") # tdSql.checkRows(21) - # tdSql.query(" select unique(c1) from stb1 partition by tbname ") + # tdSql.query(f"select unique(c1) from {dbname}.stb1 partition by tbname ") # tdSql.checkRows(21) # group by - tdSql.error("select statecount(c1,'GT',1) from ct1 group by c1") - tdSql.error("select statecount(c1,'GT',1) from ct1 group by tbname") + tdSql.error(f"select statecount(c1,'GT',1) from {dbname}.ct1 group by c1") + tdSql.error(f"select statecount(c1,'GT',1) from {dbname}.ct1 group by tbname") - # super table - - def check_unit_time(self): - tdSql.execute(" use db ") - tdSql.error("select stateduration(c1,'GT',1,1b) from ct1") - tdSql.error("select stateduration(c1,'GT',1,1u) from ct1") - tdSql.error("select stateduration(c1,'GT',1,1000s) from t1") - tdSql.error("select stateduration(c1,'GT',1,10m) from t1") - tdSql.error("select stateduration(c1,'GT',1,10d) from t1") - tdSql.query("select stateduration(c1,'GT',1,1s) from t1") + def check_unit_time(self, dbname="db"): + tdSql.error(f"select stateduration(c1,'GT',1,1b) from {dbname}.ct1") + tdSql.error(f"select stateduration(c1,'GT',1,1u) from {dbname}.ct1") + tdSql.error(f"select stateduration(c1,'GT',1,1000s) from {dbname}.t1") + tdSql.error(f"select stateduration(c1,'GT',1,10m) from {dbname}.t1") + tdSql.error(f"select stateduration(c1,'GT',1,10d) from {dbname}.t1") + tdSql.query(f"select stateduration(c1,'GT',1,1s) from {dbname}.t1") tdSql.checkData(10,0,63072035) - tdSql.query("select stateduration(c1,'GT',1,1m) from t1") + tdSql.query(f"select stateduration(c1,'GT',1,1m) from {dbname}.t1") tdSql.checkData(10,0,int(63072035/60)) - tdSql.query("select stateduration(c1,'GT',1,1h) from t1") + tdSql.query(f"select stateduration(c1,'GT',1,1h) from {dbname}.t1") tdSql.checkData(10,0,int(63072035/60/60)) - tdSql.query("select 
stateduration(c1,'GT',1,1d) from t1") + tdSql.query(f"select stateduration(c1,'GT',1,1d) from {dbname}.t1") tdSql.checkData(10,0,int(63072035/60/24/60)) - tdSql.query("select stateduration(c1,'GT',1,1w) from t1") + tdSql.query(f"select stateduration(c1,'GT',1,1w) from {dbname}.t1") tdSql.checkData(10,0,int(63072035/60/7/24/60)) def query_precision(self): def generate_data(precision="ms"): - tdSql.execute("create database if not exists db_%s precision '%s';" %(precision, precision)) + dbname = f"db_{precision}" + tdSql.execute(f"create database if not exists db_%s precision '%s';" %(precision, precision)) tdSql.execute("use db_%s;" %precision) - tdSql.execute("create stable db_%s.st (ts timestamp , id int) tags(ind int);"%precision) - tdSql.execute("create table db_%s.tb1 using st tags(1);"%precision) - tdSql.execute("create table db_%s.tb2 using st tags(2);"%precision) + tdSql.execute(f"create stable db_%s.st (ts timestamp , id int) tags(ind int);"%precision) + tdSql.execute(f"create table db_%s.tb1 using {dbname}.st tags(1);"%precision) + tdSql.execute(f"create table db_%s.tb2 using {dbname}.st tags(2);"%precision) if precision == "ms": start_ts = self.ts @@ -432,55 +425,54 @@ class TDTestCase: if pres == "ms": if unit in ["1u","1b"]: - tdSql.error("select stateduration(id,'GT',1,%s) from db_%s.tb1 "%(unit,pres)) + tdSql.error(f"select stateduration(id,'GT',1,%s) from db_%s.tb1 "%(unit,pres)) pass else: - tdSql.query("select stateduration(id,'GT',1,%s) from db_%s.tb1 "%(unit,pres)) + tdSql.query(f"select stateduration(id,'GT',1,%s) from db_%s.tb1 "%(unit,pres)) elif pres == "us" and unit in ["1b"]: if unit in ["1b"]: - tdSql.error("select stateduration(id,'GT',1,%s) from db_%s.tb1 "%(unit,pres)) + tdSql.error(f"select stateduration(id,'GT',1,%s) from db_%s.tb1 "%(unit,pres)) pass else: - tdSql.query("select stateduration(id,'GT',1,%s) from db_%s.tb1 "%(unit,pres)) + tdSql.query(f"select stateduration(id,'GT',1,%s) from db_%s.tb1 "%(unit,pres)) else: - 
tdSql.query("select stateduration(id,'GT',1,%s) from db_%s.tb1 "%(unit,pres)) + tdSql.query(f"select stateduration(id,'GT',1,%s) from db_%s.tb1 "%(unit,pres)) basic_result = 70 tdSql.checkData(9,0,basic_result*pow(1000,index)) - def check_boundary_values(self): + def check_boundary_values(self, dbname="bound_test"): - tdSql.execute("drop database if exists bound_test") - tdSql.execute("create database if not exists bound_test") - tdSql.execute("use bound_test") + tdSql.execute(f"drop database if exists {dbname}") + tdSql.execute(f"create database if not exists {dbname}") tdSql.execute( - "create table stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);" + f"create table {dbname}.stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);" ) - tdSql.execute(f'create table sub1_bound using stb_bound tags ( 1 )') + tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )') tdSql.execute( - f"insert into sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.execute( - f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.execute( - f"insert into sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now(), 
-2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.execute( - f"insert into sub1_bound values ( now(), 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now(), 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.execute( - f"insert into sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.error( - f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) - tdSql.query("select statecount(c1,'GT',1) from sub1_bound") + tdSql.query(f"select statecount(c1,'GT',1) from {dbname}.sub1_bound") tdSql.checkRows(5) def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring diff --git a/tests/system-test/2-query/substr.py b/tests/system-test/2-query/substr.py index f833a42b57..ea55c5e44e 100644 --- a/tests/system-test/2-query/substr.py +++ b/tests/system-test/2-query/substr.py @@ -127,16 +127,16 @@ class TDTestCase: return sqls - def __test_current(self): # sourcery skip: use-itertools-product + def __test_current(self, dbname="db"): # sourcery skip: use-itertools-product tdLog.printNoPrefix("==========current sql condition check , must return query ok==========") - tbname = ["ct1", "ct2", "ct4", "t1", "stb1"] + tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.t1", f"{dbname}.stb1"] for tb 
in tbname: self.__substr_check(tb, CURRENT_POS, LENS) tdLog.printNoPrefix(f"==========current sql condition check in {tb} over==========") - def __test_error(self): + def __test_error(self, dbname="db"): tdLog.printNoPrefix("==========err sql condition check , must return error==========") - tbname = ["ct1", "ct2", "ct4", "t1", "stb1"] + tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.t1", f"{dbname}.stb1"] for tb in tbname: for errsql in self.__substr_err_check(tb): @@ -145,22 +145,21 @@ class TDTestCase: tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========") - def all_test(self): - self.__test_current() - self.__test_error() + def all_test(self, dbname="db"): + self.__test_current(dbname) + self.__test_error(dbname) - def __create_tb(self): - tdSql.prepare() + def __create_tb(self, dbname="db"): tdLog.printNoPrefix("==========step1:create table") - create_stb_sql = f'''create table stb1( + create_stb_sql = f'''create table {dbname}.stb1( ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp ) tags (tag1 int) ''' - create_ntb_sql = f'''create table t1( + create_ntb_sql = f'''create table {dbname}.t1( ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp @@ -170,29 +169,29 @@ class TDTestCase: tdSql.execute(create_ntb_sql) for i in range(4): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )') - def __insert_data(self, rows): + def __insert_data(self, rows, dbname="db"): now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000) for i in range(rows): tdSql.execute( - f"insert into ct1 
values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + f"insert into {dbname}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" ) tdSql.execute( - f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + f"insert into {dbname}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" ) tdSql.execute( - f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + f"insert into {dbname}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" ) tdSql.execute( - f'''insert into ct1 values + f'''insert into {dbname}.ct1 values ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } ) ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } ) ''' ) tdSql.execute( - f'''insert into ct4 values + f'''insert into {dbname}.ct4 values ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) @@ -208,7 +207,7 @@ class TDTestCase: ) tdSql.execute( - f'''insert into ct2 values + 
f'''insert into {dbname}.ct2 values ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) @@ -224,13 +223,13 @@ class TDTestCase: ) for i in range(rows): - insert_data = f'''insert into t1 values + insert_data = f'''insert into {dbname}.t1 values ( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2}, "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } ) ''' tdSql.execute(insert_data) tdSql.execute( - f'''insert into t1 values + f'''insert into {dbname}.t1 values ( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) @@ -259,10 +258,7 @@ class TDTestCase: tdLog.printNoPrefix("==========step3:all check") self.all_test() - tdDnodes.stop(1) - tdDnodes.start(1) - - tdSql.execute("use db") + tdSql.execute("flush database db") tdLog.printNoPrefix("==========step4:after wal, all check again ") self.all_test() diff --git a/tests/system-test/2-query/sum.py b/tests/system-test/2-query/sum.py index 4f5ed34419..dbc79e25f5 100644 --- a/tests/system-test/2-query/sum.py +++ b/tests/system-test/2-query/sum.py @@ -89,14 +89,14 @@ class TDTestCase: return sqls - def __test_current(self): + def __test_current(self, dbname="db"): tdLog.printNoPrefix("==========current sql condition check , must return query ok==========") tbname = ["ct1", "ct2", "ct4", "t1"] for tb in tbname: self.__sum_current_check(tb) tdLog.printNoPrefix(f"==========current sql condition check in {tb} over==========") - def __test_error(self): + def __test_error(self, 
dbname="db"): tdLog.printNoPrefix("==========err sql condition check , must return error==========") tbname = ["ct1", "ct2", "ct4", "t1"] @@ -106,21 +106,21 @@ class TDTestCase: tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========") - def all_test(self): - self.__test_current() - self.__test_error() + def all_test(self, dbname="db"): + self.__test_current(dbname) + self.__test_error(dbname) - def __create_tb(self): + def __create_tb(self, dbname="db"): tdLog.printNoPrefix("==========step1:create table") - create_stb_sql = f'''create table {DBNAME}.stb1( + create_stb_sql = f'''create table {dbname}.stb1( ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp - ) tags (t1 int) + ) tags (tag1 int) ''' - create_ntb_sql = f'''create table {DBNAME}.t1( + create_ntb_sql = f'''create table {dbname}.t1( ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp @@ -130,83 +130,82 @@ class TDTestCase: tdSql.execute(create_ntb_sql) for i in range(4): - tdSql.execute(f'create table {DBNAME}.ct{i+1} using {DBNAME}.stb1 tags ( {i+1} )') + tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )') - def __insert_data(self, rows): + def __insert_data(self, rows, dbname="db"): now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000) for i in range(rows): tdSql.execute( - f"insert into {DBNAME}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )" + f"insert into {dbname}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, 
{i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" ) tdSql.execute( - f"insert into {DBNAME}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )" + f"insert into {dbname}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" ) tdSql.execute( - f"insert into {DBNAME}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )" + f"insert into {dbname}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" ) tdSql.execute( - f'''insert into {DBNAME}.ct1 values - ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', { now_time + 8 } ) - ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', { now_time + 9 } ) + f'''insert into {dbname}.ct1 values + ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } ) + ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } ) ''' ) tdSql.execute( - f'''insert into {DBNAME}.ct4 values + f'''insert into {dbname}.ct4 values ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) - ( { now_time - rows * 3888000000+ 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time + 5184000000}, {pow(2,31)-pow(2,15)}, 
{pow(2,63)-pow(2,30)}, 32767, 127, - { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_limit-1", { now_time - 86400000} + { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000} ) ( { now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126, - { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_limit-2", { now_time - 172800000} + { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000} ) ''' ) tdSql.execute( - f'''insert into {DBNAME}.ct2 values + f'''insert into {dbname}.ct2 values ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) - ( { now_time - rows * 3888000000+ 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126, - { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_limit-1", { now_time - 86400000 } + { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } ) ( { now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127, - { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_limit-2", { now_time - 172800000 } + { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } ) ''' ) for i in range(rows): - insert_data = f'''insert into {DBNAME}.t1 values + insert_data = f'''insert into {dbname}.t1 values ( { now_time - i 
* 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2}, - "binary_{i}", "nchar_{i}", { now_time - 1000 * i } ) + "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } ) ''' tdSql.execute(insert_data) tdSql.execute( - f'''insert into {DBNAME}.t1 values + f'''insert into {dbname}.t1 values ( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127, { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, - "binary_limit-1", "nchar_limit-1", { now_time - 86400000 } + "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } ) ( { now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126, { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, - "binary_limit-2", "nchar_limit-2", { now_time - 172800000 } + "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } ) ''' ) - def run(self): tdSql.prepare() @@ -219,12 +218,8 @@ class TDTestCase: tdLog.printNoPrefix("==========step3:all check") self.all_test() - # tdDnodes.stop(1) - # tdDnodes.start(1) - tdSql.execute("flush database db") - tdSql.execute("use db") tdLog.printNoPrefix("==========step4:after wal, all check again ") diff --git a/tests/system-test/2-query/tail.py b/tests/system-test/2-query/tail.py index d708873d6f..687023f57e 100644 --- a/tests/system-test/2-query/tail.py +++ b/tests/system-test/2-query/tail.py @@ -10,49 +10,46 @@ from util.sql import * from util.cases import * class TDTestCase: - updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , - "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, 
"dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143, - "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143} def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor()) - - def prepare_datas(self): + + def prepare_datas(self, dbname="db"): tdSql.execute( - '''create table stb1 + f'''create table {dbname}.stb1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) tags (t1 int) ''' ) - + tdSql.execute( - ''' - create table t1 + f''' + create table {dbname}.t1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) ''' ) for i in range(4): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )') for i in range(9): tdSql.execute( - f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" ) tdSql.execute( - f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" ) - tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") - tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") - 
tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") tdSql.execute( - f'''insert into t1 values + f'''insert into {dbname}.t1 values ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a ) ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a ) @@ -67,115 +64,115 @@ class TDTestCase: ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ''' ) - - def test_errors(self): + + def test_errors(self, dbname="db"): error_sql_lists = [ - "select tail from t1", - "select 
tail(123--123)==1 from t1", - "select tail(123,123) from t1", - "select tail(c1,ts) from t1", - "select tail(c1,c1,ts) from t1", - "select tail(c1) as 'd1' from t1", - "select tail(c1 ,c2 ) from t1", - "select tail(c1 ,NULL) from t1", - "select tail(,) from t1;", - "select tail(tail(c1) ab from t1)", - "select tail(c1) as int from t1", - "select tail('c1') from t1", - "select tail(NULL) from t1", - "select tail('') from t1", - "select tail(c%) from t1", - "select tail(t1) from t1", - "select tail(True) from t1", - "select tail(c1,1) , count(c1) from t1", - "select tail(c1,1) , avg(c1) from t1", - "select tail(c1,1) , min(c1) from t1", - "select tail(c1,1) , spread(c1) from t1", - "select tail(c1,1) , diff(c1) from t1", - "select tail from stb1 partition by tbname", - "select tail(123--123)==1 from stb1 partition by tbname", - "select tail(123,123) from stb1 partition by tbname", - "select tail(c1,ts) from stb1 partition by tbname", - "select tail(c1,c1,ts) from stb1 partition by tbname", - "select tail(c1) as 'd1' from stb1 partition by tbname", - "select tail(c1 ,c2 ) from stb1 partition by tbname", - "select tail(c1 ,NULL) from stb1 partition by tbname", - "select tail(,) from stb1 partition by tbname;", - "select tail(tail(c1) ab from stb1 partition by tbname)", - "select tail(c1) as int from stb1 partition by tbname", - "select tail('c1') from stb1 partition by tbname", - "select tail(NULL) from stb1 partition by tbname", - "select tail('') from stb1 partition by tbname", - "select tail(c%) from stb1 partition by tbname", - "select tail(t1) from stb1 partition by tbname", - "select tail(True) from stb1 partition by tbname", - "select tail(c1,1) , count(c1) from stb1 partition by tbname", - "select tail(c1,1) , avg(c1) from stb1 partition by tbname", - "select tail(c1,1) , min(c1) from stb1 partition by tbname", - "select tail(c1,1) , spread(c1) from stb1 partition by tbname", - "select tail(c1,1) , diff(c1) from stb1 partition by tbname", + f"select tail from 
{dbname}.t1", + f"select tail(123--123)==1 from {dbname}.t1", + f"select tail(123,123) from {dbname}.t1", + f"select tail(c1,ts) from {dbname}.t1", + f"select tail(c1,c1,ts) from {dbname}.t1", + f"select tail(c1) as 'd1' from {dbname}.t1", + f"select tail(c1 ,c2 ) from {dbname}.t1", + f"select tail(c1 ,NULL) from {dbname}.t1", + f"select tail(,) from {dbname}.t1;", + f"select tail(tail(c1) ab from {dbname}.t1)", + f"select tail(c1) as int from {dbname}.t1", + f"select tail('c1') from {dbname}.t1", + f"select tail(NULL) from {dbname}.t1", + f"select tail('') from {dbname}.t1", + f"select tail(c%) from {dbname}.t1", + f"select tail(t1) from {dbname}.t1", + f"select tail(True) from {dbname}.t1", + f"select tail(c1,1) , count(c1) from {dbname}.t1", + f"select tail(c1,1) , avg(c1) from {dbname}.t1", + f"select tail(c1,1) , min(c1) from {dbname}.t1", + f"select tail(c1,1) , spread(c1) from {dbname}.t1", + f"select tail(c1,1) , diff(c1) from {dbname}.t1", + f"select tail from {dbname}.stb1 partition by tbname", + f"select tail(123--123)==1 from {dbname}.stb1 partition by tbname", + f"select tail(123,123) from {dbname}.stb1 partition by tbname", + f"select tail(c1,ts) from {dbname}.stb1 partition by tbname", + f"select tail(c1,c1,ts) from {dbname}.stb1 partition by tbname", + f"select tail(c1) as 'd1' from {dbname}.stb1 partition by tbname", + f"select tail(c1 ,c2 ) from {dbname}.stb1 partition by tbname", + f"select tail(c1 ,NULL) from {dbname}.stb1 partition by tbname", + f"select tail(,) from {dbname}.stb1 partition by tbname;", + f"select tail(tail(c1) ab from {dbname}.stb1 partition by tbname)", + f"select tail(c1) as int from {dbname}.stb1 partition by tbname", + f"select tail('c1') from {dbname}.stb1 partition by tbname", + f"select tail(NULL) from {dbname}.stb1 partition by tbname", + f"select tail('') from {dbname}.stb1 partition by tbname", + f"select tail(c%) from {dbname}.stb1 partition by tbname", + f"select tail(t1) from {dbname}.stb1 partition by tbname", + 
f"select tail(True) from {dbname}.stb1 partition by tbname", + f"select tail(c1,1) , count(c1) from {dbname}.stb1 partition by tbname", + f"select tail(c1,1) , avg(c1) from {dbname}.stb1 partition by tbname", + f"select tail(c1,1) , min(c1) from {dbname}.stb1 partition by tbname", + f"select tail(c1,1) , spread(c1) from {dbname}.stb1 partition by tbname", + f"select tail(c1,1) , diff(c1) from {dbname}.stb1 partition by tbname", ] for error_sql in error_sql_lists: tdSql.error(error_sql) - - def support_types(self): + + def support_types(self, dbname="db"): other_no_value_types = [ - "select tail(ts,1) from t1" , - "select tail(c7,1) from t1", - "select tail(c8,1) from t1", - "select tail(c9,1) from t1", - "select tail(ts,1) from ct1" , - "select tail(c7,1) from ct1", - "select tail(c8,1) from ct1", - "select tail(c9,1) from ct1", - "select tail(ts,1) from ct3" , - "select tail(c7,1) from ct3", - "select tail(c8,1) from ct3", - "select tail(c9,1) from ct3", - "select tail(ts,1) from ct4" , - "select tail(c7,1) from ct4", - "select tail(c8,1) from ct4", - "select tail(c9,1) from ct4", - "select tail(ts,1) from stb1 partition by tbname" , - "select tail(c7,1) from stb1 partition by tbname", - "select tail(c8,1) from stb1 partition by tbname", - "select tail(c9,1) from stb1 partition by tbname" + f"select tail(ts,1) from {dbname}.t1" , + f"select tail(c7,1) from {dbname}.t1", + f"select tail(c8,1) from {dbname}.t1", + f"select tail(c9,1) from {dbname}.t1", + f"select tail(ts,1) from {dbname}.ct1" , + f"select tail(c7,1) from {dbname}.ct1", + f"select tail(c8,1) from {dbname}.ct1", + f"select tail(c9,1) from {dbname}.ct1", + f"select tail(ts,1) from {dbname}.ct3" , + f"select tail(c7,1) from {dbname}.ct3", + f"select tail(c8,1) from {dbname}.ct3", + f"select tail(c9,1) from {dbname}.ct3", + f"select tail(ts,1) from {dbname}.ct4" , + f"select tail(c7,1) from {dbname}.ct4", + f"select tail(c8,1) from {dbname}.ct4", + f"select tail(c9,1) from {dbname}.ct4", + f"select 
tail(ts,1) from {dbname}.stb1 partition by tbname" , + f"select tail(c7,1) from {dbname}.stb1 partition by tbname", + f"select tail(c8,1) from {dbname}.stb1 partition by tbname", + f"select tail(c9,1) from {dbname}.stb1 partition by tbname" ] - + for type_sql in other_no_value_types: tdSql.query(type_sql) - + type_sql_lists = [ - "select tail(c1,1) from t1", - "select tail(c2,1) from t1", - "select tail(c3,1) from t1", - "select tail(c4,1) from t1", - "select tail(c5,1) from t1", - "select tail(c6,1) from t1", + f"select tail(c1,1) from {dbname}.t1", + f"select tail(c2,1) from {dbname}.t1", + f"select tail(c3,1) from {dbname}.t1", + f"select tail(c4,1) from {dbname}.t1", + f"select tail(c5,1) from {dbname}.t1", + f"select tail(c6,1) from {dbname}.t1", - "select tail(c1,1) from ct1", - "select tail(c2,1) from ct1", - "select tail(c3,1) from ct1", - "select tail(c4,1) from ct1", - "select tail(c5,1) from ct1", - "select tail(c6,1) from ct1", + f"select tail(c1,1) from {dbname}.ct1", + f"select tail(c2,1) from {dbname}.ct1", + f"select tail(c3,1) from {dbname}.ct1", + f"select tail(c4,1) from {dbname}.ct1", + f"select tail(c5,1) from {dbname}.ct1", + f"select tail(c6,1) from {dbname}.ct1", - "select tail(c1,1) from ct3", - "select tail(c2,1) from ct3", - "select tail(c3,1) from ct3", - "select tail(c4,1) from ct3", - "select tail(c5,1) from ct3", - "select tail(c6,1) from ct3", + f"select tail(c1,1) from {dbname}.ct3", + f"select tail(c2,1) from {dbname}.ct3", + f"select tail(c3,1) from {dbname}.ct3", + f"select tail(c4,1) from {dbname}.ct3", + f"select tail(c5,1) from {dbname}.ct3", + f"select tail(c6,1) from {dbname}.ct3", - "select tail(c1,1) from stb1 partition by tbname", - "select tail(c2,1) from stb1 partition by tbname", - "select tail(c3,1) from stb1 partition by tbname", - "select tail(c4,1) from stb1 partition by tbname", - "select tail(c5,1) from stb1 partition by tbname", - "select tail(c6,1) from stb1 partition by tbname", + f"select tail(c1,1) from 
{dbname}.stb1 partition by tbname", + f"select tail(c2,1) from {dbname}.stb1 partition by tbname", + f"select tail(c3,1) from {dbname}.stb1 partition by tbname", + f"select tail(c4,1) from {dbname}.stb1 partition by tbname", + f"select tail(c5,1) from {dbname}.stb1 partition by tbname", + f"select tail(c6,1) from {dbname}.stb1 partition by tbname", - "select tail(c6,1) as alisb from stb1 partition by tbname", - "select tail(c6,1) alisb from stb1 partition by tbname", + f"select tail(c6,1) as alisb from {dbname}.stb1 partition by tbname", + f"select tail(c6,1) alisb from {dbname}.stb1 partition by tbname", ] for type_sql in type_sql_lists: @@ -189,7 +186,6 @@ class TDTestCase: tail_result = tdSql.queryResult tdSql.query(equal_sql) - print(equal_sql) equal_result = tdSql.queryResult @@ -198,257 +194,255 @@ class TDTestCase: else: tdLog.exit(" tail query check fail , tail sql is: %s " %tail_sql) - def basic_tail_function(self): + def basic_tail_function(self, dbname="db"): - # basic query - tdSql.query("select c1 from ct3") + # basic query + tdSql.query(f"select c1 from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select c1 from t1") + tdSql.query(f"select c1 from {dbname}.t1") tdSql.checkRows(12) - tdSql.query("select c1 from stb1") + tdSql.query(f"select c1 from {dbname}.stb1") tdSql.checkRows(25) # used for empty table , ct3 is empty - tdSql.query("select tail(c1,1) from ct3") + tdSql.query(f"select tail(c1,1) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select tail(c2,1) from ct3") + tdSql.query(f"select tail(c2,1) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select tail(c3,1) from ct3") + tdSql.query(f"select tail(c3,1) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select tail(c4,1) from ct3") + tdSql.query(f"select tail(c4,1) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select tail(c5,1) from ct3") + tdSql.query(f"select tail(c5,1) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select tail(c6,1) from ct3") - + 
tdSql.query(f"select tail(c6,1) from {dbname}.ct3") + # auto check for t1 table # used for regular table - tdSql.query("select tail(c1,1) from t1") - - tdSql.query("desc t1") + tdSql.query(f"select tail(c1,1) from {dbname}.t1") + + tdSql.query(f"desc {dbname}.t1") col_lists_rows = tdSql.queryResult col_lists = [] for col_name in col_lists_rows: if col_name[0] =="ts": continue - + col_lists.append(col_name[0]) - + for col in col_lists: - for loop in range(100): + for loop in range(100): limit = randint(1,100) offset = randint(0,100) - self.check_tail_table("t1" , col , limit , offset) + self.check_tail_table(f"{dbname}.t1" , col , limit , offset) # tail for invalid params - - tdSql.error("select tail(c1,-10,10) from ct1") - tdSql.error("select tail(c1,10,10000) from ct1") - tdSql.error("select tail(c1,10,-100) from ct1") - tdSql.error("select tail(c1,100/2,10) from ct1") - tdSql.error("select tail(c1,5,10*2) from ct1") - tdSql.query("select tail(c1,100,100) from ct1") + + tdSql.error(f"select tail(c1,-10,10) from {dbname}.ct1") + tdSql.error(f"select tail(c1,10,10000) from {dbname}.ct1") + tdSql.error(f"select tail(c1,10,-100) from {dbname}.ct1") + tdSql.error(f"select tail(c1,100/2,10) from {dbname}.ct1") + tdSql.error(f"select tail(c1,5,10*2) from {dbname}.ct1") + tdSql.query(f"select tail(c1,100,100) from {dbname}.ct1") tdSql.checkRows(0) - tdSql.query("select tail(c1,10,100) from ct1") + tdSql.query(f"select tail(c1,10,100) from {dbname}.ct1") tdSql.checkRows(0) - tdSql.error("select tail(c1,10,101) from ct1") - tdSql.query("select tail(c1,10,0) from ct1") - tdSql.query("select tail(c1,100,10) from ct1") + tdSql.error(f"select tail(c1,10,101) from {dbname}.ct1") + tdSql.query(f"select tail(c1,10,0) from {dbname}.ct1") + tdSql.query(f"select tail(c1,100,10) from {dbname}.ct1") tdSql.checkRows(3) - + # tail with super tags - tdSql.query("select tail(c1,10,10) from ct1") + tdSql.query(f"select tail(c1,10,10) from {dbname}.ct1") tdSql.checkRows(3) - 
tdSql.query("select tail(c1,10,10),tbname from ct1") - tdSql.query("select tail(c1,10,10),t1 from ct1") + tdSql.query(f"select tail(c1,10,10),tbname from {dbname}.ct1") + tdSql.query(f"select tail(c1,10,10),t1 from {dbname}.ct1") - # tail with common col - tdSql.query("select tail(c1,10,10) ,ts from ct1") - tdSql.query("select tail(c1,10,10) ,c1 from ct1") + # tail with common col + tdSql.query(f"select tail(c1,10,10) ,ts from {dbname}.ct1") + tdSql.query(f"select tail(c1,10,10) ,c1 from {dbname}.ct1") + + # tail with scalar function + tdSql.query(f"select tail(c1,10,10) ,abs(c1) from {dbname}.ct1") + tdSql.error(f"select tail(c1,10,10) , tail(c2,10,10) from {dbname}.ct1") + tdSql.query(f"select tail(c1,10,10) , abs(c2)+2 from {dbname}.ct1") - # tail with scalar function - tdSql.query("select tail(c1,10,10) ,abs(c1) from ct1") - tdSql.error("select tail(c1,10,10) , tail(c2,10,10) from ct1") - tdSql.query("select tail(c1,10,10) , abs(c2)+2 from ct1") - # bug need fix for scalar value or compute again - # tdSql.error(" select tail(c1,10,10) , 123 from ct1") - # tdSql.error(" select abs(tail(c1,10,10)) from ct1") - # tdSql.error(" select abs(tail(c1,10,10)) + 2 from ct1") + # tdSql.error(f"select tail(c1,10,10) , 123 from {dbname}.ct1") + # tdSql.error(f"select abs(tail(c1,10,10)) from {dbname}.ct1") + # tdSql.error(f"select abs(tail(c1,10,10)) + 2 from {dbname}.ct1") - # tail with aggregate function - tdSql.error("select tail(c1,10,10) ,sum(c1) from ct1") - tdSql.error("select tail(c1,10,10) ,max(c1) from ct1") - tdSql.error("select tail(c1,10,10) ,csum(c1) from ct1") - tdSql.error("select tail(c1,10,10) ,count(c1) from ct1") + # tail with aggregate function + tdSql.error(f"select tail(c1,10,10) ,sum(c1) from {dbname}.ct1") + tdSql.error(f"select tail(c1,10,10) ,max(c1) from {dbname}.ct1") + tdSql.error(f"select tail(c1,10,10) ,csum(c1) from {dbname}.ct1") + tdSql.error(f"select tail(c1,10,10) ,count(c1) from {dbname}.ct1") # tail with filter where - 
tdSql.query("select tail(c1,3,1) from ct4 where c1 is null") + tdSql.query(f"select tail(c1,3,1) from {dbname}.ct4 where c1 is null") tdSql.checkData(0, 0, None) tdSql.checkData(1, 0, None) - tdSql.query("select tail(c1,3,2) from ct4 where c1 >2 order by 1") + tdSql.query(f"select tail(c1,3,2) from {dbname}.ct4 where c1 >2 order by 1") tdSql.checkData(0, 0, 5) tdSql.checkData(1, 0, 6) tdSql.checkData(2, 0, 7) - tdSql.query("select tail(c1,2,1) from ct4 where c2 between 0 and 99999 order by 1") + tdSql.query(f"select tail(c1,2,1) from {dbname}.ct4 where c2 between 0 and 99999 order by 1") tdSql.checkData(0, 0, 1) tdSql.checkData(1, 0, 2) - # tail with union all - tdSql.query("select tail(c1,2,1) from ct4 union all select c1 from ct1") + # tail with union all + tdSql.query(f"select tail(c1,2,1) from {dbname}.ct4 union all select c1 from {dbname}.ct1") tdSql.checkRows(15) - tdSql.query("select tail(c1,2,1) from ct4 union all select c1 from ct2 order by 1") + tdSql.query(f"select tail(c1,2,1) from {dbname}.ct4 union all select c1 from {dbname}.ct2 order by 1") tdSql.checkRows(2) tdSql.checkData(0, 0, 0) tdSql.checkData(1, 0, 1) - tdSql.query("select tail(c2,2,1) from ct4 union all select abs(c2)/2 from ct4") + tdSql.query(f"select tail(c2,2,1) from {dbname}.ct4 union all select abs(c2)/2 from {dbname}.ct4") tdSql.checkRows(14) - # tail with join - # prepare join datas with same ts + # tail with join + # prepare join datas with same ts - tdSql.execute(" use db ") - tdSql.execute(" create stable st1 (ts timestamp , num int) tags(ind int)") - tdSql.execute(" create table tb1 using st1 tags(1)") - tdSql.execute(" create table tb2 using st1 tags(2)") + tdSql.execute(f" create stable {dbname}.st1 (ts timestamp , num int) tags(ind int)") + tdSql.execute(f" create table {dbname}.tb1 using {dbname}.st1 tags(1)") + tdSql.execute(f" create table {dbname}.tb2 using {dbname}.st1 tags(2)") - tdSql.execute(" create stable st2 (ts timestamp , num int) tags(ind int)") - tdSql.execute(" 
create table ttb1 using st2 tags(1)") - tdSql.execute(" create table ttb2 using st2 tags(2)") + tdSql.execute(f" create stable {dbname}.st2 (ts timestamp , num int) tags(ind int)") + tdSql.execute(f" create table {dbname}.ttb1 using {dbname}.st2 tags(1)") + tdSql.execute(f" create table {dbname}.ttb2 using {dbname}.st2 tags(2)") start_ts = 1622369635000 # 2021-05-30 18:13:55 for i in range(10): ts_value = start_ts+i*1000 - tdSql.execute(f" insert into tb1 values({ts_value} , {i})") - tdSql.execute(f" insert into tb2 values({ts_value} , {i})") + tdSql.execute(f" insert into {dbname}.tb1 values({ts_value} , {i})") + tdSql.execute(f" insert into {dbname}.tb2 values({ts_value} , {i})") - tdSql.execute(f" insert into ttb1 values({ts_value} , {i})") - tdSql.execute(f" insert into ttb2 values({ts_value} , {i})") + tdSql.execute(f" insert into {dbname}.ttb1 values({ts_value} , {i})") + tdSql.execute(f" insert into {dbname}.ttb2 values({ts_value} , {i})") - tdSql.query("select tail(tb2.num,3,2) from tb1, tb2 where tb1.ts=tb2.ts order by 1 desc") + tdSql.query(f"select tail(tb2.num,3,2) from {dbname}.tb1 tb1, {dbname}.tb2 tb2 where tb1.ts=tb2.ts order by 1 desc") tdSql.checkRows(3) tdSql.checkData(0,0,7) tdSql.checkData(1,0,6) tdSql.checkData(2,0,5) # nest query - # tdSql.query("select tail(c1,2) from (select _rowts , c1 from ct1)") - tdSql.query("select c1 from (select tail(c1,2) c1 from ct4) order by 1 nulls first") + # tdSql.query(f"select tail(c1,2) from (select _rowts , c1 from {dbname}.ct1)") + tdSql.query(f"select c1 from (select tail(c1,2) c1 from {dbname}.ct4) order by 1 nulls first") tdSql.checkRows(2) tdSql.checkData(0, 0, None) tdSql.checkData(1, 0, 0) - tdSql.query("select sum(c1) from (select tail(c1,2) c1 from ct1)") + tdSql.query(f"select sum(c1) from (select tail(c1,2) c1 from {dbname}.ct1)") tdSql.checkRows(1) tdSql.checkData(0, 0, 18) - tdSql.query("select abs(c1) from (select tail(c1,2) c1 from ct1)") + tdSql.query(f"select abs(c1) from (select tail(c1,2) 
c1 from {dbname}.ct1)") tdSql.checkRows(2) tdSql.checkData(0, 0, 9) - + #partition by tbname - tdSql.query(" select tail(c1,5) from stb1 partition by tbname ") + tdSql.query(f"select tail(c1,5) from {dbname}.stb1 partition by tbname ") tdSql.checkRows(10) - tdSql.query(" select tail(c1,3) from stb1 partition by tbname ") + tdSql.query(f"select tail(c1,3) from {dbname}.stb1 partition by tbname ") tdSql.checkRows(6) - - # group by - tdSql.error("select tail(c1,2) from ct1 group by c1") - tdSql.error("select tail(c1,2) from ct1 group by tbname") + + # group by + tdSql.error(f"select tail(c1,2) from {dbname}.ct1 group by c1") + tdSql.error(f"select tail(c1,2) from {dbname}.ct1 group by tbname") # super table - tdSql.error("select tbname , tail(c1,2) from stb1 group by tbname") - tdSql.query("select tail(c1,2) from stb1 partition by tbname") + tdSql.error(f"select tbname , tail(c1,2) from {dbname}.stb1 group by tbname") + tdSql.query(f"select tail(c1,2) from {dbname}.stb1 partition by tbname") tdSql.checkRows(4) - # bug need fix - # tdSql.query("select tbname , tail(c1,2) from stb1 partition by tbname") + # bug need fix + # tdSql.query(f"select tbname , tail(c1,2) from {dbname}.stb1 partition by tbname") # tdSql.checkRows(4) - # tdSql.query("select tbname , tail(c1,2) from stb1 partition by tbname order by tbname") + # tdSql.query(f"select tbname , tail(c1,2) from {dbname}.stb1 partition by tbname order by tbname") # tdSql.checkRows(4) - # tdSql.query(" select tbname , count(c1) from stb1 partition by tbname order by tbname ") + # tdSql.query(f"select tbname , count(c1) from {dbname}.stb1 partition by tbname order by tbname ") # tdSql.checkRows(2) - # tdSql.query(" select tbname , max(c1) ,c1 from stb1 partition by tbname order by tbname ") + # tdSql.query(f"select tbname , max(c1) ,c1 from {dbname}.stb1 partition by tbname order by tbname ") # tdSql.checkRows(2) - # tdSql.query(" select tbname ,first(c1) from stb1 partition by tbname order by tbname ") + # 
tdSql.query(f"select tbname ,first(c1) from {dbname}.stb1 partition by tbname order by tbname ") # tdSql.checkRows(2) - tdSql.query("select tail(c1,2) from stb1 partition by tbname") + tdSql.query(f"select tail(c1,2) from {dbname}.stb1 partition by tbname") tdSql.checkRows(4) - # # bug need fix - # tdSql.query(" select tbname , tail(c1,2) from stb1 where t1 = 0 partition by tbname ") + # # bug need fix + # tdSql.query(f"select tbname , tail(c1,2) from {dbname}.stb1 where t1 = 0 partition by tbname ") # tdSql.checkRows(2) - # tdSql.query(" select tbname , tail(c1,2) from stb1 where t1 = 0 partition by tbname order by tbname ") + # tdSql.query(f"select tbname , tail(c1,2) from {dbname}.stb1 where t1 = 0 partition by tbname order by tbname ") # tdSql.checkRows(2) - # tdSql.query(" select tbname , tail(c1,2) from stb1 where c1 = 0 partition by tbname order by tbname ") + # tdSql.query(f"select tbname , tail(c1,2) from {dbname}.stb1 where c1 = 0 partition by tbname order by tbname ") # tdSql.checkRows(3) - # tdSql.query(" select tbname , tail(c1,2) from stb1 where c1 = 0 partition by tbname ") + # tdSql.query(f"select tbname , tail(c1,2) from {dbname}.stb1 where c1 = 0 partition by tbname ") # tdSql.checkRows(3) - # tdSql.query(" select tbname , tail(c1,2) from stb1 where c1 = 0 partition by tbname ") + # tdSql.query(f"select tbname , tail(c1,2) from {dbname}.stb1 where c1 = 0 partition by tbname ") # tdSql.checkRows(3) - tdSql.query(" select tail(t1,2) from stb1 ") + tdSql.query(f"select tail(t1,2) from {dbname}.stb1 ") tdSql.checkRows(2) - tdSql.query(" select tail(t1+c1,2) from stb1 ") + tdSql.query(f"select tail(t1+c1,2) from {dbname}.stb1 ") tdSql.checkRows(2) - tdSql.query(" select tail(t1+c1,2) from stb1 partition by tbname ") + tdSql.query(f"select tail(t1+c1,2) from {dbname}.stb1 partition by tbname ") tdSql.checkRows(4) - tdSql.query(" select tail(t1,2) from stb1 partition by tbname ") + tdSql.query(f"select tail(t1,2) from {dbname}.stb1 partition by tbname ") 
tdSql.checkRows(4) - # nest query - tdSql.query(" select tail(c1,2) from (select _rowts , t1 ,c1 , tbname from stb1 ) ") + # nest query + tdSql.query(f"select tail(c1,2) from (select _rowts , t1 ,c1 , tbname from {dbname}.stb1 ) ") tdSql.checkRows(2) tdSql.checkData(0,0,None) tdSql.checkData(1,0,9) - tdSql.query("select tail(t1,2) from (select _rowts , t1 , tbname from stb1 )") + tdSql.query(f"select tail(t1,2) from (select _rowts , t1 , tbname from {dbname}.stb1 )") tdSql.checkRows(2) tdSql.checkData(0,0,4) tdSql.checkData(1,0,1) - def check_boundary_values(self): + def check_boundary_values(self, dbname="bound_test"): - tdSql.execute("drop database if exists bound_test") - tdSql.execute("create database if not exists bound_test") - tdSql.execute("use bound_test") + tdSql.execute(f"drop database if exists {dbname}") + tdSql.execute(f"create database if not exists {dbname}") tdSql.execute( - "create table stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);" + f"create table {dbname}.stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);" ) - tdSql.execute(f'create table sub1_bound using stb_bound tags ( 1 )') + tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )') tdSql.execute( - f"insert into sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.execute( - f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 
9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.execute( - f"insert into sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.execute( - f"insert into sub1_bound values ( now(), 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now(), 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.execute( - f"insert into sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) - + tdSql.error( - f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) - - tdSql.query("select tail(c2,2) from sub1_bound order by 1 desc") + + tdSql.query(f"select tail(c2,2) from {dbname}.sub1_bound order by 1 desc") tdSql.checkRows(2) tdSql.checkData(0,0,9223372036854775803) @@ -456,22 +450,22 @@ class TDTestCase: tdSql.prepare() tdLog.printNoPrefix("==========step1:create table ==============") - + self.prepare_datas() - tdLog.printNoPrefix("==========step2:test errors ==============") + tdLog.printNoPrefix("==========step2:test errors ==============") self.test_errors() - - tdLog.printNoPrefix("==========step3:support 
types ============") + + tdLog.printNoPrefix("==========step3:support types ============") self.support_types() - tdLog.printNoPrefix("==========step4: tail basic query ============") + tdLog.printNoPrefix("==========step4: tail basic query ============") self.basic_tail_function() - tdLog.printNoPrefix("==========step5: tail boundary query ============") + tdLog.printNoPrefix("==========step5: tail boundary query ============") self.check_boundary_values() diff --git a/tests/system-test/2-query/tan.py b/tests/system-test/2-query/tan.py index da47c1c2b2..bb696757d2 100644 --- a/tests/system-test/2-query/tan.py +++ b/tests/system-test/2-query/tan.py @@ -9,48 +9,46 @@ from util.cases import * class TDTestCase: - updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , - "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143, - "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143} + def init(self, conn, powSql): tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor()) - - def prepare_datas(self): + + def prepare_datas(self, dbname="db"): tdSql.execute( - '''create table stb1 + f'''create table {dbname}.stb1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) tags (t1 int) ''' ) - + tdSql.execute( - ''' - create table t1 + f''' + create table {dbname}.t1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) ''' ) for i in range(4): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )') for i in range(9): tdSql.execute( - f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 
'binary{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" ) tdSql.execute( - f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" ) - tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") - tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 
NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") tdSql.execute( - f'''insert into t1 values + f'''insert into {dbname}.t1 values ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a ) ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a ) @@ -65,14 +63,14 @@ class TDTestCase: ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ''' ) - + def check_result_auto_tan(self ,origin_query , pow_query): pow_result = tdSql.getResult(pow_query) origin_result = tdSql.getResult(origin_query) auto_result =[] - + for row in origin_result: row_check = [] for elem in row: @@ -90,7 +88,7 @@ class TDTestCase: if auto_result[row_index][col_index] == None and not (auto_result[row_index][col_index] == None and elem == None): check_status = False elif auto_result[row_index][col_index] != None and (auto_result[row_index][col_index] - elem > 0.00000001): - check_status = False + check_status = False else: pass if not check_status: @@ -98,174 +96,174 @@ class TDTestCase: sys.exit(1) else: tdLog.info("tan value check pass , it work as expected ,sql is \"%s\" "%pow_query ) - - def test_errors(self): + + def test_errors(self, dbname="db"): error_sql_lists = [ - "select tan from t1", - # "select tan(-+--+c1 ) from t1", - # "select +-tan(c1) from t1", - # "select ++-tan(c1) from t1", - # "select ++--tan(c1) from t1", - # "select - -tan(c1)*0 from t1", - # "select tan(tbname+1) from t1 ", - "select tan(123--123)==1 from t1", - "select tan(c1) as 'd1' from t1", - "select tan(c1 ,c2) from t1", - "select tan(c1 ,NULL ) from t1", - "select tan(,) from t1;", - "select tan(tan(c1) ab 
from t1)", - "select tan(c1 ) as int from t1", - "select tan from stb1", - # "select tan(-+--+c1) from stb1", - # "select +-tan(c1) from stb1", - # "select ++-tan(c1) from stb1", - # "select ++--tan(c1) from stb1", - # "select - -tan(c1)*0 from stb1", - # "select tan(tbname+1) from stb1 ", - "select tan(123--123)==1 from stb1", - "select tan(c1) as 'd1' from stb1", - "select tan(c1 ,c2 ) from stb1", - "select tan(c1 ,NULL) from stb1", - "select tan(,) from stb1;", - "select tan(tan(c1) ab from stb1)", - "select tan(c1) as int from stb1" + f"select tan from {dbname}.t1", + # f"select tan(-+--+c1 ) from {dbname}.t1", + # f"select +-tan(c1) from {dbname}.t1", + # f"select ++-tan(c1) from {dbname}.t1", + # f"select ++--tan(c1) from {dbname}.t1", + # f"select - -tan(c1)*0 from {dbname}.t1", + # f"select tan(tbname+1) from {dbname}.t1 ", + f"select tan(123--123)==1 from {dbname}.t1", + f"select tan(c1) as 'd1' from {dbname}.t1", + f"select tan(c1 ,c2) from {dbname}.t1", + f"select tan(c1 ,NULL ) from {dbname}.t1", + f"select tan(,) from {dbname}.t1;", + f"select tan(tan(c1) ab from {dbname}.t1)", + f"select tan(c1 ) as int from {dbname}.t1", + f"select tan from {dbname}.stb1", + # f"select tan(-+--+c1) from {dbname}.stb1", + # f"select +-tan(c1) from {dbname}.stb1", + # f"select ++-tan(c1) from {dbname}.stb1", + # f"select ++--tan(c1) from {dbname}.stb1", + # f"select - -tan(c1)*0 from {dbname}.stb1", + # f"select tan(tbname+1) from {dbname}.stb1 ", + f"select tan(123--123)==1 from {dbname}.stb1", + f"select tan(c1) as 'd1' from {dbname}.stb1", + f"select tan(c1 ,c2 ) from {dbname}.stb1", + f"select tan(c1 ,NULL) from {dbname}.stb1", + f"select tan(,) from {dbname}.stb1;", + f"select tan(tan(c1) ab from {dbname}.stb1)", + f"select tan(c1) as int from {dbname}.stb1" ] for error_sql in error_sql_lists: tdSql.error(error_sql) - - def support_types(self): + + def support_types(self, dbname="db"): type_error_sql_lists = [ - "select tan(ts) from t1" , - "select tan(c7) from 
t1", - "select tan(c8) from t1", - "select tan(c9) from t1", - "select tan(ts) from ct1" , - "select tan(c7) from ct1", - "select tan(c8) from ct1", - "select tan(c9) from ct1", - "select tan(ts) from ct3" , - "select tan(c7) from ct3", - "select tan(c8) from ct3", - "select tan(c9) from ct3", - "select tan(ts) from ct4" , - "select tan(c7) from ct4", - "select tan(c8) from ct4", - "select tan(c9) from ct4", - "select tan(ts) from stb1" , - "select tan(c7) from stb1", - "select tan(c8) from stb1", - "select tan(c9) from stb1" , + f"select tan(ts) from {dbname}.t1" , + f"select tan(c7) from {dbname}.t1", + f"select tan(c8) from {dbname}.t1", + f"select tan(c9) from {dbname}.t1", + f"select tan(ts) from {dbname}.ct1" , + f"select tan(c7) from {dbname}.ct1", + f"select tan(c8) from {dbname}.ct1", + f"select tan(c9) from {dbname}.ct1", + f"select tan(ts) from {dbname}.ct3" , + f"select tan(c7) from {dbname}.ct3", + f"select tan(c8) from {dbname}.ct3", + f"select tan(c9) from {dbname}.ct3", + f"select tan(ts) from {dbname}.ct4" , + f"select tan(c7) from {dbname}.ct4", + f"select tan(c8) from {dbname}.ct4", + f"select tan(c9) from {dbname}.ct4", + f"select tan(ts) from {dbname}.stb1" , + f"select tan(c7) from {dbname}.stb1", + f"select tan(c8) from {dbname}.stb1", + f"select tan(c9) from {dbname}.stb1" , - "select tan(ts) from stbbb1" , - "select tan(c7) from stbbb1", + f"select tan(ts) from {dbname}.stbbb1" , + f"select tan(c7) from {dbname}.stbbb1", - "select tan(ts) from tbname", - "select tan(c9) from tbname" + f"select tan(ts) from {dbname}.tbname", + f"select tan(c9) from {dbname}.tbname" ] - + for type_sql in type_error_sql_lists: tdSql.error(type_sql) - - + + type_sql_lists = [ - "select tan(c1) from t1", - "select tan(c2) from t1", - "select tan(c3) from t1", - "select tan(c4) from t1", - "select tan(c5) from t1", - "select tan(c6) from t1", + f"select tan(c1) from {dbname}.t1", + f"select tan(c2) from {dbname}.t1", + f"select tan(c3) from {dbname}.t1", + 
f"select tan(c4) from {dbname}.t1", + f"select tan(c5) from {dbname}.t1", + f"select tan(c6) from {dbname}.t1", - "select tan(c1) from ct1", - "select tan(c2) from ct1", - "select tan(c3) from ct1", - "select tan(c4) from ct1", - "select tan(c5) from ct1", - "select tan(c6) from ct1", + f"select tan(c1) from {dbname}.ct1", + f"select tan(c2) from {dbname}.ct1", + f"select tan(c3) from {dbname}.ct1", + f"select tan(c4) from {dbname}.ct1", + f"select tan(c5) from {dbname}.ct1", + f"select tan(c6) from {dbname}.ct1", - "select tan(c1) from ct3", - "select tan(c2) from ct3", - "select tan(c3) from ct3", - "select tan(c4) from ct3", - "select tan(c5) from ct3", - "select tan(c6) from ct3", + f"select tan(c1) from {dbname}.ct3", + f"select tan(c2) from {dbname}.ct3", + f"select tan(c3) from {dbname}.ct3", + f"select tan(c4) from {dbname}.ct3", + f"select tan(c5) from {dbname}.ct3", + f"select tan(c6) from {dbname}.ct3", - "select tan(c1) from stb1", - "select tan(c2) from stb1", - "select tan(c3) from stb1", - "select tan(c4) from stb1", - "select tan(c5) from stb1", - "select tan(c6) from stb1", + f"select tan(c1) from {dbname}.stb1", + f"select tan(c2) from {dbname}.stb1", + f"select tan(c3) from {dbname}.stb1", + f"select tan(c4) from {dbname}.stb1", + f"select tan(c5) from {dbname}.stb1", + f"select tan(c6) from {dbname}.stb1", - "select tan(c6) as alisb from stb1", - "select tan(c6) alisb from stb1", + f"select tan(c6) as alisb from {dbname}.stb1", + f"select tan(c6) alisb from {dbname}.stb1", ] for type_sql in type_sql_lists: tdSql.query(type_sql) - - def basic_tan_function(self): - # basic query - tdSql.query("select c1 from ct3") + def basic_tan_function(self, dbname="db"): + + # basic query + tdSql.query(f"select c1 from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select c1 from t1") + tdSql.query(f"select c1 from {dbname}.t1") tdSql.checkRows(12) - tdSql.query("select c1 from stb1") + tdSql.query(f"select c1 from {dbname}.stb1") tdSql.checkRows(25) # used 
for empty table , ct3 is empty - tdSql.query("select tan(c1) from ct3") + tdSql.query(f"select tan(c1) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select tan(c2) from ct3") + tdSql.query(f"select tan(c2) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select tan(c3) from ct3") + tdSql.query(f"select tan(c3) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select tan(c4) from ct3") + tdSql.query(f"select tan(c4) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select tan(c5) from ct3") + tdSql.query(f"select tan(c5) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select tan(c6) from ct3") + tdSql.query(f"select tan(c6) from {dbname}.ct3") tdSql.checkRows(0) # # used for regular table - tdSql.query("select tan(c1) from t1") + tdSql.query(f"select tan(c1) from {dbname}.t1") tdSql.checkData(0, 0, None) tdSql.checkData(1 , 0, 1.557407725) tdSql.checkData(3 , 0, -0.142546543) tdSql.checkData(5 , 0, None) - tdSql.query("select c1, c2, c3 , c4, c5 from t1") + tdSql.query(f"select c1, c2, c3 , c4, c5 from {dbname}.t1") tdSql.checkData(1, 4, 1.11000) tdSql.checkData(3, 3, 33) tdSql.checkData(5, 4, None) - tdSql.query("select ts,c1, c2, c3 , c4, c5 from t1") + tdSql.query(f"select ts,c1, c2, c3 , c4, c5 from {dbname}.t1") tdSql.checkData(1, 5, 1.11000) tdSql.checkData(3, 4, 33) tdSql.checkData(5, 5, None) - self.check_result_auto_tan( "select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from t1", "select tan(abs(c1)), tan(abs(c2)) ,tan(abs(c3)), tan(abs(c4)), tan(abs(c5)) from t1") - + self.check_result_auto_tan( f"select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from {dbname}.t1", f"select tan(abs(c1)), tan(abs(c2)) ,tan(abs(c3)), tan(abs(c4)), tan(abs(c5)) from {dbname}.t1") + # used for sub table - tdSql.query("select c2 ,tan(c2) from ct1") + tdSql.query(f"select c2 ,tan(c2) from {dbname}.ct1") tdSql.checkData(0, 1, -0.226288661) tdSql.checkData(1 , 1, 0.670533806) tdSql.checkData(3 , 1, -1.325559275) tdSql.checkData(4 , 1, 0.000000000) - 
tdSql.query("select c1, c5 ,tan(c5) from ct4") + tdSql.query(f"select c1, c5 ,tan(c5) from {dbname}.ct4") tdSql.checkData(0 , 2, None) tdSql.checkData(1 , 2, -0.605942929) tdSql.checkData(2 , 2, 11.879355609) tdSql.checkData(3 , 2, 0.395723765) tdSql.checkData(5 , 2, None) - self.check_result_auto_tan( "select c1, c2, c3 , c4, c5 from ct1", "select tan(c1), tan(c2) ,tan(c3), tan(c4), tan(c5) from ct1") - + self.check_result_auto_tan( f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select tan(c1), tan(c2) ,tan(c3), tan(c4), tan(c5) from {dbname}.ct1") + # nest query for tan functions - tdSql.query("select c4 , tan(c4) ,tan(tan(c4)) , tan(tan(tan(c4))) from ct1;") + tdSql.query(f"select c4 , tan(c4) ,tan(tan(c4)) , tan(tan(tan(c4))) from {dbname}.ct1;") tdSql.checkData(0 , 0 , 88) tdSql.checkData(0 , 1 , 0.035420501) tdSql.checkData(0 , 2 , 0.035435322) @@ -281,52 +279,52 @@ class TDTestCase: tdSql.checkData(11 , 2 , -0.040227928) tdSql.checkData(11 , 3 , -0.040249642) - # used for stable table - - tdSql.query("select tan(c1) from stb1") + # used for stable table + + tdSql.query(f"select tan(c1) from {dbname}.stb1") tdSql.checkRows(25) - + # used for not exists table - tdSql.error("select tan(c1) from stbbb1") - tdSql.error("select tan(c1) from tbname") - tdSql.error("select tan(c1) from ct5") + tdSql.error(f"select tan(c1) from {dbname}.stbbb1") + tdSql.error(f"select tan(c1) from {dbname}.tbname") + tdSql.error(f"select tan(c1) from {dbname}.ct5") + + # mix with common col + tdSql.query(f"select c1, tan(c1) from {dbname}.ct1") + tdSql.query(f"select c2, tan(c2) from {dbname}.ct4") - # mix with common col - tdSql.query("select c1, tan(c1) from ct1") - tdSql.query("select c2, tan(c2) from ct4") - # mix with common functions - tdSql.query("select c1, tan(c1),tan(c1), tan(tan(c1)) from ct4 ") + tdSql.query(f"select c1, tan(c1),tan(c1), tan(tan(c1)) from {dbname}.ct4 ") tdSql.checkData(0 , 0 ,None) tdSql.checkData(0 , 1 ,None) tdSql.checkData(0 , 2 ,None) 
tdSql.checkData(0 , 3 ,None) - + tdSql.checkData(3 , 0 , 6) tdSql.checkData(3 , 1 ,-0.291006191) tdSql.checkData(3 , 2 ,-0.291006191) tdSql.checkData(3 , 3 ,-0.299508909) - tdSql.query("select c1, tan(c1),c5, floor(c5) from stb1 ") + tdSql.query(f"select c1, tan(c1),c5, floor(c5) from {dbname}.stb1 ") # # mix with agg functions , not support - tdSql.error("select c1, tan(c1),c5, count(c5) from stb1 ") - tdSql.error("select c1, tan(c1),c5, count(c5) from ct1 ") - tdSql.error("select tan(c1), count(c5) from stb1 ") - tdSql.error("select tan(c1), count(c5) from ct1 ") - tdSql.error("select c1, count(c5) from ct1 ") - tdSql.error("select c1, count(c5) from stb1 ") + tdSql.error(f"select c1, tan(c1),c5, count(c5) from {dbname}.stb1 ") + tdSql.error(f"select c1, tan(c1),c5, count(c5) from {dbname}.ct1 ") + tdSql.error(f"select tan(c1), count(c5) from {dbname}.stb1 ") + tdSql.error(f"select tan(c1), count(c5) from {dbname}.ct1 ") + tdSql.error(f"select c1, count(c5) from {dbname}.ct1 ") + tdSql.error(f"select c1, count(c5) from {dbname}.stb1 ") # agg functions mix with agg functions - tdSql.query("select max(c5), count(c5) from stb1") - tdSql.query("select max(c5), count(c5) from ct1") + tdSql.query(f"select max(c5), count(c5) from {dbname}.stb1") + tdSql.query(f"select max(c5), count(c5) from {dbname}.ct1") - - # # bug fix for compute - tdSql.query("select c1, tan(c1) -0 ,tan(c1-4)-0 from ct4 ") + + # # bug fix for compute + tdSql.query(f"select c1, tan(c1) -0 ,tan(c1-4)-0 from {dbname}.ct4 ") tdSql.checkData(0, 0, None) tdSql.checkData(0, 1, None) tdSql.checkData(0, 2, None) @@ -334,7 +332,7 @@ class TDTestCase: tdSql.checkData(1, 1, -6.799711455) tdSql.checkData(1, 2, 1.157821282) - tdSql.query(" select c1, tan(c1) -0 ,tan(c1-0.1)-0.1 from ct4") + tdSql.query(f"select c1, tan(c1) -0 ,tan(c1-0.1)-0.1 from {dbname}.ct4") tdSql.checkData(0, 0, None) tdSql.checkData(0, 1, None) tdSql.checkData(0, 2, None) @@ -342,35 +340,33 @@ class TDTestCase: tdSql.checkData(1, 1, 
-6.799711455) tdSql.checkData(1, 2, -21.815112681) - tdSql.query("select c1, tan(c1), c2, tan(c2), c3, tan(c3) from ct1") + tdSql.query(f"select c1, tan(c1), c2, tan(c2), c3, tan(c3) from {dbname}.ct1") - def test_big_number(self): + def test_big_number(self, dbname="db"): - tdSql.query("select c1, tan(100000000) from ct1") # bigint to double data overflow + tdSql.query(f"select c1, tan(100000000) from {dbname}.ct1") # bigint to double data overflow tdSql.checkData(4, 1, math.tan(100000000)) - - tdSql.query("select c1, tan(10000000000000) from ct1") # bigint to double data overflow + tdSql.query(f"select c1, tan(10000000000000) from {dbname}.ct1") # bigint to double data overflow tdSql.checkData(4, 1, math.tan(10000000000000)) - tdSql.query("select c1, tan(10000000000000000000000000) from ct1") # bigint to double data overflow - tdSql.query("select c1, tan(10000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value + tdSql.query(f"select c1, tan(10000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow + tdSql.query(f"select c1, tan(10000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value tdSql.checkData(1, 1, math.tan(10000000000000000000000000.0)) - tdSql.query("select c1, tan(10000000000000000000000000000000000) from ct1") # bigint to double data overflow - tdSql.query("select c1, tan(10000000000000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value + tdSql.query(f"select c1, tan(10000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow + tdSql.query(f"select c1, tan(10000000000000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value tdSql.checkData(4, 1, math.tan(10000000000000000000000000000000000.0)) - tdSql.query("select c1, tan(10000000000000000000000000000000000000000) from ct1") # bigint to double data overflow - tdSql.query("select c1, 
tan(10000000000000000000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value + tdSql.query(f"select c1, tan(10000000000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow + tdSql.query(f"select c1, tan(10000000000000000000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value tdSql.checkData(4, 1, math.tan(10000000000000000000000000000000000000000.0)) - tdSql.query("select c1, tan(10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) from ct1") # bigint to double data overflow + tdSql.query(f"select c1, tan(10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow - def abs_func_filter(self): - tdSql.execute("use db") - tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(tan(c1)-0.5) from ct4 where c1>5 ") + def abs_func_filter(self, dbname="db"): + tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(tan(c1)-0.5) from {dbname}.ct4 where c1>5 ") tdSql.checkRows(3) tdSql.checkData(0,0,8) tdSql.checkData(0,1,8.000000000) @@ -378,7 +374,7 @@ class TDTestCase: tdSql.checkData(0,3,7.900000000) tdSql.checkData(0,4,-7.000000000) - tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(tan(c1)-0.5) from ct4 where c1=5 ") + tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(tan(c1)-0.5) from {dbname}.ct4 where c1=5 ") tdSql.checkRows(1) tdSql.checkData(0,0,5) tdSql.checkData(0,1,5.000000000) @@ -386,7 +382,7 @@ class TDTestCase: tdSql.checkData(0,3,4.900000000) tdSql.checkData(0,4,-3.000000000) - tdSql.query("select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(tan(c1)-0.5) from ct4 where c1>tan(c1) limit 1 ") + tdSql.query(f"select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(tan(c1)-0.5) from 
{dbname}.ct4 where c1>tan(c1) limit 1 ") tdSql.checkRows(1) tdSql.checkData(0,0,8) tdSql.checkData(0,1,88888) @@ -394,22 +390,17 @@ class TDTestCase: tdSql.checkData(0,3,8.000000000) tdSql.checkData(0,4,7.900000000) tdSql.checkData(0,5,-7.000000000) - - def pow_Arithmetic(self): - pass - - def check_boundary_values(self): + + def check_boundary_values(self, dbname="bound_test"): PI=3.1415926 - tdSql.execute("drop database if exists bound_test") - tdSql.execute("create database if not exists bound_test") - time.sleep(3) - tdSql.execute("use bound_test") + tdSql.execute(f"drop database if exists {dbname}") + tdSql.execute(f"create database if not exists {dbname}") tdSql.execute( - "create table stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);" + f"create table {dbname}.stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);" ) - tdSql.execute(f'create table sub1_bound using stb_bound tags ( 1 )') + tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )') tdSql.execute( f"insert into sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) @@ -425,14 +416,14 @@ class TDTestCase: tdSql.error( f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) - self.check_result_auto_tan( "select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from sub1_bound ", "select tan(abs(c1)), tan(abs(c2)) ,tan(abs(c3)), tan(abs(c4)), tan(abs(c5)) from sub1_bound") - - self.check_result_auto_tan( "select c1, c2, c3 , c3, c2 ,c1 from sub1_bound ", "select tan(c1), tan(c2) ,tan(c3), tan(c3), tan(c2) ,tan(c1) from sub1_bound") + self.check_result_auto_tan( f"select abs(c1), abs(c2), 
abs(c3) , abs(c4), abs(c5) from {dbname}.sub1_bound ", f"select tan(abs(c1)), tan(abs(c2)) ,tan(abs(c3)), tan(abs(c4)), tan(abs(c5)) from {dbname}.sub1_bound") + + self.check_result_auto_tan( f"select c1, c2, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ", f"select tan(c1), tan(c2) ,tan(c3), tan(c3), tan(c2) ,tan(c1) from {dbname}.sub1_bound") + + self.check_result_auto_tan(f"select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from {dbname}.sub1_bound" , f"select tan(abs(c1)) from {dbname}.sub1_bound" ) - self.check_result_auto_tan("select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from sub1_bound" , "select tan(abs(c1)) from sub1_bound" ) - # check basic elem for table per row - tdSql.query("select tan(abs(c1)) ,tan(abs(c2)) , tan(abs(c3)) , tan(abs(c4)), tan(abs(c5)), tan(abs(c6)) from sub1_bound ") + tdSql.query(f"select tan(abs(c1)) ,tan(abs(c2)) , tan(abs(c3)) , tan(abs(c4)), tan(abs(c5)), tan(abs(c6)) from {dbname}.sub1_bound ") tdSql.checkData(0,0,math.tan(2147483647)) tdSql.checkData(0,1,math.tan(9223372036854775807)) tdSql.checkData(0,2,math.tan(32767)) @@ -450,76 +441,74 @@ class TDTestCase: tdSql.checkData(3,4,math.tan(339999995214436424907732413799364296704.00000)) # check + - * / in functions - tdSql.query("select tan(abs(c1+1)) ,tan(abs(c2)) , tan(abs(c3*1)) , tan(abs(c4/2)), tan(abs(c5))/2, tan(abs(c6)) from sub1_bound ") + tdSql.query(f"select tan(abs(c1+1)) ,tan(abs(c2)) , tan(abs(c3*1)) , tan(abs(c4/2)), tan(abs(c5))/2, tan(abs(c6)) from {dbname}.sub1_bound ") tdSql.checkData(0,0,math.tan(2147483648.000000000)) tdSql.checkData(0,1,math.tan(9223372036854775807)) tdSql.checkData(0,2,math.tan(32767.000000000)) tdSql.checkData(0,3,math.tan(63.500000000)) - tdSql.execute("create stable st (ts timestamp, num1 float, num2 double) tags (t1 int);") - tdSql.execute(f'create table tb1 using st tags (1)') - tdSql.execute(f'create table tb2 using st tags (2)') - tdSql.execute(f'create table tb3 using st tags (3)') - 
tdSql.execute('insert into tb1 values (now()-40s, {}, {})'.format(PI/2 ,PI/2 )) - tdSql.execute('insert into tb1 values (now()-30s, {}, {})'.format(PI ,PI )) - tdSql.execute('insert into tb1 values (now()-20s, {}, {})'.format(PI*1.5 ,PI*1.5)) - tdSql.execute('insert into tb1 values (now()-10s, {}, {})'.format(PI*2 ,PI*2)) - tdSql.execute('insert into tb1 values (now(), {}, {})'.format(PI*2.5 ,PI*2.5)) + tdSql.execute(f"create stable {dbname}.st (ts timestamp, num1 float, num2 double) tags (t1 int);") + tdSql.execute(f'create table {dbname}.tb1 using {dbname}.st tags (1)') + tdSql.execute(f'create table {dbname}.tb2 using {dbname}.st tags (2)') + tdSql.execute(f'create table {dbname}.tb3 using {dbname}.st tags (3)') + tdSql.execute(f'insert into {dbname}.tb1 values (now()-40s, {PI/2}, {PI/2})') + tdSql.execute(f'insert into {dbname}.tb1 values (now()-30s, {PI}, {PI})') + tdSql.execute(f'insert into {dbname}.tb1 values (now()-20s, {PI*1.5}, {PI*1.5})') + tdSql.execute(f'insert into {dbname}.tb1 values (now()-10s, {PI*2}, {PI*2})') + tdSql.execute(f'insert into {dbname}.tb1 values (now(), {PI*2.5}, {PI*2.5})') - tdSql.execute('insert into tb2 values (now()-40s, {}, {})'.format(PI/2 ,PI/2 )) - tdSql.execute('insert into tb2 values (now()-30s, {}, {})'.format(PI ,PI )) - tdSql.execute('insert into tb2 values (now()-20s, {}, {})'.format(PI*1.5 ,PI*1.5)) - tdSql.execute('insert into tb2 values (now()-10s, {}, {})'.format(PI*2 ,PI*2)) - tdSql.execute('insert into tb2 values (now(), {}, {})'.format(PI*2.5 ,PI*2.5)) + tdSql.execute(f'insert into {dbname}.tb2 values (now()-40s, {PI/2}, {PI/2})') + tdSql.execute(f'insert into {dbname}.tb2 values (now()-30s, {PI}, {PI})') + tdSql.execute(f'insert into {dbname}.tb2 values (now()-20s, {PI*1.5}, {PI*1.5})') + tdSql.execute(f'insert into {dbname}.tb2 values (now()-10s, {PI*2}, {PI*2})') + tdSql.execute(f'insert into {dbname}.tb2 values (now(), {PI*2.5}, {PI*2.5})') for i in range(100): - tdSql.execute('insert into tb3 values 
(now()+{}s, {}, {})'.format(i,PI*(5+i)/2 ,PI*(5+i)/2)) + tdSql.execute(f'insert into {dbname}.tb3 values (now()+{i}s, {PI*(5+i)/2}, {PI*(5+i)/2})') - self.check_result_auto_tan("select num1,num2 from tb3;" , "select tan(num1),tan(num2) from tb3") + self.check_result_auto_tan(f"select num1,num2 from {dbname}.tb3;" , f"select tan(num1),tan(num2) from {dbname}.tb3") - def support_super_table_test(self): - tdSql.execute(" use db ") - self.check_result_auto_tan( " select c5 from stb1 order by ts " , "select tan(c5) from stb1 order by ts" ) - self.check_result_auto_tan( " select c5 from stb1 order by tbname " , "select tan(c5) from stb1 order by tbname" ) - self.check_result_auto_tan( " select c5 from stb1 where c1 > 0 order by tbname " , "select tan(c5) from stb1 where c1 > 0 order by tbname" ) - self.check_result_auto_tan( " select c5 from stb1 where c1 > 0 order by tbname " , "select tan(c5) from stb1 where c1 > 0 order by tbname" ) + def support_super_table_test(self, dbname="db"): + self.check_result_auto_tan( f"select c5 from {dbname}.stb1 order by ts " , f"select tan(c5) from {dbname}.stb1 order by ts" ) + self.check_result_auto_tan( f"select c5 from {dbname}.stb1 order by tbname " , f"select tan(c5) from {dbname}.stb1 order by tbname" ) + self.check_result_auto_tan( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select tan(c5) from {dbname}.stb1 where c1 > 0 order by tbname" ) + self.check_result_auto_tan( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select tan(c5) from {dbname}.stb1 where c1 > 0 order by tbname" ) - self.check_result_auto_tan( " select t1,c5 from stb1 order by ts " , "select tan(t1), tan(c5) from stb1 order by ts" ) - self.check_result_auto_tan( " select t1,c5 from stb1 order by tbname " , "select tan(t1) ,tan(c5) from stb1 order by tbname" ) - self.check_result_auto_tan( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select tan(t1) ,tan(c5) from stb1 where c1 > 0 order by tbname" ) - 
self.check_result_auto_tan( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select tan(t1) , tan(c5) from stb1 where c1 > 0 order by tbname" ) + self.check_result_auto_tan( f"select t1,c5 from {dbname}.stb1 order by ts " , f"select tan(t1), tan(c5) from {dbname}.stb1 order by ts" ) + self.check_result_auto_tan( f"select t1,c5 from {dbname}.stb1 order by tbname " , f"select tan(t1) ,tan(c5) from {dbname}.stb1 order by tbname" ) + self.check_result_auto_tan( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select tan(t1) ,tan(c5) from {dbname}.stb1 where c1 > 0 order by tbname" ) + self.check_result_auto_tan( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select tan(t1) , tan(c5) from {dbname}.stb1 where c1 > 0 order by tbname" ) pass - - + + def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring tdSql.prepare() tdLog.printNoPrefix("==========step1:create table ==============") - + self.prepare_datas() - tdLog.printNoPrefix("==========step2:test errors ==============") + tdLog.printNoPrefix("==========step2:test errors ==============") self.test_errors() - - tdLog.printNoPrefix("==========step3:support types ============") + + tdLog.printNoPrefix("==========step3:support types ============") self.support_types() - tdLog.printNoPrefix("==========step4: tan basic query ============") + tdLog.printNoPrefix("==========step4: tan basic query ============") self.basic_tan_function() - tdLog.printNoPrefix("==========step5: big number tan query ============") + tdLog.printNoPrefix("==========step5: big number tan query ============") self.test_big_number() - - tdLog.printNoPrefix("==========step6: tan boundary query ============") + tdLog.printNoPrefix("==========step6: tan boundary query ============") self.check_boundary_values() - tdLog.printNoPrefix("==========step7: tan filter query ============") + tdLog.printNoPrefix("==========step7: tan filter query ============") self.abs_func_filter() 
diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh index d741b6745b..a91ec01a3b 100755 --- a/tests/system-test/fulltest.sh +++ b/tests/system-test/fulltest.sh @@ -156,6 +156,18 @@ python3 ./test.py -f 2-query/spread.py python3 ./test.py -f 2-query/spread.py -R python3 ./test.py -f 2-query/sqrt.py # python3 ./test.py -f 2-query/sqrt.py -R +python3 ./test.py -f 2-query/statecount.py +python3 ./test.py -f 2-query/statecount.py -R +python3 ./test.py -f 2-query/stateduration.py +python3 ./test.py -f 2-query/stateduration.py -R +python3 ./test.py -f 2-query/substr.py +python3 ./test.py -f 2-query/substr.py -R +python3 ./test.py -f 2-query/sum.py +python3 ./test.py -f 2-query/sum.py -R +python3 ./test.py -f 2-query/tail.py +python3 ./test.py -f 2-query/tail.py -R +python3 ./test.py -f 2-query/tan.py +python3 ./test.py -f 2-query/tan.py -R python3 ./test.py -f 1-insert/update_data.py @@ -166,7 +178,6 @@ python3 ./test.py -f 2-query/varchar.py python3 ./test.py -f 2-query/upper.py python3 ./test.py -f 2-query/lower.py python3 ./test.py -f 2-query/join2.py -python3 ./test.py -f 2-query/substr.py python3 ./test.py -f 2-query/union.py python3 ./test.py -f 2-query/union1.py python3 ./test.py -f 2-query/concat2.py @@ -182,7 +193,6 @@ python3 ./test.py -f 2-query/json_tag.py python3 ./test.py -f 2-query/top.py python3 ./test.py -f 2-query/log.py -python3 ./test.py -f 2-query/tan.py # python3 ./test.py -f 2-query/nestedQuery.py # TD-15983 subquery output duplicate name column. 
# Please Xiangyang Guo modify the following script @@ -193,9 +203,6 @@ python3 ./test.py -f 2-query/csum.py python3 ./test.py -f 2-query/mavg.py python3 ./test.py -f 2-query/function_diff.py python3 ./test.py -f 2-query/unique.py -python3 ./test.py -f 2-query/stateduration.py -python3 ./test.py -f 2-query/statecount.py -python3 ./test.py -f 2-query/tail.py python3 ./test.py -f 2-query/ttl_comment.py python3 ./test.py -f 2-query/twa.py python3 ./test.py -f 2-query/queryQnode.py From 8ae2907dcbbf5dee536df8158a95ec4fab68a12c Mon Sep 17 00:00:00 2001 From: cpwu Date: Thu, 18 Aug 2022 11:14:20 +0800 Subject: [PATCH 10/72] fix sql --- tests/pytest/util/sql.py | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/tests/pytest/util/sql.py b/tests/pytest/util/sql.py index c0c86f0d85..69e7b14d8f 100644 --- a/tests/pytest/util/sql.py +++ b/tests/pytest/util/sql.py @@ -225,25 +225,21 @@ class TDSql: # suppose user want to check nanosecond timestamp if a longer data passed if (len(data) >= 28): if pd.to_datetime(self.queryResult[row][col]) == pd.to_datetime(data): - tdLog.info("sql:%s, row:%d col:%d data:%d == expect:%s" % - (self.sql, row, col, self.queryResult[row][col], data)) + tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}") else: if self.queryResult[row][col] == _parse_datetime(data): - tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % - (self.sql, row, col, self.queryResult[row][col], data)) + tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}") return if str(self.queryResult[row][col]) == str(data): - tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % - (self.sql, row, col, self.queryResult[row][col], data)) + tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}") return + elif isinstance(data, float): if abs(data) >= 1 and abs((self.queryResult[row][col] - data) / data) <= 
0.000001: - tdLog.info("sql:%s, row:%d col:%d data:%f == expect:%f" % - (self.sql, row, col, self.queryResult[row][col], data)) + tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}") elif abs(data) < 1 and abs(self.queryResult[row][col] - data) <= 0.000001: - tdLog.info("sql:%s, row:%d col:%d data:%f == expect:%f" % - (self.sql, row, col, self.queryResult[row][col], data)) + tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}") else: caller = inspect.getframeinfo(inspect.stack()[1][0]) args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data) From bea875aea7d4e2fdeff8d5f57eee60f02faab22d Mon Sep 17 00:00:00 2001 From: cpwu Date: Fri, 19 Aug 2022 17:46:24 +0800 Subject: [PATCH 11/72] add case to support rest API --- tests/pytest/util/sql.py | 2 +- tests/system-test/2-query/function_diff.py | 61 ++- tests/system-test/2-query/join2.py | 4 +- tests/system-test/2-query/log.py | 164 ++------ tests/system-test/2-query/lower.py | 71 ++-- tests/system-test/2-query/mavg.py | 40 +- tests/system-test/2-query/nestedQuery_str.py | 3 - tests/system-test/2-query/sin.py | 69 ++-- tests/system-test/2-query/sqrt.py | 26 +- tests/system-test/2-query/tan.py | 35 +- tests/system-test/2-query/timetruncate.py | 44 ++- tests/system-test/2-query/tsbsQuery.py | 114 +++--- tests/system-test/2-query/ttl_comment.py | 64 +-- tests/system-test/2-query/twa.py | 67 ++-- tests/system-test/2-query/union.py | 74 ++-- tests/system-test/2-query/unique.py | 396 +++++++++---------- tests/system-test/2-query/upper.py | 70 ++-- tests/system-test/2-query/varchar.py | 27 +- tests/system-test/fulltest.sh | 57 ++- 19 files changed, 635 insertions(+), 753 deletions(-) diff --git a/tests/pytest/util/sql.py b/tests/pytest/util/sql.py index 69e7b14d8f..b320cf5995 100644 --- a/tests/pytest/util/sql.py +++ b/tests/pytest/util/sql.py @@ -311,7 +311,7 @@ class TDSql: tdLog.exit("%s(%d) 
failed: sql:%s, col_name_list:%s != expect_col_name_list:%s" % args) def __check_equal(self, elm, expect_elm): - if not type(elm) in(list, tuple) and elm == expect_elm: + if elm == expect_elm: return True if type(elm) in(list, tuple) and type(expect_elm) in(list, tuple): if len(elm) != len(expect_elm): diff --git a/tests/system-test/2-query/function_diff.py b/tests/system-test/2-query/function_diff.py index fd5d6ea1cf..946453bb23 100644 --- a/tests/system-test/2-query/function_diff.py +++ b/tests/system-test/2-query/function_diff.py @@ -193,43 +193,38 @@ class TDTestCase: # case17: only support normal table join case17 = { - "col": "t1.c1", - "table_expr": "t1, t2", - "condition": "where t1.ts=t2.ts" + "col": "table1.c1 ", + "table_expr": "db.t1 as table1, db.t2 as table2", + "condition": "where table1.ts=table2.ts" } self.checkdiff(**case17) - # case18~19: with group by - # case18 = { - # "table_expr": "db.t1", - # "condition": "group by c6" - # } - # self.checkdiff(**case18) + # case18~19: with group by , function diff not support group by + case19 = { - "table_expr": "db.stb1", + "table_expr": "db.stb1 where tbname =='t0' ", "condition": "partition by tbname order by tbname" # partition by tbname } self.checkdiff(**case19) - # # case20~21: with order by - # case20 = {"condition": "order by ts"} - # self.checkdiff(**case20) + # case20~21: with order by , Not a single-group group function - # # case22: with union + # case22: with union # case22 = { - # "condition": "union all select diff(c1) from t2" + # "condition": "union all select diff(c1) from db.t2 " # } # self.checkdiff(**case22) + tdSql.query("select count(c1) from db.t1 union all select count(c1) from db.t2") # case23: with limit/slimit case23 = { "condition": "limit 1" } self.checkdiff(**case23) - # case24 = { - # "table_expr": "db.stb1", - # "condition": "group by tbname slimit 1 soffset 1" - # } - # self.checkdiff(**case24) + case24 = { + "table_expr": "db.stb1", + "condition": "partition by tbname 
order by tbname slimit 1 soffset 1" + } + self.checkdiff(**case24) pass @@ -284,9 +279,9 @@ class TDTestCase: tdSql.query(self.diff_query_form(alias=", c2")) # mix with other 1 # tdSql.error(self.diff_query_form(table_expr="db.stb1")) # select stb directly stb_join = { - "col": "stb1.c1", - "table_expr": "stb1, stb2", - "condition": "where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts" + "col": "stable1.c1", + "table_expr": "db.stb1 as stable1, db.stb2 as stable2", + "condition": "where stable1.ts=stable2.ts and stable1.st1=stable2.st2 order by stable1.ts" } tdSql.query(self.diff_query_form(**stb_join)) # stb join interval_sql = { @@ -315,20 +310,20 @@ class TDTestCase: for i in range(tbnum): for j in range(data_row): tdSql.execute( - f"insert into t{i} values (" + f"insert into db.t{i} values (" f"{basetime + (j+1)*10}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, " f"'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, " f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )" ) tdSql.execute( - f"insert into t{i} values (" + f"insert into db.t{i} values (" f"{basetime - (j+1) * 10}, {random.randint(1, 200)}, {random.uniform(1, 200)}, {basetime - random.randint(1, 200)}, " f"'binary_{j}_1', {random.uniform(1, 200)}, {random.choice([0, 1])}, {random.randint(1,200)}, " f"{random.randint(1,200)}, {random.randint(1,127)}, 'nchar_{j}_1' )" ) tdSql.execute( - f"insert into tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )" + f"insert into db.tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )" ) pass @@ -349,8 +344,8 @@ class TDTestCase: "create stable db.stb2 (ts timestamp, c1 int) tags(st2 int)" ) for i in range(tbnum): - tdSql.execute(f"create table t{i} using db.stb1 tags({i})") - tdSql.execute(f"create table tt{i} using db.stb2 tags({i})") + tdSql.execute(f"create table db.t{i} using db.stb1 tags({i})") + 
tdSql.execute(f"create table db.tt{i} using db.stb2 tags({i})") pass def diff_support_stable(self): @@ -398,8 +393,8 @@ class TDTestCase: tdLog.printNoPrefix("######## insert only NULL test:") for i in range(tbnum): - tdSql.execute(f"insert into t{i}(ts) values ({nowtime - 5})") - tdSql.execute(f"insert into t{i}(ts) values ({nowtime + 5})") + tdSql.execute(f"insert into db.t{i}(ts) values ({nowtime - 5})") + tdSql.execute(f"insert into db.t{i}(ts) values ({nowtime + 5})") self.diff_current_query() self.diff_error_query() @@ -430,9 +425,9 @@ class TDTestCase: tdLog.printNoPrefix("######## insert data mix with NULL test:") for i in range(tbnum): - tdSql.execute(f"insert into t{i}(ts) values ({nowtime})") - tdSql.execute(f"insert into t{i}(ts) values ({nowtime-(per_table_rows+3)*10})") - tdSql.execute(f"insert into t{i}(ts) values ({nowtime+(per_table_rows+3)*10})") + tdSql.execute(f"insert into db.t{i}(ts) values ({nowtime})") + tdSql.execute(f"insert into db.t{i}(ts) values ({nowtime-(per_table_rows+3)*10})") + tdSql.execute(f"insert into db.t{i}(ts) values ({nowtime+(per_table_rows+3)*10})") self.diff_current_query() self.diff_error_query() diff --git a/tests/system-test/2-query/join2.py b/tests/system-test/2-query/join2.py index 5533cb840e..a3818ed77d 100644 --- a/tests/system-test/2-query/join2.py +++ b/tests/system-test/2-query/join2.py @@ -52,12 +52,12 @@ class TDTestCase: return query_condition - def __join_condition(self, tb_list, filter=PRIMARY_COL, INNER=False): + def __join_condition(self, tb_list, filter=PRIMARY_COL, INNER=False, alias_tb1="tb1", alias_tb2="tb2"): table_reference = tb_list[0] join_condition = table_reference join = "inner join" if INNER else "join" for i in range(len(tb_list[1:])): - join_condition += f" {join} {tb_list[i+1]} on {table_reference}.{filter}={tb_list[i+1]}.{filter}" + join_condition += f" as {alias_tb1} {join} {tb_list[i+1]} as {alias_tb2} on {alias_tb1}.{filter}={alias_tb2}.{filter}" return join_condition diff --git 
a/tests/system-test/2-query/log.py b/tests/system-test/2-query/log.py index e304284bf9..358d2b9551 100644 --- a/tests/system-test/2-query/log.py +++ b/tests/system-test/2-query/log.py @@ -65,8 +65,7 @@ class TDTestCase: ''' ) - - def check_result_auto_log(self ,origin_query , log_query): + def check_result_auto_log(self ,base , origin_query , log_query): log_result = tdSql.getResult(log_query) origin_result = tdSql.getResult(origin_query) @@ -76,113 +75,30 @@ class TDTestCase: for row in origin_result: row_check = [] for elem in row: - if elem == None: - elem = None - elif elem >0: - elem = math.log(elem) - elif elem <=0: + if base ==1: elem = None + else: + if elem == None: + elem = None + elif elem ==1: + elem = 0.0 + elif elem >0 and elem !=1 : + if base==None : + elem = math.log(elem ) + else: + print(base , elem) + elem = math.log(elem , base) + elif elem <=0: + elem = None + row_check.append(elem) auto_result.append(row_check) - check_status = True + tdSql.query(log_query) for row_index , row in enumerate(log_result): for col_index , elem in enumerate(row): - if auto_result[row_index][col_index] != elem: - check_status = False - if not check_status: - tdLog.notice("log function value has not as expected , sql is \"%s\" "%log_query ) - sys.exit(1) - else: - tdLog.info("log value check pass , it work as expected ,sql is \"%s\" "%log_query ) - - def check_result_auto_log2(self ,origin_query , log_query): - - log_result = tdSql.getResult(log_query) - origin_result = tdSql.getResult(origin_query) - - auto_result =[] - - for row in origin_result: - row_check = [] - for elem in row: - if elem == None: - elem = None - elif elem >0: - elem = math.log(elem,2) - elif elem <=0: - elem = None - row_check.append(elem) - auto_result.append(row_check) - - check_status = True - for row_index , row in enumerate(log_result): - for col_index , elem in enumerate(row): - if auto_result[row_index][col_index] != elem: - check_status = False - if not check_status: - tdLog.notice("log 
function value has not as expected , sql is \"%s\" "%log_query ) - sys.exit(1) - else: - tdLog.info("log value check pass , it work as expected ,sql is \"%s\" "%log_query ) - - def check_result_auto_log1(self ,origin_query , log_query): - log_result = tdSql.getResult(log_query) - origin_result = tdSql.getResult(origin_query) - - auto_result =[] - - for row in origin_result: - row_check = [] - for elem in row: - if elem == None: - elem = None - elif elem >0: - elem = None - elif elem <=0: - elem = None - row_check.append(elem) - auto_result.append(row_check) - - check_status = True - for row_index , row in enumerate(log_result): - for col_index , elem in enumerate(row): - if auto_result[row_index][col_index] != elem: - check_status = False - if not check_status: - tdLog.notice("log function value has not as expected , sql is \"%s\" "%log_query ) - sys.exit(1) - else: - tdLog.info("log value check pass , it work as expected ,sql is \"%s\" "%log_query ) - def check_result_auto_log__10(self ,origin_query , log_query): - log_result = tdSql.getResult(log_query) - origin_result = tdSql.getResult(origin_query) - - auto_result =[] - - for row in origin_result: - row_check = [] - for elem in row: - if elem == None: - elem = None - elif elem >0: - elem = None - elif elem <=0: - elem = None - row_check.append(elem) - auto_result.append(row_check) - - check_status = True - for row_index , row in enumerate(log_result): - for col_index , elem in enumerate(row): - if auto_result[row_index][col_index] != elem: - check_status = False - if not check_status: - tdLog.notice("log function value has not as expected , sql is \"%s\" "%log_query ) - sys.exit(1) - else: - tdLog.info("log value check pass , it work as expected ,sql is \"%s\" "%log_query ) - + tdSql.checkData(row_index , col_index ,auto_result[row_index][col_index]) + def test_errors(self, dbname="db"): error_sql_lists = [ f"select log from {dbname}.t1", @@ -328,10 +244,10 @@ class TDTestCase: tdSql.checkData(3 , 0, 
1.098612289) tdSql.checkData(4 , 0, 1.386294361) - self.check_result_auto_log( f"select c1, c2, c3 , c4, c5 from {dbname}.t1", f"select log(c1), log(c2) ,log(c3), log(c4), log(c5) from {dbname}.t1") - self.check_result_auto_log2( f"select c1, c2, c3 , c4, c5 from {dbname}.t1", f"select log(c1 ,2), log(c2 ,2) ,log(c3, 2), log(c4 ,2), log(c5 ,2) from {dbname}.t1") - self.check_result_auto_log__10( f"select c1, c2, c3 , c4, c5 from {dbname}.t1", f"select log(c1 ,1), log(c2 ,1) ,log(c3, 1), log(c4 ,1), log(c5 ,1) from {dbname}.t1") - self.check_result_auto_log__10( f"select c1, c2, c3 , c4, c5 from {dbname}.t1", f"select log(c1 ,-10), log(c2 ,-10) ,log(c3, -10), log(c4 ,-10), log(c5 ,-10) from {dbname}.t1") + self.check_result_auto_log( None , f"select c1, c2, c3 , c4, c5 from {dbname}.t1", f"select log(c1), log(c2) ,log(c3), log(c4), log(c5) from {dbname}.t1") + self.check_result_auto_log( 2 , f"select c1, c2, c3 , c4, c5 from {dbname}.t1", f"select log(c1 ,2), log(c2 ,2) ,log(c3, 2), log(c4 ,2), log(c5 ,2) from {dbname}.t1") + self.check_result_auto_log( 1, f"select c1, c2, c3 , c4, c5 from {dbname}.t1", f"select log(c1 ,1), log(c2 ,1) ,log(c3, 1), log(c4 ,1), log(c5 ,1) from {dbname}.t1") + self.check_result_auto_log( 10 ,f"select c1, c2, c3 , c4, c5 from {dbname}.t1", f"select log(c1 ,10), log(c2 ,10) ,log(c3, 10), log(c4 ,10), log(c5 ,10) from {dbname}.t1") # used for sub table tdSql.query(f"select c1 ,log(c1 ,3) from {dbname}.ct1") @@ -349,9 +265,9 @@ class TDTestCase: tdSql.checkData(3 , 2, 0.147315235) tdSql.checkData(4 , 2, None) - self.check_result_auto_log( f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select log(c1), log(c2) ,log(c3), log(c4), log(c5) from {dbname}.ct1") - self.check_result_auto_log2( f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select log(c1,2), log(c2,2) ,log(c3,2), log(c4,2), log(c5,2) from {dbname}.ct1") - self.check_result_auto_log__10( f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select log(c1,-10), log(c2,-10) 
,log(c3,-10), log(c4,-10), log(c5,-10) from {dbname}.ct1") + self.check_result_auto_log( None ,f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select log(c1), log(c2) ,log(c3), log(c4), log(c5) from {dbname}.ct1") + self.check_result_auto_log( 2, f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select log(c1,2), log(c2,2) ,log(c3,2), log(c4,2), log(c5,2) from {dbname}.ct1") + self.check_result_auto_log( 10 , f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select log(c1,10), log(c2,10) ,log(c3,10), log(c4,10), log(c5,10) from {dbname}.ct1") # nest query for log functions tdSql.query(f"select c1 , log(c1,3) ,log(log(c1,3),3) , log(log(log(c1,3),3),3) from {dbname}.ct1;") @@ -585,15 +501,15 @@ class TDTestCase: tdSql.error( f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) - self.check_result_auto_log( f"select c1, c2, c3 , c4, c5 ,c6 from {dbname}.sub1_bound ", f"select log(c1), log(c2) ,log(c3), log(c4), log(c5) ,log(c6) from {dbname}.sub1_bound") - self.check_result_auto_log2( f"select c1, c2, c3 , c4, c5 ,c6 from {dbname}.sub1_bound ", f"select log(c1,2), log(c2,2) ,log(c3,2), log(c4,2), log(c5,2) ,log(c6,2) from {dbname}.sub1_bound") - self.check_result_auto_log__10( f"select c1, c2, c3 , c4, c5 ,c6 from {dbname}.sub1_bound ", f"select log(c1,-10), log(c2,-10) ,log(c3,-10), log(c4,-10), log(c5,-10) ,log(c6,-10) from {dbname}.sub1_bound") + self.check_result_auto_log(None , f"select c1, c2, c3 , c4, c5 ,c6 from {dbname}.sub1_bound ", f"select log(c1), log(c2) ,log(c3), log(c4), log(c5) ,log(c6) from {dbname}.sub1_bound") + self.check_result_auto_log( 2 , f"select c1, c2, c3 , c4, c5 ,c6 from {dbname}.sub1_bound ", f"select log(c1,2), log(c2,2) ,log(c3,2), log(c4,2), log(c5,2) ,log(c6,2) from {dbname}.sub1_bound") + self.check_result_auto_log( 10 , f"select c1, c2, c3 , c4, c5 ,c6 from {dbname}.sub1_bound ", f"select log(c1,10), log(c2,10) 
,log(c3,10), log(c4,10), log(c5,10) ,log(c6,10) from {dbname}.sub1_bound") - self.check_result_auto_log2( f"select c1, c2, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ", f"select log(c1,2), log(c2,2) ,log(c3,2), log(c3,2), log(c2,2) ,log(c1,2) from {dbname}.sub1_bound") - self.check_result_auto_log( f"select c1, c2, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ", f"select log(c1), log(c2) ,log(c3), log(c3), log(c2) ,log(c1) from {dbname}.sub1_bound") + self.check_result_auto_log( 2 , f"select c1, c2, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ", f"select log(c1,2), log(c2,2) ,log(c3,2), log(c3,2), log(c2,2) ,log(c1,2) from {dbname}.sub1_bound") + self.check_result_auto_log( None , f"select c1, c2, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ", f"select log(c1), log(c2) ,log(c3), log(c3), log(c2) ,log(c1) from {dbname}.sub1_bound") - self.check_result_auto_log2(f"select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from {dbname}.sub1_bound" , f"select log(abs(c1) ,2) from {dbname}.sub1_bound" ) + self.check_result_auto_log(2 , f"select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from {dbname}.sub1_bound" , f"select log(abs(c1) ,2) from {dbname}.sub1_bound" ) # check basic elem for table per row tdSql.query(f"select log(abs(c1),2) ,log(abs(c2),2) , log(abs(c3),2) , log(abs(c4),2), log(abs(c5),2), log(abs(c6),2) from {dbname}.sub1_bound ") @@ -647,15 +563,15 @@ class TDTestCase: def support_super_table_test(self, dbname="db"): - self.check_result_auto_log2( f"select c5 from {dbname}.stb1 order by ts " , f"select log(c5,2) from {dbname}.stb1 order by ts" ) - self.check_result_auto_log2( f"select c5 from {dbname}.stb1 order by tbname " , f"select log(c5,2) from {dbname}.stb1 order by tbname" ) - self.check_result_auto_log2( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select log(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" ) - self.check_result_auto_log2( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , 
f"select log(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" ) + self.check_result_auto_log( 2 , f"select c5 from {dbname}.stb1 order by ts " , f"select log(c5,2) from {dbname}.stb1 order by ts" ) + self.check_result_auto_log( 2 ,f"select c5 from {dbname}.stb1 order by tbname " , f"select log(c5,2) from {dbname}.stb1 order by tbname" ) + self.check_result_auto_log( 2 ,f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select log(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" ) + self.check_result_auto_log( 2 , f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select log(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" ) - self.check_result_auto_log2( f"select t1,c5 from {dbname}.stb1 order by ts " , f"select log(t1,2), log(c5,2) from {dbname}.stb1 order by ts" ) - self.check_result_auto_log2( f"select t1,c5 from {dbname}.stb1 order by tbname " , f"select log(t1,2) ,log(c5,2) from {dbname}.stb1 order by tbname" ) - self.check_result_auto_log2( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select log(t1,2) ,log(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" ) - self.check_result_auto_log2( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select log(t1,2) , log(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" ) + self.check_result_auto_log( 2 , f"select t1,c5 from {dbname}.stb1 order by ts " , f"select log(t1,2), log(c5,2) from {dbname}.stb1 order by ts" ) + self.check_result_auto_log( 2 , f"select t1,c5 from {dbname}.stb1 order by tbname " , f"select log(t1,2) ,log(c5,2) from {dbname}.stb1 order by tbname" ) + self.check_result_auto_log( 2 , f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select log(t1,2) ,log(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" ) + self.check_result_auto_log( 2 ,f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select log(t1,2) , log(c5,2) from {dbname}.stb1 where c1 > 0 order by 
tbname" ) def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring tdSql.prepare() diff --git a/tests/system-test/2-query/lower.py b/tests/system-test/2-query/lower.py index 0917fb63fc..0e33e3834e 100644 --- a/tests/system-test/2-query/lower.py +++ b/tests/system-test/2-query/lower.py @@ -96,16 +96,16 @@ class TDTestCase: return sqls - def __test_current(self): + def __test_current(self, dbname="db"): tdLog.printNoPrefix("==========current sql condition check , must return query ok==========") - tbname = ["ct1", "ct2", "ct4", "t1", "stb1"] + tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.t1", f"{dbname}.stb1"] for tb in tbname: self.__lower_current_check(tb) tdLog.printNoPrefix(f"==========current sql condition check in {tb} over==========") - def __test_error(self): + def __test_error(self, dbname="db"): tdLog.printNoPrefix("==========err sql condition check , must return error==========") - tbname = ["ct1", "ct2", "ct4", "t1", "stb1"] + tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.t1", f"{dbname}.stb1"] for tb in tbname: for errsql in self.__lower_err_check(tb): @@ -113,22 +113,20 @@ class TDTestCase: tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========") - def all_test(self): - self.__test_current() - self.__test_error() + def all_test(self, dbname="db"): + self.__test_current(dbname) + self.__test_error(dbname) - - def __create_tb(self): - tdSql.prepare() + def __create_tb(self, dbname="db"): tdLog.printNoPrefix("==========step1:create table") - create_stb_sql = f'''create table stb1( + create_stb_sql = f'''create table {dbname}.stb1( ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp - ) tags (t1 int) + ) tags (tag1 int) ''' - create_ntb_sql = f'''create table t1( + create_ntb_sql = f'''create table 
{dbname}.t1( ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp @@ -138,78 +136,78 @@ class TDTestCase: tdSql.execute(create_ntb_sql) for i in range(4): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )') - def __insert_data(self, rows): + def __insert_data(self, rows, dbname="db"): now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000) for i in range(rows): tdSql.execute( - f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )" + f"insert into {dbname}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" ) tdSql.execute( - f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )" + f"insert into {dbname}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" ) tdSql.execute( - f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )" + f"insert into {dbname}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" ) tdSql.execute( - f'''insert into ct1 values - ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 
'binary0', 'nchar0', { now_time + 8 } ) - ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', { now_time + 9 } ) + f'''insert into {dbname}.ct1 values + ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } ) + ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } ) ''' ) tdSql.execute( - f'''insert into ct4 values + f'''insert into {dbname}.ct4 values ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) - ( { now_time - rows * 3888000000+ 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127, - { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_limit-1", { now_time - 86400000} + { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000} ) ( { now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126, - { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_limit-2", { now_time - 172800000} + { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000} ) ''' ) tdSql.execute( - f'''insert into ct2 values + f'''insert into {dbname}.ct2 values ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) - ( { now_time - rows * 3888000000+ 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time + 7776000000 }, 
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126, - { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_limit-1", { now_time - 86400000 } + { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } ) ( { now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127, - { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_limit-2", { now_time - 172800000 } + { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } ) ''' ) for i in range(rows): - insert_data = f'''insert into t1 values + insert_data = f'''insert into {dbname}.t1 values ( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2}, - "binary_{i}", "nchar_{i}", { now_time - 1000 * i } ) + "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } ) ''' tdSql.execute(insert_data) tdSql.execute( - f'''insert into t1 values + f'''insert into {dbname}.t1 values ( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127, { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, - "binary_limit-1", "nchar_limit-1", { now_time - 86400000 } + "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } ) ( { now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126, { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, - 
"binary_limit-2", "nchar_limit-2", { now_time - 172800000 } + "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } ) ''' ) @@ -227,10 +225,7 @@ class TDTestCase: tdLog.printNoPrefix("==========step3:all check") self.all_test() - tdDnodes.stop(1) - tdDnodes.start(1) - - tdSql.execute("use db") + tdSql.execute("flush database db") tdLog.printNoPrefix("==========step4:after wal, all check again ") self.all_test() diff --git a/tests/system-test/2-query/mavg.py b/tests/system-test/2-query/mavg.py index 0995dfc6ff..b52217af9a 100644 --- a/tests/system-test/2-query/mavg.py +++ b/tests/system-test/2-query/mavg.py @@ -307,7 +307,7 @@ class TDTestCase: pass - def mavg_current_query(self) : + def mavg_current_query(self, dbname="db") : # table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16) @@ -325,17 +325,17 @@ class TDTestCase: case6 = {"col": "c9"} self.checkmavg(**case6) - # # case7~8: nested query - # case7 = {"table_expr": f"(select c1 from {dbname}.stb1)"} - # self.checkmavg(**case7) - # case8 = {"table_expr": f"(select mavg(c1, 1) c1 from {dbname}.stb1 group by tbname)"} + # case7~8: nested query + case7 = {"table_expr": f"(select c1 from {dbname}.stb1)"} + self.checkmavg(**case7) + # case8 = {"table_expr": f"(select _c0, mavg(c1, 1) c1 from {dbname}.stb1 group by tbname)"} # self.checkmavg(**case8) # case9~10: mix with tbname/ts/tag/col - # case9 = {"alias": ", tbname"} - # self.checkmavg(**case9) - # case10 = {"alias": ", _c0"} - # self.checkmavg(**case10) + case9 = {"alias": ", tbname"} + self.checkmavg(**case9) + case10 = {"alias": ", _c0"} + self.checkmavg(**case10) # case11 = {"alias": ", st1"} # self.checkmavg(**case11) # case12 = {"alias": ", c1"} @@ -356,7 +356,7 @@ class TDTestCase: # case17: only support normal table join case17 = { "col": "t1.c1", - "table_expr": "t1, t2", + "table_expr": f"{dbname}.t1 t1, {dbname}.t2 t2", "condition": "where t1.ts=t2.ts" 
} self.checkmavg(**case17) @@ -367,14 +367,14 @@ class TDTestCase: # } # self.checkmavg(**case19) - # case20~21: with order by + # # case20~21: with order by # case20 = {"condition": "order by ts"} # self.checkmavg(**case20) - #case21 = { - # "table_expr": f"{dbname}.stb1", - # "condition": "group by tbname order by tbname" - #} - #self.checkmavg(**case21) + case21 = { + "table_expr": f"{dbname}.stb1", + "condition": "group by tbname order by tbname" + } + self.checkmavg(**case21) # # case22: with union # case22 = { @@ -398,7 +398,7 @@ class TDTestCase: pass - def mavg_error_query(self) -> None : + def mavg_error_query(self, dbname="db") -> None : # unusual test # form test @@ -419,9 +419,9 @@ class TDTestCase: err8 = {"table_expr": ""} self.checkmavg(**err8) # no table_expr - # err9 = {"col": "st1"} + err9 = {"col": "st1"} # self.checkmavg(**err9) # col: tag - # err10 = {"col": 1} + err10 = {"col": 1} # self.checkmavg(**err10) # col: value err11 = {"col": "NULL"} self.checkmavg(**err11) # col: NULL @@ -496,7 +496,7 @@ class TDTestCase: # "condition": "where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts" # } # self.checkmavg(**err44) # stb join - tdSql.query("select mavg( stb1.c1 , 1 ) from stb1, stb2 where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts;") + tdSql.query(f"select mavg( stb1.c1 , 1 ) from {dbname}.stb1 stb1, {dbname}.stb2 stb2 where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts;") err45 = { "condition": "where ts>0 and ts < now interval(1h) fill(next)" } diff --git a/tests/system-test/2-query/nestedQuery_str.py b/tests/system-test/2-query/nestedQuery_str.py index 0d40ef8147..931ff873dc 100755 --- a/tests/system-test/2-query/nestedQuery_str.py +++ b/tests/system-test/2-query/nestedQuery_str.py @@ -24,9 +24,6 @@ from util.dnodes import tdDnodes from util.dnodes import * class TDTestCase: - updatecfgDict = {'maxSQLLength':1048576,'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , - 
"jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143, - "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143} def init(self, conn, logSql): tdLog.debug("start to execute %s" % __file__) diff --git a/tests/system-test/2-query/sin.py b/tests/system-test/2-query/sin.py index ae5e070a47..a1ba335487 100644 --- a/tests/system-test/2-query/sin.py +++ b/tests/system-test/2-query/sin.py @@ -67,6 +67,7 @@ class TDTestCase: def check_result_auto_sin(self ,origin_query , pow_query): pow_result = tdSql.getResult(pow_query) + origin_result = tdSql.getResult(origin_query) auto_result =[] @@ -80,23 +81,11 @@ class TDTestCase: elem = math.sin(elem) row_check.append(elem) auto_result.append(row_check) - - check_status = True - + tdSql.query(pow_query) for row_index , row in enumerate(pow_result): for col_index , elem in enumerate(row): - if auto_result[row_index][col_index] is None and elem: - check_status = False - elif auto_result[row_index][col_index] is not None and (auto_result[row_index][col_index] - elem > 0.00000001): - print("====,auto_result[row_index][col_index]:",auto_result[row_index][col_index], "elem:", elem) - check_status = False - else: - pass - if not check_status: - tdLog.notice("sin function value has not as expected , sql is \"%s\" "%pow_query ) - sys.exit(1) - else: - tdLog.info("sin value check pass , it work as expected ,sql is \"%s\" "%pow_query ) + tdSql.checkData(row_index ,col_index ,auto_result[row_index][col_index]) + def test_errors(self, dbname="db"): error_sql_lists = [ @@ -393,7 +382,7 @@ class TDTestCase: tdSql.checkData(0,4,-0.100000000) tdSql.checkData(0,5,0.000000000) - def check_boundary_values(self, dbname="db"): + def check_boundary_values(self, dbname="testdb"): PI=3.1415926 @@ -418,7 +407,7 @@ class TDTestCase: tdSql.error( f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 
32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) - self.check_result_auto_sin( f"select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from {dbname}.sub1_bound ", f"select sin(abs(c1)), sin(abs(c2)) ,sin(abs(c3)), sin(abs(c4)), sin(abs(c5)) from {dbname}.sub1_bound") + self.check_result_auto_sin( f"select abs(c1), abs(c2), abs(c3) , abs(c4) from {dbname}.sub1_bound ", f"select sin(abs(c1)), sin(abs(c2)) ,sin(abs(c3)), sin(abs(c4)) from {dbname}.sub1_bound") self.check_result_auto_sin( f"select c1, c2, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ", f"select sin(c1), sin(c2) ,sin(c3), sin(c3), sin(c2) ,sin(c1) from {dbname}.sub1_bound") @@ -449,24 +438,21 @@ class TDTestCase: tdSql.checkData(0,2,math.sin(32767.000000000)) tdSql.checkData(0,3,math.sin(63.500000000)) - tdSql.execute("create stable {dbname}.st (ts timestamp, num1 float, num2 double) tags (t1 int);") + tdSql.execute(f"create stable {dbname}.st (ts timestamp, num1 float, num2 double) tags (t1 int);") tdSql.execute(f'create table {dbname}.tb1 using {dbname}.st tags (1)') tdSql.execute(f'create table {dbname}.tb2 using {dbname}.st tags (2)') tdSql.execute(f'create table {dbname}.tb3 using {dbname}.st tags (3)') - tdSql.execute('insert into {dbname}.tb1 values (now()-40s, {}, {})'.format(PI/2 ,PI/2 )) - tdSql.execute('insert into {dbname}.tb1 values (now()-30s, {}, {})'.format(PI ,PI )) - tdSql.execute('insert into {dbname}.tb1 values (now()-20s, {}, {})'.format(PI*1.5 ,PI*1.5)) - tdSql.execute('insert into {dbname}.tb1 values (now()-10s, {}, {})'.format(PI*2 ,PI*2)) - tdSql.execute('insert into {dbname}.tb1 values (now(), {}, {})'.format(PI*2.5 ,PI*2.5)) + tdSql.execute(f'insert into {dbname}.tb1 values (now()-40s, {PI/2}, {PI/2})') + tdSql.execute(f'insert into {dbname}.tb1 values (now()-30s, {PI}, {PI})') + tdSql.execute(f'insert into {dbname}.tb1 values (now()-20s, {PI*1.5}, {PI*1.5})') + tdSql.execute(f'insert into {dbname}.tb1 values (now()-10s, {PI*2}, {PI*2})') + 
tdSql.execute(f'insert into {dbname}.tb1 values (now(), {PI*2.5}, {PI*2.5})') - tdSql.execute('insert into {dbname}.tb2 values (now()-40s, {}, {})'.format(PI/2 ,PI/2 )) - tdSql.execute('insert into {dbname}.tb2 values (now()-30s, {}, {})'.format(PI ,PI )) - tdSql.execute('insert into {dbname}.tb2 values (now()-20s, {}, {})'.format(PI*1.5 ,PI*1.5)) - tdSql.execute('insert into {dbname}.tb2 values (now()-10s, {}, {})'.format(PI*2 ,PI*2)) - tdSql.execute('insert into {dbname}.tb2 values (now(), {}, {})'.format(PI*2.5 ,PI*2.5)) - - for i in range(100): - tdSql.execute('insert into {dbname}.tb3 values (now()+{}s, {}, {})'.format(i,PI*(5+i)/2 ,PI*(5+i)/2)) + tdSql.execute(f'insert into {dbname}.tb2 values (now()-40s, {PI/2}, {PI/2})') + tdSql.execute(f'insert into {dbname}.tb2 values (now()-30s, {PI}, {PI})') + tdSql.execute(f'insert into {dbname}.tb2 values (now()-20s, {PI*1.5}, {PI*1.5})') + tdSql.execute(f'insert into {dbname}.tb2 values (now()-10s, {PI*2}, {PI*2})') + tdSql.execute(f'insert into {dbname}.tb2 values (now(), {PI*2.5}, {PI*2.5})') self.check_result_auto_sin(f"select num1,num2 from {dbname}.tb3;" , f"select sin(num1),sin(num2) from {dbname}.tb3") @@ -500,19 +486,20 @@ class TDTestCase: self.basic_sin_function() - tdLog.printNoPrefix("==========step5: big number sin query ============") - - self.test_big_number() - - - tdLog.printNoPrefix("==========step6: sin boundary query ============") - - self.check_boundary_values() - - tdLog.printNoPrefix("==========step7: sin filter query ============") + tdLog.printNoPrefix("==========step5: sin filter query ============") self.abs_func_filter() + tdLog.printNoPrefix("==========step6: big number sin query ============") + + self.test_big_number() + + + tdLog.printNoPrefix("==========step7: sin boundary query ============") + + self.check_boundary_values() + + tdLog.printNoPrefix("==========step8: check sin result of stable query ============") self.support_super_table_test() diff --git 
a/tests/system-test/2-query/sqrt.py b/tests/system-test/2-query/sqrt.py index 4401a23dbf..9597375885 100644 --- a/tests/system-test/2-query/sqrt.py +++ b/tests/system-test/2-query/sqrt.py @@ -83,21 +83,11 @@ class TDTestCase: row_check.append(elem) auto_result.append(row_check) - check_status = True - + tdSql.query(pow_query) for row_index , row in enumerate(pow_result): for col_index , elem in enumerate(row): - if auto_result[row_index][col_index] == None and not (auto_result[row_index][col_index] == None and elem == None): - check_status = False - elif auto_result[row_index][col_index] != None and (auto_result[row_index][col_index] - elem > 0.00000001): - check_status = False - else: - pass - if not check_status: - tdLog.notice("sqrt function value has not as expected , sql is \"%s\" "%pow_query ) - sys.exit(1) - else: - tdLog.info("sqrt value check pass , it work as expected ,sql is \"%s\" "%pow_query ) + tdSql.checkData(row_index ,col_index ,auto_result[row_index][col_index]) + def test_errors(self, dbname="db"): error_sql_lists = [ @@ -452,19 +442,19 @@ class TDTestCase: ) tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )') tdSql.execute( - f"insert into sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.execute( - f"insert into sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.execute( - f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 
'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.execute( - f"insert into sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.error( - f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) self.check_result_auto_sqrt( f"select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from {dbname}.sub1_bound ", f"select sqrt(abs(c1)), sqrt(abs(c2)) ,sqrt(abs(c3)), sqrt(abs(c4)), sqrt(abs(c5)) from {dbname}.sub1_bound") diff --git a/tests/system-test/2-query/tan.py b/tests/system-test/2-query/tan.py index bb696757d2..683cee37ff 100644 --- a/tests/system-test/2-query/tan.py +++ b/tests/system-test/2-query/tan.py @@ -59,7 +59,7 @@ class TDTestCase: ( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a ) ( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" ) ( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" ) - ( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" ) + ( '2022-12-31 01:01:36.000', 9, -99999, -999, -99, -9.99, -99999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" ) ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 
NULL, NULL ) ''' ) @@ -80,22 +80,10 @@ class TDTestCase: elem = math.tan(elem) row_check.append(elem) auto_result.append(row_check) - - check_status = True - + tdSql.query(pow_query) for row_index , row in enumerate(pow_result): for col_index , elem in enumerate(row): - if auto_result[row_index][col_index] == None and not (auto_result[row_index][col_index] == None and elem == None): - check_status = False - elif auto_result[row_index][col_index] != None and (auto_result[row_index][col_index] - elem > 0.00000001): - check_status = False - else: - pass - if not check_status: - tdLog.notice("tan function value has not as expected , sql is \"%s\" "%pow_query ) - sys.exit(1) - else: - tdLog.info("tan value check pass , it work as expected ,sql is \"%s\" "%pow_query ) + tdSql.checkData(row_index , col_index ,auto_result[row_index][col_index] ) def test_errors(self, dbname="db"): error_sql_lists = [ @@ -244,7 +232,7 @@ class TDTestCase: tdSql.checkData(3, 4, 33) tdSql.checkData(5, 5, None) - self.check_result_auto_tan( f"select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from {dbname}.t1", f"select tan(abs(c1)), tan(abs(c2)) ,tan(abs(c3)), tan(abs(c4)), tan(abs(c5)) from {dbname}.t1") + self.check_result_auto_tan( f"select abs(c1), abs(c2), abs(c3) , abs(c4) from {dbname}.t1", f"select tan(abs(c1)), tan(abs(c2)) ,tan(abs(c3)), tan(abs(c4)) from {dbname}.t1") # used for sub table tdSql.query(f"select c2 ,tan(c2) from {dbname}.ct1") @@ -402,21 +390,21 @@ class TDTestCase: ) tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )') tdSql.execute( - f"insert into sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.execute( - f"insert into sub1_bound values ( now()-1s, -2147483647, 
-9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.execute( - f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.execute( - f"insert into sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.error( - f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) - self.check_result_auto_tan( f"select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from {dbname}.sub1_bound ", f"select tan(abs(c1)), tan(abs(c2)) ,tan(abs(c3)), tan(abs(c4)), tan(abs(c5)) from {dbname}.sub1_bound") + self.check_result_auto_tan( f"select abs(c1), abs(c2), abs(c3) , abs(c4) from {dbname}.sub1_bound ", f"select tan(abs(c1)), tan(abs(c2)) ,tan(abs(c3)), tan(abs(c4)) from {dbname}.sub1_bound") self.check_result_auto_tan( f"select c1, c2, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ", f"select tan(c1), tan(c2) ,tan(c3), tan(c3), tan(c2) ,tan(c1) from {dbname}.sub1_bound") @@ -463,9 +451,6 @@ class TDTestCase: tdSql.execute(f'insert into {dbname}.tb2 values (now()-10s, {PI*2}, {PI*2})') tdSql.execute(f'insert into 
{dbname}.tb2 values (now(), {PI*2.5}, {PI*2.5})') - for i in range(100): - tdSql.execute(f'insert into tb3 values (now()+{i}s, {PI*(5+i)/2}, {PI*(5+i)/2})') - self.check_result_auto_tan(f"select num1,num2 from {dbname}.tb3;" , f"select tan(num1),tan(num2) from {dbname}.tb3") def support_super_table_test(self, dbname="db"): diff --git a/tests/system-test/2-query/timetruncate.py b/tests/system-test/2-query/timetruncate.py index 3551d8ee2c..357d9fa957 100644 --- a/tests/system-test/2-query/timetruncate.py +++ b/tests/system-test/2-query/timetruncate.py @@ -25,6 +25,7 @@ class TDTestCase: self.ntbname = f'{self.dbname}.ntb' self.stbname = f'{self.dbname}.stb' self.ctbname = f'{self.dbname}.ctb' + def check_ms_timestamp(self,unit,date_time): if unit.lower() == '1a': for i in range(len(self.ts_str)): @@ -45,11 +46,12 @@ class TDTestCase: elif unit.lower() == '1d': for i in range(len(self.ts_str)): ts_result = self.get_time.get_ms_timestamp(str(tdSql.queryResult[i][0])) - tdSql.checkEqual(ts_result,int(date_time[i]/1000/60/60/24)*24*60*60*1000) + tdSql.checkEqual(ts_result,int(date_time[i]/1000/60/60/24)*24*60*60*1000) elif unit.lower() == '1w': for i in range(len(self.ts_str)): ts_result = self.get_time.get_ms_timestamp(str(tdSql.queryResult[i][0])) tdSql.checkEqual(ts_result,int(date_time[i]/1000/60/60/24/7)*7*24*60*60*1000) + def check_us_timestamp(self,unit,date_time): if unit.lower() == '1u': for i in range(len(self.ts_str)): @@ -74,47 +76,58 @@ class TDTestCase: elif unit.lower() == '1d': for i in range(len(self.ts_str)): ts_result = self.get_time.get_us_timestamp(str(tdSql.queryResult[i][0])) - tdSql.checkEqual(ts_result,int(date_time[i]/1000/1000/60/60/24)*24*60*60*1000*1000 ) + tdSql.checkEqual(ts_result,int(date_time[i]/1000/1000/60/60/24)*24*60*60*1000*1000 ) elif unit.lower() == '1w': for i in range(len(self.ts_str)): ts_result = self.get_time.get_us_timestamp(str(tdSql.queryResult[i][0])) 
tdSql.checkEqual(ts_result,int(date_time[i]/1000/1000/60/60/24/7)*7*24*60*60*1000*1000) - def check_ns_timestamp(self,unit,date_time): + + def check_ns_timestamp(self, unit, date_time:list): if unit.lower() == '1b': for i in range(len(self.ts_str)): - tdSql.checkEqual(tdSql.queryResult[i][0],int(date_time[i])) + ts_result = self.get_time.get_ns_timestamp(str(tdSql.queryResult[i][0])) + tdSql.checkEqual(ts_result,int(date_time[i])) elif unit.lower() == '1u': for i in range(len(self.ts_str)): - tdSql.checkEqual(tdSql.queryResult[i][0],int(date_time[i]*1000/1000/1000)*1000) + ts_result = self.get_time.get_ns_timestamp(str(tdSql.queryResult[i][0])) + tdSql.checkEqual(ts_result,int(date_time[i]*1000/1000/1000)*1000) elif unit.lower() == '1a': for i in range(len(self.ts_str)): - tdSql.checkEqual(tdSql.queryResult[i][0],int(date_time[i]*1000/1000/1000/1000)*1000*1000) + ts_result = self.get_time.get_ns_timestamp(str(tdSql.queryResult[i][0])) + tdSql.checkEqual(ts_result,int(date_time[i]*1000/1000/1000/1000)*1000*1000) elif unit.lower() == '1s': for i in range(len(self.ts_str)): - tdSql.checkEqual(tdSql.queryResult[i][0],int(date_time[i]*1000/1000/1000/1000/1000)*1000*1000*1000) + ts_result = self.get_time.get_ns_timestamp(str(tdSql.queryResult[i][0])) + tdSql.checkEqual(ts_result,int(date_time[i]*1000/1000/1000/1000/1000)*1000*1000*1000) elif unit.lower() == '1m': for i in range(len(self.ts_str)): - tdSql.checkEqual(tdSql.queryResult[i][0],int(date_time[i]*1000/1000/1000/1000/1000/60)*60*1000*1000*1000) + ts_result = self.get_time.get_ns_timestamp(str(tdSql.queryResult[i][0])) + tdSql.checkEqual(ts_result,int(date_time[i]*1000/1000/1000/1000/1000/60)*60*1000*1000*1000) elif unit.lower() == '1h': for i in range(len(self.ts_str)): - tdSql.checkEqual(tdSql.queryResult[i][0],int(date_time[i]*1000/1000/1000/1000/1000/60/60)*60*60*1000*1000*1000 ) + ts_result = self.get_time.get_ns_timestamp(str(tdSql.queryResult[i][0])) + 
tdSql.checkEqual(ts_result,int(date_time[i]*1000/1000/1000/1000/1000/60/60)*60*60*1000*1000*1000 ) elif unit.lower() == '1d': for i in range(len(self.ts_str)): - tdSql.checkEqual(tdSql.queryResult[i][0],int(date_time[i]*1000/1000/1000/1000/1000/60/60/24)*24*60*60*1000*1000*1000 ) + ts_result = self.get_time.get_ns_timestamp(str(tdSql.queryResult[i][0])) + tdSql.checkEqual(ts_result,int(date_time[i]*1000/1000/1000/1000/1000/60/60/24)*24*60*60*1000*1000*1000 ) elif unit.lower() == '1w': for i in range(len(self.ts_str)): - tdSql.checkEqual(tdSql.queryResult[i][0],int(date_time[i]*1000/1000/1000/1000/1000/60/60/24/7)*7*24*60*60*1000*1000*1000) + ts_result = self.get_time.get_ns_timestamp(str(tdSql.queryResult[i][0])) + tdSql.checkEqual(ts_result,int(date_time[i]*1000/1000/1000/1000/1000/60/60/24/7)*7*24*60*60*1000*1000*1000) + def check_tb_type(self,unit,tb_type): - if tb_type.lower() == 'ntb': + if tb_type.lower() == 'ntb': tdSql.query(f'select timetruncate(ts,{unit}) from {self.ntbname}') elif tb_type.lower() == 'ctb': tdSql.query(f'select timetruncate(ts,{unit}) from {self.ctbname}') elif tb_type.lower() == 'stb': tdSql.query(f'select timetruncate(ts,{unit}) from {self.stbname}') + def data_check(self,date_time,precision,tb_type): for unit in self.time_unit: if (unit.lower() == '1u' and precision.lower() == 'ms') or (unit.lower() == '1b' and precision.lower() == 'us') or (unit.lower() == '1b' and precision.lower() == 'ms'): - if tb_type.lower() == 'ntb': + if tb_type.lower() == 'ntb': tdSql.error(f'select timetruncate(ts,{unit}) from {self.ntbname}') elif tb_type.lower() == 'ctb': tdSql.error(f'select timetruncate(ts,{unit}) from {self.ctbname}') @@ -139,16 +152,19 @@ class TDTestCase: tdSql.error(f'select timetruncate(ts,{unit}) from {self.ctbname}') elif tb_type.lower() == 'stb': tdSql.error(f'select timetruncate(ts,{unit}) from {self.stbname}') + def function_check_ntb(self): for precision in self.db_param_precision: tdSql.execute(f'drop database if exists 
{self.dbname}') tdSql.execute(f'create database {self.dbname} precision "{precision}"') + tdLog.info(f"=====now is in a {precision} database=====") tdSql.execute(f'use {self.dbname}') tdSql.execute(f'create table {self.ntbname} (ts timestamp,c0 int)') for ts in self.ts_str: tdSql.execute(f'insert into {self.ntbname} values("{ts}",1)') date_time = self.get_time.time_transform(self.ts_str,precision) self.data_check(date_time,precision,'ntb') + def function_check_stb(self): for precision in self.db_param_precision: tdSql.execute(f'drop database if exists {self.dbname}') @@ -161,9 +177,11 @@ class TDTestCase: date_time = self.get_time.time_transform(self.ts_str,precision) self.data_check(date_time,precision,'ctb') self.data_check(date_time,precision,'stb') + def run(self): self.function_check_ntb() self.function_check_stb() + def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) diff --git a/tests/system-test/2-query/tsbsQuery.py b/tests/system-test/2-query/tsbsQuery.py index 617f7e7464..04a80a74ad 100644 --- a/tests/system-test/2-query/tsbsQuery.py +++ b/tests/system-test/2-query/tsbsQuery.py @@ -22,7 +22,7 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), True) + tdSql.init(conn.cursor(), False) def create_ctable(self,tsql=None, dbName='db',stbName='stb',ctbPrefix='ctb',ctbNum=1): tsql.execute("use %s" %dbName) @@ -32,16 +32,16 @@ class TDTestCase: for i in range(ctbNum): tagValue = 'beijing' if (i % 10 == 0): - sql += " %s%d using %s (name,fleet,driver,device_version,load_capacity,fuel_capacity,nominal_fuel_consumption) tags('truck_%d', 'South%d','Trish%d','v2.%d', 1500+%d*20, 150+%d*2, 5+%d)"%(ctbPrefix,i,stbName,i,i,i,i,(1500+i*20),(150+i*2),(5+i)) + sql += f" {dbName}.%s%d using %s (name,fleet,driver,device_version,load_capacity,fuel_capacity,nominal_fuel_consumption) tags('truck_%d', 'South%d','Trish%d','v2.%d', 1500+%d*20, 150+%d*2, 
5+%d)"%(ctbPrefix,i,stbName,i,i,i,i,(1500+i*20),(150+i*2),(5+i)) else: model = 'H-%d'%i - sql += " %s%d using %s tags('truck_%d', 'South%d','Trish%d','%s','v2.%d', %d, %d,%d)"%(ctbPrefix,i,stbName,i,i,i,model,i,(1500+i*20),(150+i*2),(5+i)) + sql += f" {dbName}.%s%d using %s tags('truck_%d', 'South%d','Trish%d','%s','v2.%d', %d, %d,%d)"%(ctbPrefix,i,stbName,i,i,i,model,i,(1500+i*20),(150+i*2),(5+i)) if (i > 0) and (i%1000 == 0): tsql.execute(sql) sql = pre_create if sql != pre_create: tsql.execute(sql) - + tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName)) return @@ -54,32 +54,32 @@ class TDTestCase: startTs = int(round(t * 1000)) for i in range(ctbNum): - sql += " %s%d values "%(ctbPrefix,i) + sql += f" {dbName}.%s%d values "%(ctbPrefix,i) for j in range(rowsPerTbl): if(ctbPrefix=="rct"): sql += f"({startTs+j*60000}, {80+j}, {90+j}, {85+j}, {30+j*10}, {1.2*j}, {221+j*2}, {20+j*0.2}) " elif ( ctbPrefix=="dct"): status= random.randint(0,1) - sql += f"( {startTs+j*60000}, {1+j*0.1},{1400+j*15}, {status} ) " + sql += f"( {startTs+j*60000}, {1+j*0.1},{1400+j*15}, {status} ) " # tdLog.debug("1insert sql:%s"%sql) if (j > 0) and ((j%batchNum == 0) or (j == rowsPerTbl - 1)): # tdLog.debug("2insert sql:%s"%sql) tsql.execute(sql) if j < rowsPerTbl - 1: - sql = "insert into %s%d values " %(ctbPrefix,i) + sql = f"insert into {dbName}.%s%d values " %(ctbPrefix,i) else: sql = "insert into " if sql != pre_insert: # tdLog.debug("3insert sql:%s"%sql) - tsql.execute(sql) + tsql.execute(sql) tdLog.debug("insert data ............ 
[OK]") return def prepareData(self): dbname="db_tsbs" - stabname1="readings" - stabname2="diagnostics" - ctbnamePre1="rct" + stabname1=f"{dbname}.readings" + stabname2=f"{dbname}.diagnostics" + ctbnamePre1="rct" ctbnamePre2="dct" ctbNums=50 self.ctbNums=ctbNums @@ -107,7 +107,7 @@ class TDTestCase: # tdSql.execute(f"create table dct{i} using diagnostics (name,fleet,driver,model,device_version) tags ('truck_{i}','South{i}','Trish{i}',NULL ,'v2.3')") # else: # tdSql.execute(f"create table dct{i} using diagnostics (name,fleet,driver,model,device_version) tags ('truck_{i}','South{i}','Trish{i}','H-{i}','v2.3')") - # for j in range(ctbNums): + # for j in range(ctbNums): # for i in range(rowNUms): # tdSql.execute( # f"insert into rct{j} values ( {ts+i*60000}, {80+i}, {90+i}, {85+i}, {30+i*10}, {1.2*i}, {221+i*2}, {20+i*0.2}, {1500+i*20}, {150+i*2},{5+i} )" @@ -133,106 +133,106 @@ class TDTestCase: # tdLog.info("avg value check pass , it work as expected ,sql is \"%s\" "%check_query ) - def tsbsIotQuery(self,insertinto=True): - + def tsbsIotQuery(self,insertinto=True, dbname="db_tsbs"): + tdSql.execute("use db_tsbs") - + # test interval and partition - tdSql.query(" SELECT avg(velocity) as mean_velocity ,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet; ") + tdSql.query(f"select avg(velocity) as mean_velocity ,name,driver,fleet from {dbname}.readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet; ") parRows=tdSql.queryRows - tdSql.query(" SELECT avg(velocity) as mean_velocity ,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet interval(10m); ") + tdSql.query(f"select avg(velocity) as mean_velocity ,name,driver,fleet from {dbname}.readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet interval(10m); ") tdSql.checkRows(parRows) - - - # test insert into + + + # test insert into if 
insertinto == True : - tdSql.execute("create table testsnode (ts timestamp, c1 float,c2 binary(30),c3 binary(30),c4 binary(30)) ;") - tdSql.query("insert into testsnode SELECT ts,avg(velocity) as mean_velocity,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet,ts interval(10m);") - - tdSql.query("insert into testsnode(ts,c1,c2,c3,c4) SELECT ts,avg(velocity) as mean_velocity,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet,ts interval(10m);") + tdSql.execute(f"create table {dbname}.testsnode (ts timestamp, c1 float,c2 binary(30),c3 binary(30),c4 binary(30)) ;") + tdSql.query(f"insert into {dbname}.testsnode SELECT ts,avg(velocity) as mean_velocity,name,driver,fleet from {dbname}.readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet,ts interval(10m);") + + tdSql.query(f"insert into {dbname}.testsnode(ts,c1,c2,c3,c4) SELECT ts,avg(velocity) as mean_velocity,name,driver,fleet from {dbname}.readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet,ts interval(10m);") # test paitition interval fill - tdSql.query("SELECT name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv FROM readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0) ;") + tdSql.query(f"select name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv from {dbname}.readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0) ;") # test partition interval limit (PRcore-TD-17410) - tdSql.query("select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity FROM readings partition BY name,driver,fleet interval (10m) limit 1);") + tdSql.query(f"select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity from {dbname}.readings 
partition BY name,driver,fleet interval (10m) limit 1);") tdSql.checkRows(self.ctbNums) # test partition interval Pseudo time-column - tdSql.query("SELECT count(ms1)/144 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;") + tdSql.query(f"select count(ms1)/144 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 from {dbname}.diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;") # 1 high-load: - tdSql.query("SELECT ts,name,driver,current_load,load_capacity FROM (SELECT last(ts) as ts,name,driver, current_load,load_capacity FROM diagnostics WHERE fleet = 'South' partition by name,driver) WHERE current_load>= (0.9 * load_capacity) partition by name ORDER BY name desc, ts DESC;") + tdSql.query(f"select ts,name,driver,current_load,load_capacity FROM (SELECT last(ts) as ts,name,driver, current_load,load_capacity from {dbname}.diagnostics WHERE fleet = 'South' partition by name,driver) WHERE current_load>= (0.9 * load_capacity) partition by name ORDER BY name desc, ts DESC;") - tdSql.query("SELECT ts,name,driver,current_load,load_capacity FROM (SELECT last(ts) as ts,name,driver, current_load,load_capacity FROM diagnostics WHERE fleet = 'South' partition by name,driver) WHERE current_load>= (0.9 * load_capacity) partition by name ORDER BY name ;") + tdSql.query(f"select ts,name,driver,current_load,load_capacity FROM (SELECT last(ts) as ts,name,driver, current_load,load_capacity from {dbname}.diagnostics WHERE fleet = 'South' partition by name,driver) WHERE current_load>= (0.9 * load_capacity) partition by name ORDER BY name ;") - # 2 stationary-trucks - tdSql.query("select name,driver from (SELECT 
name,driver,fleet ,avg(velocity) as mean_velocity FROM readings WHERE ts > '2016-01-01T15:07:21Z' AND ts <= '2016-01-01T16:17:21Z' partition BY name,driver,fleet interval(10m) LIMIT 1)") - tdSql.query("select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity FROM readings WHERE ts > '2016-01-01T15:07:21Z' AND ts <= '2016-01-01T16:17:21Z' partition BY name,driver,fleet interval(10m) LIMIT 1) WHERE fleet = 'West' AND mean_velocity < 1000 partition BY name") + # 2 stationary-trucks + tdSql.query(f"select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity from {dbname}.readings WHERE ts > '2016-01-01T15:07:21Z' AND ts <= '2016-01-01T16:17:21Z' partition BY name,driver,fleet interval(10m) LIMIT 1)") + tdSql.query(f"select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity from {dbname}.readings WHERE ts > '2016-01-01T15:07:21Z' AND ts <= '2016-01-01T16:17:21Z' partition BY name,driver,fleet interval(10m) LIMIT 1) WHERE fleet = 'West' AND mean_velocity < 1000 partition BY name") # 3 long-driving-sessions - tdSql.query("SELECT name,driver FROM(SELECT name,driver,count(*) AS ten_min FROM(SELECT _wstart as ts,name,driver,avg(velocity) as mean_velocity FROM readings where ts > '2016-01-01T00:00:34Z' AND ts <= '2016-01-01T04:00:34Z' partition BY name,driver interval(10m)) WHERE mean_velocity > 1 GROUP BY name,driver) WHERE ten_min > 22 ;") + tdSql.query(f"select name,driver FROM(SELECT name,driver,count(*) AS ten_min FROM(SELECT _wstart as ts,name,driver,avg(velocity) as mean_velocity from {dbname}.readings where ts > '2016-01-01T00:00:34Z' AND ts <= '2016-01-01T04:00:34Z' partition BY name,driver interval(10m)) WHERE mean_velocity > 1 GROUP BY name,driver) WHERE ten_min > 22 ;") #4 long-daily-sessions - tdSql.query("SELECT name,driver FROM(SELECT name,driver,count(*) AS ten_min FROM(SELECT name,driver,avg(velocity) as mean_velocity FROM readings WHERE fleet ='West' AND ts > '2016-01-01T12:31:37Z' AND ts 
<= '2016-01-05T12:31:37Z' partition BY name,driver interval(10m) ) WHERE mean_velocity > 1 GROUP BY name,driver) WHERE ten_min > 60") + tdSql.query(f"select name,driver FROM(SELECT name,driver,count(*) AS ten_min FROM(SELECT name,driver,avg(velocity) as mean_velocity from {dbname}.readings WHERE fleet ='West' AND ts > '2016-01-01T12:31:37Z' AND ts <= '2016-01-05T12:31:37Z' partition BY name,driver interval(10m) ) WHERE mean_velocity > 1 GROUP BY name,driver) WHERE ten_min > 60") # 5. avg-daily-driving-duration - tdSql.query("select _wstart as ts,fleet,name,driver,count(mv)/6 as hours_driven from ( select _wstart as ts,fleet,name,driver,avg(velocity) as mv from readings where ts > '2016-01-01T00:00:00Z' and ts < '2016-01-05T00:00:01Z' partition by fleet,name,driver interval(10m)) where ts > '2016-01-01T00:00:00Z' and ts < '2016-01-05T00:00:01Z' partition by fleet,name,driver interval(1d) ;") + tdSql.query(f"select _wstart as ts,fleet,name,driver,count(mv)/6 as hours_driven from ( select _wstart as ts,fleet,name,driver,avg(velocity) as mv from {dbname}.readings where ts > '2016-01-01T00:00:00Z' and ts < '2016-01-05T00:00:01Z' partition by fleet,name,driver interval(10m)) where ts > '2016-01-01T00:00:00Z' and ts < '2016-01-05T00:00:01Z' partition by fleet,name,driver interval(1d) ;") - # # 6. avg-daily-driving-session + # # 6. 
avg-daily-driving-session # #taosc core dumped - tdSql.query(" SELECT _wstart as ts,name,floor(avg(velocity)/5) AS mv FROM readings WHERE name is not null AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0);") - # tdSql.query("select name,diff(mv) AS difka FROM (SELECT ts,name,mv FROM (SELECT _wstart as ts,name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv FROM readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0))) group BY name ;") - # tdSql.query("SELECT _wstart,name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv FROM readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0)") + tdSql.query(f"select _wstart as ts,name,floor(avg(velocity)/5) AS mv from {dbname}.readings WHERE name is not null AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0);") + # tdSql.query(f"select name,diff(mv) AS difka FROM (SELECT ts,name,mv FROM (SELECT _wstart as ts,name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv from {dbname}.readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0))) group BY name ;") + # tdSql.query(f"select _wstart,name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv from {dbname}.readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0)") # 7. 
avg-load - tdSql.query("SELECT fleet, model,avg(ml) AS mean_load_percentage FROM (SELECT fleet, model,current_load/load_capacity AS ml FROM diagnostics partition BY name, fleet, model) partition BY fleet, model order by fleet ;") + tdSql.query(f"select fleet, model,avg(ml) AS mean_load_percentage FROM (SELECT fleet, model,current_load/load_capacity AS ml from {dbname}.diagnostics partition BY name, fleet, model) partition BY fleet, model order by fleet ;") - # 8. daily-activity - tdSql.query(" SELECT model,ms1 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) fill(value,0)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;") + # 8. daily-activity + tdSql.query(f"select model,ms1 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 from {dbname}.diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) fill(value,0)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;") - tdSql.query(" SELECT model,ms1 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) ) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;") + tdSql.query(f"select model,ms1 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 from {dbname}.diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) ) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;") - tdSql.query("SELECT _wstart,model,fleet,count(ms1)/144 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) 
fill(value,0)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1 partition by model, fleet interval(1d) ;") + tdSql.query(f"select _wstart,model,fleet,count(ms1)/144 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 from {dbname}.diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) fill(value,0)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1 partition by model, fleet interval(1d) ;") - tdSql.query("SELECT _wstart as ts,model,fleet,count(ms1)/144 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) ) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1 partition by model, fleet interval(1d) ;") + tdSql.query(f"select _wstart as ts,model,fleet,count(ms1)/144 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 from {dbname}.diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) ) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1 partition by model, fleet interval(1d) ;") # 9. 
breakdown-frequency # NULL ---count(NULL)=0 expect count(NULL)= 100 - tdSql.query("SELECT model,state_changed,count(state_changed) FROM (SELECT model,diff(broken_down) AS state_changed FROM (SELECT _wstart,model,cast(cast(floor(2*(sum(nzs)/count(nzs))) as bool) as int) AS broken_down FROM (SELECT ts,model, cast(cast(status as bool) as int) AS nzs FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' ) WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition BY model interval(10m)) partition BY model) where model is null partition BY model,state_changed ") + tdSql.query(f"select model,state_changed,count(state_changed) FROM (SELECT model,diff(broken_down) AS state_changed FROM (SELECT _wstart,model,cast(cast(floor(2*(sum(nzs)/count(nzs))) as bool) as int) AS broken_down FROM (SELECT ts,model, cast(cast(status as bool) as int) AS nzs from {dbname}.diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' ) WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition BY model interval(10m)) partition BY model) where model is null partition BY model,state_changed ") parRows=tdSql.queryRows assert parRows != 0 , "query result is wrong, query rows %d but expect > 0 " %parRows - tdSql.query(" SELECT model,state_changed,count(state_changed) FROM (SELECT model,diff(broken_down) AS state_changed FROM (SELECT _wstart,model,cast(cast(floor(2*(sum(nzs)/count(nzs))) as bool) as int) AS broken_down FROM (SELECT ts,model, cast(cast(status as bool) as int) AS nzs FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' ) WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition BY model interval(10m)) partition BY model) where state_changed =1 partition BY model,state_changed ;") - sql="select model,ctc from (SELECT model,count(state_changed) as ctc FROM (SELECT model,diff(broken_down) AS state_changed FROM (SELECT 
model,cast(cast(floor(2*(sum(nzs)/count(nzs))) as bool) as int) AS broken_down FROM (SELECT ts,model, cast(cast(status as bool) as int) AS nzs FROM diagnostics WHERE ts >= 1451606400000 AND ts < 1451952001000 ) WHERE ts >= 1451606400000 AND ts < 1451952001000 partition BY model interval(10m)) partition BY model) WHERE state_changed = 1 partition BY model )where model is null;" + tdSql.query(f"select model,state_changed,count(state_changed) FROM (SELECT model,diff(broken_down) AS state_changed FROM (SELECT _wstart,model,cast(cast(floor(2*(sum(nzs)/count(nzs))) as bool) as int) AS broken_down FROM (SELECT ts,model, cast(cast(status as bool) as int) AS nzs from {dbname}.diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' ) WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition BY model interval(10m)) partition BY model) where state_changed =1 partition BY model,state_changed ;") + sql=f"select model,ctc from (SELECT model,count(state_changed) as ctc FROM (SELECT model,diff(broken_down) AS state_changed FROM (SELECT model,cast(cast(floor(2*(sum(nzs)/count(nzs))) as bool) as int) AS broken_down FROM (SELECT ts,model, cast(cast(status as bool) as int) AS nzs from {dbname}.diagnostics WHERE ts >= 1451606400000 AND ts < 1451952001000 ) WHERE ts >= 1451606400000 AND ts < 1451952001000 partition BY model interval(10m)) partition BY model) WHERE state_changed = 1 partition BY model )where model is null;" # for i in range(2): # tdSql.query("%s"%sql) - # quertR1=tdSql.queryResult + # quertR1=tdSql.queryResult # for j in range(50): # tdSql.query("%s"%sql) # quertR2=tdSql.queryResult - # assert quertR1 == quertR2 , "%s != %s ,The results of multiple queries are different" %(quertR1,quertR2) + # assert quertR1 == quertR2 , "%s != %s ,The results of multiple queries are different" %(quertR1,quertR2) + - #it's already supported: # last-loc - tdSql.query("SELECT last_row(ts),latitude,longitude,name,driver FROM readings WHERE 
fleet='South' and name IS NOT NULL partition BY name,driver order by name ;") + tdSql.query(f"select last_row(ts),latitude,longitude,name,driver from {dbname}.readings WHERE fleet='South' and name IS NOT NULL partition BY name,driver order by name ;") #2. low-fuel - tdSql.query("SELECT last_row(ts),name,driver,fuel_state,driver FROM diagnostics WHERE fuel_state <= 0.1 AND fleet = 'South' and name IS NOT NULL GROUP BY name,driver order by name;") - + tdSql.query(f"select last_row(ts),name,driver,fuel_state,driver from {dbname}.diagnostics WHERE fuel_state <= 0.1 AND fleet = 'South' and name IS NOT NULL GROUP BY name,driver order by name;") + # 3. avg-vs-projected-fuel-consumption - tdSql.query("select avg(fuel_consumption) as avg_fuel_consumption,avg(nominal_fuel_consumption) as nominal_fuel_consumption from readings where velocity > 1 group by fleet") - - def run(self): + tdSql.query(f"select avg(fuel_consumption) as avg_fuel_consumption,avg(nominal_fuel_consumption) as nominal_fuel_consumption from {dbname}.readings where velocity > 1 group by fleet") + + def run(self): tdLog.printNoPrefix("==========step1:create database and table,insert data ==============") self.prepareData() self.tsbsIotQuery() diff --git a/tests/system-test/2-query/ttl_comment.py b/tests/system-test/2-query/ttl_comment.py index 33bd61b66c..c26393158c 100644 --- a/tests/system-test/2-query/ttl_comment.py +++ b/tests/system-test/2-query/ttl_comment.py @@ -26,20 +26,21 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug("start to execute %s" % __file__) - tdSql.init(conn.cursor(), logSql) + tdSql.init(conn.cursor(), False) def run(self): + dbname="db" tdSql.prepare() - tdSql.error("create table ttl_table1(ts timestamp, i int) ttl 1.1") - tdSql.error("create table ttl_table2(ts timestamp, i int) ttl 1e1") - tdSql.error("create table ttl_table3(ts timestamp, i int) ttl -1") + tdSql.error(f"create table {dbname}.ttl_table1(ts timestamp, i int) ttl 1.1") + tdSql.error(f"create table 
{dbname}.ttl_table2(ts timestamp, i int) ttl 1e1") + tdSql.error(f"create table {dbname}.ttl_table3(ts timestamp, i int) ttl -1") print("============== STEP 1 ===== test normal table") - tdSql.execute("create table normal_table1(ts timestamp, i int)") - tdSql.execute("create table normal_table2(ts timestamp, i int) comment '' ttl 3") - tdSql.execute("create table normal_table3(ts timestamp, i int) ttl 2100000000020 comment 'hello'") + tdSql.execute(f"create table {dbname}.normal_table1(ts timestamp, i int)") + tdSql.execute(f"create table {dbname}.normal_table2(ts timestamp, i int) comment '' ttl 3") + tdSql.execute(f"create table {dbname}.normal_table3(ts timestamp, i int) ttl 2100000000020 comment 'hello'") tdSql.query("select * from information_schema.ins_tables where table_name like 'normal_table1'") tdSql.checkData(0, 0, 'normal_table1') @@ -58,32 +59,32 @@ class TDTestCase: tdSql.checkData(0, 7, 2147483647) tdSql.checkData(0, 8, 'hello') - tdSql.execute("alter table normal_table1 comment 'nihao'") + tdSql.execute(f"alter table {dbname}.normal_table1 comment 'nihao'") tdSql.query("select * from information_schema.ins_tables where table_name like 'normal_table1'") tdSql.checkData(0, 0, 'normal_table1') tdSql.checkData(0, 8, 'nihao') - tdSql.execute("alter table normal_table1 comment ''") + tdSql.execute(f"alter table {dbname}.normal_table1 comment ''") tdSql.query("select * from information_schema.ins_tables where table_name like 'normal_table1'") tdSql.checkData(0, 0, 'normal_table1') tdSql.checkData(0, 8, '') - tdSql.execute("alter table normal_table2 comment 'fly'") + tdSql.execute(f"alter table {dbname}.normal_table2 comment 'fly'") tdSql.query("select * from information_schema.ins_tables where table_name like 'normal_table2'") tdSql.checkData(0, 0, 'normal_table2') tdSql.checkData(0, 8, 'fly') - tdSql.execute("alter table normal_table3 comment 'fly'") + tdSql.execute(f"alter table {dbname}.normal_table3 comment 'fly'") tdSql.query("select * from 
information_schema.ins_tables where table_name like 'normal_table3'") tdSql.checkData(0, 0, 'normal_table3') tdSql.checkData(0, 8, 'fly') - tdSql.execute("alter table normal_table1 ttl 1") + tdSql.execute(f"alter table {dbname}.normal_table1 ttl 1") tdSql.query("select * from information_schema.ins_tables where table_name like 'normal_table1'") tdSql.checkData(0, 0, 'normal_table1') tdSql.checkData(0, 7, 1) - tdSql.execute("alter table normal_table3 ttl 0") + tdSql.execute(f"alter table {dbname}.normal_table3 ttl 0") tdSql.query("select * from information_schema.ins_tables where table_name like 'normal_table3'") tdSql.checkData(0, 0, 'normal_table3') tdSql.checkData(0, 7, 0) @@ -91,9 +92,9 @@ class TDTestCase: print("============== STEP 2 ===== test super table") - tdSql.execute("create table super_table1(ts timestamp, i int) tags(t int)") - tdSql.execute("create table super_table2(ts timestamp, i int) tags(t int) comment ''") - tdSql.execute("create table super_table3(ts timestamp, i int) tags(t int) comment 'super'") + tdSql.execute(f"create table {dbname}.super_table1(ts timestamp, i int) tags(t int)") + tdSql.execute(f"create table {dbname}.super_table2(ts timestamp, i int) tags(t int) comment ''") + tdSql.execute(f"create table {dbname}.super_table3(ts timestamp, i int) tags(t int) comment 'super'") tdSql.query("select * from information_schema.ins_stables where stable_name like 'super_table1'") tdSql.checkData(0, 0, 'super_table1') @@ -110,32 +111,32 @@ class TDTestCase: tdSql.checkData(0, 6, 'super') - tdSql.execute("alter table super_table1 comment 'nihao'") + tdSql.execute(f"alter table {dbname}.super_table1 comment 'nihao'") tdSql.query("select * from information_schema.ins_stables where stable_name like 'super_table1'") tdSql.checkData(0, 0, 'super_table1') tdSql.checkData(0, 6, 'nihao') - tdSql.execute("alter table super_table1 comment ''") + tdSql.execute(f"alter table {dbname}.super_table1 comment ''") tdSql.query("select * from 
information_schema.ins_stables where stable_name like 'super_table1'") tdSql.checkData(0, 0, 'super_table1') tdSql.checkData(0, 6, '') - tdSql.execute("alter table super_table2 comment 'fly'") + tdSql.execute(f"alter table {dbname}.super_table2 comment 'fly'") tdSql.query("select * from information_schema.ins_stables where stable_name like 'super_table2'") tdSql.checkData(0, 0, 'super_table2') tdSql.checkData(0, 6, 'fly') - tdSql.execute("alter table super_table3 comment 'tdengine'") + tdSql.execute(f"alter table {dbname}.super_table3 comment 'tdengine'") tdSql.query("select * from information_schema.ins_stables where stable_name like 'super_table3'") tdSql.checkData(0, 0, 'super_table3') tdSql.checkData(0, 6, 'tdengine') print("============== STEP 3 ===== test child table") - tdSql.execute("create table child_table1 using super_table1 tags(1) ttl 10") - tdSql.execute("create table child_table2 using super_table1 tags(1) comment ''") - tdSql.execute("create table child_table3 using super_table1 tags(1) comment 'child'") - tdSql.execute("insert into child_table4 using super_table1 tags(1) values(now, 1)") + tdSql.execute(f"create table {dbname}.child_table1 using {dbname}.super_table1 tags(1) ttl 10") + tdSql.execute(f"create table {dbname}.child_table2 using {dbname}.super_table1 tags(1) comment ''") + tdSql.execute(f"create table {dbname}.child_table3 using {dbname}.super_table1 tags(1) comment 'child'") + tdSql.execute(f"insert into {dbname}.child_table4 using {dbname}.super_table1 tags(1) values(now, 1)") tdSql.query("select * from information_schema.ins_tables where table_name like 'child_table1'") @@ -160,38 +161,38 @@ class TDTestCase: tdSql.checkData(0, 8, None) - tdSql.execute("alter table child_table1 comment 'nihao'") + tdSql.execute(f"alter table {dbname}.child_table1 comment 'nihao'") tdSql.query("select * from information_schema.ins_tables where table_name like 'child_table1'") tdSql.checkData(0, 0, 'child_table1') tdSql.checkData(0, 8, 'nihao') - 
tdSql.execute("alter table child_table1 comment ''") + tdSql.execute(f"alter table {dbname}.child_table1 comment ''") tdSql.query("select * from information_schema.ins_tables where table_name like 'child_table1'") tdSql.checkData(0, 0, 'child_table1') tdSql.checkData(0, 8, '') - tdSql.execute("alter table child_table2 comment 'fly'") + tdSql.execute(f"alter table {dbname}.child_table2 comment 'fly'") tdSql.query("select * from information_schema.ins_tables where table_name like 'child_table2'") tdSql.checkData(0, 0, 'child_table2') tdSql.checkData(0, 8, 'fly') - tdSql.execute("alter table child_table3 comment 'tdengine'") + tdSql.execute(f"alter table {dbname}.child_table3 comment 'tdengine'") tdSql.query("select * from information_schema.ins_tables where table_name like 'child_table3'") tdSql.checkData(0, 0, 'child_table3') tdSql.checkData(0, 8, 'tdengine') - tdSql.execute("alter table child_table4 comment 'tdengine'") + tdSql.execute(f"alter table {dbname}.child_table4 comment 'tdengine'") tdSql.query("select * from information_schema.ins_tables where table_name like 'child_table4'") tdSql.checkData(0, 0, 'child_table4') tdSql.checkData(0, 8, 'tdengine') - tdSql.execute("alter table child_table4 ttl 9") + tdSql.execute(f"alter table {dbname}.child_table4 ttl 9") tdSql.query("select * from information_schema.ins_tables where table_name like 'child_table4'") tdSql.checkData(0, 0, 'child_table4') tdSql.checkData(0, 7, 9) - tdSql.execute("alter table child_table3 ttl 9") + tdSql.execute(f"alter table {dbname}.child_table3 ttl 9") tdSql.query("select * from information_schema.ins_tables where table_name like 'child_table3'") tdSql.checkData(0, 0, 'child_table3') tdSql.checkData(0, 7, 9) @@ -203,4 +204,3 @@ class TDTestCase: tdCases.addWindows(__file__, TDTestCase()) tdCases.addLinux(__file__, TDTestCase()) - diff --git a/tests/system-test/2-query/twa.py b/tests/system-test/2-query/twa.py index 8281527bd4..62940477cf 100644 --- a/tests/system-test/2-query/twa.py +++ 
b/tests/system-test/2-query/twa.py @@ -7,10 +7,7 @@ import platform import math class TDTestCase: - updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , - "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143, - "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143, - "maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 } + updatecfgDict = {"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 } def init(self, conn, logSql): tdLog.debug("start to execute %s" % __file__) @@ -21,46 +18,45 @@ class TDTestCase: self.row_nums = 100 self.time_step = 1000 - def prepare_datas_of_distribute(self): + def prepare_datas_of_distribute(self, dbname="testdb"): # prepate datas for 20 tables distributed at different vgroups - tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5") - tdSql.execute(" use testdb ") + tdSql.execute(f"create database if not exists {dbname} keep 3650 duration 1000 vgroups 5") tdSql.execute( - '''create table stb1 + f'''create table {dbname}.stb1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32)) ''' ) for i in range(self.tb_nums): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') + tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') ts = self.ts for j in range(self.row_nums): ts+=j*self.time_step tdSql.execute( - f"insert into ct{i+1} values({ts}, 1, 11111, 111, 1, 1.11, 11.11, 2, 
'binary{j}', 'nchar{j}', now()+{1*j}a )" + f"insert into {dbname}.ct{i+1} values({ts}, 1, 11111, 111, 1, 1.11, 11.11, 2, 'binary{j}', 'nchar{j}', now()+{1*j}a )" ) - tdSql.execute("insert into ct1 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct1 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct1 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct1 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct1 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") tdLog.info(" prepare data for distributed_aggregate done! ") - def twa_support_types(self): - tdSql.query("desc stb1 ") + def twa_support_types(self, dbname="testdb"): + tdSql.query(f"desc {dbname}.stb1 ") schema_list = tdSql.queryResult for col_type in schema_list: if col_type[1] in ["TINYINT" ,"SMALLINT","BIGINT" ,"INT","FLOAT","DOUBLE"]: - tdSql.query(f" select twa({col_type[0]}) from stb1 partition by tbname ") + tdSql.query(f"select twa({col_type[0]}) from {dbname}.stb1 partition by tbname ") else: - tdSql.error(f" select twa({col_type[0]}) from stb1 partition by tbname ") + tdSql.error(f"select twa({col_type[0]}) from {dbname}.stb1 partition by tbname ") - def check_distribute_datas(self): + def check_distribute_datas(self, dbname="testdb"): # get vgroup_ids of all - tdSql.query("show vgroups ") + tdSql.query(f"show {dbname}.vgroups ") vgroups = tdSql.queryResult vnode_tables={} @@ -69,7 +65,7 @@ class TDTestCase: vnode_tables[vgroup_id[0]]=[] # check sub_table of per vnode ,make sure sub_table has been distributed - tdSql.query(f"select * from information_schema.ins_tables where 
db_name = 'testdb' and table_name like 'ct%'") + tdSql.query(f"select * from information_schema.ins_tables where db_name = '{dbname}' and table_name like 'ct%'") table_names = tdSql.queryResult tablenames = [] for table_name in table_names: @@ -83,28 +79,28 @@ class TDTestCase: if count < 2: tdLog.exit(" the datas of all not satisfy sub_table has been distributed ") - def distribute_twa_query(self): + def distribute_twa_query(self, dbname="testdb"): # basic filter - tdSql.query(" select twa(c1) from ct1 ") + tdSql.query(f"select twa(c1) from {dbname}.ct1 ") tdSql.checkData(0,0,1.000000000) - tdSql.query(" select twa(c1) from stb1 partition by tbname ") + tdSql.query(f"select twa(c1) from {dbname}.stb1 partition by tbname ") tdSql.checkRows(self.tb_nums) tdSql.checkData(0,0,1.000000000) - tdSql.query(" select twa(c2) from stb1 group by tbname ") + tdSql.query(f"select twa(c2) from {dbname}.stb1 group by tbname ") tdSql.checkRows(self.tb_nums) tdSql.checkData(0,0,11111.000000000) - tdSql.query("select twa(c1+c2) from stb1 partition by tbname ") + tdSql.query(f"select twa(c1+c2) from {dbname}.stb1 partition by tbname ") tdSql.checkData(0,0,11112.000000000) - tdSql.query("select twa(c1) from stb1 partition by t1") + tdSql.query(f"select twa(c1) from {dbname}.stb1 partition by t1") tdSql.checkRows(self.tb_nums) tdSql.checkData(0,0,1.000000000) # union all - tdSql.query(" select twa(c1) from stb1 partition by tbname union all select twa(c1) from stb1 partition by tbname ") + tdSql.query(f"select twa(c1) from {dbname}.stb1 partition by tbname union all select twa(c1) from {dbname}.stb1 partition by tbname ") tdSql.checkRows(40) tdSql.checkData(0,0,1.000000000) @@ -112,26 +108,23 @@ class TDTestCase: tdSql.execute(" create database if not exists db ") tdSql.execute(" use db ") - tdSql.execute(" create stable st (ts timestamp , c1 int ,c2 float) tags(t1 int) ") - tdSql.execute(" create table tb1 using st tags(1) ") - tdSql.execute(" create table tb2 using st tags(2) ") + 
tdSql.execute(" create stable db.st (ts timestamp , c1 int ,c2 float) tags(t1 int) ") + tdSql.execute(" create table db.tb1 using db.st tags(1) ") + tdSql.execute(" create table db.tb2 using db.st tags(2) ") for i in range(10): ts = i*10 + self.ts - tdSql.execute(f" insert into tb1 values({ts},{i},{i}.0)") - tdSql.execute(f" insert into tb2 values({ts},{i},{i}.0)") + tdSql.execute(f" insert into db.tb1 values({ts},{i},{i}.0)") + tdSql.execute(f" insert into db.tb2 values({ts},{i},{i}.0)") - tdSql.query(" select twa(tb1.c1), twa(tb2.c2) from tb1, tb2 where tb1.ts=tb2.ts ") + tdSql.query(f"select twa(tb1.c1), twa(tb2.c2) from db.tb1 tb1, db.tb2 tb2 where tb1.ts=tb2.ts ") tdSql.checkRows(1) tdSql.checkData(0,0,4.500000000) tdSql.checkData(0,1,4.500000000) - # group by - tdSql.execute(" use testdb ") - # mixup with other functions - tdSql.query(" select twa(c1),twa(c2),max(c1),elapsed(ts) from stb1 ") + tdSql.query(f"select twa(c1),twa(c2),max(c1),elapsed(ts) from {dbname}.stb1 ") tdSql.checkData(0,0,1.000000000) tdSql.checkData(0,1,11111.000000000) tdSql.checkData(0,2,1) diff --git a/tests/system-test/2-query/union.py b/tests/system-test/2-query/union.py index 88767ab888..4040bb71cb 100644 --- a/tests/system-test/2-query/union.py +++ b/tests/system-test/2-query/union.py @@ -58,10 +58,10 @@ class TDTestCase: def __join_condition(self, tb_list, filter=PRIMARY_COL, INNER=False): table_reference = tb_list[0] - join_condition = table_reference + join_condition = f'{table_reference} {table_reference.split(".")[-1]}' join = "inner join" if INNER else "join" for i in range(len(tb_list[1:])): - join_condition += f" {join} {tb_list[i+1]} on {table_reference}.{filter}={tb_list[i+1]}.{filter}" + join_condition += f" {join} {tb_list[i+1]} {tb_list[i+1].split('.')[-1]} on {table_reference.split('.')[-1]}.{filter}={tb_list[i+1].split('.')[-1]}.{filter}" return join_condition @@ -76,7 +76,6 @@ class TDTestCase: elif query_conditon.startswith("min"): query_conditon = 
query_conditon[4:-1] - if query_conditon: return f" where {query_conditon} is not null" if col in NUM_COL: @@ -108,10 +107,10 @@ class TDTestCase: return f"select {select_clause} from {from_clause} {where_condition} {group_condition}" @property - def __join_tblist(self): + def __join_tblist(self, dbname="db"): return [ - ["ct1", "t1"], - ["ct4", "t1"], + [f"{dbname}.ct1", f"{dbname}.t1"], + [f"{dbname}.ct4", f"{dbname}.t1"], # ["ct1", "ct2", "ct4"], # ["ct1", "ct2", "t1"], # ["ct1", "ct4", "t1"], @@ -120,10 +119,10 @@ class TDTestCase: ] @property - def __tb_liast(self): + def __tb_list(self, dbname="db"): return [ - "ct1", - "ct4", + f"{dbname}.ct1", + f"{dbname}.ct4", ] def sql_list(self): @@ -131,7 +130,8 @@ class TDTestCase: __join_tblist = self.__join_tblist for join_tblist in __join_tblist: for join_tb in join_tblist: - select_claus_list = self.__query_condition(join_tb) + join_tb_name = join_tb.split(".")[-1] + select_claus_list = self.__query_condition(join_tb_name) for select_claus in select_claus_list: group_claus = self.__group_condition( col=select_claus) where_claus = self.__where_condition(query_conditon=select_claus) @@ -141,9 +141,10 @@ class TDTestCase: self.__single_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, having_claus), ) ) - __no_join_tblist = self.__tb_liast + __no_join_tblist = self.__tb_list for tb in __no_join_tblist: - select_claus_list = self.__query_condition(tb) + tb_name = join_tb.split(".")[-1] + select_claus_list = self.__query_condition(tb_name) for select_claus in select_claus_list: group_claus = self.__group_condition(col=select_claus) where_claus = self.__where_condition(query_conditon=select_claus) @@ -230,31 +231,29 @@ class TDTestCase: else: tdSql.error(f"{sqls[i]} union {sqls[j+i]}") - def __test_error(self): + def __test_error(self, dbname="db"): - tdSql.error( "show tables union show tables" ) - tdSql.error( "create table errtb1 union all create table errtb2" ) - tdSql.error( "drop table 
ct1 union all drop table ct3" ) - tdSql.error( "select c1 from ct1 union all drop table ct3" ) - tdSql.error( "select c1 from ct1 union all '' " ) - tdSql.error( " '' union all select c1 from ct1 " ) - # tdSql.error( "select c1 from ct1 union select c1 from ct2 union select c1 from ct4 ") + tdSql.error( f"show {dbname}.tables union show {dbname}.tables" ) + tdSql.error( f"create table {dbname}.errtb1 union all create table {dbname}.errtb2" ) + tdSql.error( f"drop table {dbname}.ct1 union all drop table {dbname}.ct3" ) + tdSql.error( f"select c1 from {dbname}.ct1 union all drop table {dbname}.ct3" ) + tdSql.error( f"select c1 from {dbname}.ct1 union all '' " ) + tdSql.error( f" '' union all select c1 from{dbname}. ct1 " ) def all_test(self): self.__test_error() self.union_check() - - def __create_tb(self): + def __create_tb(self, dbname="db"): tdLog.printNoPrefix("==========step1:create table") - create_stb_sql = f'''create table stb1( + create_stb_sql = f'''create table {dbname}.stb1( ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp - ) tags (t1 int) + ) tags (tag1 int) ''' - create_ntb_sql = f'''create table t1( + create_ntb_sql = f'''create table {dbname}.t1( ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp @@ -264,30 +263,29 @@ class TDTestCase: tdSql.execute(create_ntb_sql) for i in range(4): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') - { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2} + tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )') - def __insert_data(self, rows): + def __insert_data(self, rows, dbname="db"): now_time = 
int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000) for i in range(rows): tdSql.execute( - f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + f"insert into {dbname}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" ) tdSql.execute( - f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + f"insert into {dbname}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" ) tdSql.execute( - f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + f"insert into {dbname}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" ) tdSql.execute( - f'''insert into ct1 values + f'''insert into {dbname}.ct1 values ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } ) ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } ) ''' ) tdSql.execute( - f'''insert into ct4 values + f'''insert into {dbname}.ct4 values ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, 
NULL, NULL, NULL, NULL, NULL ) @@ -303,7 +301,7 @@ class TDTestCase: ) tdSql.execute( - f'''insert into ct2 values + f'''insert into {dbname}.ct2 values ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) @@ -319,13 +317,13 @@ class TDTestCase: ) for i in range(rows): - insert_data = f'''insert into t1 values + insert_data = f'''insert into {dbname}.t1 values ( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2}, "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } ) ''' tdSql.execute(insert_data) tdSql.execute( - f'''insert into t1 values + f'''insert into {dbname}.t1 values ( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) @@ -341,7 +339,6 @@ class TDTestCase: ''' ) - def run(self): tdSql.prepare() @@ -355,8 +352,7 @@ class TDTestCase: tdLog.printNoPrefix("==========step3:all check") self.all_test() - tdDnodes.stop(1) - tdDnodes.start(1) + tdSql.execute("flush database db") tdSql.execute("use db") diff --git a/tests/system-test/2-query/unique.py b/tests/system-test/2-query/unique.py index ccf7e287e2..ec77cbbcdc 100644 --- a/tests/system-test/2-query/unique.py +++ b/tests/system-test/2-query/unique.py @@ -11,49 +11,46 @@ from util.sql import * from util.cases import * class TDTestCase: - updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , - "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, 
"dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143, - "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143} def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor()) - def prepare_datas(self): + def prepare_datas(self, dbname="db"): tdSql.execute( - '''create table stb1 + f'''create table {dbname}.stb1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) tags (t1 int) ''' ) tdSql.execute( - ''' - create table t1 + f''' + create table {dbname}.t1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) ''' ) for i in range(4): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )') for i in range(9): tdSql.execute( - f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" ) tdSql.execute( - f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" ) - tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") - tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert 
into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") tdSql.execute( - f'''insert into t1 values + f'''insert into {dbname}.t1 values ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a ) ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a ) @@ -69,84 +66,84 @@ class TDTestCase: ''' ) - def test_errors(self): + def test_errors(self, dbname="db"): error_sql_lists = [ - "select unique from t1", - "select unique(123--123)==1 from t1", - "select unique(123,123) from t1", - "select unique(c1,ts) from t1", - "select 
unique(c1,c1,ts) from t1", - "select unique(c1) as 'd1' from t1", - "select unique(c1 ,c2 ) from t1", - "select unique(c1 ,NULL) from t1", - "select unique(,) from t1;", - "select unique(floor(c1) ab from t1)", - "select unique(c1) as int from t1", - "select unique('c1') from t1", - "select unique(NULL) from t1", - "select unique('') from t1", - "select unique(c%) from t1", - "select unique(t1) from t1", - "select unique(True) from t1", - "select unique(c1) , count(c1) from t1", - "select unique(c1) , avg(c1) from t1", - "select unique(c1) , min(c1) from t1", - "select unique(c1) , spread(c1) from t1", - "select unique(c1) , diff(c1) from t1", - #"select unique(c1) , abs(c1) from t1", # support - #"select unique(c1) , c1 from t1", - "select unique from stb1 partition by tbname", - "select unique(123--123)==1 from stb1 partition by tbname", - "select unique(123) from stb1 partition by tbname", - "select unique(c1,ts) from stb1 partition by tbname", - "select unique(c1,c1,ts) from stb1 partition by tbname", - "select unique(c1) as 'd1' from stb1 partition by tbname", - "select unique(c1 ,c2 ) from stb1 partition by tbname", - "select unique(c1 ,NULL) from stb1 partition by tbname", - "select unique(,) from stb1 partition by tbname;", - #"select unique(floor(c1) ab from stb1 partition by tbname)", # support - #"select unique(c1) as int from stb1 partition by tbname", - "select unique('c1') from stb1 partition by tbname", - "select unique(NULL) from stb1 partition by tbname", - "select unique('') from stb1 partition by tbname", - "select unique(c%) from stb1 partition by tbname", - #"select unique(t1) from stb1 partition by tbname", # support - "select unique(True) from stb1 partition by tbname", - "select unique(c1) , count(c1) from stb1 partition by tbname", - "select unique(c1) , avg(c1) from stb1 partition by tbname", - "select unique(c1) , min(c1) from stb1 partition by tbname", - "select unique(c1) , spread(c1) from stb1 partition by tbname", - "select unique(c1) 
, diff(c1) from stb1 partition by tbname", - #"select unique(c1) , abs(c1) from stb1 partition by tbname", # support - #"select unique(c1) , c1 from stb1 partition by tbname" # support + f"select unique from {dbname}.t1", + f"select unique(123--123)==1 from {dbname}.t1", + f"select unique(123,123) from {dbname}.t1", + f"select unique(c1,ts) from {dbname}.t1", + f"select unique(c1,c1,ts) from {dbname}.t1", + f"select unique(c1) as 'd1' from {dbname}.t1", + f"select unique(c1 ,c2 ) from {dbname}.t1", + f"select unique(c1 ,NULL) from {dbname}.t1", + f"select unique(,) from {dbname}.t1;", + f"select unique(floor(c1) ab from {dbname}.t1)", + f"select unique(c1) as int from {dbname}.t1", + f"select unique('c1') from {dbname}.t1", + f"select unique(NULL) from {dbname}.t1", + f"select unique('') from {dbname}.t1", + f"select unique(c%) from {dbname}.t1", + f"select unique(t1) from {dbname}.t1", + f"select unique(True) from {dbname}.t1", + f"select unique(c1) , count(c1) from {dbname}.t1", + f"select unique(c1) , avg(c1) from {dbname}.t1", + f"select unique(c1) , min(c1) from {dbname}.t1", + f"select unique(c1) , spread(c1) from {dbname}.t1", + f"select unique(c1) , diff(c1) from {dbname}.t1", + #f"select unique(c1) , abs(c1) from {dbname}.t1", # support + #f"select unique(c1) , c1 from {dbname}.t1", + f"select unique from {dbname}.stb1 partition by tbname", + f"select unique(123--123)==1 from {dbname}.stb1 partition by tbname", + f"select unique(123) from {dbname}.stb1 partition by tbname", + f"select unique(c1,ts) from {dbname}.stb1 partition by tbname", + f"select unique(c1,c1,ts) from {dbname}.stb1 partition by tbname", + f"select unique(c1) as 'd1' from {dbname}.stb1 partition by tbname", + f"select unique(c1 ,c2 ) from {dbname}.stb1 partition by tbname", + f"select unique(c1 ,NULL) from {dbname}.stb1 partition by tbname", + f"select unique(,) from {dbname}.stb1 partition by tbname;", + #f"select unique(floor(c1) ab from {dbname}.stb1 partition by tbname)", # support + 
#f"select unique(c1) as int from {dbname}.stb1 partition by tbname", + f"select unique('c1') from {dbname}.stb1 partition by tbname", + f"select unique(NULL) from {dbname}.stb1 partition by tbname", + f"select unique('') from {dbname}.stb1 partition by tbname", + f"select unique(c%) from {dbname}.stb1 partition by tbname", + #f"select unique(t1) from {dbname}.stb1 partition by tbname", # support + f"select unique(True) from {dbname}.stb1 partition by tbname", + f"select unique(c1) , count(c1) from {dbname}.stb1 partition by tbname", + f"select unique(c1) , avg(c1) from {dbname}.stb1 partition by tbname", + f"select unique(c1) , min(c1) from {dbname}.stb1 partition by tbname", + f"select unique(c1) , spread(c1) from {dbname}.stb1 partition by tbname", + f"select unique(c1) , diff(c1) from {dbname}.stb1 partition by tbname", + #f"select unique(c1) , abs(c1) from {dbname}.stb1 partition by tbname", # support + #f"select unique(c1) , c1 from {dbname}.stb1 partition by tbname" # support ] for error_sql in error_sql_lists: tdSql.error(error_sql) pass - def support_types(self): + def support_types(self, dbname="db"): other_no_value_types = [ - "select unique(ts) from t1" , - "select unique(c7) from t1", - "select unique(c8) from t1", - "select unique(c9) from t1", - "select unique(ts) from ct1" , - "select unique(c7) from ct1", - "select unique(c8) from ct1", - "select unique(c9) from ct1", - "select unique(ts) from ct3" , - "select unique(c7) from ct3", - "select unique(c8) from ct3", - "select unique(c9) from ct3", - "select unique(ts) from ct4" , - "select unique(c7) from ct4", - "select unique(c8) from ct4", - "select unique(c9) from ct4", - "select unique(ts) from stb1 partition by tbname" , - "select unique(c7) from stb1 partition by tbname", - "select unique(c8) from stb1 partition by tbname", - "select unique(c9) from stb1 partition by tbname" + f"select unique(ts) from {dbname}.t1" , + f"select unique(c7) from {dbname}.t1", + f"select unique(c8) from 
{dbname}.t1", + f"select unique(c9) from {dbname}.t1", + f"select unique(ts) from {dbname}.ct1" , + f"select unique(c7) from {dbname}.ct1", + f"select unique(c8) from {dbname}.ct1", + f"select unique(c9) from {dbname}.ct1", + f"select unique(ts) from {dbname}.ct3" , + f"select unique(c7) from {dbname}.ct3", + f"select unique(c8) from {dbname}.ct3", + f"select unique(c9) from {dbname}.ct3", + f"select unique(ts) from {dbname}.ct4" , + f"select unique(c7) from {dbname}.ct4", + f"select unique(c8) from {dbname}.ct4", + f"select unique(c9) from {dbname}.ct4", + f"select unique(ts) from {dbname}.stb1 partition by tbname" , + f"select unique(c7) from {dbname}.stb1 partition by tbname", + f"select unique(c8) from {dbname}.stb1 partition by tbname", + f"select unique(c9) from {dbname}.stb1 partition by tbname" ] for type_sql in other_no_value_types: @@ -154,43 +151,43 @@ class TDTestCase: tdLog.info("support type ok , sql is : %s"%type_sql) type_sql_lists = [ - "select unique(c1) from t1", - "select unique(c2) from t1", - "select unique(c3) from t1", - "select unique(c4) from t1", - "select unique(c5) from t1", - "select unique(c6) from t1", + f"select unique(c1) from {dbname}.t1", + f"select unique(c2) from {dbname}.t1", + f"select unique(c3) from {dbname}.t1", + f"select unique(c4) from {dbname}.t1", + f"select unique(c5) from {dbname}.t1", + f"select unique(c6) from {dbname}.t1", - "select unique(c1) from ct1", - "select unique(c2) from ct1", - "select unique(c3) from ct1", - "select unique(c4) from ct1", - "select unique(c5) from ct1", - "select unique(c6) from ct1", + f"select unique(c1) from {dbname}.ct1", + f"select unique(c2) from {dbname}.ct1", + f"select unique(c3) from {dbname}.ct1", + f"select unique(c4) from {dbname}.ct1", + f"select unique(c5) from {dbname}.ct1", + f"select unique(c6) from {dbname}.ct1", - "select unique(c1) from ct3", - "select unique(c2) from ct3", - "select unique(c3) from ct3", - "select unique(c4) from ct3", - "select unique(c5) from 
ct3", - "select unique(c6) from ct3", + f"select unique(c1) from {dbname}.ct3", + f"select unique(c2) from {dbname}.ct3", + f"select unique(c3) from {dbname}.ct3", + f"select unique(c4) from {dbname}.ct3", + f"select unique(c5) from {dbname}.ct3", + f"select unique(c6) from {dbname}.ct3", - "select unique(c1) from stb1 partition by tbname", - "select unique(c2) from stb1 partition by tbname", - "select unique(c3) from stb1 partition by tbname", - "select unique(c4) from stb1 partition by tbname", - "select unique(c5) from stb1 partition by tbname", - "select unique(c6) from stb1 partition by tbname", + f"select unique(c1) from {dbname}.stb1 partition by tbname", + f"select unique(c2) from {dbname}.stb1 partition by tbname", + f"select unique(c3) from {dbname}.stb1 partition by tbname", + f"select unique(c4) from {dbname}.stb1 partition by tbname", + f"select unique(c5) from {dbname}.stb1 partition by tbname", + f"select unique(c6) from {dbname}.stb1 partition by tbname", - "select unique(c6) as alisb from stb1 partition by tbname", - "select unique(c6) alisb from stb1 partition by tbname", + f"select unique(c6) as alisb from {dbname}.stb1 partition by tbname", + f"select unique(c6) alisb from {dbname}.stb1 partition by tbname", ] for type_sql in type_sql_lists: tdSql.query(type_sql) def check_unique_table(self , unique_sql): - # unique_sql = "select unique(c1) from ct1" + # unique_sql = f"select unique(c1) from {dbname}.ct1" origin_sql = unique_sql.replace("unique(","").replace(")","") tdSql.query(unique_sql) unique_result = tdSql.queryResult @@ -219,83 +216,83 @@ class TDTestCase: else: tdLog.exit(" unique query check fail , unique sql is: %s " %unique_sql) - def basic_unique_function(self): + def basic_unique_function(self, dbname="db"): # basic query - tdSql.query("select c1 from ct3") + tdSql.query(f"select c1 from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select c1 from t1") + tdSql.query(f"select c1 from {dbname}.t1") tdSql.checkRows(12) - 
tdSql.query("select c1 from stb1") + tdSql.query(f"select c1 from {dbname}.stb1") tdSql.checkRows(25) # used for empty table , ct3 is empty - tdSql.query("select unique(c1) from ct3") + tdSql.query(f"select unique(c1) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select unique(c2) from ct3") + tdSql.query(f"select unique(c2) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select unique(c3) from ct3") + tdSql.query(f"select unique(c3) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select unique(c4) from ct3") + tdSql.query(f"select unique(c4) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select unique(c5) from ct3") + tdSql.query(f"select unique(c5) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select unique(c6) from ct3") + tdSql.query(f"select unique(c6) from {dbname}.ct3") # will support _rowts mix with - # tdSql.query("select unique(c6),_rowts from ct3") + # tdSql.query(f"select unique(c6),_rowts from {dbname}.ct3") # auto check for t1 table # used for regular table - tdSql.query("select unique(c1) from t1") + tdSql.query(f"select unique(c1) from {dbname}.t1") - tdSql.query("desc t1") + tdSql.query(f"desc {dbname}.t1") col_lists_rows = tdSql.queryResult col_lists = [] for col_name in col_lists_rows: col_lists.append(col_name[0]) for col in col_lists: - self.check_unique_table(f"select unique({col}) from t1") + self.check_unique_table(f"select unique({col}) from {dbname}.t1") # unique with super tags - tdSql.query("select unique(c1) from ct1") + tdSql.query(f"select unique(c1) from {dbname}.ct1") tdSql.checkRows(10) - tdSql.query("select unique(c1) from ct4") + tdSql.query(f"select unique(c1) from {dbname}.ct4") tdSql.checkRows(10) - #tdSql.error("select unique(c1),tbname from ct1") #support - #tdSql.error("select unique(c1),t1 from ct1") #support + #tdSql.error(f"select unique(c1),tbname from {dbname}.ct1") #support + #tdSql.error(f"select unique(c1),t1 from {dbname}.ct1") #support # unique with common col - 
#tdSql.error("select unique(c1) ,ts from ct1") - #tdSql.error("select unique(c1) ,c1 from ct1") + #tdSql.error(f"select unique(c1) ,ts from {dbname}.ct1") + #tdSql.error(f"select unique(c1) ,c1 from {dbname}.ct1") # unique with scalar function - #tdSql.error("select unique(c1) ,abs(c1) from ct1") - tdSql.error("select unique(c1) , unique(c2) from ct1") - #tdSql.error("select unique(c1) , abs(c2)+2 from ct1") + #tdSql.error(f"select unique(c1) ,abs(c1) from {dbname}.ct1") + tdSql.error(f"select unique(c1) , unique(c2) from {dbname}.ct1") + #tdSql.error(f"select unique(c1) , abs(c2)+2 from {dbname}.ct1") # unique with aggregate function - tdSql.error("select unique(c1) ,sum(c1) from ct1") - tdSql.error("select unique(c1) ,max(c1) from ct1") - tdSql.error("select unique(c1) ,csum(c1) from ct1") - tdSql.error("select unique(c1) ,count(c1) from ct1") + tdSql.error(f"select unique(c1) ,sum(c1) from {dbname}.ct1") + tdSql.error(f"select unique(c1) ,max(c1) from {dbname}.ct1") + tdSql.error(f"select unique(c1) ,csum(c1) from {dbname}.ct1") + tdSql.error(f"select unique(c1) ,count(c1) from {dbname}.ct1") # unique with filter where - tdSql.query("select unique(c1) from ct4 where c1 is null") + tdSql.query(f"select unique(c1) from {dbname}.ct4 where c1 is null") tdSql.checkData(0, 0, None) - tdSql.query("select unique(c1) from ct4 where c1 >2 order by 1") + tdSql.query(f"select unique(c1) from {dbname}.ct4 where c1 >2 order by 1") tdSql.checkData(0, 0, 3) tdSql.checkData(1, 0, 4) tdSql.checkData(2, 0, 5) tdSql.checkData(5, 0, 8) - tdSql.query("select unique(c1) from ct4 where c2 between 0 and 99999 order by 1 desc") + tdSql.query(f"select unique(c1) from {dbname}.ct4 where c2 between 0 and 99999 order by 1 desc") tdSql.checkData(0, 0, 8) tdSql.checkData(1, 0, 7) tdSql.checkData(2, 0, 6) @@ -307,43 +304,43 @@ class TDTestCase: tdSql.checkData(8, 0, 0) # unique with union all - tdSql.query("select unique(c1) from ct4 union all select c1 from ct1") + tdSql.query(f"select 
unique(c1) from {dbname}.ct4 union all select c1 from {dbname}.ct1") tdSql.checkRows(23) - tdSql.query("select unique(c1) from ct4 union all select distinct(c1) from ct4") + tdSql.query(f"select unique(c1) from {dbname}.ct4 union all select distinct(c1) from {dbname}.ct4") tdSql.checkRows(20) - tdSql.query("select unique(c2) from ct4 union all select abs(c2)/2 from ct4") + tdSql.query(f"select unique(c2) from {dbname}.ct4 union all select abs(c2)/2 from {dbname}.ct4") tdSql.checkRows(22) # unique with join # prepare join datas with same ts tdSql.execute(" use db ") - tdSql.execute(" create stable st1 (ts timestamp , num int) tags(ind int)") - tdSql.execute(" create table tb1 using st1 tags(1)") - tdSql.execute(" create table tb2 using st1 tags(2)") + tdSql.execute(" create stable db.st1 (ts timestamp , num int) tags(ind int)") + tdSql.execute(" create table db.tb1 using db.st1 tags(1)") + tdSql.execute(" create table db.tb2 using db.st1 tags(2)") - tdSql.execute(" create stable st2 (ts timestamp , num int) tags(ind int)") - tdSql.execute(" create table ttb1 using st2 tags(1)") - tdSql.execute(" create table ttb2 using st2 tags(2)") + tdSql.execute(" create stable db.st2 (ts timestamp , num int) tags(ind int)") + tdSql.execute(" create table db.ttb1 using db.st2 tags(1)") + tdSql.execute(" create table db.ttb2 using db.st2 tags(2)") start_ts = 1622369635000 # 2021-05-30 18:13:55 for i in range(10): ts_value = start_ts+i*1000 - tdSql.execute(f" insert into tb1 values({ts_value} , {i})") - tdSql.execute(f" insert into tb2 values({ts_value} , {i})") + tdSql.execute(f" insert into {dbname}.tb1 values({ts_value} , {i})") + tdSql.execute(f" insert into {dbname}.tb2 values({ts_value} , {i})") - tdSql.execute(f" insert into ttb1 values({ts_value} , {i})") - tdSql.execute(f" insert into ttb2 values({ts_value} , {i})") + tdSql.execute(f" insert into {dbname}.ttb1 values({ts_value} , {i})") + tdSql.execute(f" insert into {dbname}.ttb2 values({ts_value} , {i})") - 
tdSql.query("select unique(tb2.num) from tb1, tb2 where tb1.ts=tb2.ts order by 1") + tdSql.query(f"select unique(tb2.num) from db.tb1 tb1, db.tb2 tb2 where tb1.ts=tb2.ts order by 1") tdSql.checkRows(10) tdSql.checkData(0,0,0) tdSql.checkData(1,0,1) tdSql.checkData(2,0,2) tdSql.checkData(9,0,9) - tdSql.query("select unique(tb2.num) from tb1, tb2 where tb1.ts=tb2.ts union all select unique(tb1.num) from tb1, tb2 where tb1.ts=tb2.ts order by 1") + tdSql.query(f"select unique(tb2.num) from db.tb1 tb1, db.tb2 tb2 where tb1.ts=tb2.ts union all select unique(tb1.num) from db.tb1 tb1, db.tb2 tb2 where tb1.ts=tb2.ts order by 1") tdSql.checkRows(20) tdSql.checkData(0,0,0) tdSql.checkData(2,0,1) @@ -351,23 +348,23 @@ class TDTestCase: tdSql.checkData(18,0,9) # nest query - # tdSql.query("select unique(c1) from (select c1 from ct1)") - tdSql.query("select c1 from (select unique(c1) c1 from ct4) order by 1 desc nulls first") + # tdSql.query(f"select unique(c1) from (select c1 from {dbname}.ct1)") + tdSql.query(f"select c1 from (select unique(c1) c1 from {dbname}.ct4) order by 1 desc nulls first") tdSql.checkRows(10) tdSql.checkData(0, 0, None) tdSql.checkData(1, 0, 8) tdSql.checkData(9, 0, 0) - tdSql.query("select sum(c1) from (select unique(c1) c1 from ct1)") + tdSql.query(f"select sum(c1) from (select unique(c1) c1 from {dbname}.ct1)") tdSql.checkRows(1) tdSql.checkData(0, 0, 45) - tdSql.query("select sum(c1) from (select distinct(c1) c1 from ct1) union all select sum(c1) from (select unique(c1) c1 from ct1)") + tdSql.query(f"select sum(c1) from (select distinct(c1) c1 from {dbname}.ct1) union all select sum(c1) from (select unique(c1) c1 from {dbname}.ct1)") tdSql.checkRows(2) tdSql.checkData(0, 0, 45) tdSql.checkData(1, 0, 45) - tdSql.query("select 1-abs(c1) from (select unique(c1) c1 from ct4) order by 1 nulls first") + tdSql.query(f"select 1-abs(c1) from (select unique(c1) c1 from {dbname}.ct4) order by 1 nulls first") tdSql.checkRows(10) tdSql.checkData(0, 0, None) 
tdSql.checkData(1, 0, -7.000000000) @@ -375,104 +372,103 @@ class TDTestCase: # bug for stable #partition by tbname - # tdSql.query(" select unique(c1) from stb1 partition by tbname ") + # tdSql.query(f"select unique(c1) from {dbname}.stb1 partition by tbname ") # tdSql.checkRows(21) - # tdSql.query(" select unique(c1) from stb1 partition by tbname ") + # tdSql.query(f"select unique(c1) from {dbname}.stb1 partition by tbname ") # tdSql.checkRows(21) # group by - tdSql.error("select unique(c1) from ct1 group by c1") - tdSql.error("select unique(c1) from ct1 group by tbname") + tdSql.error(f"select unique(c1) from {dbname}.ct1 group by c1") + tdSql.error(f"select unique(c1) from {dbname}.ct1 group by tbname") # super table # super table - tdSql.error("select tbname , tail(c1,2) from stb1 group by tbname") - tdSql.query("select tail(c1,2) from stb1 partition by tbname") + tdSql.error(f"select tbname , tail(c1,2) from {dbname}.stb1 group by tbname") + tdSql.query(f"select tail(c1,2) from {dbname}.stb1 partition by tbname") tdSql.checkRows(4) # bug need fix - # tdSql.query("select tbname , tail(c1,2) from stb1 partition by tbname") + # tdSql.query(f"select tbname , tail(c1,2) from {dbname}.stb1 partition by tbname") # tdSql.checkRows(4) - # tdSql.query("select tbname , tail(c1,2) from stb1 partition by tbname order by tbname") + # tdSql.query(f"select tbname , tail(c1,2) from {dbname}.stb1 partition by tbname order by tbname") # tdSql.checkRows(4) - # tdSql.query(" select tbname , count(c1) from stb1 partition by tbname order by tbname ") + # tdSql.query(f"select tbname , count(c1) from {dbname}.stb1 partition by tbname order by tbname ") # tdSql.checkRows(2) - # tdSql.query(" select tbname , max(c1) ,c1 from stb1 partition by tbname order by tbname ") + # tdSql.query(f"select tbname , max(c1) ,c1 from {dbname}.stb1 partition by tbname order by tbname ") # tdSql.checkRows(2) - # tdSql.query(" select tbname ,first(c1) from stb1 partition by tbname order by tbname ") + # 
tdSql.query(f"select tbname ,first(c1) from {dbname}.stb1 partition by tbname order by tbname ") # tdSql.checkRows(2) - tdSql.query("select tail(c1,2) from stb1 partition by tbname") + tdSql.query(f"select tail(c1,2) from {dbname}.stb1 partition by tbname") tdSql.checkRows(4) # # bug need fix - # tdSql.query(" select tbname , unique(c1) from stb1 where t1 = 0 partition by tbname ") + # tdSql.query(f"select tbname , unique(c1) from {dbname}.stb1 where t1 = 0 partition by tbname ") # tdSql.checkRows(2) - # tdSql.query(" select tbname , unique(c1) from stb1 where t1 = 0 partition by tbname order by tbname ") + # tdSql.query(f"select tbname , unique(c1) from {dbname}.stb1 where t1 = 0 partition by tbname order by tbname ") # tdSql.checkRows(2) - # tdSql.query(" select tbname , unique(c1) from stb1 where c1 = 0 partition by tbname order by tbname ") + # tdSql.query(f"select tbname , unique(c1) from {dbname}.stb1 where c1 = 0 partition by tbname order by tbname ") # tdSql.checkRows(3) - # tdSql.query(" select tbname , unique(c1) from stb1 where c1 = 0 partition by tbname ") + # tdSql.query(f"select tbname , unique(c1) from {dbname}.stb1 where c1 = 0 partition by tbname ") # tdSql.checkRows(3) - tdSql.query(" select unique(t1) from stb1 ") + tdSql.query(f"select unique(t1) from {dbname}.stb1 ") tdSql.checkRows(2) - tdSql.query(" select unique(t1+c1) from stb1 ") + tdSql.query(f"select unique(t1+c1) from {dbname}.stb1 ") tdSql.checkRows(13) - tdSql.query(" select unique(t1+c1) from stb1 partition by tbname ") + tdSql.query(f"select unique(t1+c1) from {dbname}.stb1 partition by tbname ") tdSql.checkRows(20) - tdSql.query(" select unique(t1) from stb1 partition by tbname ") + tdSql.query(f"select unique(t1) from {dbname}.stb1 partition by tbname ") tdSql.checkRows(2) # nest query - tdSql.query(" select unique(c1) from (select _rowts , t1 ,c1 , tbname from stb1 ) ") + tdSql.query(f"select unique(c1) from (select _rowts , t1 ,c1 , tbname from {dbname}.stb1 ) ") 
tdSql.checkRows(11) tdSql.checkData(0,0,6) tdSql.checkData(10,0,3) - tdSql.query("select unique(t1) from (select _rowts , t1 , tbname from stb1 )") + tdSql.query(f"select unique(t1) from (select _rowts , t1 , tbname from {dbname}.stb1 )") tdSql.checkRows(2) tdSql.checkData(0,0,4) tdSql.checkData(1,0,1) - def check_boundary_values(self): + def check_boundary_values(self, dbname="bound_test"): - tdSql.execute("drop database if exists bound_test") - tdSql.execute("create database if not exists bound_test") - tdSql.execute("use bound_test") + tdSql.execute(f"drop database if exists {dbname}") + tdSql.execute(f"create database if not exists {dbname}") tdSql.execute( - "create table stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);" + f"create table {dbname}.stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);" ) - tdSql.execute(f'create table sub1_bound using stb_bound tags ( 1 )') + tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )') tdSql.execute( - f"insert into sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.execute( - f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.execute( - f"insert into sub1_bound values ( now()+1s, -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 
'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now()+1s, -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.execute( - f"insert into sub1_bound values ( now()+2s, 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now()+2s, 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.execute( - f"insert into sub1_bound values ( now()+3s, -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now()+3s, -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.error( - f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) - tdSql.query("select unique(c2) from sub1_bound order by 1 desc") + tdSql.query(f"select unique(c2) from {dbname}.sub1_bound order by 1 desc") tdSql.checkRows(5) tdSql.checkData(0,0,9223372036854775807) diff --git a/tests/system-test/2-query/upper.py b/tests/system-test/2-query/upper.py index bb485161dd..f15a6f3ba7 100644 --- a/tests/system-test/2-query/upper.py +++ b/tests/system-test/2-query/upper.py @@ -95,16 +95,16 @@ class TDTestCase: return sqls - def __test_current(self): + def __test_current(self, dbname="db"): tdLog.printNoPrefix("==========current sql condition check , must return query ok==========") - tbname = ["ct1", "ct2", "ct4", "t1", "stb1"] + tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.t1", f"{dbname}.stb1"] for tb in tbname: 
self.__upper_current_check(tb) tdLog.printNoPrefix(f"==========current sql condition check in {tb} over==========") - def __test_error(self): + def __test_error(self, dbname="db"): tdLog.printNoPrefix("==========err sql condition check , must return error==========") - tbname = ["ct1", "ct2", "ct4", "t1", "stb1"] + tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.t1", f"{dbname}.stb1"] for tb in tbname: for errsql in self.__upper_err_check(tb): @@ -112,22 +112,20 @@ class TDTestCase: tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========") - def all_test(self): - self.__test_current() - self.__test_error() + def all_test(self, dbname="db"): + self.__test_current(dbname) + self.__test_error(dbname) - - def __create_tb(self): - tdSql.prepare() + def __create_tb(self, dbname="db"): tdLog.printNoPrefix("==========step1:create table") - create_stb_sql = f'''create table stb1( + create_stb_sql = f'''create table {dbname}.stb1( ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp - ) tags (t1 int) + ) tags (tag1 int) ''' - create_ntb_sql = f'''create table t1( + create_ntb_sql = f'''create table {dbname}.t1( ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp @@ -137,83 +135,82 @@ class TDTestCase: tdSql.execute(create_ntb_sql) for i in range(4): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )') - def __insert_data(self, rows): + def __insert_data(self, rows, dbname="db"): now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000) for i in range(rows): tdSql.execute( - f"insert into ct1 values 
( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )" + f"insert into {dbname}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" ) tdSql.execute( - f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )" + f"insert into {dbname}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" ) tdSql.execute( - f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )" + f"insert into {dbname}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" ) tdSql.execute( - f'''insert into ct1 values - ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', { now_time + 8 } ) - ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', { now_time + 9 } ) + f'''insert into {dbname}.ct1 values + ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } ) + ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } ) ''' ) tdSql.execute( - f'''insert into ct4 values + f'''insert into {dbname}.ct4 values ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) - ( { now_time - rows * 3888000000+ 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 
NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127, - { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_limit-1", { now_time - 86400000} + { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000} ) ( { now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126, - { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_limit-2", { now_time - 172800000} + { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000} ) ''' ) tdSql.execute( - f'''insert into ct2 values + f'''insert into {dbname}.ct2 values ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) - ( { now_time - rows * 3888000000+ 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126, - { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_limit-1", { now_time - 86400000 } + { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } ) ( { now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127, - { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_limit-2", { now_time - 172800000 } + { - 3.3 * pow(10,38) 
}, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } ) ''' ) for i in range(rows): - insert_data = f'''insert into t1 values + insert_data = f'''insert into {dbname}.t1 values ( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2}, - "binary_{i}", "nchar_{i}", { now_time - 1000 * i } ) + "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } ) ''' tdSql.execute(insert_data) tdSql.execute( - f'''insert into t1 values + f'''insert into {dbname}.t1 values ( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127, { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, - "binary_limit-1", "nchar_limit-1", { now_time - 86400000 } + "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } ) ( { now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126, { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, - "binary_limit-2", "nchar_limit-2", { now_time - 172800000 } + "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } ) ''' ) - def run(self): tdSql.prepare() @@ -226,8 +223,7 @@ class TDTestCase: tdLog.printNoPrefix("==========step3:all check") self.all_test() - tdDnodes.stop(1) - tdDnodes.start(1) + tdSql.execute("flush database db") tdSql.execute("use db") diff --git a/tests/system-test/2-query/varchar.py b/tests/system-test/2-query/varchar.py index 5cc6c8e399..17c3ea6333 100644 --- a/tests/system-test/2-query/varchar.py +++ b/tests/system-test/2-query/varchar.py @@ -14,43 +14,44 @@ class TDTestCase: tdSql.init(conn.cursor()) def run(self): # sourcery skip: 
extract-duplicate-method, remove-redundant-fstring + dbname = "db" tdSql.prepare() tdLog.printNoPrefix("==========step1:create table") tdSql.execute( - '''create table stb1 + f'''create table {dbname}.stb1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 varchar(16),c9 nchar(32), c10 timestamp) tags (t1 int) ''' ) tdSql.execute( - ''' - create table t1 + f''' + create table {dbname}.t1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 varchar(16),c9 nchar(32), c10 timestamp) ''' ) for i in range(4): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )') tdLog.printNoPrefix("==========step2:insert data") for i in range(9): tdSql.execute( - f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'varchar{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'varchar{i}', 'nchar{i}', now()+{1*i}a )" ) tdSql.execute( - f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'varchar{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'varchar{i}', 'nchar{i}', now()+{1*i}a )" ) - tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'varchar0', 'nchar0', now()+8a )") - tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'varchar9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'varchar0', 'nchar0', now()+8a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'varchar9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct4 values 
(now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") tdSql.execute( - f'''insert into t1 values + f'''insert into {dbname}.t1 values ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "varchar1", "nchar1", now()+1a ) ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "varchar2", "nchar2", now()+2a ) @@ -70,7 +71,7 @@ class TDTestCase: tdLog.printNoPrefix("==========step3: cast on varchar") - tdSql.query("select c8 from ct1") + tdSql.query(f"select c8 from {dbname}.ct1") for i in range(tdSql.queryRows): tdSql.checkData(i,0, data_ct1_c8[i]) diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh index a91ec01a3b..335247c16d 100755 --- a/tests/system-test/fulltest.sh +++ b/tests/system-test/fulltest.sh @@ -124,8 +124,14 @@ python3 ./test.py -f 2-query/leastsquares.py python3 ./test.py -f 2-query/leastsquares.py -R python3 ./test.py -f 2-query/length.py python3 ./test.py -f 2-query/length.py -R +python3 ./test.py -f 2-query/log.py +# python3 ./test.py -f 2-query/log.py -R +python3 ./test.py -f 2-query/lower.py +python3 ./test.py -f 2-query/lower.py -R python3 ./test.py -f 2-query/ltrim.py python3 ./test.py -f 2-query/ltrim.py -R +python3 ./test.py -f 2-query/mavg.py +python3 ./test.py -f 2-query/mavg.py -R 
python3 ./test.py -f 2-query/max_partition.py python3 ./test.py -f 2-query/max_partition.py -R python3 ./test.py -f 2-query/max.py @@ -147,7 +153,7 @@ python3 ./test.py -f 2-query/rtrim.py -R python3 ./test.py -f 2-query/sample.py python3 ./test.py -f 2-query/sample.py -R python3 ./test.py -f 2-query/sin.py -# python3 ./test.py -f 2-query/sin.py -R +python3 ./test.py -f 2-query/sin.py -R python3 ./test.py -f 2-query/smaTest.py python3 ./test.py -f 2-query/smaTest.py -R python3 ./test.py -f 2-query/sml.py @@ -155,7 +161,7 @@ python3 ./test.py -f 2-query/sml.py -R python3 ./test.py -f 2-query/spread.py python3 ./test.py -f 2-query/spread.py -R python3 ./test.py -f 2-query/sqrt.py -# python3 ./test.py -f 2-query/sqrt.py -R +python3 ./test.py -f 2-query/sqrt.py -R python3 ./test.py -f 2-query/statecount.py python3 ./test.py -f 2-query/statecount.py -R python3 ./test.py -f 2-query/stateduration.py @@ -167,32 +173,48 @@ python3 ./test.py -f 2-query/sum.py -R python3 ./test.py -f 2-query/tail.py python3 ./test.py -f 2-query/tail.py -R python3 ./test.py -f 2-query/tan.py -python3 ./test.py -f 2-query/tan.py -R +# python3 ./test.py -f 2-query/tan.py -R +python3 ./test.py -f 2-query/Timediff.py +python3 ./test.py -f 2-query/Timediff.py -R +python3 ./test.py -f 2-query/timetruncate.py +# python3 ./test.py -f 2-query/timetruncate.py -R +python3 ./test.py -f 2-query/timezone.py +python3 ./test.py -f 2-query/timezone.py -R +python3 ./test.py -f 2-query/To_iso8601.py +python3 ./test.py -f 2-query/To_iso8601.py -R +python3 ./test.py -f 2-query/To_unixtimestamp.py +python3 ./test.py -f 2-query/To_unixtimestamp.py -R +python3 ./test.py -f 2-query/Today.py +# python3 ./test.py -f 2-query/Today.py -R +python3 ./test.py -f 2-query/top.py +python3 ./test.py -f 2-query/top.py -R +python3 ./test.py -f 2-query/tsbsQuery.py +python3 ./test.py -f 2-query/tsbsQuery.py -R +python3 ./test.py -f 2-query/ttl_comment.py +python3 ./test.py -f 2-query/ttl_comment.py -R +python3 ./test.py -f 
2-query/twa.py +python3 ./test.py -f 2-query/twa.py -R +python3 ./test.py -f 2-query/union.py +python3 ./test.py -f 2-query/union.py -R +python3 ./test.py -f 2-query/unique.py +python3 ./test.py -f 2-query/unique.py -R +python3 ./test.py -f 2-query/upper.py +python3 ./test.py -f 2-query/upper.py -R +python3 ./test.py -f 2-query/varchar.py +python3 ./test.py -f 2-query/varchar.py -R python3 ./test.py -f 1-insert/update_data.py python3 ./test.py -f 1-insert/delete_data.py -python3 ./test.py -f 2-query/varchar.py -python3 ./test.py -f 2-query/upper.py -python3 ./test.py -f 2-query/lower.py python3 ./test.py -f 2-query/join2.py -python3 ./test.py -f 2-query/union.py python3 ./test.py -f 2-query/union1.py python3 ./test.py -f 2-query/concat2.py -python3 ./test.py -f 2-query/timezone.py -python3 ./test.py -f 2-query/Today.py -python3 ./test.py -f 2-query/To_iso8601.py -python3 ./test.py -f 2-query/To_unixtimestamp.py -python3 ./test.py -f 2-query/timetruncate.py -python3 ./test.py -f 2-query/Timediff.py python3 ./test.py -f 2-query/json_tag.py -python3 ./test.py -f 2-query/top.py -python3 ./test.py -f 2-query/log.py # python3 ./test.py -f 2-query/nestedQuery.py # TD-15983 subquery output duplicate name column. 
# Please Xiangyang Guo modify the following script @@ -200,13 +222,8 @@ python3 ./test.py -f 2-query/log.py python3 ./test.py -f 2-query/elapsed.py python3 ./test.py -f 2-query/csum.py -python3 ./test.py -f 2-query/mavg.py python3 ./test.py -f 2-query/function_diff.py -python3 ./test.py -f 2-query/unique.py -python3 ./test.py -f 2-query/ttl_comment.py -python3 ./test.py -f 2-query/twa.py python3 ./test.py -f 2-query/queryQnode.py -python3 ./test.py -f 2-query/tsbsQuery.py python3 ./test.py -f 6-cluster/5dnode1mnode.py python3 ./test.py -f 6-cluster/5dnode2mnode.py -N 5 -M 3 From 4fc9d8c1ef5bbeb4b283acd8039989a4b86240bd Mon Sep 17 00:00:00 2001 From: Pan YANG Date: Tue, 23 Aug 2022 22:14:06 +0800 Subject: [PATCH 12/72] docs: remove misspelled file --- docs/en/14-reference/03-connector/_preparition.mdx | 10 ---------- 1 file changed, 10 deletions(-) delete mode 100644 docs/en/14-reference/03-connector/_preparition.mdx diff --git a/docs/en/14-reference/03-connector/_preparition.mdx b/docs/en/14-reference/03-connector/_preparition.mdx deleted file mode 100644 index 87538ebfd8..0000000000 --- a/docs/en/14-reference/03-connector/_preparition.mdx +++ /dev/null @@ -1,10 +0,0 @@ -- 已安装客户端驱动(使用原生连接必须安装,使用 REST 连接无需安装) - -:::info - -由于 TDengine 的客户端驱动使用 C 语言编写,使用原生连接时需要加载系统对应安装在本地的客户端驱动共享库文件,通常包含在 TDengine 安装包。TDengine Linux 服务端安装包附带了 TDengine 客户端,也可以单独安装 [Linux 客户端](/get-started/) 。在 Windows 环境开发时需要安装 TDengine 对应的 [Windows 客户端](https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client) 。 - -- libtaos.so: 在 Linux 系统中成功安装 TDengine 后,依赖的 Linux 版客户端驱动 libtaos.so 文件会被自动拷贝至 /usr/lib/libtaos.so,该目录包含在 Linux 自动扫描路径上,无需单独指定。 -- taos.dll: 在 Windows 系统中安装完客户端之后,依赖的 Windows 版客户端驱动 taos.dll 文件会自动拷贝到系统默认搜索路径 C:/Windows/System32 下,同样无需要单独指定。 - -::: From 4392a0d0777f88344ddcbe722f79a9c3a5929295 Mon Sep 17 00:00:00 2001 From: cpwu Date: Mon, 29 Aug 2022 15:01:54 +0800 Subject: [PATCH 13/72] fix case --- tests/system-test/2-query/join2.py | 46 ++++++++++++++---------------- 1 file 
changed, 22 insertions(+), 24 deletions(-) diff --git a/tests/system-test/2-query/join2.py b/tests/system-test/2-query/join2.py index a3818ed77d..5c8fe0f0f9 100644 --- a/tests/system-test/2-query/join2.py +++ b/tests/system-test/2-query/join2.py @@ -123,28 +123,28 @@ class TDTestCase: sqls = [] __join_tblist = self.__join_tblist for join_tblist in __join_tblist: - for join_tb in join_tblist: - select_claus_list = self.__query_condition(join_tb) - for select_claus in select_claus_list: - group_claus = self.__group_condition( col=select_claus) - where_claus = self.__where_condition( query_conditon=select_claus ) - having_claus = self.__group_condition( col=select_claus, having=f"{select_claus} is not null" ) - sqls.extend( - ( - # self.__gen_sql(select_claus, self.__join_condition(join_tblist), where_claus, group_claus), - self.__gen_sql(select_claus, self.__join_condition(join_tblist), where_claus, having_claus), - self.__gen_sql(select_claus, self.__join_condition(join_tblist), where_claus), - # self.__gen_sql(select_claus, self.__join_condition(join_tblist), group_claus), - self.__gen_sql(select_claus, self.__join_condition(join_tblist), having_claus), - self.__gen_sql(select_claus, self.__join_condition(join_tblist)), - # self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, group_claus), - self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, having_claus), - self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, ), - self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), having_claus ), - # self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), group_claus ), - self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True) ), - ) + alias_tb = "tb1" + select_claus_list = self.__query_condition(alias_tb) + for select_claus in select_claus_list: + group_claus = self.__group_condition( col=select_claus) 
+ where_claus = self.__where_condition( query_conditon=select_claus ) + having_claus = self.__group_condition( col=select_claus, having=f"{select_claus} is not null" ) + sqls.extend( + ( + # self.__gen_sql(select_claus, self.__join_condition(join_tblist), where_claus, group_claus), + self.__gen_sql(select_claus, self.__join_condition(join_tblist, alias_tb1=alias_tb), where_claus, having_claus), + self.__gen_sql(select_claus, self.__join_condition(join_tblist, alias_tb1=alias_tb), where_claus), + # self.__gen_sql(select_claus, self.__join_condition(join_tblist), group_claus), + self.__gen_sql(select_claus, self.__join_condition(join_tblist, alias_tb1=alias_tb), having_claus), + self.__gen_sql(select_claus, self.__join_condition(join_tblist, alias_tb1=alias_tb)), + # self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, group_claus), + self.__gen_sql(select_claus, self.__join_condition(join_tblist, alias_tb1=alias_tb, INNER=True), where_claus, having_claus), + self.__gen_sql(select_claus, self.__join_condition(join_tblist, alias_tb1=alias_tb, INNER=True), where_claus, ), + self.__gen_sql(select_claus, self.__join_condition(join_tblist, alias_tb1=alias_tb, INNER=True), having_claus ), + # self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), group_claus ), + self.__gen_sql(select_claus, self.__join_condition(join_tblist, alias_tb1=alias_tb, INNER=True) ), ) + ) return list(filter(None, sqls)) def __join_check(self,): @@ -341,10 +341,8 @@ class TDTestCase: tdLog.printNoPrefix("==========step3:all check") self.all_test() - tdDnodes.stop(1) - tdDnodes.start(1) + tdSql.execute(f"flush database db") - tdSql.execute("use db") tdLog.printNoPrefix("==========step4:after wal, all check again ") self.all_test() From ba8b10e27b6e38c939626796a08ad17d64d31c55 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 29 Aug 2022 18:47:46 +0800 Subject: [PATCH 14/72] refactor(query): do some internal refactor. 
--- include/libs/function/function.h | 10 +- include/util/tpagedbuf.h | 3 +- source/libs/executor/src/executil.c | 6 +- source/libs/executor/src/executorimpl.c | 14 +- source/libs/executor/src/groupoperator.c | 4 +- source/libs/executor/src/projectoperator.c | 10 - source/libs/executor/src/timewindowoperator.c | 2 +- source/libs/executor/src/tlinearhash.c | 4 +- source/libs/executor/src/tsort.c | 4 +- source/libs/function/src/builtinsimpl.c | 276 +++++++++--------- source/libs/function/src/tpercentile.c | 2 +- source/util/src/tpagedbuf.c | 2 +- source/util/test/pageBufferTest.cpp | 34 +-- 13 files changed, 188 insertions(+), 183 deletions(-) diff --git a/include/libs/function/function.h b/include/libs/function/function.h index c8db01625e..3f26eee86a 100644 --- a/include/libs/function/function.h +++ b/include/libs/function/function.h @@ -92,6 +92,8 @@ struct SResultRowEntryInfo; //for selectivity query, the corresponding tag value is assigned if the data is qualified typedef struct SSubsidiaryResInfo { int16_t num; + int32_t rowLen; + char* buf; // serialize data buffer struct SqlFunctionCtx **pCtx; } SSubsidiaryResInfo; @@ -118,6 +120,11 @@ typedef struct SInputColumnInfoData { uint64_t uid; // table uid, used to set the tag value when building the final query result for selectivity functions. 
} SInputColumnInfoData; +typedef struct SSerializeDataHandle { + struct SDiskbasedBuf* pBuf; + int32_t currentPage; +} SSerializeDataHandle; + // sql function runtime context typedef struct SqlFunctionCtx { SInputColumnInfoData input; @@ -137,10 +144,9 @@ typedef struct SqlFunctionCtx { SFuncExecFuncs fpSet; SScalarFuncExecFuncs sfp; struct SExprInfo *pExpr; - struct SDiskbasedBuf *pBuf; struct SSDataBlock *pSrcBlock; struct SSDataBlock *pDstBlock; // used by indefinite rows function to set selectivity - int32_t curBufPage; + SSerializeDataHandle saveHandle; bool isStream; char udfName[TSDB_FUNC_NAME_LEN]; diff --git a/include/util/tpagedbuf.h b/include/util/tpagedbuf.h index 57a489c0dd..9ab89273e6 100644 --- a/include/util/tpagedbuf.h +++ b/include/util/tpagedbuf.h @@ -58,11 +58,10 @@ int32_t createDiskbasedBuf(SDiskbasedBuf** pBuf, int32_t pagesize, int32_t inMem /** * * @param pBuf - * @param groupId * @param pageId * @return */ -void* getNewBufPage(SDiskbasedBuf* pBuf, int32_t groupId, int32_t* pageId); +void* getNewBufPage(SDiskbasedBuf* pBuf, int32_t* pageId); /** * diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index 4b018f81ef..c6bc120c62 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -46,8 +46,8 @@ size_t getResultRowSize(SqlFunctionCtx* pCtx, int32_t numOfOutput) { rowSize += pCtx[i].resDataInfo.interBufSize; } - rowSize += - (numOfOutput * sizeof(bool)); // expand rowSize to mark if col is null for top/bottom result(doSaveTupleData) + rowSize += (numOfOutput * sizeof(bool)); + // expand rowSize to mark if col is null for top/bottom result(saveTupleData) return rowSize; } @@ -1175,7 +1175,6 @@ SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput, SqlFunctionCtx* pCtx = &pFuncCtx[i]; pCtx->functionId = -1; - pCtx->curBufPage = -1; pCtx->pExpr = pExpr; if (pExpr->pExpr->nodeType == QUERY_NODE_FUNCTION) { @@ -1219,6 +1218,7 @@ SqlFunctionCtx* 
createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput, pCtx->isStream = false; pCtx->param = pFunct->pParam; + pCtx->saveHandle.currentPage = -1; } for (int32_t i = 1; i < numOfOutput; ++i) { diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index cf6940c52a..9b102de5bf 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -187,7 +187,7 @@ SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int64_t tableGroupId, int SIDList list = getDataBufPagesIdList(pResultBuf); if (taosArrayGetSize(list) == 0) { - pData = getNewBufPage(pResultBuf, tableGroupId, &pageId); + pData = getNewBufPage(pResultBuf, &pageId); pData->num = sizeof(SFilePage); } else { SPageInfo* pi = getLastPageInfo(list); @@ -198,7 +198,7 @@ SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int64_t tableGroupId, int // release current page first, and prepare the next one releaseBufPageInfo(pResultBuf, pi); - pData = getNewBufPage(pResultBuf, tableGroupId, &pageId); + pData = getNewBufPage(pResultBuf, &pageId); if (pData != NULL) { pData->num = sizeof(SFilePage); } @@ -302,7 +302,7 @@ static int32_t addNewWindowResultBuf(SResultRow* pWindowRes, SDiskbasedBuf* pRes SIDList list = getDataBufPagesIdList(pResultBuf); if (taosArrayGetSize(list) == 0) { - pData = getNewBufPage(pResultBuf, tid, &pageId); + pData = getNewBufPage(pResultBuf, &pageId); pData->num = sizeof(SFilePage); } else { SPageInfo* pi = getLastPageInfo(list); @@ -313,7 +313,7 @@ static int32_t addNewWindowResultBuf(SResultRow* pWindowRes, SDiskbasedBuf* pRes // release current page first, and prepare the next one releaseBufPageInfo(pResultBuf, pi); - pData = getNewBufPage(pResultBuf, tid, &pageId); + pData = getNewBufPage(pResultBuf, &pageId); if (pData != NULL) { pData->num = sizeof(SFilePage); } @@ -3488,7 +3488,7 @@ int32_t initAggInfo(SExprSupp* pSup, SAggSupporter* pAggSup, SExprInfo* pExprInf } for (int32_t i = 0; i < numOfCols; 
++i) { - pSup->pCtx[i].pBuf = pAggSup->pResultBuf; + pSup->pCtx[i].saveHandle.pBuf = pAggSup->pResultBuf; } return TSDB_CODE_SUCCESS; @@ -3520,6 +3520,7 @@ void* destroySqlFunctionCtx(SqlFunctionCtx* pCtx, int32_t numOfOutput) { } taosMemoryFreeClear(pCtx[i].subsidiaries.pCtx); + taosMemoryFreeClear(pCtx[i].subsidiaries.buf); taosMemoryFree(pCtx[i].input.pData); taosMemoryFree(pCtx[i].input.pColumnDataAgg); } @@ -4704,7 +4705,8 @@ int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, const char* pKey, SqlF } int32_t code = createDiskbasedBuf(&pSup->pResultBuf, pageSize, bufSize, pKey, tsTempDir); for (int32_t i = 0; i < numOfOutput; ++i) { - pCtx[i].pBuf = pSup->pResultBuf; + pCtx[i].saveHandle.pBuf = pSup->pResultBuf; } + return code; } diff --git a/source/libs/executor/src/groupoperator.c b/source/libs/executor/src/groupoperator.c index 9d7e833b19..5d123f723e 100644 --- a/source/libs/executor/src/groupoperator.c +++ b/source/libs/executor/src/groupoperator.c @@ -547,7 +547,7 @@ void* getCurrentDataGroupInfo(const SPartitionOperatorInfo* pInfo, SDataGroupInf p = taosHashGet(pInfo->pGroupSet, pInfo->keyBuf, len); int32_t pageId = 0; - pPage = getNewBufPage(pInfo->pBuf, 0, &pageId); + pPage = getNewBufPage(pInfo->pBuf, &pageId); taosArrayPush(p->pPageList, &pageId); *(int32_t *) pPage = 0; @@ -562,7 +562,7 @@ void* getCurrentDataGroupInfo(const SPartitionOperatorInfo* pInfo, SDataGroupInf // add a new page for current group int32_t pageId = 0; - pPage = getNewBufPage(pInfo->pBuf, 0, &pageId); + pPage = getNewBufPage(pInfo->pBuf, &pageId); taosArrayPush(p->pPageList, &pageId); memset(pPage, 0, getBufPageSize(pInfo->pBuf)); } diff --git a/source/libs/executor/src/projectoperator.c b/source/libs/executor/src/projectoperator.c index 0661ccd390..2f12a0d19b 100644 --- a/source/libs/executor/src/projectoperator.c +++ b/source/libs/executor/src/projectoperator.c @@ -195,16 +195,6 @@ static int32_t doIngroupLimitOffset(SLimitInfo* pLimitInfo, uint64_t groupId, SS return 
PROJECT_RETRIEVE_DONE; } -void printDataBlock1(SSDataBlock* pBlock, const char* flag) { - if (!pBlock || pBlock->info.rows == 0) { - qDebug("===stream===printDataBlock: Block is Null or Empty"); - return; - } - char* pBuf = NULL; - qDebug("%s", dumpBlockData(pBlock, flag, &pBuf)); - taosMemoryFreeClear(pBuf); -} - SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) { SProjectOperatorInfo* pProjectInfo = pOperator->info; SOptrBasicInfo* pInfo = &pProjectInfo->binfo; diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 4909c8d387..d87e235ba5 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -3529,7 +3529,7 @@ int32_t initBasicInfoEx(SOptrBasicInfo* pBasicInfo, SExprSupp* pSup, SExprInfo* initBasicInfo(pBasicInfo, pResultBlock); for (int32_t i = 0; i < numOfCols; ++i) { - pSup->pCtx[i].pBuf = NULL; + pSup->pCtx[i].saveHandle.pBuf = NULL; } ASSERT(numOfCols > 0); diff --git a/source/libs/executor/src/tlinearhash.c b/source/libs/executor/src/tlinearhash.c index e0752840db..cffabcb6ac 100644 --- a/source/libs/executor/src/tlinearhash.c +++ b/source/libs/executor/src/tlinearhash.c @@ -97,7 +97,7 @@ static int32_t doAddToBucket(SLHashObj* pHashObj, SLHashBucket* pBucket, int32_t // allocate the overflow buffer page to hold this k/v. 
int32_t newPageId = -1; - SFilePage* pNewPage = getNewBufPage(pHashObj->pBuf, 0, &newPageId); + SFilePage* pNewPage = getNewBufPage(pHashObj->pBuf, &newPageId); if (pNewPage == NULL) { return terrno; } @@ -227,7 +227,7 @@ static int32_t doAddNewBucket(SLHashObj* pHashObj) { } int32_t pageId = -1; - SFilePage* p = getNewBufPage(pHashObj->pBuf, 0, &pageId); + SFilePage* p = getNewBufPage(pHashObj->pBuf, &pageId); if (p == NULL) { return terrno; } diff --git a/source/libs/executor/src/tsort.c b/source/libs/executor/src/tsort.c index fc411e850a..168cd21c44 100644 --- a/source/libs/executor/src/tsort.c +++ b/source/libs/executor/src/tsort.c @@ -180,7 +180,7 @@ static int32_t doAddToBuf(SSDataBlock* pDataBlock, SSortHandle* pHandle) { } int32_t pageId = -1; - void* pPage = getNewBufPage(pHandle->pBuf, pHandle->sourceId, &pageId); + void* pPage = getNewBufPage(pHandle->pBuf, &pageId); if (pPage == NULL) { blockDataDestroy(p); return terrno; @@ -512,7 +512,7 @@ static int32_t doInternalMergeSort(SSortHandle* pHandle) { } int32_t pageId = -1; - void* pPage = getNewBufPage(pHandle->pBuf, pHandle->sourceId, &pageId); + void* pPage = getNewBufPage(pHandle->pBuf, &pageId); if (pPage == NULL) { return terrno; } diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index 32d0472a50..417875838a 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -1146,8 +1146,9 @@ bool getMinmaxFuncEnv(SFunctionNode* UNUSED_PARAM(pFunc), SFuncExecEnv* pEnv) { return true; } -static void doSaveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos); -static void doCopyTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos); +static STuplePos saveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock); +static int32_t updateTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, 
STuplePos* pPos); +static const char* loadTupleData(SqlFunctionCtx* pCtx, const STuplePos* pPos); static int32_t findRowIndex(int32_t start, int32_t num, SColumnInfoData* pCol, const char* tval) { // the data is loaded, not only the block SMA value @@ -1199,7 +1200,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { pBuf->v = *(int64_t*)tval; if (pCtx->subsidiaries.num > 0) { index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval); - doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos); + pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock); } } else { if (IS_SIGNED_NUMERIC_TYPE(type)) { @@ -1211,7 +1212,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { *(int64_t*)&pBuf->v = val; if (pCtx->subsidiaries.num > 0) { index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval); - doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos); + pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock); } } @@ -1224,7 +1225,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { *(uint64_t*)&pBuf->v = val; if (pCtx->subsidiaries.num > 0) { index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval); - doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos); + pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock); } } } else if (type == TSDB_DATA_TYPE_DOUBLE) { @@ -1236,7 +1237,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { *(double*)&pBuf->v = val; if (pCtx->subsidiaries.num > 0) { index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval); - doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos); + pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock); } } } else if (type == TSDB_DATA_TYPE_FLOAT) { @@ -1250,7 +1251,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if (pCtx->subsidiaries.num > 0) { index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, 
pCol, tval); - doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos); + pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock); } } } @@ -1275,7 +1276,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if (!pBuf->assign) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); + pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock); } pBuf->assign = true; } else { @@ -1287,7 +1288,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if ((*val < pData[i]) ^ isMinFunc) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); + updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); } } } @@ -1306,7 +1307,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if (!pBuf->assign) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); + pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock); } pBuf->assign = true; } else { @@ -1318,7 +1319,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if ((*val < pData[i]) ^ isMinFunc) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); + updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); } } } @@ -1337,7 +1338,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if (!pBuf->assign) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); + pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock); } pBuf->assign = true; } else { @@ -1349,7 +1350,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if ((*val < pData[i]) ^ isMinFunc) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); + updateTupleData(pCtx, i, pCtx->pSrcBlock, 
&pBuf->tuplePos); } } } @@ -1368,7 +1369,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if (!pBuf->assign) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); + pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock); } pBuf->assign = true; } else { @@ -1380,7 +1381,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if ((*val < pData[i]) ^ isMinFunc) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); + updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); } } } @@ -1401,7 +1402,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if (!pBuf->assign) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); + pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock); } pBuf->assign = true; } else { @@ -1413,7 +1414,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if ((*val < pData[i]) ^ isMinFunc) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); + updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); } } } @@ -1432,7 +1433,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if (!pBuf->assign) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); + pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock); } pBuf->assign = true; } else { @@ -1444,7 +1445,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if ((*val < pData[i]) ^ isMinFunc) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); + updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); } } } @@ -1463,7 +1464,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if (!pBuf->assign) { *val = 
pData[i]; if (pCtx->subsidiaries.num > 0) { - doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); + pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock); } pBuf->assign = true; } else { @@ -1475,7 +1476,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if ((*val < pData[i]) ^ isMinFunc) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); + updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); } } } @@ -1494,7 +1495,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if (!pBuf->assign) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); + pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock); } pBuf->assign = true; } else { @@ -1506,7 +1507,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if ((*val < pData[i]) ^ isMinFunc) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); + updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); } } } @@ -1526,7 +1527,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if (!pBuf->assign) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); + pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock); } pBuf->assign = true; } else { @@ -1538,7 +1539,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if ((*val < pData[i]) ^ isMinFunc) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); + updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); } } } @@ -1557,7 +1558,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if (!pBuf->assign) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); + pBuf->tuplePos = 
saveTupleData(pCtx, i, pCtx->pSrcBlock); } pBuf->assign = true; } else { @@ -1569,7 +1570,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if ((*val < pData[i]) ^ isMinFunc) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); + updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); } } } @@ -1580,7 +1581,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { _min_max_over: if (numOfElems == 0 && pCtx->subsidiaries.num > 0 && !pBuf->nullTupleSaved ) { - doSaveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, &pBuf->nullTuplePos); + pBuf->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock); pBuf->nullTupleSaved = true; } return numOfElems; @@ -1599,8 +1600,7 @@ int32_t maxFunction(SqlFunctionCtx* pCtx) { } static void setNullSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, int32_t rowIndex); - -static void setSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, const STuplePos* pTuplePos, int32_t rIndex); +static void setSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, const STuplePos* pTuplePos, int32_t rowIndex); int32_t minmaxFunctionFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { SResultRowEntryInfo* pEntryInfo = GET_RES_INFO(pCtx); @@ -1648,34 +1648,29 @@ void setSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, const STuple return; } - int32_t pageId = pTuplePos->pageId; - int32_t offset = pTuplePos->offset; + if (pCtx->saveHandle.pBuf != NULL) { + if (pTuplePos->pageId != -1) { + int32_t numOfCols = pCtx->subsidiaries.num; + const char* p = loadTupleData(pCtx, pTuplePos); - if (pTuplePos->pageId != -1) { - int32_t numOfCols = pCtx->subsidiaries.num; - SFilePage* pPage = getBufPage(pCtx->pBuf, pageId); + bool* nullList = (bool*)p; + char* pStart = (char*)(nullList + numOfCols * sizeof(bool)); - bool* nullList = (bool*)((char*)pPage + offset); - char* pStart = (char*)(nullList + 
numOfCols * sizeof(bool)); + // todo set the offset value to optimize the performance. + for (int32_t j = 0; j < numOfCols; ++j) { + SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[j]; + int32_t dstSlotId = pc->pExpr->base.resSchema.slotId; - // todo set the offset value to optimize the performance. - for (int32_t j = 0; j < pCtx->subsidiaries.num; ++j) { - SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[j]; - - SFunctParam* pFuncParam = &pc->pExpr->base.pParam[0]; - int32_t dstSlotId = pc->pExpr->base.resSchema.slotId; - - SColumnInfoData* pDstCol = taosArrayGet(pBlock->pDataBlock, dstSlotId); - ASSERT(pc->pExpr->base.resSchema.bytes == pDstCol->info.bytes); - if (nullList[j]) { - colDataAppendNULL(pDstCol, rowIndex); - } else { - colDataAppend(pDstCol, rowIndex, pStart, false); + SColumnInfoData* pDstCol = taosArrayGet(pBlock->pDataBlock, dstSlotId); + ASSERT(pc->pExpr->base.resSchema.bytes == pDstCol->info.bytes); + if (nullList[j]) { + colDataAppendNULL(pDstCol, rowIndex); + } else { + colDataAppend(pDstCol, rowIndex, pStart, false); + } + pStart += pDstCol->info.bytes; } - pStart += pDstCol->info.bytes; } - - releaseBufPage(pCtx->pBuf, pPage); } } @@ -2756,15 +2751,15 @@ static FORCE_INLINE TSKEY getRowPTs(SColumnInfoData* pTsColInfo, int32_t rowInde return *(TSKEY*)colDataGetData(pTsColInfo, rowIndex); } -static void saveTupleData(const SSDataBlock* pSrcBlock, int32_t rowIndex, SqlFunctionCtx* pCtx, SFirstLastRes* pInfo) { +static void firstlastSaveTupleData(const SSDataBlock* pSrcBlock, int32_t rowIndex, SqlFunctionCtx* pCtx, SFirstLastRes* pInfo) { if (pCtx->subsidiaries.num <= 0) { return; } if (!pInfo->hasResult) { - doSaveTupleData(pCtx, rowIndex, pSrcBlock, &pInfo->pos); + pInfo->pos = saveTupleData(pCtx, rowIndex, pSrcBlock); } else { - doCopyTupleData(pCtx, rowIndex, pSrcBlock, &pInfo->pos); + updateTupleData(pCtx, rowIndex, pSrcBlock, &pInfo->pos); } } @@ -2778,7 +2773,7 @@ static void doSaveCurrentVal(SqlFunctionCtx* pCtx, int32_t rowIndex, int64_t cur 
memcpy(pInfo->buf, pData, pInfo->bytes); pInfo->ts = currentTs; - saveTupleData(pCtx->pSrcBlock, rowIndex, pCtx, pInfo); + firstlastSaveTupleData(pCtx->pSrcBlock, rowIndex, pCtx, pInfo); pInfo->hasResult = true; } @@ -2982,7 +2977,7 @@ static void firstLastTransferInfo(SqlFunctionCtx* pCtx, SFirstLastRes* pInput, S pOutput->bytes = pInput->bytes; memcpy(pOutput->buf, pInput->buf, pOutput->bytes); - saveTupleData(pCtx->pSrcBlock, start, pCtx, pOutput); + firstlastSaveTupleData(pCtx->pSrcBlock, start, pCtx, pOutput); pOutput->hasResult = true; } @@ -3087,7 +3082,7 @@ static void doSaveLastrow(SqlFunctionCtx* pCtx, char* pData, int32_t rowIndex, i } pInfo->ts = cts; - saveTupleData(pCtx->pSrcBlock, rowIndex, pCtx, pInfo); + firstlastSaveTupleData(pCtx->pSrcBlock, rowIndex, pCtx, pInfo); pInfo->hasResult = true; } @@ -3420,7 +3415,7 @@ int32_t topFunction(SqlFunctionCtx* pCtx) { } if (numOfElems == 0 && pCtx->subsidiaries.num > 0 && !pRes->nullTupleSaved) { - doSaveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, &pRes->nullTuplePos); + pRes->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock); pRes->nullTupleSaved = true; } return TSDB_CODE_SUCCESS; @@ -3448,7 +3443,7 @@ int32_t bottomFunction(SqlFunctionCtx* pCtx) { } if (numOfElems == 0 && pCtx->subsidiaries.num > 0 && !pRes->nullTupleSaved) { - doSaveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, &pRes->nullTuplePos); + pRes->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock); pRes->nullTupleSaved = true; } @@ -3500,7 +3495,7 @@ void doAddIntoResult(SqlFunctionCtx* pCtx, void* pData, int32_t rowIndex, SSData // save the data of this tuple if (pCtx->subsidiaries.num > 0) { - doSaveTupleData(pCtx, rowIndex, pSrcBlock, &pItem->tuplePos); + pItem->tuplePos = saveTupleData(pCtx, rowIndex, pSrcBlock); } #ifdef BUF_PAGE_DEBUG qDebug("page_saveTuple i:%d, item:%p,pageId:%d, offset:%d\n", pEntryInfo->numOfRes, pItem, pItem->tuplePos.pageId, @@ -3524,7 +3519,7 @@ 
void doAddIntoResult(SqlFunctionCtx* pCtx, void* pData, int32_t rowIndex, SSData // save the data of this tuple by over writing the old data if (pCtx->subsidiaries.num > 0) { - doCopyTupleData(pCtx, rowIndex, pSrcBlock, &pItem->tuplePos); + updateTupleData(pCtx, rowIndex, pSrcBlock, &pItem->tuplePos); } #ifdef BUF_PAGE_DEBUG qDebug("page_copyTuple pageId:%d, offset:%d", pItem->tuplePos.pageId, pItem->tuplePos.offset); @@ -3541,38 +3536,13 @@ void doAddIntoResult(SqlFunctionCtx* pCtx, void* pData, int32_t rowIndex, SSData * |(n columns, one bit for each column)| src column #1| src column #2| * +------------------------------------+--------------+--------------+ */ -void doSaveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos) { - SFilePage* pPage = NULL; +void* serializeTupleData(const SSDataBlock* pSrcBlock, int32_t rowIndex, SSubsidiaryResInfo* pSubsidiaryies, char* buf) { + char* nullList = buf; + char* pStart = (char*)(nullList + sizeof(bool) * pSubsidiaryies->num); - // todo refactor: move away - int32_t completeRowSize = pCtx->subsidiaries.num * sizeof(bool); - for (int32_t j = 0; j < pCtx->subsidiaries.num; ++j) { - SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[j]; - completeRowSize += pc->pExpr->base.resSchema.bytes; - } - - if (pCtx->curBufPage == -1) { - pPage = getNewBufPage(pCtx->pBuf, 0, &pCtx->curBufPage); - pPage->num = sizeof(SFilePage); - } else { - pPage = getBufPage(pCtx->pBuf, pCtx->curBufPage); - if (pPage->num + completeRowSize > getBufPageSize(pCtx->pBuf)) { - // current page is all used, let's prepare a new buffer page - releaseBufPage(pCtx->pBuf, pPage); - pPage = getNewBufPage(pCtx->pBuf, 0, &pCtx->curBufPage); - pPage->num = sizeof(SFilePage); - } - } - - pPos->pageId = pCtx->curBufPage; - pPos->offset = pPage->num; - - // keep the current row data, extract method int32_t offset = 0; - bool* nullList = (bool*)((char*)pPage + pPage->num); - char* pStart = (char*)(nullList + sizeof(bool) * 
pCtx->subsidiaries.num); - for (int32_t i = 0; i < pCtx->subsidiaries.num; ++i) { - SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[i]; + for (int32_t i = 0; i < pSubsidiaryies->num; ++i) { + SqlFunctionCtx* pc = pSubsidiaryies->pCtx[i]; SFunctParam* pFuncParam = &pc->pExpr->base.pParam[0]; int32_t srcSlotId = pFuncParam->pCol->slotId; @@ -3593,50 +3563,90 @@ void doSaveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* offset += pCol->info.bytes; } - pPage->num += completeRowSize; - - setBufPageDirty(pPage, true); - releaseBufPage(pCtx->pBuf, pPage); -#ifdef BUF_PAGE_DEBUG - qDebug("page_saveTuple pos:%p,pageId:%d, offset:%d\n", pPos, pPos->pageId, pPos->offset); -#endif + return buf; } -void doCopyTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos) { - SFilePage* pPage = getBufPage(pCtx->pBuf, pPos->pageId); +static STuplePos doSaveTupleData(SSerializeDataHandle* pHandle, const void* pBuf, size_t length) { + STuplePos p = {0}; + if (pHandle->pBuf != NULL) { + SFilePage* pPage = NULL; - int32_t numOfCols = pCtx->subsidiaries.num; - - bool* nullList = (bool*)((char*)pPage + pPos->offset); - char* pStart = (char*)(nullList + numOfCols * sizeof(bool)); - - int32_t offset = 0; - for (int32_t i = 0; i < numOfCols; ++i) { - SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[i]; - SFunctParam* pFuncParam = &pc->pExpr->base.pParam[0]; - int32_t srcSlotId = pFuncParam->pCol->slotId; - - SColumnInfoData* pCol = taosArrayGet(pSrcBlock->pDataBlock, srcSlotId); - if ((nullList[i] = colDataIsNull_s(pCol, rowIndex)) == true) { - offset += pCol->info.bytes; - continue; - } - - char* p = colDataGetData(pCol, rowIndex); - if (IS_VAR_DATA_TYPE(pCol->info.type)) { - memcpy(pStart + offset, p, (pCol->info.type == TSDB_DATA_TYPE_JSON) ? 
getJsonValueLen(p) : varDataTLen(p)); + if (pHandle->currentPage == -1) { + pPage = getNewBufPage(pHandle->pBuf, &pHandle->currentPage); + pPage->num = sizeof(SFilePage); } else { - memcpy(pStart + offset, p, pCol->info.bytes); + pPage = getBufPage(pHandle->pBuf, pHandle->currentPage); + if (pPage->num + length > getBufPageSize(pHandle->pBuf)) { + // current page is all used, let's prepare a new buffer page + releaseBufPage(pHandle->pBuf, pPage); + pPage = getNewBufPage(pHandle->pBuf, &pHandle->currentPage); + pPage->num = sizeof(SFilePage); + } } - offset += pCol->info.bytes; + p = (STuplePos) {.pageId = pHandle->currentPage, .offset = pPage->num}; + memcpy(pPage->data + pPage->num, pBuf, length); + + pPage->num += length; + setBufPageDirty(pPage, true); + releaseBufPage(pHandle->pBuf, pPage); + } else { + // other tuple save policy } - setBufPageDirty(pPage, true); - releaseBufPage(pCtx->pBuf, pPage); -#ifdef BUF_PAGE_DEBUG - qDebug("page_copyTuple pos:%p, pageId:%d, offset:%d", pPos, pPos->pageId, pPos->offset); -#endif + return p; +} + +STuplePos saveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock) { + if (pCtx->subsidiaries.rowLen == 0) { + int32_t rowLen = 0; + for (int32_t j = 0; j < pCtx->subsidiaries.num; ++j) { + SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[j]; + rowLen += pc->pExpr->base.resSchema.bytes; + } + + pCtx->subsidiaries.rowLen = rowLen + pCtx->subsidiaries.num * sizeof(bool); + pCtx->subsidiaries.buf = taosMemoryMalloc(pCtx->subsidiaries.rowLen); + } + + char* buf = serializeTupleData(pSrcBlock, rowIndex, &pCtx->subsidiaries, pCtx->subsidiaries.buf); + return doSaveTupleData(&pCtx->saveHandle, buf, pCtx->subsidiaries.rowLen); +} + +static int32_t doUpdateTupleData(SSerializeDataHandle* pHandle, const void* pBuf, size_t length, STuplePos* pPos) { + if (pHandle->pBuf != NULL) { + SFilePage* pPage = getBufPage(pHandle->pBuf, pPos->pageId); + memcpy(pPage->data + pPos->offset, pBuf, length); + setBufPageDirty(pPage, 
true); + releaseBufPage(pHandle->pBuf, pPage); + } else { + + } + + return TSDB_CODE_SUCCESS; +} + +static int32_t updateTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos) { + int32_t rowLen = 0; + int32_t completeRowSize = rowLen + pCtx->subsidiaries.num * sizeof(bool); + char* buf = serializeTupleData(pSrcBlock, rowIndex, &pCtx->subsidiaries, pCtx->subsidiaries.buf); + doUpdateTupleData(&pCtx->saveHandle, buf, completeRowSize, pPos); + return TSDB_CODE_SUCCESS; +} + +static char* doLoadTupleData(SSerializeDataHandle* pHandle, const STuplePos* pPos) { + if (pHandle->pBuf != NULL) { + SFilePage* pPage = getBufPage(pHandle->pBuf, pPos->pageId); + char* p = pPage->data + pPos->offset; + releaseBufPage(pHandle->pBuf, pPage); + return p; + } else { + return NULL; + } +} + +static const char* loadTupleData(SqlFunctionCtx* pCtx, const STuplePos* pPos) { + return doLoadTupleData(&pCtx->saveHandle, pPos); } int32_t topBotFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { @@ -3788,8 +3798,6 @@ int32_t spreadFunction(SqlFunctionCtx* pCtx) { SColumnInfoData* pCol = pInput->pData[0]; int32_t start = pInput->startRowIndex; - int32_t numOfRows = pInput->numOfRows; - // check the valid data one by one for (int32_t i = start; i < pInput->numOfRows + start; ++i) { if (colDataIsNull_f(pCol->nullbitmap, i)) { @@ -4964,7 +4972,7 @@ static void doReservoirSample(SqlFunctionCtx* pCtx, SSampleInfo* pInfo, char* da if (pInfo->numSampled < pInfo->samples) { sampleAssignResult(pInfo, data, pInfo->numSampled); if (pCtx->subsidiaries.num > 0) { - doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pInfo->tuplePos[pInfo->numSampled]); + pInfo->tuplePos[pInfo->numSampled] = saveTupleData(pCtx, index, pCtx->pSrcBlock); } pInfo->numSampled++; } else { @@ -4972,7 +4980,7 @@ static void doReservoirSample(SqlFunctionCtx* pCtx, SSampleInfo* pInfo, char* da if (j < pInfo->samples) { sampleAssignResult(pInfo, data, j); if (pCtx->subsidiaries.num > 0) { - 
doCopyTupleData(pCtx, index, pCtx->pSrcBlock, &pInfo->tuplePos[j]); + updateTupleData(pCtx, index, pCtx->pSrcBlock, &pInfo->tuplePos[j]); } } } @@ -4995,7 +5003,7 @@ int32_t sampleFunction(SqlFunctionCtx* pCtx) { } if (pInfo->numSampled == 0 && pCtx->subsidiaries.num > 0 && !pInfo->nullTupleSaved) { - doSaveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, &pInfo->nullTuplePos); + pInfo->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock); pInfo->nullTupleSaved = true; } diff --git a/source/libs/function/src/tpercentile.c b/source/libs/function/src/tpercentile.c index dbe0b6bb3a..4c58c0abe5 100644 --- a/source/libs/function/src/tpercentile.c +++ b/source/libs/function/src/tpercentile.c @@ -372,7 +372,7 @@ int32_t tMemBucketPut(tMemBucket *pBucket, const void *data, size_t size) { pPageIdList = pList; } - pSlot->info.data = getNewBufPage(pBucket->pBuffer, groupId, &pageId); + pSlot->info.data = getNewBufPage(pBucket->pBuffer, &pageId); pSlot->info.pageId = pageId; taosArrayPush(pPageIdList, &pageId); } diff --git a/source/util/src/tpagedbuf.c b/source/util/src/tpagedbuf.c index 4d5532b9a6..2767fed937 100644 --- a/source/util/src/tpagedbuf.c +++ b/source/util/src/tpagedbuf.c @@ -371,7 +371,7 @@ int32_t createDiskbasedBuf(SDiskbasedBuf** pBuf, int32_t pagesize, int32_t inMem return TSDB_CODE_SUCCESS; } -void* getNewBufPage(SDiskbasedBuf* pBuf, int32_t groupId, int32_t* pageId) { +void* getNewBufPage(SDiskbasedBuf* pBuf, int32_t* pageId) { pBuf->statis.getPages += 1; char* availablePage = NULL; diff --git a/source/util/test/pageBufferTest.cpp b/source/util/test/pageBufferTest.cpp index eaf198a483..1a057c5875 100644 --- a/source/util/test/pageBufferTest.cpp +++ b/source/util/test/pageBufferTest.cpp @@ -18,7 +18,7 @@ void simpleTest() { int32_t pageId = 0; int32_t groupId = 0; - SFilePage* pBufPage = static_cast(getNewBufPage(pBuf, groupId, &pageId)); + SFilePage* pBufPage = static_cast(getNewBufPage(pBuf, &pageId)); ASSERT_TRUE(pBufPage != 
NULL); ASSERT_EQ(getTotalBufSize(pBuf), 1024); @@ -29,26 +29,26 @@ void simpleTest() { releaseBufPage(pBuf, pBufPage); - SFilePage* pBufPage1 = static_cast(getNewBufPage(pBuf, groupId, &pageId)); + SFilePage* pBufPage1 = static_cast(getNewBufPage(pBuf, &pageId)); SFilePage* t = static_cast(getBufPage(pBuf, pageId)); ASSERT_TRUE(t == pBufPage1); - SFilePage* pBufPage2 = static_cast(getNewBufPage(pBuf, groupId, &pageId)); + SFilePage* pBufPage2 = static_cast(getNewBufPage(pBuf, &pageId)); SFilePage* t1 = static_cast(getBufPage(pBuf, pageId)); ASSERT_TRUE(t1 == pBufPage2); - SFilePage* pBufPage3 = static_cast(getNewBufPage(pBuf, groupId, &pageId)); + SFilePage* pBufPage3 = static_cast(getNewBufPage(pBuf, &pageId)); SFilePage* t2 = static_cast(getBufPage(pBuf, pageId)); ASSERT_TRUE(t2 == pBufPage3); - SFilePage* pBufPage4 = static_cast(getNewBufPage(pBuf, groupId, &pageId)); + SFilePage* pBufPage4 = static_cast(getNewBufPage(pBuf, &pageId)); SFilePage* t3 = static_cast(getBufPage(pBuf, pageId)); ASSERT_TRUE(t3 == pBufPage4); releaseBufPage(pBuf, pBufPage2); - SFilePage* pBufPage5 = static_cast(getNewBufPage(pBuf, groupId, &pageId)); + SFilePage* pBufPage5 = static_cast(getNewBufPage(pBuf, &pageId)); SFilePage* t4 = static_cast(getBufPage(pBuf, pageId)); ASSERT_TRUE(t4 == pBufPage5); @@ -64,7 +64,7 @@ void writeDownTest() { int32_t groupId = 0; int32_t nx = 12345; - SFilePage* pBufPage = static_cast(getNewBufPage(pBuf, groupId, &pageId)); + SFilePage* pBufPage = static_cast(getNewBufPage(pBuf, &pageId)); ASSERT_TRUE(pBufPage != NULL); *(int32_t*)(pBufPage->data) = nx; @@ -73,22 +73,22 @@ void writeDownTest() { setBufPageDirty(pBufPage, true); releaseBufPage(pBuf, pBufPage); - SFilePage* pBufPage1 = static_cast(getNewBufPage(pBuf, groupId, &pageId)); + SFilePage* pBufPage1 = static_cast(getNewBufPage(pBuf, &pageId)); SFilePage* t1 = static_cast(getBufPage(pBuf, pageId)); ASSERT_TRUE(t1 == pBufPage1); ASSERT_TRUE(pageId == 1); - SFilePage* pBufPage2 = 
static_cast(getNewBufPage(pBuf, groupId, &pageId)); + SFilePage* pBufPage2 = static_cast(getNewBufPage(pBuf, &pageId)); SFilePage* t2 = static_cast(getBufPage(pBuf, pageId)); ASSERT_TRUE(t2 == pBufPage2); ASSERT_TRUE(pageId == 2); - SFilePage* pBufPage3 = static_cast(getNewBufPage(pBuf, groupId, &pageId)); + SFilePage* pBufPage3 = static_cast(getNewBufPage(pBuf, &pageId)); SFilePage* t3 = static_cast(getBufPage(pBuf, pageId)); ASSERT_TRUE(t3 == pBufPage3); ASSERT_TRUE(pageId == 3); - SFilePage* pBufPage4 = static_cast(getNewBufPage(pBuf, groupId, &pageId)); + SFilePage* pBufPage4 = static_cast(getNewBufPage(pBuf, &pageId)); SFilePage* t4 = static_cast(getBufPage(pBuf, pageId)); ASSERT_TRUE(t4 == pBufPage4); ASSERT_TRUE(pageId == 4); @@ -113,32 +113,32 @@ void recyclePageTest() { int32_t groupId = 0; int32_t nx = 12345; - SFilePage* pBufPage = static_cast(getNewBufPage(pBuf, groupId, &pageId)); + SFilePage* pBufPage = static_cast(getNewBufPage(pBuf, &pageId)); ASSERT_TRUE(pBufPage != NULL); releaseBufPage(pBuf, pBufPage); - SFilePage* pBufPage1 = static_cast(getNewBufPage(pBuf, groupId, &pageId)); + SFilePage* pBufPage1 = static_cast(getNewBufPage(pBuf, &pageId)); SFilePage* t1 = static_cast(getBufPage(pBuf, pageId)); ASSERT_TRUE(t1 == pBufPage1); ASSERT_TRUE(pageId == 1); - SFilePage* pBufPage2 = static_cast(getNewBufPage(pBuf, groupId, &pageId)); + SFilePage* pBufPage2 = static_cast(getNewBufPage(pBuf, &pageId)); SFilePage* t2 = static_cast(getBufPage(pBuf, pageId)); ASSERT_TRUE(t2 == pBufPage2); ASSERT_TRUE(pageId == 2); - SFilePage* pBufPage3 = static_cast(getNewBufPage(pBuf, groupId, &pageId)); + SFilePage* pBufPage3 = static_cast(getNewBufPage(pBuf, &pageId)); SFilePage* t3 = static_cast(getBufPage(pBuf, pageId)); ASSERT_TRUE(t3 == pBufPage3); ASSERT_TRUE(pageId == 3); - SFilePage* pBufPage4 = static_cast(getNewBufPage(pBuf, groupId, &pageId)); + SFilePage* pBufPage4 = static_cast(getNewBufPage(pBuf, &pageId)); SFilePage* t4 = static_cast(getBufPage(pBuf, 
pageId)); ASSERT_TRUE(t4 == pBufPage4); ASSERT_TRUE(pageId == 4); releaseBufPage(pBuf, t4); - SFilePage* pBufPage5 = static_cast(getNewBufPage(pBuf, groupId, &pageId)); + SFilePage* pBufPage5 = static_cast(getNewBufPage(pBuf, &pageId)); SFilePage* t5 = static_cast(getBufPage(pBuf, pageId)); ASSERT_TRUE(t5 == pBufPage5); ASSERT_TRUE(pageId == 5); From d2ed6ff0e8964d3bbb05bf09a1e95053a18f0bde Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 29 Aug 2022 22:42:15 +0800 Subject: [PATCH 15/72] fix(query): set correct page buffer id. --- source/libs/executor/inc/executorimpl.h | 12 ++++++---- source/libs/executor/src/executorimpl.c | 24 ++++++++----------- source/libs/executor/src/timewindowoperator.c | 2 +- 3 files changed, 18 insertions(+), 20 deletions(-) diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h index 05bdc39701..594955d469 100644 --- a/source/libs/executor/inc/executorimpl.h +++ b/source/libs/executor/inc/executorimpl.h @@ -297,10 +297,11 @@ enum { }; typedef struct SAggSupporter { - SHashObj* pResultRowHashTable; // quick locate the window object for each result - char* keyBuf; // window key buffer - SDiskbasedBuf* pResultBuf; // query result buffer based on blocked-wised disk file - int32_t resultRowSize; // the result buffer size for each result row, with the meta data size for each row + SHashObj* pResultRowHashTable; // quick locate the window object for each result + char* keyBuf; // window key buffer + SDiskbasedBuf* pResultBuf; // query result buffer based on blocked-wised disk file + int32_t resultRowSize; // the result buffer size for each result row, with the meta data size for each row + int32_t currentPageId; // current write page id } SAggSupporter; typedef struct { @@ -429,6 +430,7 @@ typedef struct SStreamAggSupporter { char* pKeyBuf; // window key buffer SDiskbasedBuf* pResultBuf; // query result buffer based on blocked-wised disk file int32_t resultRowSize; // the result buffer size for each 
result row, with the meta data size for each row + int32_t currentPageId; // buffer page that is active SSDataBlock* pScanBlock; } SStreamAggSupporter; @@ -991,7 +993,7 @@ int32_t getNumOfRowsInTimeWindow(SDataBlockInfo* pDataBlockInfo, TSKEY* pPrimary int32_t binarySearchForKey(char* pValue, int num, TSKEY key, int order); int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, const char* pKey, SqlFunctionCtx* pCtx, int32_t numOfOutput, int32_t size); -SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int64_t tableGroupId, int32_t interBufSize); +SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int32_t* currentPageId, int32_t interBufSize); SResultWindowInfo* getSessionTimeWindow(SStreamAggSupporter* pAggSup, TSKEY startTs, TSKEY endTs, uint64_t groupId, int64_t gap, int32_t* pIndex); SResultWindowInfo* getCurSessionWindow(SStreamAggSupporter* pAggSup, TSKEY startTs, diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index 9b102de5bf..30d7283264 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -179,24 +179,21 @@ static bool chkResultRowFromKey(STaskRuntimeEnv* pRuntimeEnv, SResultRowInfo* pR } #endif -SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int64_t tableGroupId, int32_t interBufSize) { +SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int32_t* currentPageId, int32_t interBufSize) { SFilePage* pData = NULL; // in the first scan, new space needed for results int32_t pageId = -1; - SIDList list = getDataBufPagesIdList(pResultBuf); - - if (taosArrayGetSize(list) == 0) { + if (*currentPageId == -1) { pData = getNewBufPage(pResultBuf, &pageId); pData->num = sizeof(SFilePage); } else { - SPageInfo* pi = getLastPageInfo(list); - pData = getBufPage(pResultBuf, getPageId(pi)); - pageId = getPageId(pi); + pData = getBufPage(pResultBuf, *currentPageId); + pageId = *currentPageId; if (pData->num + interBufSize > getBufPageSize(pResultBuf)) { // 
release current page first, and prepare the next one - releaseBufPageInfo(pResultBuf, pi); + releaseBufPage(pResultBuf, pData); pData = getNewBufPage(pResultBuf, &pageId); if (pData != NULL) { @@ -215,9 +212,9 @@ SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int64_t tableGroupId, int SResultRow* pResultRow = (SResultRow*)((char*)pData + pData->num); pResultRow->pageId = pageId; pResultRow->offset = (int32_t)pData->num; + *currentPageId = pageId; pData->num += interBufSize; - return pResultRow; } @@ -263,11 +260,8 @@ SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pR // allocate a new buffer page if (pResult == NULL) { -#ifdef BUF_PAGE_DEBUG - qDebug("page_2"); -#endif ASSERT(pSup->resultRowSize > 0); - pResult = getNewResultRow(pResultBuf, groupId, pSup->resultRowSize); + pResult = getNewResultRow(pResultBuf, &pSup->currentPageId, pSup->resultRowSize); initResultRow(pResult); @@ -3093,7 +3087,7 @@ int32_t aggDecodeResultRow(SOperatorInfo* pOperator, char* result) { offset += sizeof(int32_t); uint64_t tableGroupId = *(uint64_t*)(result + offset); - SResultRow* resultRow = getNewResultRow(pSup->pResultBuf, tableGroupId, pSup->resultRowSize); + SResultRow* resultRow = getNewResultRow(pSup->pResultBuf, &pSup->currentPageId, pSup->resultRowSize); if (!resultRow) { return TSDB_CODE_TSC_INVALID_INPUT; } @@ -3442,6 +3436,7 @@ int32_t doInitAggInfoSup(SAggSupporter* pAggSup, SqlFunctionCtx* pCtx, int32_t n const char* pKey) { _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); + pAggSup->currentPageId = -1; pAggSup->resultRowSize = getResultRowSize(pCtx, numOfOutput); pAggSup->keyBuf = taosMemoryCalloc(1, keyBufSize + POINTER_BYTES + sizeof(int64_t)); pAggSup->pResultRowHashTable = taosHashInit(10, hashFn, true, HASH_NO_LOCK); @@ -4678,6 +4673,7 @@ int32_t getOperatorExplainExecInfo(SOperatorInfo* operatorInfo, SArray* pExecInf int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, const char* pKey, 
SqlFunctionCtx* pCtx, int32_t numOfOutput, int32_t size) { + pSup->currentPageId = -1; pSup->resultRowSize = getResultRowSize(pCtx, numOfOutput); pSup->keySize = sizeof(int64_t) + sizeof(TSKEY); pSup->pKeyBuf = taosMemoryCalloc(1, pSup->keySize); diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index d87e235ba5..f4ebf17646 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -3790,7 +3790,7 @@ static int32_t setWindowOutputBuf(SResultWindowInfo* pWinInfo, SResultRow** pRes } if (pWinInfo->pos.pageId == -1) { - *pResult = getNewResultRow(pAggSup->pResultBuf, groupId, pAggSup->resultRowSize); + *pResult = getNewResultRow(pAggSup->pResultBuf, &pAggSup->currentPageId, pAggSup->resultRowSize); if (*pResult == NULL) { return TSDB_CODE_OUT_OF_MEMORY; } From 45616c6548163f9dc31fd541587e57f5b5b1be60 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 30 Aug 2022 09:47:42 +0800 Subject: [PATCH 16/72] fix(query):fix syntax error. 
--- source/libs/executor/inc/executorimpl.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h index e1c9c6a114..f0518a72ab 100644 --- a/source/libs/executor/inc/executorimpl.h +++ b/source/libs/executor/inc/executorimpl.h @@ -299,7 +299,7 @@ enum { }; typedef struct SAggSupporter { - SHashObj* pResultRowHashTable; // quick locate the window object for each result + SSHashObj* pResultRowHashTable; // quick locate the window object for each result char* keyBuf; // window key buffer SDiskbasedBuf* pResultBuf; // query result buffer based on blocked-wised disk file int32_t resultRowSize; // the result buffer size for each result row, with the meta data size for each row @@ -328,7 +328,6 @@ typedef struct STableScanInfo { SQueryTableDataCond cond; int32_t scanFlag; // table scan flag to denote if it is a repeat/reverse/main scan int32_t dataBlockLoadFlag; -// SInterval interval; // if the upstream is an interval operator, the interval info is also kept here to get the time window to check if current data block needs to be loaded. 
SSampleExecInfo sample; // sample execution info int32_t currentGroupId; int32_t currentTable; From a5474d5e905cacc7bcaf481f5c8010fe25da8062 Mon Sep 17 00:00:00 2001 From: Pan YANG Date: Tue, 30 Aug 2022 10:16:11 +0800 Subject: [PATCH 17/72] fix: remove unused logo file --- TDenginelogo.png | Bin 19663 -> 0 bytes 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 TDenginelogo.png diff --git a/TDenginelogo.png b/TDenginelogo.png deleted file mode 100644 index 19a92592d7e8871778f5f3a6edd6314260d62551..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 19663 zcmZ^~b95%bw>2Ewwr$&*IGH$^*tTukwmF%2V%s*J*v1om^Sk%HSnpb2|It;od+&4V zoa$cPU0ofetoQ>F4i63l1O!n=T0#W`1QhFET>%E_-`>PUZuVaRVIis@3IfuY0RLeG z`L9i4BCVnT0^&^t0umSs0s{O81)hR{xUqnMoEw6G@MVI4U_0ji{V4zf0tRoTrsb-o zAkSywV8>)+>R@cfwZMB-^@YwyD6DM;-B>zQm zwGkxKQcxxlcW^c%;bh`qVj&ZPBOxIXa5gpPQ<0GRANjwQAep7Bt0NyXvxkQVlLtGK zgR=!QD=#lEGYcCt8yn+42BV9Yy{nNYqrD6He;fHW;i>)Wb4(Lwz%lEgc2fxov)9jlKp^AJdER4K}AsAQ#Da3h$ksi9b z!hc%mDAF+UFzK*jnSp%}5c)1KOA(cS(66LJDi;rm_3*sJ5hyl`p$We%Hv}nL{`Z5Bq6Rm zN&2)9=L-F6P}Q7)`ZlQ!kn#%By**WnRpd&iqH!Zu8%xLf65H3YSyGcFiI&v!TGZ;= zH<7(Mt%EFfow8r8TcLqxVuuP}g0VUc$PNqwX*65i-yhBOt77PZk#g;8upj?=w5Gv*)RR5~9v5=y9i*0aeY zF!`S7NZ+CE9qT{!SG@!Mgb77uYg#A(-Cv*8t59;}dHE9d8T*@dp$L(6>j|tX+p)*0 z*0(;HSk4G(dzI&g`Uje6CPTCJ-AX2V0N5=%roinhxt#_ZtZ zv{gZK8EPnsUvOba)evx&>GK3CO}F!wRa1O}#il$q_~$?ON>vU3M(SZ%Ro>|(g3;z| zMRf=vcZaW}No84e>0k<_Q5N=i!d1ww+yv)55?6$%Mn>^^{8vj-CT zCB1u$BG@D|@rgXxMGIGv-l&4(t`c&MsN&Q!Sn7qGo(GyXDsoFGlR{n$)eF^-C z!5g1b~U6d67(RB?5(}#aVB3MsiNefST$k_JG|x`)UazSW%P$ zQdARXO=+{1y*^&=SARQP?6csSted9cx9*66xsH*+JAXyeyg z7Fe~ebPk9#?}+BtwNU0OQ3lGGI`J4dW!I~=R`^g!a`R&&y^_cAfj5g)>Ebo10sYSB z3MMOs0$wm{l0GZMt2Xo6#o z>w{g0V)gqc zwVj=c#*O@gix|iXLER8g&F6S24%UXxG(8sbgK_>ku0)m*He0miKo$8(>gHoJ9q>C#^_| zJ$tWMot6Is-rta`^etPV(|CGB4~9@43qrMXSz_L5R2`c+3bKj1;`dGATP101x^=V1 z)v%n&%%#jd>p-v+ii&S}>z!Qh%Ql7pT(4;R-w5c6*=#IL17|2VZjwZDA*{_aKKsv| 
z9zUN;dxbc+3mp87_;K&btS?|$;y}UBVync^rQ7H51K@$UBX+lP4Dpp`Z_#CWBAmj-OA z-sb}|r|bL0W^|f@1JVnDl?ZPTRF15}?P!|^__W|ws67jgc+zHg6b02GeEFyZy|(Bt ztWjNc-&Z-r)}hHf)$^!H*Nr`5sDgKKK{p&7r3;_&C<@@0v=r4nJNc~5V=ZYO-N z{BqI? zq1l|_>U{<_r(Vl)Ws2tVo@jl6{0Of4_i|^L5XuIMhe|3w_D&0Fh)A5Y0X5ys@s#g* ziG_YRmy>hYNSwI_vXdMJeg8P%`XEse5?&Auqgbt}k}H{&bqEYdWHI!w$`IU@rd~Bwy5UTKJ(K~-BZc5JN z5KDy?tzH|4kDZL0=$=7q8lV}SQF2cO0UkHUXS_wInE-kLH5}L+m?fGUa5vheAW9Z3 zsi#oWs`{!8oq91>NYF`98W?r{C^K*0%_H!n0HU=l46mTt zkrHCD+HSfZZQ+RN)tV8%g?$IK^!1p=(wSxFBgSleGCQKXXR4$UaDzlC( zzG$1@4}*_!1>|yezA!nl5Y{qg>WCi!m~H4*UE(A!{W4?}kTJqGxGNw+touckrNU9$ zR@Tb0Mq)B&#zj$&(Im>u1LBETzb9I&LvcH@n}iYS6UR@Yy~)5$t*0ovuN5?c!OWKm z7Qh$z-=H_bbR?5LAb@SSPq-1IJX=t0aavrhJs*N{H||9 z;cj|!WrXC7^2ddo*g-)sb$|QTy@RH4-ZQPPM5~$*M2SS35jO1lhgdI(@g^ z;)r;l0pP66b(|4gOpP^t8n^qpmDcFFZyOE~#fp{Dr^k3eZx7s0xx0)9UuQK{YRwvu zoR~`g$d?9Sf`%Tt- zq(h^_?#x<{fbVrWF9uz3FfH`$ZRnLEe&I%tOZOR=2I09bPZB1)QFXovYRrNHSA0@ZbMY zu~*rjrpklIt>x=%_c5vYOBBzXy6Rg9Wd{4F5A#}BXT~74M9o{Z@8sj@ z@FO8}0lqkY0oDb<8;vHB4l>6;)#X8{;IZ?tGIBdfX7c$%?tk-{ou_()DTB!g5rhe4 zLuqIK%n!Gcb;M&?pz8NEXH2rN-Cq06p1;+_?S6R^bMkyS>&qFs_pwMdniX@>h-soY zRWR(HLcn>LZ-mtBuw80g?y{K=%Y6fj&Mytk_G?*tnrxa}&&3R!!HXwGa+ffivnG(O zmd6E++%ow-dt6|u=U&FRF?d%08Xk6#D%=(5JlozNYTWG&({6*hGiP{{hxc`S#fnEke@RyQe@ z)Co8LbY&wl_FZR^L&v?5e1&lIcLwZebvpfvtAiVfXy~-&1-wBqx6#l(cgFv(%WO8` zB=(-CTC&(XuGbALL@cFb0*3;<+vu@Y&GXyaA6Hp;ELz0}8AH9_wp_oCPliI2E(fZh z2gOV&72-B;HVNo6a%TkLC|aVmaIw+k2K8L!yfAr0?KM3vg687V+RvWdmYQfYW?Qa) ziC1rCr_8Fls|fTQk+4e;Pd>--IGlC8W_jcjo-pOiE(l+O1>xbIM~C?rt4*pzr@rQ* zeGlfYo<@xptmA*b6l`OKe5|M~54l)<8U&mJwIpdk?!sAQ`Q2|Wh5|Z2tJ+U`M(3wY z5vmpAQg}+!gA!5Z0Q`WQHit;b#quF$|J#FhQ_71!EXOFH{i;uF#(W!yB^4LA;7d;x zNijl~Hk=0$!rJH_TfLmEy?xQT=)&8E$Sq{j^VQaIGIwJu*knXx`se9EJPzYquWv7U zIFrY-rXSYhml9u}Inm%)b*lP~suw>Tc({dpKV|c)Wk7Z@Q1)u1!!fKCVez@IB6mBJ zq9FROCQOm1wurn*{Vy_h&})o-=Qf}kBHqDoh+0xriDkb9IDF`DHB~WFk(2UWfj@Ro z&lqq~`(LnYCaDKSjX0A5*S^T?#@E@HqfEvc(~Q?fpoEy0<7&50hPONTsiUN%M<%2S5vNr2@rRXHYUj>j4*3Ty`yRRB_m+gZ#XtBX=`l@<|9Xi=-G 
zwL*9?IjEA9Ls(zX10G&>H*t15E&LG1DixjP#D3L*=s055%`Z+)4f}$qj;V3NJSN4x zA0+j5(K8tca`)U8wG|7JP-x*G!>EAHEtNWH)MQGMEi;$z)sSLLq{BnXqEEw9nH|;D zpz%*`eXZ5M<~Z!eKv9AhvWgZW;`|;wUUa{5w$v z*Alz+%RD%lZ|sU1`hN{(j67lIW;I#GLIIRt}WXW>WxB3*@1z%5k zcdy|VM#d~*xXa>Mq-teI@{B}$wsAkXD*c+zBLEQzLiirSPre_|6-+Uh@R85}Bf_q} zk=SWv0JzcB*=7y_gC!+xm@-)tjv*rmy{HY7-@5F>r#%^9t8~i2f~cHJ=;@ zla9ohM>7(rqE$g3C;59a75?BafwXSHBdHmIR<%|vslU$}LLEP{NH zDX&iE=Y0#wfQ7Io14274o*tgo1isVTVp8`n0!u(-2p)e6>FAZO%{NO&VN8@(e zh3)+wtA^qfy)7Q=%@;&-Pit?@F8ww1&2nw;Sb@+k)?w%RjX zMf{;nio3w{pz8W9pP(FXk+!qgW#<8d1N=j%2Tz-nka5eH+s04{G{26Ub!#J zxsLe;=24sMlC~Zzw{n^q+>ucAu^f5#v2aw*kmVr<14yapr;o7wbb4KWd_*vRvvA$; zflx{>D^{ASzc< z7AIfN6J)C@63GnK1$-I?zz0ktn_RSp1Pvr|0ZH60Zp^k}gT-E1R8QGigFVr}%DG4y ze}YGP5Uas2n^v-Ot~l(Dc5t+f_Jd{&ykMdCf zl7{{}MK6+G=RP>0gt|Vjy8yCd z(uXJ~Pljv>%r|N~UwTmRGjZ)ZY)q;@WXULd&TAD16d1e$E+T9IDL4GsaS{4rTMHge z$Fc(3dFxzJ=*VKStdLGihI{&_kKdm18GOVv6~htVkfT}gz;Pz(0{#avJ-kttA| z`>MM+N7V9hbR4pAq&L6)F2`fk4*A;LxX+*rWj|~7sf00;lXJ&sdHwaa9Nk#-k@qD+ zOL^^)!A`I_F3Lg`;h@njFeRH_RGz34HQL|}NPfp}(4PO5)*69QeP(h2rqEnF4=}Z$W(ug)rWB$Jk~n|>8~OK_jJm&c7oK?BDSiz6V2Y#HWUhu8aHh-54(O1=y;!R4@{Y6(wdOs^)}O9{uyvqs$eGr6vwGfpHyZmZ<51*f2-l%#f~r%vS`R^ zvKVd%xJSdmp*GXl(|R4h8HO*v=N8-%1=%;)zIWVm`*sh7M^bNr^Hyn4h`V}6`rU9| z)&5o7HV4$QLOHoK^!+^M>_b=2(eB;`2mHZv%haoPL9y(8Olli3t5GbWHy%nuX<^bU znAY-I$<^xkc=Y`C(HJK7d-`D;I6NPte7V`iJ>BIy2=4VnU%FyU*tgSITDqqalf(df zF%ZZ`nb0vi!a7j}`aBr>2x7G7L`o|hY6h0>`&dO&xzy)}+o`@;faKDU1Cwdt-qN>A zbP9d-hn3U+c_QrJbo)h$3jEyTK@11_&1Koiv$M)}S37W_`!2$un;xmZ);b<2s-qxp zbnkZojZ9{{`(xXEDL>9^84g(rNh%Qd*T?5oaHW^8>SI4XiQ+>sv5L!mz`JY~~lq~WYhrhTM( zCTGseD5HnHh*?AxCS9#>v*^&2)zj;)u#cUt+e|Q+4XwBsCa$wXbj_33Dr-ty zXT0aC7yM6(pEp5va9$+jNior6YE_xvTy>6rZmK*z^E@Wm0hD2kBbbh>89@fJ_7b;z zS*4~$QA>nPY|Xu{iHiY~vfz(x_+Wc);t|MfKUr`&U~oRukP8Tf8D3j)3_hueyBLK0x8e54)o^uIXTZ6RokWP3{70bLN*-2eGykgitt}$)Cz?$xg zmXNwF)*1I`NW-%L{8hnP;%psWvGFaf+xk#j`R$}nwTJpepvS7{Tpb`7M zbS&{;;)biD3}kGt2z5u4+4yqkt4||i3r{2nkjRaQiQ`-nM=nw+#LfC0)UkF{pp5>E 
z?mA6(eT{lC5HnD3A*K7^29F2j{7!25)h_;h=92GrUz{xS9Qy~oQ`@}zWJlp`sj!Xo zg9C1q3gNZ)Ym2kd7qtedl#JnRg?B8Bf_boFT;>ZwI#eNGACNKn>sV_PrhWo%=QUx7 zNJPc)OiS?rB2ahqCy7J%>m@5cMNTpMP}mS$osLkBQOz=m|2)f(1GK>+`b9qNy&`O) zD|hh*L{NUE&X(_P`l$;g$dwv#ngtx}BC>uj*p&-!m{wt7!Ld5s8FlKX;8u05G?>k) zCtAw6g|HVTZg)BQbA(UT*`Vqk6aev0!g~E+}h;&H*iK zbC(Z+3xk+S-A&XN!uP`IT1X5;rG6=p#{?3p@AADtn-DvfRZst*-0V%HP)o^b)kptA znXxygvgoGE7Iqf7tZC{B295`SK%^C8<*Xa8`WLa0J`pOa!17r>%EO?MEPZ0Qivg`xFR{8$xvYTC46a}2m9&;TTB%X| z*JLQt4M2W6cOWB1mq$9hTS#X5gjAu76^#j^L@wUHv$PgYG7%9(pAyr zNai%OsipH5d%}*;4rltm<}=l`CoB7XnNuhI>K{ z_W+nRSW*_hnLh>K=rp5pt)8FRvnZ?k%!;x@?KJ9?%g-WKZ3i(1N*7g`h-g>>9lr{8 zeNb=1kCR8V0N^(hm$s|HS($UmV2hR|Ja{-N_jTBL{SIuZJdo6H#G4{4)ExHX#(&fB z;-9lz9h;#mmIU`IfSHb_dth}im{$+Qj5L;Pa7|#bo~*qWtXfb`!a;7qzKJmm$Y>3^ zEj;D8#0e}(QyXh{*~n2obf?8D#B7b0sZr66R%o)q@DwI6sH@2{@&_m@2ND|KGE}ud z=h}uPG#H{9RB-1rug9?UW=gZ47BlIibXwc9M1OP|?-9uC&;GroYYM{%+JZN@DLIVi zqIH3q?r>wiLPnSq?s0th(Gq0CNkg62`oBS13~gCKG9l@ZHTQg!{l-Ko?IM3TX2z|7 zvE>|{baX8^<>+&*cQ5P}?N2yIbvVJ0Y(dJKGoe(9zh-OEne?(KfF&%{Bl&GJ@{zKF z4iElDA-_m2wyejOX8Aa{-TAByA+(VvLVIez>&%qdmY%#Q*QH%S`Kh;1H}nhp+ODY2R9af3X#c zmGUu4jUwfZPL~FDMX-q;2^l3xbWXD&2iph&a#7tO3tVOViouYSum+7}$gno5ie6d8 zr=Pn-xQt)Cz-2|=5|iTjH^lg*>D2fR_~Y^?@%fGk1>+SJ;^~8Szdr^zXlIit+a1o) ze9@WV(AMHQ@z4#@K*@9?+L>+nC)cMmshHpourZp0LTM%2Eztz^$*9HtK98q~M%`zu zbY;xeL%CZXwv20+$LMpm6bnk0Xt&p?$`jXiu&E~OT+jZ4%Q`7D=P@FwR@-?F!&)o7 z>jWSz?d@!>b-K$sIaMs@=))DdJeabIMv=&fy8KSHagTEj^vm@yGC*50o8Il#1mN&X zaCG8rGVHciJfg({E{ze(y(=<}?J6jy+WO3Od7(of!@9`)JO%l3(C9W(>jFu@#{ccd z!uzB{0iH*6^{5u+9fgHnfSoyezE0%t#npvHRb%{gX7n`*La@>g)8{3Z2G)%jcd|R5 z*iiGu3A30n@cVY^h3IrqS`MfrwKck$Y9Y7>T3z{12rU_kR*N@N`eOGs`_0+U>MRvS683k&kJ`y&qrUXWhxpN`>6F@RIm0{aX`0io8nn4{ZqVurzju;f(Yec-JE%fH@KgQNwu; z;OgxE71Q>+EBedQQ3H;A$@_rPn{P3)RgJ$$ks?!W(fcw7LaS^f_@Py5Dq`R1+Wy)Q z$b(+Cc0mw3c=_q5tqtkXd}su)?T?aK!oLI^XpwfGm=ir|WJPDWV{sdjL1Vzr2~2s? 
zBA&|E2{(aOscPV_ae@@h&fgqe;P{TMbh=6PDbB8i%L~fZYZeA3-$$*pur!wJY^XE>W@-fP(%h086VA@nsdKKrTq~#b=@-Eb;%#N)68qw z<;$-R2P4%F@z-cF-?Y5>q`x%&SO4Gh%k{NJQyCI|W2RTTwGW*goxR#iDT$Cp9oxN{ zCe;q41)bVL)5vZ$(BgAjApVfW!s1A5p!OlD%x~_u37%iozZK5eA-bZ5tgQTT)Y5Yx zw5{DbefC~>;+Fz5Ii@)-*eJWGo_p%VAOUZ_~(YNocxsQs1pyt2*_EmK>2+XH?m zaS_UHT6k*OgoJe^YCtHVjc#btI@KgZ2Rn$|q67AFS}iFthessL07RB4vRH$1a`i4A z@BNgIs+kRq7LGg@)I$yNZH)&Uj=cu9Dg#;^1Q$no3HZ2m@_OsaOS`XJt&s&!C9D2q zkOoWLcRfgXdMF?PRwfOlDa4IVhOPxr&@Ro)OBkXP0^my#aKci~jb6JW` zMADqd>DkTw2T#>X+t}V8LxmPqYaGg=rc#|LUF2ZSxYG>O{vK)jRA<1^&!26qCM5nv z#prW5grJpt^(oFv6orctY?||GDutc_WR!wO_NE{kVvJcs63d}NTd!FS{PW>nm{s*r z5HkxOY9R{@WYYJZ3pkmA`E)y7Ud~p?Bg~iEu_ru^mXl5I&S&*u!bQU?rn~e@2WlM; z>jE$vBBX&DX`+nkpHqH(a5=d$aRbdlD1ptw-?bf9tJOf98s$;1GpK%nOME2U7~xcY zx?UiyZ&L1icy2$n5@rX`Z-g7@+7geEH&9JsILA3%%|H4jOV}V#{yGMX*E?SFBx5M; z1d}~YQm=wSM^VM82_#r%DYHfNESsCV)GXx2q=8}Le35LrBaN1#hTq`Rz}rprs@3n+ zcrm&@0EG^&Bna?%*RhqW!X8a`!mRXVI@9}+dP;$0MQ1zU5-QQdG<|8M0WjrMw=9`DUfvAts4*-$F1kgP%j%HAB_#I*sreutx}aZlSkMBrFLMP|r29HK z3|4EJ*i~^jgM?-rJq2P|Hl-~jVy>glF83D`2-$4+1KT3r2?;qy_pb#Jk2ei%gR4TQ)GgjWb zRhUhilzvubyY^rgBZUo(`S=zT0i^{o55Brr&SWdNwcqZ|9A8x~%{#pl7S`yw1tV;h zj>o5;RvActiwC`knf}lj&IsD@jpbAzb3%rATYw?TzL?gNDZ^-I>`kWJ$ANfWducf6 z(k|SI;RB^*>A<4SRnd7@+9u~1Ke^=P&R7N+ocuX{P2r|xO9IMZm z3C>NtdV-IfUi=`k*xdaTa|+h|w#t+?Vo?Su24DovmrOX>za;ID6nx(OEzJ>tco$xZ z1?Iun)=*{#k<83ohcajfjimfwCZjX13qmsH39rBx;t3N78-_Xs(wVF1)D2^ zQxRxcvkO#DRjjuV}LujQ}fv3*2n|AnhPOw%FVI1d>y@(UQJ8^u#&5eIJzN(S&ZE(nv2oGX@ zf<7xUzZ(;b3eW-(1I$!FiIXh=VPyT`cuw-!)KQOJYhsUEBijGZwYI6CWGWEWDt|tl z%Ms{)nGmq7d8V=KakH*y%{zo5&B{FSi}hDJQ2}wy?BN_ku_%5X?d7BSC{vKmh}DPb zwmzRO&|xaH7k;PtphH+d(M`6YW4|QSX#V}$x0-Jtc(cMq;ef}fzLAk(EDR1zUZ!CGiTo%PVtEY&#Q7Pc+*8@wVrL}XeXB=Lv_0e%|lL&-sFBx7sAbji)TUmF1rZJ?2^4WTrFOu$~sJ!(P%pCv&XQGw84&9Z*p6&+BomyXg_pO3Y> z^U1Av(8!&sywxy*Qjk&|Z_{SmDR2aC)px%Z{P~u8V8a5rXv_jO452p`M|m``ugSqG z*s+afBX;us+27+K{=C=~tiXqfo{nh>B6RTT({)lxCq)Wp+j{6bF6FnH7POK1HPS)x z*$t<}0^aWw+>-2T@g(lf{>38#CB_s~9ov(#%m`wHQRl;tWk2npHHGYsrj? 
z7n**Xw0YEht^{2j9z6S)0>j~~3{`>Ur6my0$p3!u4gB|Mz0<=%hS|Gfw9HIK5Ys!Om?NL&_)0EyvjFpgU$@^pXPJo*rM-YmhE2Pmy$_jrM4Y@KFzN8(JliPM4q+ywXKuz$D^phIz)_NRn z&qL3N%aL+yag8K$%Ve~4(qXrg%l*bEM`j^ihr4Ya&t}M6M%DUs=bWnOLinNcJUs_< zeDN8?@hqM~AHm(%Y25d*I3bDSs~8%vUwjNI$#O~}DlBdZ)dN-XXWr`od}k_;f91E^&eZ-r53SkNpo3<`|5&F{oI4n{a2OGdesi3lwZv>5?;udt?;WE2@rdxY^88zrl z&@}noHS3?LDp)2?{8zURpJP^!4;qMKnNJd-fiO=jz`^--YV#5v;o;L$SFD|h)HtX+ zuR^U1%p(fkMdh@Hlp z&!5sp&nCbBE+8i<9T=XFKJN#@Z=A-R4WWM?S2V;*b{R-lRE^5HL&|0g7mB1MHju+o zc%HH+x!wiUqz|v!??&2!zFKPGR>h<{jh0-9UK)NV5%UNe2ptO{&d!e2fgT>!^7`2JX5E@x_z=z&+Wbs%}lL6 z$KqA)r78glKxe2Oc=-KanA=R8!BP%k#~+n#w>pHsw&R530=}vyO|I4SnUnG2@L=Ra z&5PTU@&!i*O2l%vRBA&CM%%8V8vNd))*iQZcuv?KXxPXVe7pWFL$MSb4gPM&VhDk# zdb3Z*8!GeL8HB1q?23N;ew!eKfHaCEK@cdUg@hFuZxYg8Bo6lEw%dRR$RCNV_=Yen zI-&v5!JzOq7?4xeA-6xw~3Paw1&;A^-ZT+cM;@=eS*rD4oq%%xO;-%7UV`zSNlp} zRFqb}@7stC$+wa8VaJDCPZ_7rwKf_*nv0Mp_9Y7IGg?M`~t&MXx+C zbSQ3@5)Va5HThyXWV~k4&qm*TKH@@z%x~m2Bk!-SU;ec_xqcP?K3(y=*_=IC`X27W z3^rjYFP;_sFwR8oZ8~FKgQiW}4P#a6h{JR(P z92i5x1rBsac&3e%Chm8BqKVJc2$!-_i3ca7cQ4wsUJB51qMA&to zmRT!zO+I_;{HCDYNL^rD@!f9o`AEN6KmnmU{<<+j<1ejNBP z?WN(YiSX5eB+rMRvPu@0Qbs!}BhgAg*eA(M!GLI~=brytZ7ni7-|0Ft^_o%8%k?!K z=OG#A!G>^%8j_zCggct-4xOU-a=3vwI}A{AFcDaaTGa#9em0@0=eU2||81AY=Z9a^ z0Ob^E7h2Sm`iW~3QPnM!R28Hwvrqvrc%3u5d-r)ac|dYcco^yDd#>oI;yc;ridB|S z^@@I3t=;_+8zAEumEzz1{Hc(?y#8_Q%MvEu|Giw%5T8E|^QbCt_eaB5VGdmfqLfIL ziW|T!^pfC-4tOzn;F_5h?R0-?<|^%-{Tx@FQwuj7y4eG6!tM7L7|F}>x$btClOUewQ%IM^oT4xQ&sG|3%OWUntPxtMCo%zVRg?#-rn)$?# z!!79lqKZ9QyIAp)xT((Z5K?16<$|V!U8I>RG2$kbusWL`v-E8N8G!Y4%aFd&|9b0Y z(J__^Sn{$FA8Dn??O@`|@pB1Ee+L=s=A|_}3!{fol$3g)>-(d${$>(z+KCop_E|%~ zK`9`}ECoX!Y{yL<2mzw$o~-tv5A*q9wDI@|!LlmG1y+Z4!rS`2pjc zB}Tqv;zzM&}e-H-M4Nj zHcW&W3%sz5f|3wNi{OlgvgagKK?n<`y0@f+fe=>h7z6Wt{p6$CJu*J8GWUtH_B^go z?dcpDo9%E>9DK|~ugSP=?RS|yRJICdB-1ZRwfRQk#$CVJJGy^3c=^u2*ILj==iY4* z@_bR_E3)ytj9E;2fdJ^Lvgjb?g7wQ<-b<)9Kr0-F%gsb4+=l<{Xqxm1a|_J=m`b_C zL__0%sM=yb7B6quFEmf$CM+b%;I*vC?4~fAm89KDQzv`?&r7gSBzc5H9Ka-m^!|gm ziKJt9h6Ca*L_=6TU3fsMPjj*9fv;s#sZ_z&lR* 
zr}Cf#Cx-xJLiyq{_H<0Z$JeA+Hx#`@712U(6)r!UL_e!7@j8{qWbOZ*A6|v=C=N9t1g~uc)ZLtj0a5-N^KE?g4VzKIpX~x znK=4b9%I~m{01N9X&afA6Q$OxmYPRNVWY)EMOD~9UF}LEcSAwJ6mpkA5%Jxkpp)v_ z*VAI_BgDsqdjIni;jf|>HDp4Aa*mRYX(2DHdJmpa>2LIanvAKC=b5MnPdPG5*oxC> z^s9xolIJfnLyOgLVYoCTgPzw2%Lk0+Butpww8@jv2$IlTM%-8sjqI6%D<9)Z@xTq1 zZvzG3+LJ|sP8W+aq>(e78o~&hukZdq7^_`$1bzY0BFc9kDg|o$NL!O3AzZNOkGgM9 zVk&e*3#e)3LdR^bDpX>x(YG$x^gRj=7)A4DbA8mJFeIkDxSA91x zYV@s{ujoqrZ2cK7S(PjG#~YG(g!~qu?D65fN*deHeG2~XFFJhV?c)#v<7WY}V-U)r zEFq~6nrF~BCy=i&*Ms6VhS;>2%)r@SGq_#(&NLdBU$3o?cV~e~FSrl4T1`*b0ejUb zU_X?Eq(%GN^4!gTqRG+Gnb1|&f>S5aIq4Zbu_uA{{jkf*Z7_2_NOy$0OU-s!UYX}l zj2=N_GX9CF3gs84Lb^p>IAilO6HkKF0oC60dOt|R))==dPGf%)(&>HZ%y3zlvlq<6 zg3>>z?^38!ZX2Bp9bU5q-vTvjh286+>;yrk91rYnu-!42#f}I!tI}Ocg`T#`nQ#quYa;@q@qV(Px%#8&Jhr1#ZnrtkW(en0ndDtq_y$YVH~omG@lt0j3~ zr?v)$F9R0d623c~bw8x9H}OVhhIQ>{mhK3o$^lZpwbCK1LDiBfY6?!uG@OijsMIfJ z`^{&AU_MZpU60FSC@@)tx|ld+Zmn28=91sLa>g{q9MWD)4%svHl-Ebq^G@HhDIy#VjyUe zw2IuD3&1Ah--{n1blnHqX64GwZhI<=p|AGhN>^-XZ) z$_Me|T4fVIg*(B@Y6RU{GPvcCp0vVNG9dP9k#b{5lFvTT_p(un zQ+cx{?#UBUnpwLpqry4^FK8*gq0E-KB^a&F;twt*4JGD=rSNF0=<{RZBi&B7b z6hUQumh=w|t6Pa=A$f3}TkQFn%z@ULZXzzy*`3S)+@Yx>t6Ca#C)<;r26{6wG7HO*MnZJqE1#e}of6@KXgC~JH9N49I5)J1u7 z>p1myR)SE_KUj7YbBk;mx&Vm#=HZWf`6U=IqjIw0hE;YoMQhJ@N+VRXEZ%}fT=aH5 zB|atP1VkCel@_m-`}QRs@f; z@T*N-q0+6VD^$FniGD}`5=8QcM+X3>Jhz4J8sV5TAY@txW|=QJ)Y9}3Z7@Ce5SK9@ zi4HL8Poq;up28*!5*-XnMlBb>kM=Urm%N=HrK-SRwb4 z{weELkaC!na4mu*;d7*tqyJKfP^X)chx;fWWqYHHT9{GW8As;;yFf5BJ6m!<<#7UBN&Z@-9MmWkRlTD1YLLRX+c~NAJJ9>tb#!B(ai#W z2uo;+DEqB5F9kjfr`9C___SLl$vpul+4)>y;EXDcBuZ6VMs5Q>9wn)L2;&@^^_V|9 z#!o4X8@3VD{0Dp(k^$zkiI+x z)391kHFrF*rSuD;S7f!&D9)VE3*LC9nOMeK4Xzq%3Onje{Mc{dkz5SBQjd_b)ts2f zH=MFBWQCAik&~HSN>`xSPKv&Y(NY-8fZ6lm8jkUU`m&h<)B?W>hOAx|KSrUE6o^%w zntZnE3*F^>1%~a3t33aw8gI#tK*_%8#sJXJQy$>N0p|&$YMg2^xhK!O;GoM}i2sDC zLMxRCX^S$#YP2-EV4;qj<+BD+Y@H5+`!WEx>;l6}!cQU>P&qI^N*R8+fXxtm<_D0{lpFJf1D=a;*50$h+e#PHr{GLQ|#BGKfS>9paxq zHG+o3j!COeZ^Oo2vHyiWm|LMIgZGnXQk=bfQ8rv)SdqSPf!kBNdkKy*$TIt8M0OFu 
zR*>074kP=ZP!R$!C}WVW)Rq(~nUxRAr4_FBDE!ArmJ`ncHtWcS%SFL`S%pm@o#Xcz zEY4S!Q-ic%0VuF2!XlgZ5&6VU>4};=<+!JISU6Te!%xS-0*omiWk0y6e0RdeY}qcq zT)C5OM@yp%76(vuD=W#4iulq&@A*6{>E_Mv^2EQx{;v zH(u?J(%#X8%5cryE>&$8=v7avzWH?oO;rJIj_i7&9x@K!1nR@T%l+;wbL4&(H{IG1 zN~F9RSvKz;?SnQlbJ3!XAA_w z2;V$!Cn>+QA|YpuslnmIgi$pk-7xy!?|c%cH+8VXxd(=8v5liibn@FTI|B}IyFc1t zG7MGF7hi>HdeM-*pEt`0(s(b~GH*9?DN~I%FNy|P&{@Wsx3g@8T_%oFu2glb&&*R5 zeo=ld3T!Bu^^U-GKI6UxszTKl*JUhSo-g;03~HGChYOVckL=&@BX6c(X(Vmt&4SO7 zg^u!D%ub{U2)r&3NHM0<)!7@GH|%Y)!o&$(>L*B$pFEZ!ju3eZA>CmATXh^oDi^|8ja=&cntNPM_-7q z{QdXhy2}Z6yK*_<;Rd;>ZIpSfgJ96nKGKvkJV2j@PUZsxgYn&;u8Y6Bbv@fOIEag2 zw!tCH9e3K+55$FME{=Cx=$jIXDWWsE>kvLQAU*y@IU!iaA5Ko59^q&;vb4r}s+-tu zyZ@>9@%>xl%rmCNWkh(MzkGVEShjeCa~qu{y{I~QYw4uDbHlT7$9+3v@GwVwqN((- zveqVgSHD(`K)UIZ8}ZAZxG1%oRDyVKaii_~t*bbzlLwk_s*y|xmNyl;-dvsKgbIum zv=NpvJr~44#WvBZ9u2W&+Y9ma+x4E@7pI>-HD*tr%yK-UgO!fw_a2Prckg4rjH@m> zVp)Z%yE_eIhr_;lJcoxJx6{yEe&d(l7qh4G%9gaI6gUVK^BLz;EUdhq_;_O>Fd_Y=_HOM7TzSA!P*R#y*YR_}eA4ZUH^kJ?FxxB1HmoQ60J<)gY zsqjf;Z4YKpqcI65a6d-`^v17!_Jgr(4!)Lj{!-2rBo~fx=nk7zrxe7;&tNA6%lN^? zN!ODaD>|3$-s$v3ud)A_ zH#%yE*in7us%7yjAHE3TVs4HDKqK;uo%Gol+kt2@W@bWR5k4m-jk*c(#B^z*)sqGS z$_e+n@b%=DeR0c=*T;iTJr{lYNQnIG@tqVdE2F8nOzd~YvEq5n_dFY+ixA2PVEif9 zT)sN4x?pKsuzWUxhcgs8*=!<>PY6s1mhp*~lcQHv1gdUS^%BHH95~d9r?>2lr#U|H z;jOzkWP5-U&Gy9rCrS7Y;?@wyiEzQ+z81#8n22_euI%(kYuxm%OEo zbfkP5P1XI0uWPP!my{RJ&_)Y={}T*iW=P0fCbi0!Nt=PTH|f3~T}d6A*ZVAn^YI X90YRHzn!lF00000NkvXXu0mjfHCr0+ From d338e4dc60a9834d2cec4f2d9f0a5b041ee70b04 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 30 Aug 2022 10:38:32 +0800 Subject: [PATCH 18/72] fix(query): set correct length value. 
--- source/libs/function/src/builtinsimpl.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index 417875838a..1633065ea9 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -3627,8 +3627,7 @@ static int32_t doUpdateTupleData(SSerializeDataHandle* pHandle, const void* pBuf } static int32_t updateTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos) { - int32_t rowLen = 0; - int32_t completeRowSize = rowLen + pCtx->subsidiaries.num * sizeof(bool); + int32_t completeRowSize = pCtx->subsidiaries.rowLen + pCtx->subsidiaries.num * sizeof(bool); char* buf = serializeTupleData(pSrcBlock, rowIndex, &pCtx->subsidiaries, pCtx->subsidiaries.buf); doUpdateTupleData(&pCtx->saveHandle, buf, completeRowSize, pPos); return TSDB_CODE_SUCCESS; From f3d5ac49b1200576b2ac8962ae8ab1c4d6ae2505 Mon Sep 17 00:00:00 2001 From: cpwu Date: Tue, 30 Aug 2022 11:16:27 +0800 Subject: [PATCH 19/72] fix dnodes cfg --- tests/pytest/util/dnodes.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py index e530695d1e..89b7fe00eb 100644 --- a/tests/pytest/util/dnodes.py +++ b/tests/pytest/util/dnodes.py @@ -36,9 +36,9 @@ class TDSimClient: "rpcDebugFlag": "143", "tmrDebugFlag": "131", "cDebugFlag": "143", - "udebugFlag": "143", - "jnidebugFlag": "143", - "qdebugFlag": "143", + "uDebugFlag": "143", + "jniDebugFlag": "143", + "qDebugFlag": "143", "supportVnodes": "1024", "telemetryReporting": "0", } @@ -134,7 +134,6 @@ class TDDnode: "uDebugFlag": "131", "sDebugFlag": "143", "wDebugFlag": "143", - "qdebugFlag": "143", "numOfLogLines": "100000000", "statusInterval": "1", "supportVnodes": "1024", @@ -484,7 +483,7 @@ class TDDnode: psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled processID = 
subprocess.check_output( psCmd, shell=True).decode("utf-8") - + onlyKillOnceWindows = 0 while(processID): if not platform.system().lower() == 'windows' or (onlyKillOnceWindows == 0 and platform.system().lower() == 'windows'): From ad35a67f2fba26ca02c7440d4ab5cca070e55641 Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Tue, 30 Aug 2022 11:30:43 +0800 Subject: [PATCH 20/72] fix: add table comment in show create table result --- source/libs/command/src/command.c | 1 + 1 file changed, 1 insertion(+) diff --git a/source/libs/command/src/command.c b/source/libs/command/src/command.c index 7d259fe06c..18d839e109 100644 --- a/source/libs/command/src/command.c +++ b/source/libs/command/src/command.c @@ -471,6 +471,7 @@ static int32_t setCreateTBResultIntoDataBlock(SSDataBlock* pBlock, SDbCfgInfo* p len += sprintf(buf2 + VARSTR_HEADER_SIZE, "CREATE TABLE `%s` (", tbName); appendColumnFields(buf2, &len, pCfg); len += sprintf(buf2 + VARSTR_HEADER_SIZE + len, ")"); + appendTableOptions(buf2, &len, pDbCfg, pCfg); } varDataLen(buf2) = len; From e5f5d3710dc86ffae0baa930a6f85b1f51078e5f Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 30 Aug 2022 12:48:06 +0800 Subject: [PATCH 21/72] fix(query): set correct value length --- source/libs/function/src/builtinsimpl.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index 1633065ea9..b71d06231e 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -3627,9 +3627,8 @@ static int32_t doUpdateTupleData(SSerializeDataHandle* pHandle, const void* pBuf } static int32_t updateTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos) { - int32_t completeRowSize = pCtx->subsidiaries.rowLen + pCtx->subsidiaries.num * sizeof(bool); char* buf = serializeTupleData(pSrcBlock, rowIndex, &pCtx->subsidiaries, pCtx->subsidiaries.buf); - 
doUpdateTupleData(&pCtx->saveHandle, buf, completeRowSize, pPos); + doUpdateTupleData(&pCtx->saveHandle, buf, pCtx->subsidiaries.rowLen, pPos); return TSDB_CODE_SUCCESS; } From 39569cbd9a8c176c319aaf47c46671688c425895 Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Tue, 30 Aug 2022 13:49:35 +0800 Subject: [PATCH 22/72] enh: move some tables from perf to ins --- include/common/systable.h | 6 ++--- source/common/src/systable.c | 36 +++++++++++++------------ source/dnode/mnode/impl/src/mndShow.c | 6 ++--- source/libs/parser/src/parAstParser.c | 6 ++--- source/libs/parser/src/parTranslater.c | 12 ++++----- source/libs/parser/test/mockCatalog.cpp | 4 +-- 6 files changed, 36 insertions(+), 34 deletions(-) diff --git a/include/common/systable.h b/include/common/systable.h index 01c9807627..882c54de95 100644 --- a/include/common/systable.h +++ b/include/common/systable.h @@ -43,17 +43,17 @@ extern "C" { #define TSDB_INS_TABLE_VNODES "ins_vnodes" #define TSDB_INS_TABLE_CONFIGS "ins_configs" #define TSDB_INS_TABLE_DNODE_VARIABLES "ins_dnode_variables" +#define TSDB_INS_TABLE_SUBSCRIPTIONS "ins_subscriptions" +#define TSDB_INS_TABLE_TOPICS "ins_topics" +#define TSDB_INS_TABLE_STREAMS "ins_streams" #define TSDB_PERFORMANCE_SCHEMA_DB "performance_schema" #define TSDB_PERFS_TABLE_SMAS "perf_smas" #define TSDB_PERFS_TABLE_CONNECTIONS "perf_connections" #define TSDB_PERFS_TABLE_QUERIES "perf_queries" -#define TSDB_PERFS_TABLE_TOPICS "perf_topics" #define TSDB_PERFS_TABLE_CONSUMERS "perf_consumers" -#define TSDB_PERFS_TABLE_SUBSCRIPTIONS "perf_subscriptions" #define TSDB_PERFS_TABLE_OFFSETS "perf_offsets" #define TSDB_PERFS_TABLE_TRANS "perf_trans" -#define TSDB_PERFS_TABLE_STREAMS "perf_streams" #define TSDB_PERFS_TABLE_APPS "perf_apps" typedef struct SSysDbTableSchema { diff --git a/source/common/src/systable.c b/source/common/src/systable.c index 9ca896c9ee..dffef21ac4 100644 --- a/source/common/src/systable.c +++ b/source/common/src/systable.c @@ -240,6 +240,22 @@ static const 
SSysDbTableSchema variablesSchema[] = { {.name = "value", .bytes = TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, }; +static const SSysDbTableSchema topicSchema[] = { + {.name = "topic_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, + {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, + {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false}, + {.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, + // TODO config +}; + + +static const SSysDbTableSchema subscriptionSchema[] = { + {.name = "topic_name", .bytes = TSDB_TOPIC_FNAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, + {.name = "consumer_group", .bytes = TSDB_CGROUP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, + {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, + {.name = "consumer_id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false}, +}; + static const SSysTableMeta infosMeta[] = { {TSDB_INS_TABLE_DNODES, dnodesSchema, tListLen(dnodesSchema), true}, {TSDB_INS_TABLE_MNODES, mnodesSchema, tListLen(mnodesSchema), true}, @@ -260,6 +276,9 @@ static const SSysTableMeta infosMeta[] = { {TSDB_INS_TABLE_VGROUPS, vgroupsSchema, tListLen(vgroupsSchema), true}, {TSDB_INS_TABLE_CONFIGS, configSchema, tListLen(configSchema), true}, {TSDB_INS_TABLE_DNODE_VARIABLES, variablesSchema, tListLen(variablesSchema), true}, + {TSDB_INS_TABLE_TOPICS, topicSchema, tListLen(topicSchema), false}, + {TSDB_INS_TABLE_SUBSCRIPTIONS, subscriptionSchema, tListLen(subscriptionSchema), false}, + {TSDB_INS_TABLE_STREAMS, streamSchema, tListLen(streamSchema), false}, }; static const SSysDbTableSchema connectionsSchema[] = { @@ -272,13 +291,6 @@ static const SSysDbTableSchema connectionsSchema[] = { {.name 
= "last_access", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false}, }; -static const SSysDbTableSchema topicSchema[] = { - {.name = "topic_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, - {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, - {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false}, - {.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, - // TODO config -}; static const SSysDbTableSchema consumerSchema[] = { {.name = "consumer_id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false}, @@ -292,13 +304,6 @@ static const SSysDbTableSchema consumerSchema[] = { {.name = "rebalance_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false}, }; -static const SSysDbTableSchema subscriptionSchema[] = { - {.name = "topic_name", .bytes = TSDB_TOPIC_FNAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, - {.name = "consumer_group", .bytes = TSDB_CGROUP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, - {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, - {.name = "consumer_id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false}, -}; - static const SSysDbTableSchema offsetSchema[] = { {.name = "topic_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, {.name = "group_id", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, @@ -345,13 +350,10 @@ static const SSysDbTableSchema appSchema[] = { static const SSysTableMeta perfsMeta[] = { {TSDB_PERFS_TABLE_CONNECTIONS, connectionsSchema, tListLen(connectionsSchema), false}, {TSDB_PERFS_TABLE_QUERIES, querySchema, tListLen(querySchema), false}, - {TSDB_PERFS_TABLE_TOPICS, topicSchema, tListLen(topicSchema), false}, 
{TSDB_PERFS_TABLE_CONSUMERS, consumerSchema, tListLen(consumerSchema), false}, - {TSDB_PERFS_TABLE_SUBSCRIPTIONS, subscriptionSchema, tListLen(subscriptionSchema), false}, // {TSDB_PERFS_TABLE_OFFSETS, offsetSchema, tListLen(offsetSchema)}, {TSDB_PERFS_TABLE_TRANS, transSchema, tListLen(transSchema), false}, // {TSDB_PERFS_TABLE_SMAS, smaSchema, tListLen(smaSchema), false}, - {TSDB_PERFS_TABLE_STREAMS, streamSchema, tListLen(streamSchema), false}, {TSDB_PERFS_TABLE_APPS, appSchema, tListLen(appSchema), false}}; // clang-format on diff --git a/source/dnode/mnode/impl/src/mndShow.c b/source/dnode/mnode/impl/src/mndShow.c index 9499c90c57..5a998dfe98 100644 --- a/source/dnode/mnode/impl/src/mndShow.c +++ b/source/dnode/mnode/impl/src/mndShow.c @@ -88,7 +88,7 @@ static int32_t convertToRetrieveType(char *name, int32_t len) { type = TSDB_MGMT_TABLE_VGROUP; } else if (strncasecmp(name, TSDB_PERFS_TABLE_CONSUMERS, len) == 0) { type = TSDB_MGMT_TABLE_CONSUMERS; - } else if (strncasecmp(name, TSDB_PERFS_TABLE_SUBSCRIPTIONS, len) == 0) { + } else if (strncasecmp(name, TSDB_INS_TABLE_SUBSCRIPTIONS, len) == 0) { type = TSDB_MGMT_TABLE_SUBSCRIPTIONS; } else if (strncasecmp(name, TSDB_PERFS_TABLE_TRANS, len) == 0) { type = TSDB_MGMT_TABLE_TRANS; @@ -102,9 +102,9 @@ static int32_t convertToRetrieveType(char *name, int32_t len) { type = TSDB_MGMT_TABLE_QUERIES; } else if (strncasecmp(name, TSDB_INS_TABLE_VNODES, len) == 0) { type = TSDB_MGMT_TABLE_VNODES; - } else if (strncasecmp(name, TSDB_PERFS_TABLE_TOPICS, len) == 0) { + } else if (strncasecmp(name, TSDB_INS_TABLE_TOPICS, len) == 0) { type = TSDB_MGMT_TABLE_TOPICS; - } else if (strncasecmp(name, TSDB_PERFS_TABLE_STREAMS, len) == 0) { + } else if (strncasecmp(name, TSDB_INS_TABLE_STREAMS, len) == 0) { type = TSDB_MGMT_TABLE_STREAMS; } else if (strncasecmp(name, TSDB_PERFS_TABLE_APPS, len) == 0) { type = TSDB_MGMT_TABLE_APPS; diff --git a/source/libs/parser/src/parAstParser.c b/source/libs/parser/src/parAstParser.c index 
82b5842663..1fb0642eb7 100644 --- a/source/libs/parser/src/parAstParser.c +++ b/source/libs/parser/src/parAstParser.c @@ -365,7 +365,7 @@ static int32_t collectMetaKeyFromShowStables(SCollectMetaKeyCxt* pCxt, SShowStmt } static int32_t collectMetaKeyFromShowStreams(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { - return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_PERFORMANCE_SCHEMA_DB, TSDB_PERFS_TABLE_STREAMS, + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_STREAMS, pCxt->pMetaCache); } @@ -411,7 +411,7 @@ static int32_t collectMetaKeyFromShowVgroups(SCollectMetaKeyCxt* pCxt, SShowStmt } static int32_t collectMetaKeyFromShowTopics(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { - return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_PERFORMANCE_SCHEMA_DB, TSDB_PERFS_TABLE_TOPICS, + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_TOPICS, pCxt->pMetaCache); } @@ -506,7 +506,7 @@ static int32_t collectMetaKeyFromShowBlockDist(SCollectMetaKeyCxt* pCxt, SShowTa } static int32_t collectMetaKeyFromShowSubscriptions(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { - return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_PERFORMANCE_SCHEMA_DB, TSDB_PERFS_TABLE_SUBSCRIPTIONS, + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_SUBSCRIPTIONS, pCxt->pMetaCache); } diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 51a4295ce5..73e252fa87 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -142,8 +142,8 @@ static const SSysTableShowAdapter sysTableShowAdapter[] = { }, { .showType = QUERY_NODE_SHOW_STREAMS_STMT, - .pDbName = TSDB_PERFORMANCE_SCHEMA_DB, - .pTableName = TSDB_PERFS_TABLE_STREAMS, + .pDbName = TSDB_INFORMATION_SCHEMA_DB, + .pTableName = TSDB_INS_TABLE_STREAMS, .numOfShowCols = 1, .pShowCols = 
{"stream_name"} }, @@ -184,8 +184,8 @@ static const SSysTableShowAdapter sysTableShowAdapter[] = { }, { .showType = QUERY_NODE_SHOW_TOPICS_STMT, - .pDbName = TSDB_PERFORMANCE_SCHEMA_DB, - .pTableName = TSDB_PERFS_TABLE_TOPICS, + .pDbName = TSDB_INFORMATION_SCHEMA_DB, + .pTableName = TSDB_INS_TABLE_TOPICS, .numOfShowCols = 1, .pShowCols = {"topic_name"} }, @@ -240,8 +240,8 @@ static const SSysTableShowAdapter sysTableShowAdapter[] = { }, { .showType = QUERY_NODE_SHOW_SUBSCRIPTIONS_STMT, - .pDbName = TSDB_PERFORMANCE_SCHEMA_DB, - .pTableName = TSDB_PERFS_TABLE_SUBSCRIPTIONS, + .pDbName = TSDB_INFORMATION_SCHEMA_DB, + .pTableName = TSDB_INS_TABLE_SUBSCRIPTIONS, .numOfShowCols = 1, .pShowCols = {"*"} }, diff --git a/source/libs/parser/test/mockCatalog.cpp b/source/libs/parser/test/mockCatalog.cpp index b376c33d1a..cd7a9d549a 100644 --- a/source/libs/parser/test/mockCatalog.cpp +++ b/source/libs/parser/test/mockCatalog.cpp @@ -137,7 +137,7 @@ void generatePerformanceSchema(MockCatalogService* mcs) { } { ITableBuilder& builder = - mcs->createTableBuilder(TSDB_PERFORMANCE_SCHEMA_DB, TSDB_PERFS_TABLE_STREAMS, TSDB_SYSTEM_TABLE, 1) + mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_STREAMS, TSDB_SYSTEM_TABLE, 1) .addColumn("stream_name", TSDB_DATA_TYPE_BINARY, TSDB_TABLE_NAME_LEN); builder.done(); } @@ -149,7 +149,7 @@ void generatePerformanceSchema(MockCatalogService* mcs) { } { ITableBuilder& builder = - mcs->createTableBuilder(TSDB_PERFORMANCE_SCHEMA_DB, TSDB_PERFS_TABLE_SUBSCRIPTIONS, TSDB_SYSTEM_TABLE, 1) + mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_SUBSCRIPTIONS, TSDB_SYSTEM_TABLE, 1) .addColumn("stream_name", TSDB_DATA_TYPE_BINARY, TSDB_TABLE_NAME_LEN); builder.done(); } From 1cdacdda90e3b6d8036fff578f04f524f57045e3 Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Tue, 30 Aug 2022 14:00:38 +0800 Subject: [PATCH 23/72] fix: move some tables from perf to ins --- docs/zh/12-taos-sql/22-meta.md | 32 ++++++++++++++++++++++++++++++++ 
docs/zh/12-taos-sql/23-perf.md | 32 -------------------------------- 2 files changed, 32 insertions(+), 32 deletions(-) diff --git a/docs/zh/12-taos-sql/22-meta.md b/docs/zh/12-taos-sql/22-meta.md index e9cda45b0f..3ae444e8fe 100644 --- a/docs/zh/12-taos-sql/22-meta.md +++ b/docs/zh/12-taos-sql/22-meta.md @@ -246,3 +246,35 @@ Note: 由于 SHOW 语句已经被开发者熟悉和广泛使用,所以它们 | 1 | dnode_id | INT | dnode 的 ID | | 2 | name | BINARY(32) | 配置项名称 | | 3 | value | BINARY(64) | 该配置项的值 | + +## INS_TOPICS + +| # | **列名** | **数据类型** | **说明** | +| --- | :---------: | ------------ | ------------------------------ | +| 1 | topic_name | BINARY(192) | topic 名称 | +| 2 | db_name | BINARY(64) | topic 相关的 DB | +| 3 | create_time | TIMESTAMP | topic 的 创建时间 | +| 4 | sql | BINARY(1024) | 创建该 topic 时所用的 SQL 语句 | + +## INS_SUBSCRIPTIONS + +| # | **列名** | **数据类型** | **说明** | +| --- | :------------: | ------------ | ------------------------ | +| 1 | topic_name | BINARY(204) | 被订阅的 topic | +| 2 | consumer_group | BINARY(193) | 订阅者的消费者组 | +| 3 | vgroup_id | INT | 消费者被分配的 vgroup id | +| 4 | consumer_id | BIGINT | 消费者的唯一 id | + +## INS_STREAMS + +| # | **列名** | **数据类型** | **说明** | +| --- | :----------: | ------------ | --------------------------------------- | +| 1 | stream_name | BINARY(64) | 流计算名称 | +| 2 | create_time | TIMESTAMP | 创建时间 | +| 3 | sql | BINARY(1024) | 创建流计算时提供的 SQL 语句 | +| 4 | status | BIANRY(20) | 流当前状态 | +| 5 | source_db | BINARY(64) | 源数据库 | +| 6 | target_db | BIANRY(64) | 目的数据库 | +| 7 | target_table | BINARY(192) | 流计算写入的目标表 | +| 8 | watermark | BIGINT | watermark,详见 SQL 手册流式计算 | +| 9 | trigger | INT | 计算结果推送模式,详见 SQL 手册流式计算 | diff --git a/docs/zh/12-taos-sql/23-perf.md b/docs/zh/12-taos-sql/23-perf.md index e6ff4960a7..808d9ae31a 100644 --- a/docs/zh/12-taos-sql/23-perf.md +++ b/docs/zh/12-taos-sql/23-perf.md @@ -62,15 +62,6 @@ TDengine 3.0 版本开始提供一个内置数据库 `performance_schema`,其 | 12 | sub_status | BINARY(1000) | 子查询状态 | | 13 | sql | BINARY(1024) | SQL 语句 | -## PERF_TOPICS - -| # | **列名** | 
**数据类型** | **说明** | -| --- | :---------: | ------------ | ------------------------------ | -| 1 | topic_name | BINARY(192) | topic 名称 | -| 2 | db_name | BINARY(64) | topic 相关的 DB | -| 3 | create_time | TIMESTAMP | topic 的 创建时间 | -| 4 | sql | BINARY(1024) | 创建该 topic 时所用的 SQL 语句 | - ## PERF_CONSUMERS | # | **列名** | **数据类型** | **说明** | @@ -84,15 +75,6 @@ TDengine 3.0 版本开始提供一个内置数据库 `performance_schema`,其 | 7 | subscribe_time | TIMESTAMP | 上一次发起订阅的时间 | | 8 | rebalance_time | TIMESTAMP | 上一次触发 rebalance 的时间 | -## PERF_SUBSCRIPTIONS - -| # | **列名** | **数据类型** | **说明** | -| --- | :------------: | ------------ | ------------------------ | -| 1 | topic_name | BINARY(204) | 被订阅的 topic | -| 2 | consumer_group | BINARY(193) | 订阅者的消费者组 | -| 3 | vgroup_id | INT | 消费者被分配的 vgroup id | -| 4 | consumer_id | BIGINT | 消费者的唯一 id | - ## PERF_TRANS | # | **列名** | **数据类型** | **说明** | @@ -114,17 +96,3 @@ TDengine 3.0 版本开始提供一个内置数据库 `performance_schema`,其 | 2 | create_time | TIMESTAMP | sma 创建时间 | | 3 | stable_name | BINARY(192) | sma 所属的超级表名称 | | 4 | vgroup_id | INT | sma 专属的 vgroup 名称 | - -## PERF_STREAMS - -| # | **列名** | **数据类型** | **说明** | -| --- | :----------: | ------------ | --------------------------------------- | -| 1 | stream_name | BINARY(64) | 流计算名称 | -| 2 | create_time | TIMESTAMP | 创建时间 | -| 3 | sql | BINARY(1024) | 创建流计算时提供的 SQL 语句 | -| 4 | status | BIANRY(20) | 流当前状态 | -| 5 | source_db | BINARY(64) | 源数据库 | -| 6 | target_db | BIANRY(64) | 目的数据库 | -| 7 | target_table | BINARY(192) | 流计算写入的目标表 | -| 8 | watermark | BIGINT | watermark,详见 SQL 手册流式计算 | -| 9 | trigger | INT | 计算结果推送模式,详见 SQL 手册流式计算 | From f4ddc6d39365c9963fb47ee4272da5d1626b52be Mon Sep 17 00:00:00 2001 From: cpwu Date: Tue, 30 Aug 2022 14:14:37 +0800 Subject: [PATCH 24/72] fix sml case --- tests/system-test/2-query/sml.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/system-test/2-query/sml.py b/tests/system-test/2-query/sml.py index 5a83cbfa90..b7e167c8b5 100644 --- 
a/tests/system-test/2-query/sml.py +++ b/tests/system-test/2-query/sml.py @@ -86,7 +86,7 @@ class TDTestCase: tdSql.query(f"select * from {dbname}.macylr") tdSql.checkRows(2) - tdSql.query("desc macylr") + tdSql.query(f"desc {dbname}.macylr") tdSql.checkRows(25) return From 254d16667c673e79df0c3353a8fcd13461957f01 Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Tue, 30 Aug 2022 14:19:02 +0800 Subject: [PATCH 25/72] fix: no memory free in parser for stmt --- source/libs/parser/src/parInsert.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/source/libs/parser/src/parInsert.c b/source/libs/parser/src/parInsert.c index 56fbafe76d..049d1ef545 100644 --- a/source/libs/parser/src/parInsert.c +++ b/source/libs/parser/src/parInsert.c @@ -1669,6 +1669,10 @@ int32_t parseInsertSql(SParseContext* pContext, SQuery** pQuery, SParseMetaCache pDb = taosHashIterate(context.pDbFNameHashObj, pDb); } } + if (pContext->pStmtCb) { + context.pVgroupsHashObj = NULL; + context.pTableBlockHashObj = NULL; + } destroyInsertParseContext(&context); return code; } From dba4188f8a1ce06b7bb762cdc7d242f78b6e4cba Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 30 Aug 2022 14:33:03 +0800 Subject: [PATCH 26/72] fix(query): reset the page id. 
--- source/libs/executor/src/executorimpl.c | 19 ++++++++++--------- source/libs/executor/src/timewindowoperator.c | 11 ++--------- 2 files changed, 12 insertions(+), 18 deletions(-) diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index 19daa7e96a..4ffa80d468 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -3434,6 +3434,7 @@ int32_t getBufferPgSize(int32_t rowSize, uint32_t* defaultPgsz, uint32_t* defaul int32_t doInitAggInfoSup(SAggSupporter* pAggSup, SqlFunctionCtx* pCtx, int32_t numOfOutput, size_t keyBufSize, const char* pKey) { + int32_t code = 0; _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); pAggSup->currentPageId = -1; @@ -3450,18 +3451,18 @@ int32_t doInitAggInfoSup(SAggSupporter* pAggSup, SqlFunctionCtx* pCtx, int32_t n getBufferPgSize(pAggSup->resultRowSize, &defaultPgsz, &defaultBufsz); if (!osTempSpaceAvailable()) { - terrno = TSDB_CODE_NO_AVAIL_DISK; - qError("Init stream agg supporter failed since %s", terrstr(terrno)); - return terrno; - } - - int32_t code = createDiskbasedBuf(&pAggSup->pResultBuf, defaultPgsz, defaultBufsz, pKey, tsTempDir); - if (code != TSDB_CODE_SUCCESS) { - qError("Create agg result buf failed since %s", tstrerror(code)); + code = TSDB_CODE_NO_AVAIL_DISK; + qError("Init stream agg supporter failed since %s, %s", terrstr(code), pKey); return code; } - return TSDB_CODE_SUCCESS; + code = createDiskbasedBuf(&pAggSup->pResultBuf, defaultPgsz, defaultBufsz, pKey, tsTempDir); + if (code != TSDB_CODE_SUCCESS) { + qError("Create agg result buf failed since %s, %s", tstrerror(code), pKey); + return code; + } + + return code; } void cleanupAggSup(SAggSupporter* pAggSup) { diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 9ea6b4c42f..c28bc7e9e8 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c 
@@ -1828,12 +1828,6 @@ static bool timeWindowinterpNeeded(SqlFunctionCtx* pCtx, int32_t numOfCols, SInt return needed; } -void increaseTs(SqlFunctionCtx* pCtx) { - if (pCtx[0].pExpr->pExpr->_function.pFunctNode->funcType == FUNCTION_TYPE_WSTART) { -// pCtx[0].increase = true; - } -} - void initIntervalDownStream(SOperatorInfo* downstream, uint16_t type, SAggSupporter* pSup) { if (downstream->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) { // Todo(liuyao) support partition by column @@ -1895,7 +1889,6 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* if (isStream) { ASSERT(numOfCols > 0); - increaseTs(pSup->pCtx); initStreamFunciton(pSup->pCtx, pSup->numOfExprs); } @@ -3050,6 +3043,7 @@ static void clearStreamIntervalOperator(SStreamFinalIntervalOperatorInfo* pInfo) tSimpleHashClear(pInfo->aggSup.pResultRowHashTable); clearDiskbasedBuf(pInfo->aggSup.pResultBuf); initResultRowInfo(&pInfo->binfo.resultRowInfo); + pInfo->aggSup.currentPageId = -1; } static void clearSpecialDataBlock(SSDataBlock* pBlock) { @@ -3420,7 +3414,6 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, initBasicInfo(&pInfo->binfo, pResBlock); ASSERT(numOfCols > 0); - increaseTs(pOperator->exprSupp.pCtx); initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window); initResultRowInfo(&pInfo->binfo.resultRowInfo); @@ -3451,6 +3444,7 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, // semi interval operator does not catch result pInfo->isFinal = false; pOperator->name = "StreamSemiIntervalOperator"; + ASSERT(pInfo->aggSup.currentPageId == -1); } if (!IS_FINAL_OP(pInfo) || numOfChild == 0) { @@ -3563,7 +3557,6 @@ int32_t initBasicInfoEx(SOptrBasicInfo* pBasicInfo, SExprSupp* pSup, SExprInfo* } ASSERT(numOfCols > 0); - increaseTs(pSup->pCtx); return TSDB_CODE_SUCCESS; } From 4d2d0cf9646cda3d9020ab80ab28fb30f6356e5a Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Tue, 30 Aug 2022 
14:49:13 +0800 Subject: [PATCH 27/72] enh: add query metric --- source/client/inc/clientInt.h | 3 ++- source/client/src/clientEnv.c | 18 +++++++++--------- source/client/src/clientImpl.c | 4 +++- 3 files changed, 14 insertions(+), 11 deletions(-) diff --git a/source/client/inc/clientInt.h b/source/client/inc/clientInt.h index d57175d31b..4331da1506 100644 --- a/source/client/inc/clientInt.h +++ b/source/client/inc/clientInt.h @@ -102,6 +102,7 @@ typedef struct SQueryExecMetric { int64_t ctgEnd; // end to parse, us int64_t semanticEnd; int64_t planEnd; + int64_t resultReady; int64_t execEnd; int64_t send; // start to send to server, us int64_t rsp; // receive response from server, us @@ -370,7 +371,7 @@ void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData* int32_t refreshMeta(STscObj* pTscObj, SRequestObj* pRequest); int32_t updateQnodeList(SAppInstInfo* pInfo, SArray* pNodeList); void doAsyncQuery(SRequestObj* pRequest, bool forceUpdateMeta); -int32_t removeMeta(STscObj* pTscObj, SArray* tbList); +int32_t removeMeta(STscObj* pTscObj, SArray* tbList); int32_t handleAlterTbExecRes(void* res, struct SCatalog* pCatalog); int32_t handleCreateTbExecRes(void* res, SCatalog* pCatalog); bool qnodeRequired(SRequestObj* pRequest); diff --git a/source/client/src/clientEnv.c b/source/client/src/clientEnv.c index 9117287b32..bf92a9ba6a 100644 --- a/source/client/src/clientEnv.c +++ b/source/client/src/clientEnv.c @@ -76,19 +76,19 @@ static void deregisterRequest(SRequestObj *pRequest) { pRequest->self, pTscObj->id, pRequest->requestId, duration / 1000, num, currentInst); if (QUERY_NODE_VNODE_MODIF_STMT == pRequest->stmtType) { - tscPerf("insert duration %" PRId64 "us: syntax:%" PRId64 "us, ctg:%" PRId64 "us, semantic:%" PRId64 "us, exec:%" PRId64 "us", - duration, pRequest->metric.syntaxEnd - pRequest->metric.syntaxStart, - pRequest->metric.ctgEnd - pRequest->metric.ctgStart, - pRequest->metric.semanticEnd - pRequest->metric.ctgEnd, + tscPerf("insert duration 
%" PRId64 "us: syntax:%" PRId64 "us, ctg:%" PRId64 "us, semantic:%" PRId64 + "us, exec:%" PRId64 "us", + duration, pRequest->metric.syntaxEnd - pRequest->metric.syntaxStart, + pRequest->metric.ctgEnd - pRequest->metric.ctgStart, pRequest->metric.semanticEnd - pRequest->metric.ctgEnd, pRequest->metric.execEnd - pRequest->metric.semanticEnd); atomic_add_fetch_64((int64_t *)&pActivity->insertElapsedTime, duration); } else if (QUERY_NODE_SELECT_STMT == pRequest->stmtType) { - tscPerf("select duration %" PRId64 "us: syntax:%" PRId64 "us, ctg:%" PRId64 "us, semantic:%" PRId64 "us, planner:%" PRId64 "us, exec:%" PRId64 "us", - duration, pRequest->metric.syntaxEnd - pRequest->metric.syntaxStart, - pRequest->metric.ctgEnd - pRequest->metric.ctgStart, - pRequest->metric.semanticEnd - pRequest->metric.ctgEnd, + tscPerf("select duration %" PRId64 "us: syntax:%" PRId64 "us, ctg:%" PRId64 "us, semantic:%" PRId64 + "us, planner:%" PRId64 "us, exec:%" PRId64 "us", + duration, pRequest->metric.syntaxEnd - pRequest->metric.syntaxStart, + pRequest->metric.ctgEnd - pRequest->metric.ctgStart, pRequest->metric.semanticEnd - pRequest->metric.ctgEnd, pRequest->metric.planEnd - pRequest->metric.semanticEnd, - nowUs - pRequest->metric.semanticEnd); + pRequest->metric.resultReady - pRequest->metric.planEnd); atomic_add_fetch_64((int64_t *)&pActivity->queryElapsedTime, duration); } diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index 97429e099b..f91ceb3184 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -728,7 +728,7 @@ int32_t handleSubmitExecRes(SRequestObj* pRequest, void* res, SCatalog* pCatalog tFreeSTableMetaRsp(blk->pMeta); taosMemoryFreeClear(blk->pMeta); } - + if (NULL == blk->tblFName || 0 == blk->tblFName[0]) { continue; } @@ -851,6 +851,8 @@ void schedulerExecCb(SExecResult* pResult, void* param, int32_t code) { SRequestObj* pRequest = (SRequestObj*)param; pRequest->code = code; + pRequest->metric.resultReady = 
taosGetTimestampUs(); + if (pResult) { memcpy(&pRequest->body.resInfo.execRes, pResult, sizeof(*pResult)); } From 399e921e9c006dd0170ddb850894141b1aa5df29 Mon Sep 17 00:00:00 2001 From: cpwu Date: Tue, 30 Aug 2022 15:17:41 +0800 Subject: [PATCH 28/72] fix timetruncate case --- tests/system-test/2-query/timetruncate.py | 26 ++++++++--------------- 1 file changed, 9 insertions(+), 17 deletions(-) diff --git a/tests/system-test/2-query/timetruncate.py b/tests/system-test/2-query/timetruncate.py index 357d9fa957..d773114c3c 100644 --- a/tests/system-test/2-query/timetruncate.py +++ b/tests/system-test/2-query/timetruncate.py @@ -82,39 +82,31 @@ class TDTestCase: ts_result = self.get_time.get_us_timestamp(str(tdSql.queryResult[i][0])) tdSql.checkEqual(ts_result,int(date_time[i]/1000/1000/60/60/24/7)*7*24*60*60*1000*1000) - def check_ns_timestamp(self, unit, date_time:list): + def check_ns_timestamp(self,unit,date_time): if unit.lower() == '1b': for i in range(len(self.ts_str)): - ts_result = self.get_time.get_ns_timestamp(str(tdSql.queryResult[i][0])) - tdSql.checkEqual(ts_result,int(date_time[i])) + tdSql.checkEqual(tdSql.queryResult[i][0],int(date_time[i])) elif unit.lower() == '1u': for i in range(len(self.ts_str)): - ts_result = self.get_time.get_ns_timestamp(str(tdSql.queryResult[i][0])) - tdSql.checkEqual(ts_result,int(date_time[i]*1000/1000/1000)*1000) + tdSql.checkEqual(tdSql.queryResult[i][0],int(date_time[i]*1000/1000/1000)*1000) elif unit.lower() == '1a': for i in range(len(self.ts_str)): - ts_result = self.get_time.get_ns_timestamp(str(tdSql.queryResult[i][0])) - tdSql.checkEqual(ts_result,int(date_time[i]*1000/1000/1000/1000)*1000*1000) + tdSql.checkEqual(tdSql.queryResult[i][0],int(date_time[i]*1000/1000/1000/1000)*1000*1000) elif unit.lower() == '1s': for i in range(len(self.ts_str)): - ts_result = self.get_time.get_ns_timestamp(str(tdSql.queryResult[i][0])) - tdSql.checkEqual(ts_result,int(date_time[i]*1000/1000/1000/1000/1000)*1000*1000*1000) + 
tdSql.checkEqual(tdSql.queryResult[i][0],int(date_time[i]*1000/1000/1000/1000/1000)*1000*1000*1000) elif unit.lower() == '1m': for i in range(len(self.ts_str)): - ts_result = self.get_time.get_ns_timestamp(str(tdSql.queryResult[i][0])) - tdSql.checkEqual(ts_result,int(date_time[i]*1000/1000/1000/1000/1000/60)*60*1000*1000*1000) + tdSql.checkEqual(tdSql.queryResult[i][0],int(date_time[i]*1000/1000/1000/1000/1000/60)*60*1000*1000*1000) elif unit.lower() == '1h': for i in range(len(self.ts_str)): - ts_result = self.get_time.get_ns_timestamp(str(tdSql.queryResult[i][0])) - tdSql.checkEqual(ts_result,int(date_time[i]*1000/1000/1000/1000/1000/60/60)*60*60*1000*1000*1000 ) + tdSql.checkEqual(tdSql.queryResult[i][0],int(date_time[i]*1000/1000/1000/1000/1000/60/60)*60*60*1000*1000*1000 ) elif unit.lower() == '1d': for i in range(len(self.ts_str)): - ts_result = self.get_time.get_ns_timestamp(str(tdSql.queryResult[i][0])) - tdSql.checkEqual(ts_result,int(date_time[i]*1000/1000/1000/1000/1000/60/60/24)*24*60*60*1000*1000*1000 ) + tdSql.checkEqual(tdSql.queryResult[i][0],int(date_time[i]*1000/1000/1000/1000/1000/60/60/24)*24*60*60*1000*1000*1000 ) elif unit.lower() == '1w': for i in range(len(self.ts_str)): - ts_result = self.get_time.get_ns_timestamp(str(tdSql.queryResult[i][0])) - tdSql.checkEqual(ts_result,int(date_time[i]*1000/1000/1000/1000/1000/60/60/24/7)*7*24*60*60*1000*1000*1000) + tdSql.checkEqual(tdSql.queryResult[i][0],int(date_time[i]*1000/1000/1000/1000/1000/60/60/24/7)*7*24*60*60*1000*1000*1000) def check_tb_type(self,unit,tb_type): if tb_type.lower() == 'ntb': From aec3a06431ab395709e6839cd720c388b5bc7f68 Mon Sep 17 00:00:00 2001 From: afwerar <1296468573@qq.com> Date: Tue, 30 Aug 2022 16:11:42 +0800 Subject: [PATCH 29/72] feature: shell csv rfc 4180 --- tools/shell/inc/shellInt.h | 2 +- tools/shell/src/shellEngine.c | 56 +++++++++++++++++++++----------- tools/shell/src/shellWebsocket.c | 24 ++++++++------ 3 files changed, 52 insertions(+), 30 deletions(-) diff 
--git a/tools/shell/inc/shellInt.h b/tools/shell/inc/shellInt.h index 26ca6895ac..6a9d6cb0df 100644 --- a/tools/shell/inc/shellInt.h +++ b/tools/shell/inc/shellInt.h @@ -113,7 +113,7 @@ int32_t shellExecute(); int32_t shellCalcColWidth(TAOS_FIELD *field, int32_t precision); void shellPrintHeader(TAOS_FIELD *fields, int32_t *width, int32_t num_fields); void shellPrintField(const char *val, TAOS_FIELD *field, int32_t width, int32_t length, int32_t precision); -void shellDumpFieldToFile(TdFilePtr pFile, const char *val, TAOS_FIELD *field, int32_t length, int32_t precision); +void shellDumpFieldToFile(TdFilePtr pFile, const char *val, TAOS_FIELD *field, int32_t length, int32_t precision, bool quotation); // shellUtil.c int32_t shellCheckIntSize(); void shellPrintVersion(); diff --git a/tools/shell/src/shellEngine.c b/tools/shell/src/shellEngine.c index 68e3a272c3..10e806e041 100644 --- a/tools/shell/src/shellEngine.c +++ b/tools/shell/src/shellEngine.c @@ -264,9 +264,15 @@ char *shellFormatTimestamp(char *buf, int64_t val, int32_t precision) { return buf; } -void shellDumpFieldToFile(TdFilePtr pFile, const char *val, TAOS_FIELD *field, int32_t length, int32_t precision) { +void shellDumpFieldToFile(TdFilePtr pFile, const char *val, TAOS_FIELD *field, int32_t length, int32_t precision, bool quotation) { + char quotationStr[2]; + quotationStr[0] = 0; + quotationStr[1] = 0; + if (quotation) { + quotationStr[0] = '\"'; + } if (val == NULL) { - taosFprintfFile(pFile, "%s", TSDB_DATA_NULL_STR); + taosFprintfFile(pFile, "%s%s%s", quotationStr, TSDB_DATA_NULL_STR, quotationStr); return; } @@ -274,39 +280,39 @@ void shellDumpFieldToFile(TdFilePtr pFile, const char *val, TAOS_FIELD *field, i char buf[TSDB_MAX_BYTES_PER_ROW]; switch (field->type) { case TSDB_DATA_TYPE_BOOL: - taosFprintfFile(pFile, "%d", ((((int32_t)(*((char *)val))) == 1) ? 1 : 0)); + taosFprintfFile(pFile, "%s%d%s", quotationStr, ((((int32_t)(*((char *)val))) == 1) ? 
1 : 0), quotationStr); break; case TSDB_DATA_TYPE_TINYINT: - taosFprintfFile(pFile, "%d", *((int8_t *)val)); + taosFprintfFile(pFile, "%s%d%s", quotationStr, *((int8_t *)val), quotationStr); break; case TSDB_DATA_TYPE_UTINYINT: - taosFprintfFile(pFile, "%u", *((uint8_t *)val)); + taosFprintfFile(pFile, "%s%u%s", quotationStr, *((uint8_t *)val), quotationStr); break; case TSDB_DATA_TYPE_SMALLINT: - taosFprintfFile(pFile, "%d", *((int16_t *)val)); + taosFprintfFile(pFile, "%s%d%s", quotationStr, *((int16_t *)val), quotationStr); break; case TSDB_DATA_TYPE_USMALLINT: - taosFprintfFile(pFile, "%u", *((uint16_t *)val)); + taosFprintfFile(pFile, "%s%u%s", quotationStr, *((uint16_t *)val), quotationStr); break; case TSDB_DATA_TYPE_INT: - taosFprintfFile(pFile, "%d", *((int32_t *)val)); + taosFprintfFile(pFile, "%s%d%s", quotationStr, *((int32_t *)val), quotationStr); break; case TSDB_DATA_TYPE_UINT: - taosFprintfFile(pFile, "%u", *((uint32_t *)val)); + taosFprintfFile(pFile, "%s%u%s", quotationStr, *((uint32_t *)val), quotationStr); break; case TSDB_DATA_TYPE_BIGINT: - taosFprintfFile(pFile, "%" PRId64, *((int64_t *)val)); + taosFprintfFile(pFile, "%s%" PRId64 "%s", quotationStr, *((int64_t *)val), quotationStr); break; case TSDB_DATA_TYPE_UBIGINT: - taosFprintfFile(pFile, "%" PRIu64, *((uint64_t *)val)); + taosFprintfFile(pFile, "%s%" PRIu64 "%s", quotationStr, *((uint64_t *)val), quotationStr); break; case TSDB_DATA_TYPE_FLOAT: - taosFprintfFile(pFile, "%.5f", GET_FLOAT_VAL(val)); + taosFprintfFile(pFile, "%s%.5f%s", quotationStr, GET_FLOAT_VAL(val), quotationStr); break; case TSDB_DATA_TYPE_DOUBLE: - n = snprintf(buf, TSDB_MAX_BYTES_PER_ROW, "%*.9f", length, GET_DOUBLE_VAL(val)); + n = snprintf(buf, TSDB_MAX_BYTES_PER_ROW, "%s%*.9f%s", quotationStr, length, GET_DOUBLE_VAL(val), quotationStr); if (n > TMAX(25, length)) { - taosFprintfFile(pFile, "%*.15e", length, GET_DOUBLE_VAL(val)); + taosFprintfFile(pFile, "%s%*.15e%s", quotationStr, length, GET_DOUBLE_VAL(val), 
quotationStr); } else { taosFprintfFile(pFile, "%s", buf); } @@ -314,13 +320,21 @@ void shellDumpFieldToFile(TdFilePtr pFile, const char *val, TAOS_FIELD *field, i case TSDB_DATA_TYPE_BINARY: case TSDB_DATA_TYPE_NCHAR: case TSDB_DATA_TYPE_JSON: - memcpy(buf, val, length); - buf[length] = 0; - taosFprintfFile(pFile, "\'%s\'", buf); + int32_t bufIndex = 0; + for (int32_t i = 0; i < length; i++) { + buf[bufIndex] = val[i]; + bufIndex++; + if (val[i] == '\"') { + buf[bufIndex] = val[i]; + bufIndex++; + } + } + buf[bufIndex] = 0; + taosFprintfFile(pFile, "%s%s%s", quotationStr, buf, quotationStr); break; case TSDB_DATA_TYPE_TIMESTAMP: shellFormatTimestamp(buf, *(int64_t *)val, precision); - taosFprintfFile(pFile, "'%s'", buf); + taosFprintfFile(pFile, "%s%s%s", quotationStr, buf, quotationStr); break; default: break; @@ -347,12 +361,16 @@ int32_t shellDumpResultToFile(const char *fname, TAOS_RES *tres) { TAOS_FIELD *fields = taos_fetch_fields(tres); int32_t num_fields = taos_num_fields(tres); int32_t precision = taos_result_precision(tres); + bool quotation = false; for (int32_t col = 0; col < num_fields; col++) { if (col > 0) { taosFprintfFile(pFile, ","); } taosFprintfFile(pFile, "%s", fields[col].name); + if (fields[col].type == TSDB_DATA_TYPE_BINARY || fields[col].type == TSDB_DATA_TYPE_NCHAR || fields[col].type == TSDB_DATA_TYPE_JSON) { + quotation = true; + } } taosFprintfFile(pFile, "\r\n"); @@ -363,7 +381,7 @@ int32_t shellDumpResultToFile(const char *fname, TAOS_RES *tres) { if (i > 0) { taosFprintfFile(pFile, ","); } - shellDumpFieldToFile(pFile, (const char *)row[i], fields + i, length[i], precision); + shellDumpFieldToFile(pFile, (const char *)row[i], fields + i, length[i], precision, quotation); } taosFprintfFile(pFile, "\r\n"); diff --git a/tools/shell/src/shellWebsocket.c b/tools/shell/src/shellWebsocket.c index 2dcab04b3f..37961b75c7 100644 --- a/tools/shell/src/shellWebsocket.c +++ b/tools/shell/src/shellWebsocket.c @@ -132,17 +132,21 @@ static int 
dumpWebsocketToFile(const char* fname, WS_RES* wres, double* pexecute *pexecute_time += (double)(ws_take_timing(wres)/1E6); if (!rows) { taosCloseFile(&pFile); - return 0; + return 0; } int numOfRows = 0; TAOS_FIELD* fields = (TAOS_FIELD*)ws_fetch_fields(wres); int num_fields = ws_field_count(wres); int precision = ws_result_precision(wres); + bool quotation = false; for (int col = 0; col < num_fields; col++) { if (col > 0) { taosFprintfFile(pFile, ","); } taosFprintfFile(pFile, "%s", fields[col].name); + if (fields[col].type == TSDB_DATA_TYPE_BINARY || fields[col].type == TSDB_DATA_TYPE_NCHAR || fields[col].type == TSDB_DATA_TYPE_JSON) { + quotation = true; + } } taosFprintfFile(pFile, "\r\n"); do { @@ -155,7 +159,7 @@ static int dumpWebsocketToFile(const char* fname, WS_RES* wres, double* pexecute taosFprintfFile(pFile, ","); } const void *value = ws_get_value_in_block(wres, i, j, &ty, &len); - shellDumpFieldToFile(pFile, (const char*)value, fields + j, len, precision); + shellDumpFieldToFile(pFile, (const char*)value, fields + j, len, precision, quotation); } taosFprintfFile(pFile, "\r\n"); } @@ -233,17 +237,17 @@ void shellRunSingleCommandWebsocketImp(char *command) { if (shellRegexMatch(command, "^\\s*use\\s+[a-zA-Z0-9_]+\\s*;\\s*$", REG_EXTENDED | REG_ICASE)) { fprintf(stdout, "Database changed.\r\n\r\n"); fflush(stdout); - ws_free_result(res); + ws_free_result(res); return; } int numOfRows = 0; if (ws_is_update_query(res)) { - numOfRows = ws_affected_rows(res); - et = taosGetTimestampUs(); + numOfRows = ws_affected_rows(res); + et = taosGetTimestampUs(); double total_time = (et - st)/1E3; double net_time = total_time - (double)execute_time; - printf("Query Ok, %d of %d row(s) in database\n", numOfRows, numOfRows); + printf("Query Ok, %d of %d row(s) in database\n", numOfRows, numOfRows); printf("Execute: %.2f ms Network: %.2f ms Total: %.2f ms\n", execute_time, net_time, total_time); } else { int error_no = 0; @@ -253,15 +257,15 @@ void 
shellRunSingleCommandWebsocketImp(char *command) { return; } et = taosGetTimestampUs(); - double total_time = (et - st) / 1E3; - double net_time = total_time - execute_time; + double total_time = (et - st) / 1E3; + double net_time = total_time - execute_time; if (error_no == 0 && !shell.stop_query) { printf("Query OK, %d row(s) in set\n", numOfRows); - printf("Execute: %.2f ms Network: %.2f ms Total: %.2f ms\n", execute_time, net_time, total_time); + printf("Execute: %.2f ms Network: %.2f ms Total: %.2f ms\n", execute_time, net_time, total_time); } else { printf("Query interrupted, %d row(s) in set (%.6fs)\n", numOfRows, (et - st)/1E6); - printf("Execute: %.2f ms Network: %.2f ms Total: %.2f ms\n", execute_time, net_time, total_time); + printf("Execute: %.2f ms Network: %.2f ms Total: %.2f ms\n", execute_time, net_time, total_time); } } printf("\n"); From 8a34a321c76e5587c759320be85b8ca0a18b3d80 Mon Sep 17 00:00:00 2001 From: afwerar <1296468573@qq.com> Date: Tue, 30 Aug 2022 16:30:20 +0800 Subject: [PATCH 30/72] feature: shell csv rfc 4180 --- source/util/src/version.c.in | 2 +- tools/shell/src/shellEngine.c | 16 +++++++++------- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/source/util/src/version.c.in b/source/util/src/version.c.in index be1a4a4048..cb307b57fc 100644 --- a/source/util/src/version.c.in +++ b/source/util/src/version.c.in @@ -1,4 +1,4 @@ -char version[12] = "${TD_VER_NUMBER}"; +char version[64] = "${TD_VER_NUMBER}"; char compatible_version[12] = "${TD_VER_COMPATIBLE}"; char gitinfo[48] = "${TD_VER_GIT}"; char buildinfo[64] = "Built at ${TD_VER_DATE}"; diff --git a/tools/shell/src/shellEngine.c b/tools/shell/src/shellEngine.c index 10e806e041..84987f80ec 100644 --- a/tools/shell/src/shellEngine.c +++ b/tools/shell/src/shellEngine.c @@ -320,17 +320,19 @@ void shellDumpFieldToFile(TdFilePtr pFile, const char *val, TAOS_FIELD *field, i case TSDB_DATA_TYPE_BINARY: case TSDB_DATA_TYPE_NCHAR: case TSDB_DATA_TYPE_JSON: - int32_t bufIndex = 
0; - for (int32_t i = 0; i < length; i++) { - buf[bufIndex] = val[i]; - bufIndex++; - if (val[i] == '\"') { + { + int32_t bufIndex = 0; + for (int32_t i = 0; i < length; i++) { buf[bufIndex] = val[i]; bufIndex++; + if (val[i] == '\"') { + buf[bufIndex] = val[i]; + bufIndex++; + } } + buf[bufIndex] = 0; + taosFprintfFile(pFile, "%s%s%s", quotationStr, buf, quotationStr); } - buf[bufIndex] = 0; - taosFprintfFile(pFile, "%s%s%s", quotationStr, buf, quotationStr); break; case TSDB_DATA_TYPE_TIMESTAMP: shellFormatTimestamp(buf, *(int64_t *)val, precision); From 825858b52ed8cccef2514f5961aa5b41b9de6429 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 30 Aug 2022 17:04:09 +0800 Subject: [PATCH 31/72] fix(query): reset the default page id. --- source/libs/executor/src/timewindowoperator.c | 1 + 1 file changed, 1 insertion(+) diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index c28bc7e9e8..152bd5939d 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -4330,6 +4330,7 @@ static void clearStreamSessionOperator(SStreamSessionAggOperatorInfo* pInfo) { } } clearDiskbasedBuf(pInfo->streamAggSup.pResultBuf); + pInfo->streamAggSup.currentPageId = -1; } static void removeSessionResults(SHashObj* pHashMap, SArray* pWins) { From 4632c3751b0b6623a0e79c9c9937ac14225228a3 Mon Sep 17 00:00:00 2001 From: afwerar <1296468573@qq.com> Date: Tue, 30 Aug 2022 17:21:38 +0800 Subject: [PATCH 32/72] feature: shell csv rfc 4180 --- tools/shell/inc/shellInt.h | 2 +- tools/shell/src/shellEngine.c | 43 ++++++++++++++++++----------------- 2 files changed, 23 insertions(+), 22 deletions(-) diff --git a/tools/shell/inc/shellInt.h b/tools/shell/inc/shellInt.h index 6a9d6cb0df..15f6f6dc6a 100644 --- a/tools/shell/inc/shellInt.h +++ b/tools/shell/inc/shellInt.h @@ -113,7 +113,7 @@ int32_t shellExecute(); int32_t shellCalcColWidth(TAOS_FIELD *field, int32_t precision); void 
shellPrintHeader(TAOS_FIELD *fields, int32_t *width, int32_t num_fields); void shellPrintField(const char *val, TAOS_FIELD *field, int32_t width, int32_t length, int32_t precision); -void shellDumpFieldToFile(TdFilePtr pFile, const char *val, TAOS_FIELD *field, int32_t length, int32_t precision, bool quotation); +void shellDumpFieldToFile(TdFilePtr pFile, const char *val, TAOS_FIELD *field, int32_t length, int32_t precision); // shellUtil.c int32_t shellCheckIntSize(); void shellPrintVersion(); diff --git a/tools/shell/src/shellEngine.c b/tools/shell/src/shellEngine.c index 84987f80ec..fdc28c92ec 100644 --- a/tools/shell/src/shellEngine.c +++ b/tools/shell/src/shellEngine.c @@ -264,15 +264,9 @@ char *shellFormatTimestamp(char *buf, int64_t val, int32_t precision) { return buf; } -void shellDumpFieldToFile(TdFilePtr pFile, const char *val, TAOS_FIELD *field, int32_t length, int32_t precision, bool quotation) { - char quotationStr[2]; - quotationStr[0] = 0; - quotationStr[1] = 0; - if (quotation) { - quotationStr[0] = '\"'; - } +void shellDumpFieldToFile(TdFilePtr pFile, const char *val, TAOS_FIELD *field, int32_t length, int32_t precision) { if (val == NULL) { - taosFprintfFile(pFile, "%s%s%s", quotationStr, TSDB_DATA_NULL_STR, quotationStr); + taosFprintfFile(pFile, "%s", TSDB_DATA_NULL_STR); return; } @@ -280,39 +274,39 @@ void shellDumpFieldToFile(TdFilePtr pFile, const char *val, TAOS_FIELD *field, i char buf[TSDB_MAX_BYTES_PER_ROW]; switch (field->type) { case TSDB_DATA_TYPE_BOOL: - taosFprintfFile(pFile, "%s%d%s", quotationStr, ((((int32_t)(*((char *)val))) == 1) ? 1 : 0), quotationStr); + taosFprintfFile(pFile, "%d", ((((int32_t)(*((char *)val))) == 1) ? 
1 : 0)); break; case TSDB_DATA_TYPE_TINYINT: - taosFprintfFile(pFile, "%s%d%s", quotationStr, *((int8_t *)val), quotationStr); + taosFprintfFile(pFile, "%d", *((int8_t *)val)); break; case TSDB_DATA_TYPE_UTINYINT: - taosFprintfFile(pFile, "%s%u%s", quotationStr, *((uint8_t *)val), quotationStr); + taosFprintfFile(pFile, "%u", *((uint8_t *)val)); break; case TSDB_DATA_TYPE_SMALLINT: - taosFprintfFile(pFile, "%s%d%s", quotationStr, *((int16_t *)val), quotationStr); + taosFprintfFile(pFile, "%d", *((int16_t *)val)); break; case TSDB_DATA_TYPE_USMALLINT: - taosFprintfFile(pFile, "%s%u%s", quotationStr, *((uint16_t *)val), quotationStr); + taosFprintfFile(pFile, "%u", *((uint16_t *)val)); break; case TSDB_DATA_TYPE_INT: - taosFprintfFile(pFile, "%s%d%s", quotationStr, *((int32_t *)val), quotationStr); + taosFprintfFile(pFile, "%d", *((int32_t *)val)); break; case TSDB_DATA_TYPE_UINT: - taosFprintfFile(pFile, "%s%u%s", quotationStr, *((uint32_t *)val), quotationStr); + taosFprintfFile(pFile, "%u", *((uint32_t *)val)); break; case TSDB_DATA_TYPE_BIGINT: - taosFprintfFile(pFile, "%s%" PRId64 "%s", quotationStr, *((int64_t *)val), quotationStr); + taosFprintfFile(pFile, "%" PRId64, *((int64_t *)val)); break; case TSDB_DATA_TYPE_UBIGINT: - taosFprintfFile(pFile, "%s%" PRIu64 "%s", quotationStr, *((uint64_t *)val), quotationStr); + taosFprintfFile(pFile, "%" PRIu64, *((uint64_t *)val)); break; case TSDB_DATA_TYPE_FLOAT: - taosFprintfFile(pFile, "%s%.5f%s", quotationStr, GET_FLOAT_VAL(val), quotationStr); + taosFprintfFile(pFile, "%.5f", GET_FLOAT_VAL(val)); break; case TSDB_DATA_TYPE_DOUBLE: - n = snprintf(buf, TSDB_MAX_BYTES_PER_ROW, "%s%*.9f%s", quotationStr, length, GET_DOUBLE_VAL(val), quotationStr); + n = snprintf(buf, TSDB_MAX_BYTES_PER_ROW, "%*.9f", length, GET_DOUBLE_VAL(val)); if (n > TMAX(25, length)) { - taosFprintfFile(pFile, "%s%*.15e%s", quotationStr, length, GET_DOUBLE_VAL(val), quotationStr); + taosFprintfFile(pFile, "%*.15e", length, GET_DOUBLE_VAL(val)); } 
else { taosFprintfFile(pFile, "%s", buf); } @@ -321,13 +315,20 @@ void shellDumpFieldToFile(TdFilePtr pFile, const char *val, TAOS_FIELD *field, i case TSDB_DATA_TYPE_NCHAR: case TSDB_DATA_TYPE_JSON: { + char quotationStr[2]; int32_t bufIndex = 0; + quotationStr[0] = 0; + quotationStr[1] = 0; for (int32_t i = 0; i < length; i++) { buf[bufIndex] = val[i]; bufIndex++; if (val[i] == '\"') { buf[bufIndex] = val[i]; bufIndex++; + quotationStr[0] = '\"'; + } + if (val[i] == ',') { + quotationStr[0] = '\"'; } } buf[bufIndex] = 0; @@ -336,7 +337,7 @@ void shellDumpFieldToFile(TdFilePtr pFile, const char *val, TAOS_FIELD *field, i break; case TSDB_DATA_TYPE_TIMESTAMP: shellFormatTimestamp(buf, *(int64_t *)val, precision); - taosFprintfFile(pFile, "%s%s%s", quotationStr, buf, quotationStr); + taosFprintfFile(pFile, "%s", buf); break; default: break; From 3220a9547f8fb0c79069713fce4fc5166d73999a Mon Sep 17 00:00:00 2001 From: afwerar <1296468573@qq.com> Date: Tue, 30 Aug 2022 17:23:02 +0800 Subject: [PATCH 33/72] feature: shell csv rfc 4180 --- tools/shell/src/shellWebsocket.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/tools/shell/src/shellWebsocket.c b/tools/shell/src/shellWebsocket.c index 37961b75c7..84d083e1c1 100644 --- a/tools/shell/src/shellWebsocket.c +++ b/tools/shell/src/shellWebsocket.c @@ -138,15 +138,11 @@ static int dumpWebsocketToFile(const char* fname, WS_RES* wres, double* pexecute TAOS_FIELD* fields = (TAOS_FIELD*)ws_fetch_fields(wres); int num_fields = ws_field_count(wres); int precision = ws_result_precision(wres); - bool quotation = false; for (int col = 0; col < num_fields; col++) { if (col > 0) { taosFprintfFile(pFile, ","); } taosFprintfFile(pFile, "%s", fields[col].name); - if (fields[col].type == TSDB_DATA_TYPE_BINARY || fields[col].type == TSDB_DATA_TYPE_NCHAR || fields[col].type == TSDB_DATA_TYPE_JSON) { - quotation = true; - } } taosFprintfFile(pFile, "\r\n"); do { @@ -159,7 +155,7 @@ static int 
dumpWebsocketToFile(const char* fname, WS_RES* wres, double* pexecute taosFprintfFile(pFile, ","); } const void *value = ws_get_value_in_block(wres, i, j, &ty, &len); - shellDumpFieldToFile(pFile, (const char*)value, fields + j, len, precision, quotation); + shellDumpFieldToFile(pFile, (const char*)value, fields + j, len, precision); } taosFprintfFile(pFile, "\r\n"); } From c5f270abcbb5c49117462f57a94ebafeede749fb Mon Sep 17 00:00:00 2001 From: afwerar <1296468573@qq.com> Date: Tue, 30 Aug 2022 17:24:24 +0800 Subject: [PATCH 34/72] feature: shell csv rfc 4180 --- tools/shell/src/shellEngine.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/tools/shell/src/shellEngine.c b/tools/shell/src/shellEngine.c index fdc28c92ec..3b760a097f 100644 --- a/tools/shell/src/shellEngine.c +++ b/tools/shell/src/shellEngine.c @@ -364,16 +364,12 @@ int32_t shellDumpResultToFile(const char *fname, TAOS_RES *tres) { TAOS_FIELD *fields = taos_fetch_fields(tres); int32_t num_fields = taos_num_fields(tres); int32_t precision = taos_result_precision(tres); - bool quotation = false; for (int32_t col = 0; col < num_fields; col++) { if (col > 0) { taosFprintfFile(pFile, ","); } taosFprintfFile(pFile, "%s", fields[col].name); - if (fields[col].type == TSDB_DATA_TYPE_BINARY || fields[col].type == TSDB_DATA_TYPE_NCHAR || fields[col].type == TSDB_DATA_TYPE_JSON) { - quotation = true; - } } taosFprintfFile(pFile, "\r\n"); @@ -384,7 +380,7 @@ int32_t shellDumpResultToFile(const char *fname, TAOS_RES *tres) { if (i > 0) { taosFprintfFile(pFile, ","); } - shellDumpFieldToFile(pFile, (const char *)row[i], fields + i, length[i], precision, quotation); + shellDumpFieldToFile(pFile, (const char *)row[i], fields + i, length[i], precision); } taosFprintfFile(pFile, "\r\n"); From 52bedb2b532f605059915541e719b1bb532192bf Mon Sep 17 00:00:00 2001 From: afwerar <1296468573@qq.com> Date: Tue, 30 Aug 2022 17:27:35 +0800 Subject: [PATCH 35/72] feature: shell csv rfc 4180 --- 
tools/shell/src/shellWebsocket.c | 102 +++++++++++++++---------------- 1 file changed, 51 insertions(+), 51 deletions(-) diff --git a/tools/shell/src/shellWebsocket.c b/tools/shell/src/shellWebsocket.c index 84d083e1c1..b8b8392b96 100644 --- a/tools/shell/src/shellWebsocket.c +++ b/tools/shell/src/shellWebsocket.c @@ -18,19 +18,19 @@ #include "shellInt.h" int shell_conn_ws_server(bool first) { - shell.ws_conn = ws_connect_with_dsn(shell.args.dsn); - if (!shell.ws_conn) { - fprintf(stderr, "failed to connect %s, reason: %s\n", - shell.args.dsn, ws_errstr(NULL)); - return -1; - } - if (first && shell.args.restful) { - fprintf(stdout, "successfully connect to %s\n\n", - shell.args.dsn); - } else if (first && shell.args.cloud) { - fprintf(stdout, "successfully connect to cloud service\n"); - } - return 0; + shell.ws_conn = ws_connect_with_dsn(shell.args.dsn); + if (!shell.ws_conn) { + fprintf(stderr, "failed to connect %s, reason: %s\n", + shell.args.dsn, ws_errstr(NULL)); + return -1; + } + if (first && shell.args.restful) { + fprintf(stdout, "successfully connect to %s\n\n", + shell.args.dsn); + } else if (first && shell.args.cloud) { + fprintf(stdout, "successfully connect to cloud service\n"); + } + return 0; } static int horizontalPrintWebsocket(WS_RES* wres, double* execute_time) { @@ -39,7 +39,7 @@ static int horizontalPrintWebsocket(WS_RES* wres, double* execute_time) { ws_fetch_block(wres, &data, &rows); *execute_time += (double)(ws_take_timing(wres)/1E6); if (!rows) { - return 0; + return 0; } int num_fields = ws_field_count(wres); TAOS_FIELD* fields = (TAOS_FIELD*)ws_fetch_fields(wres); @@ -64,7 +64,7 @@ static int horizontalPrintWebsocket(WS_RES* wres, double* execute_time) { putchar(' '); putchar('|'); } - putchar('\r'); + putchar('\r'); putchar('\n'); } numOfRows += rows; @@ -79,7 +79,7 @@ static int verticalPrintWebsocket(WS_RES* wres, double* pexecute_time) { ws_fetch_block(wres, &data, &rows); *pexecute_time += (double)(ws_take_timing(wres)/1E6); if 
(!rows) { - return 0; + return 0; } int num_fields = ws_field_count(wres); TAOS_FIELD* fields = (TAOS_FIELD*)ws_fetch_fields(wres); @@ -98,7 +98,7 @@ static int verticalPrintWebsocket(WS_RES* wres, double* pexecute_time) { uint32_t len; for (int i = 0; i < rows; i++) { printf("*************************** %d.row ***************************\n", - numOfRows + 1); + numOfRows + 1); for (int j = 0; j < num_fields; j++) { TAOS_FIELD* field = fields + j; int padding = (int)(maxColNameLen - strlen(field->name)); @@ -121,7 +121,7 @@ static int dumpWebsocketToFile(const char* fname, WS_RES* wres, double* pexecute } TdFilePtr pFile = taosOpenFile(fullname, - TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC | TD_FILE_STREAM); + TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC | TD_FILE_STREAM); if (pFile == NULL) { fprintf(stderr, "failed to open file: %s\r\n", fullname); return -1; @@ -132,7 +132,7 @@ static int dumpWebsocketToFile(const char* fname, WS_RES* wres, double* pexecute *pexecute_time += (double)(ws_take_timing(wres)/1E6); if (!rows) { taosCloseFile(&pFile); - return 0; + return 0; } int numOfRows = 0; TAOS_FIELD* fields = (TAOS_FIELD*)ws_fetch_fields(wres); @@ -207,7 +207,7 @@ void shellRunSingleCommandWebsocketImp(char *command) { } if (!shell.ws_conn && shell_conn_ws_server(0)) { - return; + return; } shell.stop_query = false; @@ -216,16 +216,16 @@ void shellRunSingleCommandWebsocketImp(char *command) { WS_RES* res = ws_query_timeout(shell.ws_conn, command, shell.args.timeout); int code = ws_errno(res); if (code != 0) { - et = taosGetTimestampUs(); - fprintf(stderr, "\nDB: error: %s (%.6fs)\n", ws_errstr(res), (et - st)/1E6); - if (code == TSDB_CODE_WS_SEND_TIMEOUT || code == TSDB_CODE_WS_RECV_TIMEOUT) { - fprintf(stderr, "Hint: use -t to increase the timeout in seconds\n"); - } else if (code == TSDB_CODE_WS_INTERNAL_ERRO || code == TSDB_CODE_WS_CLOSED) { - fprintf(stderr, "TDengine server is down, will try to reconnect\n"); - shell.ws_conn = NULL; - } - 
ws_free_result(res); - return; + et = taosGetTimestampUs(); + fprintf(stderr, "\nDB: error: %s (%.6fs)\n", ws_errstr(res), (et - st)/1E6); + if (code == TSDB_CODE_WS_SEND_TIMEOUT || code == TSDB_CODE_WS_RECV_TIMEOUT) { + fprintf(stderr, "Hint: use -t to increase the timeout in seconds\n"); + } else if (code == TSDB_CODE_WS_INTERNAL_ERRO || code == TSDB_CODE_WS_CLOSED) { + fprintf(stderr, "TDengine server is down, will try to reconnect\n"); + shell.ws_conn = NULL; + } + ws_free_result(res); + return; } double execute_time = ws_take_timing(res)/1E6; @@ -233,36 +233,36 @@ void shellRunSingleCommandWebsocketImp(char *command) { if (shellRegexMatch(command, "^\\s*use\\s+[a-zA-Z0-9_]+\\s*;\\s*$", REG_EXTENDED | REG_ICASE)) { fprintf(stdout, "Database changed.\r\n\r\n"); fflush(stdout); - ws_free_result(res); + ws_free_result(res); return; } int numOfRows = 0; if (ws_is_update_query(res)) { - numOfRows = ws_affected_rows(res); - et = taosGetTimestampUs(); + numOfRows = ws_affected_rows(res); + et = taosGetTimestampUs(); double total_time = (et - st)/1E3; double net_time = total_time - (double)execute_time; - printf("Query Ok, %d of %d row(s) in database\n", numOfRows, numOfRows); + printf("Query Ok, %d of %d row(s) in database\n", numOfRows, numOfRows); printf("Execute: %.2f ms Network: %.2f ms Total: %.2f ms\n", execute_time, net_time, total_time); } else { - int error_no = 0; - numOfRows = shellDumpWebsocket(res, fname, &error_no, printMode, &execute_time); - if (numOfRows < 0) { - ws_free_result(res); - return; - } - et = taosGetTimestampUs(); - double total_time = (et - st) / 1E3; - double net_time = total_time - execute_time; - if (error_no == 0 && !shell.stop_query) { - printf("Query OK, %d row(s) in set\n", numOfRows); - printf("Execute: %.2f ms Network: %.2f ms Total: %.2f ms\n", execute_time, net_time, total_time); - } else { - printf("Query interrupted, %d row(s) in set (%.6fs)\n", numOfRows, - (et - st)/1E6); - printf("Execute: %.2f ms Network: %.2f ms Total: 
%.2f ms\n", execute_time, net_time, total_time); - } + int error_no = 0; + numOfRows = shellDumpWebsocket(res, fname, &error_no, printMode, &execute_time); + if (numOfRows < 0) { + ws_free_result(res); + return; + } + et = taosGetTimestampUs(); + double total_time = (et - st) / 1E3; + double net_time = total_time - execute_time; + if (error_no == 0 && !shell.stop_query) { + printf("Query OK, %d row(s) in set\n", numOfRows); + printf("Execute: %.2f ms Network: %.2f ms Total: %.2f ms\n", execute_time, net_time, total_time); + } else { + printf("Query interrupted, %d row(s) in set (%.6fs)\n", numOfRows, + (et - st)/1E6); + printf("Execute: %.2f ms Network: %.2f ms Total: %.2f ms\n", execute_time, net_time, total_time); + } } printf("\n"); ws_free_result(res); From 753b7253005ad1e9f05faef9448be8c7ef0d00ad Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Tue, 30 Aug 2022 17:29:02 +0800 Subject: [PATCH 36/72] other: simplify qtaskinfo file naming --- source/dnode/vnode/src/sma/smaCommit.c | 2 +- source/dnode/vnode/src/sma/smaRollup.c | 10 ++-------- 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/source/dnode/vnode/src/sma/smaCommit.c b/source/dnode/vnode/src/sma/smaCommit.c index 4d1dcd6909..3cf50a035a 100644 --- a/source/dnode/vnode/src/sma/smaCommit.c +++ b/source/dnode/vnode/src/sma/smaCommit.c @@ -172,7 +172,7 @@ static int32_t tdCleanupQTaskInfoFiles(SSma *pSma, SRSmaStat *pRSmaStat) { TdDirPtr pDir = NULL; TdDirEntryPtr pDirEntry = NULL; char dir[TSDB_FILENAME_LEN]; - const char *pattern = "v[0-9]+qtaskinfo\\.ver([0-9]+)?$"; + const char *pattern = "v[0-9]+qinf\\.v([0-9]+)?$"; regex_t regex; int code = 0; diff --git a/source/dnode/vnode/src/sma/smaRollup.c b/source/dnode/vnode/src/sma/smaRollup.c index 89cdd58c4e..af41c53956 100644 --- a/source/dnode/vnode/src/sma/smaRollup.c +++ b/source/dnode/vnode/src/sma/smaRollup.c @@ -28,11 +28,10 @@ SSmaMgmt smaMgmt = { .rsetId = -1, }; -#define TD_QTASKINFO_FNAME_PREFIX "qtaskinfo.ver" -#define 
TD_RSMAINFO_DEL_FILE "rsmainfo.del" +#define TD_QTASKINFO_FNAME_PREFIX "qinf.v" + typedef struct SRSmaQTaskInfoItem SRSmaQTaskInfoItem; typedef struct SRSmaQTaskInfoIter SRSmaQTaskInfoIter; -typedef struct SRSmaExecQItem SRSmaExecQItem; static int32_t tdUidStorePut(STbUidStore *pStore, tb_uid_t suid, tb_uid_t *uid); static int32_t tdUpdateTbUidListImpl(SSma *pSma, tb_uid_t *suid, SArray *tbUids); @@ -83,11 +82,6 @@ struct SRSmaQTaskInfoIter { int32_t nBufPos; }; -struct SRSmaExecQItem { - void *pRSmaInfo; - void *qall; -}; - void tdRSmaQTaskInfoGetFileName(int32_t vgId, int64_t version, char *outputName) { tdGetVndFileName(vgId, NULL, VNODE_RSMA_DIR, TD_QTASKINFO_FNAME_PREFIX, version, outputName); } From 60b1742e283e682e5b6ecc5cde8cd2391e0aec8a Mon Sep 17 00:00:00 2001 From: Pan YANG Date: Tue, 30 Aug 2022 17:38:41 +0800 Subject: [PATCH 37/72] chore: remove CMakeSettings.json and add it to .gitignore It is an output file of Visual Studio --- .gitignore | 1 + CMakeSettings.json | 25 ------------------------- 2 files changed, 1 insertion(+), 25 deletions(-) delete mode 100644 CMakeSettings.json diff --git a/.gitignore b/.gitignore index 76b581b182..5f1e24109d 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,6 @@ build/ compile_commands.json +CMakeSettings.json .cache .ycm_extra_conf.py .tasks diff --git a/CMakeSettings.json b/CMakeSettings.json deleted file mode 100644 index d3f2c27bf6..0000000000 --- a/CMakeSettings.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "configurations": [ - { - "name": "WSL-GCC-Debug", - "generator": "Unix Makefiles", - "configurationType": "Debug", - "buildRoot": "${projectDir}\\build\\", - "installRoot": "${projectDir}\\build\\", - "cmakeExecutable": "/usr/bin/cmake", - "cmakeCommandArgs": "", - "buildCommandArgs": "", - "ctestCommandArgs": "", - "inheritEnvironments": [ "linux_x64" ], - "wslPath": "${defaultWSLPath}", - "addressSanitizerRuntimeFlags": "detect_leaks=0", - "variables": [ - { - "name": "CMAKE_INSTALL_PREFIX", - "value": 
"/mnt/d/TDengine/TDengine/build", - "type": "PATH" - } - ] - } - ] -} \ No newline at end of file From 651d7d16aee31c0fa5966e28e720f305ee23ec8d Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 30 Aug 2022 17:41:54 +0800 Subject: [PATCH 38/72] feat: update taostools 9cb965f for3.0 (#16511) * feat: update taos-tools for 3.0 [TD-14141] * feat: update taos-tools for 3.0 * feat: update taos-tools for 3.0 * feat: update taos-tools for 3.0 * feat: update taos-tools for 3.0 * feat: update taos-tools for 3.0 * feat: update taos-tools for 3.0 * feat: update taos-tools for 3.0 * feat: update taos-tools for 3.0 * feat: update taos-tools for 3.0 * feat: update taos-tools 8e3b3ee * fix: remove submodules * feat: update taos-tools c529299 * feat: update taos-tools 9dc2fec for 3.0 * fix: optim upx * feat: update taos-tools f4e456a for 3.0 * feat: update taos-tools 2a2def1 for 3.0 * feat: update taos-tools c9cc20f for 3.0 * feat: update taostoosl 8a5e336 for 3.0 * feat: update taostools 3c7dafe for 3.0 * feat: update taos-tools 2d68404 for 3.0 * feat: update taos-tools 57bdfbf for 3.0 * fix: jenkinsfile2 to upgrade pip * feat: update taostoosl 11d23e5 for 3.0 * feat: update taostools 43924b8 for 3.0 * feat: update taostools 53a0103 for 3.0 * feat: update taostoosl d237772 for 3.0 * feat: update taos-tools 6bde102 for 3.0 * feat: upate taos-tools 2af2222 for 3.0 * feat: update taos-tools 833b721 for 3.0 * feat: update taostools e8bfca6 for 3.0 * feat: update taos-tools aa45ad4 for 3.0 * feat: update taos tools 9cb965f for 3.0 --- cmake/taostools_CMakeLists.txt.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in index c0de75c6dd..68caf9a9ac 100644 --- a/cmake/taostools_CMakeLists.txt.in +++ b/cmake/taostools_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taos-tools ExternalProject_Add(taos-tools GIT_REPOSITORY https://github.com/taosdata/taos-tools.git - GIT_TAG aa45ad4 + GIT_TAG 9cb965f SOURCE_DIR 
"${TD_SOURCE_DIR}/tools/taos-tools" BINARY_DIR "" #BUILD_IN_SOURCE TRUE From f34287faaa533362daeb19c74509cc0d7612033b Mon Sep 17 00:00:00 2001 From: tomchon Date: Tue, 30 Aug 2022 19:13:04 +0800 Subject: [PATCH 39/72] test: modify checkpackages scritps --- packaging/testpackage.sh | 64 +++++++++++++++++++++++++++++++--------- 1 file changed, 50 insertions(+), 14 deletions(-) diff --git a/packaging/testpackage.sh b/packaging/testpackage.sh index 054c24eb5d..4b6264db2b 100755 --- a/packaging/testpackage.sh +++ b/packaging/testpackage.sh @@ -7,6 +7,7 @@ originPackageName=$3 originversion=$4 testFile=$5 subFile="taos.tar.gz" +password=$6 if [ ${testFile} = "server" ];then tdPath="TDengine-server-${version}" @@ -56,6 +57,7 @@ fi cmdInstall tree cmdInstall wget +cmdInstall sshpass echo "new workroom path" installPath="/usr/local/src/packageTest" @@ -74,24 +76,49 @@ else echo "${oriInstallPath} already exists" fi -echo "decompress installPackage" + + + +echo "download installPackage" +# cd ${installPath} +# wget https://www.taosdata.com/assets-download/3.0/${packgeName} +# cd ${oriInstallPath} +# wget https://www.taosdata.com/assets-download/3.0/${originPackageName} cd ${installPath} -wget https://www.taosdata.com/assets-download/3.0/${packgeName} -cd ${oriInstallPath} -wget https://www.taosdata.com/assets-download/3.0/${originPackageName} - +if [ ! -f {packgeName} ];then + sshpass -p ${password} scp 192.168.1.131:/nas/TDengine3/v${version}/community/${packgeName} . +fi +if [ ! 
-f debAuto.sh ];then + echo '#!/usr/bin/expect ' > debAuto.sh + echo 'set timeout 3 ' >> debAuto.sh + echo 'pset packgeName [lindex $argv 0]' >> debAuto.sh + echo 'spawn dpkg -i ${packgeName}' >> debAuto.sh + echo 'expect "*one:"' >> debAuto.sh + echo 'send "\r"' >> debAuto.sh + echo 'expect "*skip:"' >> debAuto.sh + echo 'send "\r" ' >> debAuto.sh +fi if [[ ${packgeName} =~ "deb" ]];then cd ${installPath} - echo "dpkg ${packgeName}" && dpkg -i ${packgeName} + dpkg -r taostools + dpkg -r tdengine + if [[ ${packgeName} =~ "TDengine" ]];then + echo "./debAuto.sh ${packgeName}" && chmod 755 debAuto.sh && ./debAuto.sh ${packgeName} + else + echo "dpkg -i ${packgeName}" && dpkg -i ${packgeName} + elif [[ ${packgeName} =~ "rpm" ]];then cd ${installPath} - echo "rpm ${packgeName}" && rpm -ivh ${packgeName} + echo "rpm ${packgeName}" && rpm -ivh ${packgeName} --quiet elif [[ ${packgeName} =~ "tar" ]];then - echo "tar ${packgeName}" && tar -xvf ${packgeName} - cd ${oriInstallPath} + cd ${oriInstallPath} + if [ ! -f {originPackageName} ];then + sshpass -p ${password} scp 192.168.1.131:/nas/TDengine3/v${originversion}/community${originPackageName} . 
+ fi echo "tar -xvf ${originPackageName}" && tar -xvf ${originPackageName} + cd ${installPath} echo "tar -xvf ${packgeName}" && tar -xvf ${packgeName} @@ -105,10 +132,10 @@ elif [[ ${packgeName} =~ "tar" ]];then cd ${installPath} - tree ${oriInstallPath}/${originTdpPath} > ${originPackageName}_checkfile - tree ${installPath}/${tdPath} > ${packgeName}_checkfile + tree ${oriInstallPath}/${originTdpPath} > ${oriInstallPath}/${originPackageName}_checkfile + tree ${installPath}/${tdPath} > ${installPath}/${packgeName}_checkfile - diff ${packgeName}_checkfile ${originPackageName}_checkfile > ${installPath}/diffFile.log + diff ${installPath}/${packgeName}_checkfile ${oriInstallPath}/${originPackageName}_checkfile > ${installPath}/diffFile.log diffNumbers=`cat ${installPath}/diffFile.log |wc -l ` if [ ${diffNumbers} != 0 ];then echo "The number and names of files have changed from the previous installation package" @@ -122,11 +149,20 @@ elif [[ ${packgeName} =~ "tar" ]];then else bash ${installCmd} fi - if [[ ${packgeName} =~ "Lite" ]];then + if [[ ${packgeName} =~ "Lite" ]] && [[ ${packgeName} =~ "tar" ]] ;then cd ${installPath} - wget https://www.taosdata.com/assets-download/3.0/taosTools-2.1.2-Linux-x64.tar.gz + sshpass -p ${password} scp 192.168.1.131:/nas/TDengine3/v${version}/community/taosTools-2.1.2-Linux-x64.tar.gz . + # wget https://www.taosdata.com/assets-download/3.0/taosTools-2.1.2-Linux-x64.tar.gz tar xvf taosTools-2.1.2-Linux-x64.tar.gz cd taosTools-2.1.2 && bash install-taostools.sh + elif [[ ${packgeName} =~ "Lite" ]] && [[ ${packgeName} =~ "deb" ]] ;then + cd ${installPath} + sshpass -p ${password} scp 192.168.1.131:/nas/TDengine3/v${version}/community/taosTools-2.1.2-Linux-x64.deb . + dpkg -i taosTools-2.1.2-Linux-x64.deb + elif [[ ${packgeName} =~ "Lite" ]] && [[ ${packgeName} =~ "rpm" ]] ;then + cd ${installPath} + sshpass -p ${password} scp 192.168.1.131:/nas/TDengine3/v${version}/community/taosTools-2.1.2-Linux-x64.rpm . 
+ rpm -ivh taosTools-2.1.2-Linux-x64.rpm --quiet fi fi From 6478a2d600a7beb0eead4330ef6bc39e58a121b7 Mon Sep 17 00:00:00 2001 From: tomchon Date: Tue, 30 Aug 2022 19:13:26 +0800 Subject: [PATCH 40/72] test: modify checkpackages scritps --- packaging/MPtestJenkinsfile | 1 - 1 file changed, 1 deletion(-) diff --git a/packaging/MPtestJenkinsfile b/packaging/MPtestJenkinsfile index a003bf354c..45c8d8abf2 100644 --- a/packaging/MPtestJenkinsfile +++ b/packaging/MPtestJenkinsfile @@ -182,7 +182,6 @@ pipeline { cd ${TDENGINE_ROOT_DIR}/packaging bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server python3 checkPackageRuning.py - rmtaos ''' sh ''' cd ${TDENGINE_ROOT_DIR}/packaging From 2ddfca5149eea7d1c9a1d8bfb53a0ac4267c3fb6 Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Tue, 30 Aug 2022 19:29:26 +0800 Subject: [PATCH 41/72] other: code optimization for assert condition --- source/dnode/vnode/src/sma/smaRollup.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/dnode/vnode/src/sma/smaRollup.c b/source/dnode/vnode/src/sma/smaRollup.c index af41c53956..f33d8dc2d0 100644 --- a/source/dnode/vnode/src/sma/smaRollup.c +++ b/source/dnode/vnode/src/sma/smaRollup.c @@ -1737,7 +1737,7 @@ int32_t tdRSmaProcessExecImpl(SSma *pSma, ERsmaExecType type) { break; } } - ASSERT(1 == atomic_val_compare_exchange_8(&pInfo->assigned, 1, 0)); + atomic_val_compare_exchange_8(&pInfo->assigned, 1, 0); } } if (type == RSMA_EXEC_COMMIT) { @@ -1766,7 +1766,7 @@ int32_t tdRSmaProcessExecImpl(SSma *pSma, ERsmaExecType type) { } // tdRSmaFetchAllResult(pSma, pInfo, pSubmitArr); - ASSERT(1 == atomic_val_compare_exchange_8(&pInfo->assigned, 1, 0)); + atomic_val_compare_exchange_8(&pInfo->assigned, 1, 0); } } ASSERT(taosQueueItemSize(pInfo->iQueue) == 0); From 65e2b4d34b06a718ad3f269d29975bb146043a50 Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Wed, 31 Aug 2022 09:43:40 +0800 Subject: [PATCH 42/72] fix: fix case issue --- 
tests/script/tsim/user/privilege_sysinfo.sim | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/script/tsim/user/privilege_sysinfo.sim b/tests/script/tsim/user/privilege_sysinfo.sim index e8348d92d4..86f95755d0 100644 --- a/tests/script/tsim/user/privilege_sysinfo.sim +++ b/tests/script/tsim/user/privilege_sysinfo.sim @@ -163,6 +163,9 @@ sql select * from information_schema.ins_stables sql select * from information_schema.ins_tables sql select * from information_schema.ins_tags sql select * from information_schema.ins_users +sql select * from information_schema.ins_topics +sql select * from information_schema.ins_subscriptions +sql select * from information_schema.ins_streams sql_error select * from information_schema.ins_grants sql_error select * from information_schema.ins_vgroups sql_error select * from information_schema.ins_configs @@ -172,11 +175,8 @@ print =============== check performance_schema sql use performance_schema; sql select * from performance_schema.perf_connections sql select * from performance_schema.perf_queries -sql select * from performance_schema.perf_topics sql select * from performance_schema.perf_consumers -sql select * from performance_schema.perf_subscriptions sql select * from performance_schema.perf_trans -sql select * from performance_schema.perf_streams sql select * from performance_schema.perf_apps #system sh/exec.sh -n dnode1 -s stop -x SIGINT From 52787dd2b2333cb18f517707c816546e9ec67148 Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Wed, 31 Aug 2022 09:54:34 +0800 Subject: [PATCH 43/72] fix: move tables from perf to ins --- docs/en/12-taos-sql/22-meta.md | 32 ++++++++++++++++++++++++++++++++ docs/en/12-taos-sql/23-perf.md | 32 -------------------------------- 2 files changed, 32 insertions(+), 32 deletions(-) diff --git a/docs/en/12-taos-sql/22-meta.md b/docs/en/12-taos-sql/22-meta.md index 796b25dcb0..9bda5a0a10 100644 --- a/docs/en/12-taos-sql/22-meta.md +++ b/docs/en/12-taos-sql/22-meta.md @@ -245,3 
+245,35 @@ Provides dnode configuration information. | 1 | dnode_id | INT | Dnode ID | | 2 | name | BINARY(32) | Parameter | | 3 | value | BINARY(64) | Value | + +## INS_TOPICS + +| # | **Column** | **Data Type** | **Description** | +| --- | :---------: | ------------ | ------------------------------ | +| 1 | topic_name | BINARY(192) | Topic name | +| 2 | db_name | BINARY(64) | Database for the topic | +| 3 | create_time | TIMESTAMP | Creation time | +| 4 | sql | BINARY(1024) | SQL statement used to create the topic | + +## INS_SUBSCRIPTIONS + +| # | **Column** | **Data Type** | **Description** | +| --- | :------------: | ------------ | ------------------------ | +| 1 | topic_name | BINARY(204) | Subscribed topic | +| 2 | consumer_group | BINARY(193) | Subscribed consumer group | +| 3 | vgroup_id | INT | Vgroup ID for the consumer | +| 4 | consumer_id | BIGINT | Consumer ID | + +## INS_STREAMS + +| # | **Column** | **Data Type** | **Description** | +| --- | :----------: | ------------ | --------------------------------------- | +| 1 | stream_name | BINARY(64) | Stream name | +| 2 | create_time | TIMESTAMP | Creation time | +| 3 | sql | BINARY(1024) | SQL statement used to create the stream | +| 4 | status | BIANRY(20) | Current status | +| 5 | source_db | BINARY(64) | Source database | +| 6 | target_db | BIANRY(64) | Target database | +| 7 | target_table | BINARY(192) | Target table | +| 8 | watermark | BIGINT | Watermark (see stream processing documentation) | +| 9 | trigger | INT | Method of triggering the result push (see stream processing documentation) | diff --git a/docs/en/12-taos-sql/23-perf.md b/docs/en/12-taos-sql/23-perf.md index 10a9338022..29cf3af6ab 100644 --- a/docs/en/12-taos-sql/23-perf.md +++ b/docs/en/12-taos-sql/23-perf.md @@ -61,15 +61,6 @@ Provides information about SQL queries currently running. 
Similar to SHOW QUERIE | 12 | sub_status | BINARY(1000) | Subquery status | | 13 | sql | BINARY(1024) | SQL statement | -## PERF_TOPICS - -| # | **Column** | **Data Type** | **Description** | -| --- | :---------: | ------------ | ------------------------------ | -| 1 | topic_name | BINARY(192) | Topic name | -| 2 | db_name | BINARY(64) | Database for the topic | -| 3 | create_time | TIMESTAMP | Creation time | -| 4 | sql | BINARY(1024) | SQL statement used to create the topic | - ## PERF_CONSUMERS | # | **Column** | **Data Type** | **Description** | @@ -83,15 +74,6 @@ Provides information about SQL queries currently running. Similar to SHOW QUERIE | 7 | subscribe_time | TIMESTAMP | Time of first subscription | | 8 | rebalance_time | TIMESTAMP | Time of first rebalance triggering | -## PERF_SUBSCRIPTIONS - -| # | **Column** | **Data Type** | **Description** | -| --- | :------------: | ------------ | ------------------------ | -| 1 | topic_name | BINARY(204) | Subscribed topic | -| 2 | consumer_group | BINARY(193) | Subscribed consumer group | -| 3 | vgroup_id | INT | Vgroup ID for the consumer | -| 4 | consumer_id | BIGINT | Consumer ID | - ## PERF_TRANS | # | **Column** | **Data Type** | **Description** | @@ -113,17 +95,3 @@ Provides information about SQL queries currently running. 
Similar to SHOW QUERIE | 2 | create_time | TIMESTAMP | Creation time | | 3 | stable_name | BINARY(192) | Supertable name | | 4 | vgroup_id | INT | Dedicated vgroup name | - -## PERF_STREAMS - -| # | **Column** | **Data Type** | **Description** | -| --- | :----------: | ------------ | --------------------------------------- | -| 1 | stream_name | BINARY(64) | Stream name | -| 2 | create_time | TIMESTAMP | Creation time | -| 3 | sql | BINARY(1024) | SQL statement used to create the stream | -| 4 | status | BIANRY(20) | Current status | -| 5 | source_db | BINARY(64) | Source database | -| 6 | target_db | BIANRY(64) | Target database | -| 7 | target_table | BINARY(192) | Target table | -| 8 | watermark | BIGINT | Watermark (see stream processing documentation) | -| 9 | trigger | INT | Method of triggering the result push (see stream processing documentation) | From ed6b49c7c6ffb1bbe1f0898d09f32c0eef66b423 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Wed, 31 Aug 2022 10:55:45 +0800 Subject: [PATCH 44/72] fix: heap over flow in schemaless --- source/client/src/clientSml.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/client/src/clientSml.c b/source/client/src/clientSml.c index 4968c5c68d..c5ce2f632c 100644 --- a/source/client/src/clientSml.c +++ b/source/client/src/clientSml.c @@ -456,7 +456,7 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) { code = smlSendMetaMsg(info, &pName, pColumns, pTags, NULL, SCHEMA_ACTION_CREATE_STABLE); if (code != TSDB_CODE_SUCCESS) { - uError("SML:0x%" PRIx64 " smlSendMetaMsg failed. can not create %s", info->id, superTable); + uError("SML:0x%" PRIx64 " smlSendMetaMsg failed. can not create %s", info->id, pName.tname); goto end; } info->cost.numOfCreateSTables++; @@ -492,7 +492,7 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) { code = smlSendMetaMsg(info, &pName, pColumns, pTags, pTableMeta, action); if (code != TSDB_CODE_SUCCESS) { - uError("SML:0x%" PRIx64 " smlSendMetaMsg failed. 
can not create %s", info->id, superTable); + uError("SML:0x%" PRIx64 " smlSendMetaMsg failed. can not create %s", info->id, pName.tname); goto end; } } From 2fc2000c7adecca0c2246542c9eea30cc19cf2a5 Mon Sep 17 00:00:00 2001 From: cpwu Date: Wed, 31 Aug 2022 11:00:25 +0800 Subject: [PATCH 45/72] fix case --- tests/pytest/util/common.py | 4 +- .../1-insert/influxdb_line_taosc_insert.py | 84 +++++------ .../1-insert/opentsdb_json_taosc_insert.py | 138 ++++++++--------- .../opentsdb_telnet_line_taosc_insert.py | 140 +++++++++--------- 4 files changed, 183 insertions(+), 183 deletions(-) diff --git a/tests/pytest/util/common.py b/tests/pytest/util/common.py index 132c1f029c..9ffebcbdad 100644 --- a/tests/pytest/util/common.py +++ b/tests/pytest/util/common.py @@ -208,11 +208,11 @@ class TDCom: stb_list = map(lambda x: x[0], res_row_list) for stb in stb_list: if type == "taosc": - tdSql.execute(f'drop table if exists `{dbname}.{stb}`') + tdSql.execute(f'drop table if exists {dbname}.`{stb}`') if not stb[0].isdigit(): tdSql.execute(f'drop table if exists {dbname}.{stb}') elif type == "restful": - self.restApiPost(f"drop table if exists `{dbname}.{stb}`") + self.restApiPost(f"drop table if exists {dbname}.`{stb}`") if not stb[0].isdigit(): self.restApiPost(f"drop table if exists {dbname}.{stb}") diff --git a/tests/system-test/1-insert/influxdb_line_taosc_insert.py b/tests/system-test/1-insert/influxdb_line_taosc_insert.py index 25e2378f46..cae4294bc9 100644 --- a/tests/system-test/1-insert/influxdb_line_taosc_insert.py +++ b/tests/system-test/1-insert/influxdb_line_taosc_insert.py @@ -31,7 +31,7 @@ if platform.system().lower() == 'windows': class TDTestCase: def init(self, conn, logSql): tdLog.debug("start to execute %s" % __file__) - tdSql.init(conn.cursor(), logSql) + tdSql.init(conn.cursor(), False) self._conn = conn def createDb(self, name="test", db_update_tag=0): @@ -357,7 +357,7 @@ class TDTestCase: """ normal tags and cols, one for every elm """ - tdCom.cleanTb() + 
tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql() self.resCmp(input_sql, stb_name) @@ -365,7 +365,7 @@ class TDTestCase: """ check all normal type """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") full_type_list = ["f", "F", "false", "False", "t", "T", "true", "True"] for t_type in full_type_list: input_sql, stb_name = self.genFullTypeSql(c0=t_type, t0=t_type) @@ -379,7 +379,7 @@ class TDTestCase: please test : binary_symbols = '\"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"\'\'"\"' ''' - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") binary_symbols = '"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"' nchar_symbols = f'L{binary_symbols}' input_sql, stb_name = self.genFullTypeSql(c7=binary_symbols, c8=nchar_symbols, t7=binary_symbols, t8=nchar_symbols) @@ -390,7 +390,7 @@ class TDTestCase: test ts list --> ["1626006833639000000", "1626006833639019us", "1626006833640ms", "1626006834s", "1626006822639022"] # ! us级时间戳都为0时,数据库中查询显示,但python接口拿到的结果不显示 .000000的情况请确认,目前修改时间处理代码可以通过 """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") ts_list = ["1626006833639000000", "1626006833639019us", "1626006833640ms", "1626006834s", "1626006822639022", 0] for ts in ts_list: input_sql, stb_name = self.genFullTypeSql(ts=ts) @@ -401,7 +401,7 @@ class TDTestCase: check id.index in tags eg: t0=**,id=**,t1=** """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql(id_change_tag=True) self.resCmp(input_sql, stb_name) @@ -410,7 +410,7 @@ class TDTestCase: check id param eg: id and ID """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql(id_upper_tag=True) self.resCmp(input_sql, stb_name) input_sql, stb_name = self.genFullTypeSql(id_change_tag=True, id_upper_tag=True) @@ -420,7 +420,7 @@ class TDTestCase: """ id not exist """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql(id_noexist_tag=True) self.resCmp(input_sql, stb_name) query_sql = f"select tbname from 
{stb_name}" @@ -436,10 +436,10 @@ class TDTestCase: max col count is ?? """ for input_sql in [self.genLongSql(127, 1)[0], self.genLongSql(1, 4093)[0]]: - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) for input_sql in [self.genLongSql(129, 1)[0], self.genLongSql(1, 4095)[0]]: - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") try: self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) except SchemalessError as err: @@ -450,7 +450,7 @@ class TDTestCase: test illegal id name mix "~!@#$¥%^&*()-+|[]、「」【】;:《》<>?" """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") rstr = list("~!@#$¥%^&*()-+|[]、「」【】;:《》<>?") for i in rstr: stb_name=f"aaa{i}bbb" @@ -462,7 +462,7 @@ class TDTestCase: """ id is start with num """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql = self.genFullTypeSql(tb_name=f"\"1aaabbb\"")[0] try: self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) @@ -473,7 +473,7 @@ class TDTestCase: """ check now unsupported """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql = self.genFullTypeSql(ts="now")[0] try: self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) @@ -484,7 +484,7 @@ class TDTestCase: """ check date format ts unsupported """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql = self.genFullTypeSql(ts="2021-07-21\ 19:01:46.920")[0] try: self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) @@ -495,7 +495,7 @@ class TDTestCase: """ check ts format like 16260068336390us19 """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql = self.genFullTypeSql(ts="16260068336390us19")[0] try: self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) 
@@ -506,7 +506,7 @@ class TDTestCase: """ check full type tag value limit """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") # i8 for t1 in ["-128i8", "127i8"]: input_sql, stb_name = self.genFullTypeSql(t1=t1) @@ -602,7 +602,7 @@ class TDTestCase: """ check full type col value limit """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") # i8 for c1 in ["-128i8", "127i8"]: input_sql, stb_name = self.genFullTypeSql(c1=c1) @@ -699,7 +699,7 @@ class TDTestCase: """ test illegal tag col value """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") # bool for i in ["TrUe", "tRue", "trUe", "truE", "FalsE", "fAlse", "faLse", "falSe", "falsE"]: input_sql1 = self.genFullTypeSql(t0=i)[0] @@ -758,7 +758,7 @@ class TDTestCase: """ check duplicate Id Tag Col """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql_id = self.genFullTypeSql(id_double_tag=True)[0] try: self._conn.schemaless_insert([input_sql_id], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) @@ -792,7 +792,7 @@ class TDTestCase: """ case no id when stb exist """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql(tb_name="sub_table_0123456", t0="f", c0="f") self.resCmp(input_sql, stb_name) input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, id_noexist_tag=True, t0="f", c0="f") @@ -805,7 +805,7 @@ class TDTestCase: """ check duplicate insert when stb exist """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql() self.resCmp(input_sql, stb_name) self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) @@ -816,7 +816,7 @@ class TDTestCase: """ check length increase """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql() self.resCmp(input_sql, stb_name) tb_name = tdCom.getLongName(5, "letters") @@ -833,7 +833,7 @@ class TDTestCase: * col is added without value when update==0 * col is added with value when update==1 
""" - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") tb_name = tdCom.getLongName(7, "letters") for db_update_tag in [0, 1]: if db_update_tag == 1 : @@ -850,7 +850,7 @@ class TDTestCase: """ check column and tag count add """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") tb_name = tdCom.getLongName(7, "letters") input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, t0="f", c0="f") self.resCmp(input_sql, stb_name) @@ -866,7 +866,7 @@ class TDTestCase: condition: stb not change insert two table, keep tag unchange, change col """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql(t0="f", c0="f", id_noexist_tag=True) self.resCmp(input_sql, stb_name) tb_name1 = self.getNoIdTbName(stb_name) @@ -888,7 +888,7 @@ class TDTestCase: """ every binary and nchar must be length+2 """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") stb_name = tdCom.getLongName(7, "letters") tb_name = f'{stb_name}_1' input_sql = f'{stb_name},id="{tb_name}",t0=t c0=f 1626006833639000000' @@ -928,7 +928,7 @@ class TDTestCase: """ check nchar length limit """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") stb_name = tdCom.getLongName(7, "letters") tb_name = f'{stb_name}_1' input_sql = f'{stb_name},id="{tb_name}",t0=t c0=f 1626006833639000000' @@ -963,7 +963,7 @@ class TDTestCase: """ test batch insert """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") stb_name = tdCom.getLongName(8, "letters") # tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 bigint)') lines = ["st123456,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000", @@ -982,7 +982,7 @@ class TDTestCase: """ test multi insert """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") sql_list = [] stb_name = tdCom.getLongName(8, "letters") # tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 bigint)') @@ -996,7 +996,7 @@ class TDTestCase: """ test batch error insert """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") 
stb_name = tdCom.getLongName(8, "letters") lines = ["st123456,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000", f"{stb_name},t2=5f64,t3=L\"ste\" c1=tRue,c2=4i64,c3=\"iam\" 1626056811823316532ns"] @@ -1068,7 +1068,7 @@ class TDTestCase: """ thread input different stb """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql = self.genSqlList()[0] self.multiThreadRun(self.genMultiThreadSeq(input_sql)) tdSql.query(f"show tables;") @@ -1078,7 +1078,7 @@ class TDTestCase: """ thread input same stb tb, different data, result keep first data """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") tb_name = tdCom.getLongName(7, "letters") input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name) self.resCmp(input_sql, stb_name) @@ -1095,7 +1095,7 @@ class TDTestCase: """ thread input same stb tb, different data, add columes and tags, result keep first data """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") tb_name = tdCom.getLongName(7, "letters") input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name) self.resCmp(input_sql, stb_name) @@ -1112,7 +1112,7 @@ class TDTestCase: """ thread input same stb tb, different data, minus columes and tags, result keep first data """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") tb_name = tdCom.getLongName(7, "letters") input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name) self.resCmp(input_sql, stb_name) @@ -1129,7 +1129,7 @@ class TDTestCase: """ thread input same stb, different tb, different data """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql() self.resCmp(input_sql, stb_name) s_stb_d_tb_list = self.genSqlList(stb_name=stb_name)[4] @@ -1144,7 +1144,7 @@ class TDTestCase: """ thread input same stb, different tb, different data, add col, mul tag """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql() self.resCmp(input_sql, stb_name) s_stb_d_tb_a_col_m_tag_list = 
self.genSqlList(stb_name=stb_name)[5] @@ -1159,7 +1159,7 @@ class TDTestCase: """ thread input same stb, different tb, different data, add tag, mul col """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql() self.resCmp(input_sql, stb_name) s_stb_d_tb_a_tag_m_col_list = self.genSqlList(stb_name=stb_name)[6] @@ -1171,7 +1171,7 @@ class TDTestCase: """ thread input same stb tb, different ts """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") tb_name = tdCom.getLongName(7, "letters") input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name) self.resCmp(input_sql, stb_name) @@ -1186,7 +1186,7 @@ class TDTestCase: """ thread input same stb tb, different ts, add col, mul tag """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") tb_name = tdCom.getLongName(7, "letters") input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name) self.resCmp(input_sql, stb_name) @@ -1205,7 +1205,7 @@ class TDTestCase: """ thread input same stb tb, different ts, add tag, mul col """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") tb_name = tdCom.getLongName(7, "letters") input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name) self.resCmp(input_sql, stb_name) @@ -1226,7 +1226,7 @@ class TDTestCase: """ thread input same stb, different tb, data, ts """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql() self.resCmp(input_sql, stb_name) s_stb_d_tb_d_ts_list = self.genSqlList(stb_name=stb_name)[10] @@ -1241,7 +1241,7 @@ class TDTestCase: """ thread input same stb, different tb, data, ts, add col, mul tag """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql() self.resCmp(input_sql, stb_name) s_stb_d_tb_d_ts_a_col_m_tag_list = self.genSqlList(stb_name=stb_name)[11] diff --git a/tests/system-test/1-insert/opentsdb_json_taosc_insert.py b/tests/system-test/1-insert/opentsdb_json_taosc_insert.py index 003abe9d10..3b01784000 100644 --- 
a/tests/system-test/1-insert/opentsdb_json_taosc_insert.py +++ b/tests/system-test/1-insert/opentsdb_json_taosc_insert.py @@ -459,7 +459,7 @@ class TDTestCase: normal tags and cols, one for every elm """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_json, stb_name = self.genFullTypeJson(value_type=value_type) self.resCmp(input_json, stb_name) @@ -468,7 +468,7 @@ class TDTestCase: check all normal type """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") full_type_list = ["f", "F", "false", "False", "t", "T", "true", "True"] for t_type in full_type_list: input_json_list = [self.genFullTypeJson(tag_value=self.genTagValue(t0_value=t_type))[0], @@ -489,7 +489,7 @@ class TDTestCase: binary_symbols = '\"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"\'\'"\"' ''' tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") binary_symbols = '"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"' nchar_symbols = binary_symbols input_sql1, stb_name1 = self.genFullTypeJson(col_value=self.genTsColValue(value=binary_symbols, t_type="binary", value_type=value_type), @@ -505,7 +505,7 @@ class TDTestCase: # ! 
us级时间戳都为0时,数据库中查询显示,但python接口拿到的结果不显示 .000000的情况请确认,目前修改时间处理代码可以通过 """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") ts_list = ["1626006833639000000ns", "1626006833639019us", "1626006833640ms", "1626006834s", "1626006834", 0] for ts in ts_list: if "s" in str(ts): @@ -571,7 +571,7 @@ class TDTestCase: eg: t0=**,id=**,t1=** """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_json, stb_name = self.genFullTypeJson(id_change_tag=True, value_type=value_type) self.resCmp(input_json, stb_name) @@ -581,7 +581,7 @@ class TDTestCase: eg: id and ID """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_json, stb_name = self.genFullTypeJson(id_upper_tag=True, value_type=value_type) self.resCmp(input_json, stb_name) input_json, stb_name = self.genFullTypeJson(id_mixul_tag=True, value_type=value_type) @@ -594,7 +594,7 @@ class TDTestCase: id not exist """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_json, stb_name = self.genFullTypeJson(id_noexist_tag=True, value_type=value_type) self.resCmp(input_json, stb_name) query_sql = f"select tbname from {stb_name}" @@ -610,10 +610,10 @@ class TDTestCase: """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') for input_json in [self.genLongJson(128, value_type)[0]]: - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) for input_json in [self.genLongJson(129, value_type)[0]]: - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") try: self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) except SchemalessError as err: @@ -625,7 +625,7 @@ class TDTestCase: mix "`~!@#$¥%^&*()-+={}|[]、「」【】\:;《》<>?" 
""" tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") rstr = list("`~!@#$¥%^&*()-+={}|[]、「」【】\:;《》<>?") for i in rstr: input_json = self.genFullTypeJson(tb_name=f'aa{i}bb', value_type=value_type)[0] @@ -639,7 +639,7 @@ class TDTestCase: id is start with num """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_json = self.genFullTypeJson(tb_name="1aaabbb", value_type=value_type)[0] try: self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) @@ -651,7 +651,7 @@ class TDTestCase: check now unsupported """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_json = self.genFullTypeJson(ts_value=self.genTsColValue(value="now", t_type="ns", value_type=value_type))[0] try: self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) @@ -663,7 +663,7 @@ class TDTestCase: check date format ts unsupported """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_json = self.genFullTypeJson(ts_value=self.genTsColValue(value="2021-07-21\ 19:01:46.920", t_type="ns", value_type=value_type))[0] try: self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) @@ -675,7 +675,7 @@ class TDTestCase: check ts format like 16260068336390us19 """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_json = self.genFullTypeJson(ts_value=self.genTsColValue(value="16260068336390us19", t_type="us", value_type=value_type))[0] try: self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) @@ -690,7 +690,7 @@ class TDTestCase: length of stb_name tb_name <= 192 """ tdLog.info(f'{sys._getframe().f_code.co_name}() 
function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") tdSql.execute('reset query cache') stb_name_192 = tdCom.getLongName(len=192, mode="letters") tb_name_192 = tdCom.getLongName(len=192, mode="letters") @@ -715,7 +715,7 @@ class TDTestCase: check tag name limit <= 62 """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") tag_name = tdCom.getLongName(61, "letters") tag_name = f't{tag_name}' stb_name = tdCom.getLongName(7, "letters") @@ -733,7 +733,7 @@ class TDTestCase: check full type tag value limit """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") # i8 for t1 in [-127, 127]: input_json, stb_name = self.genFullTypeJson(tag_value=self.genTagValue(t1_value=t1, value_type=value_type)) @@ -854,12 +854,12 @@ class TDTestCase: check full type col value limit """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") # i8 for value in [-128, 127]: input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="tinyint", value_type=value_type)) self.resCmp(input_json, stb_name) - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") for value in [-129, 128]: input_json = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="tinyint"))[0] try: @@ -868,11 +868,11 @@ class TDTestCase: except SchemalessError as err: tdSql.checkNotEqual(err.errno, 0) # i16 - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") for value in [-32768]: input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="smallint", value_type=value_type)) self.resCmp(input_json, stb_name) - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") for value in [-32769, 32768]: input_json = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="smallint"))[0] try: @@ -882,11 +882,11 @@ class TDTestCase: 
tdSql.checkNotEqual(err.errno, 0) # i32 - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") for value in [-2147483648]: input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="int", value_type=value_type)) self.resCmp(input_json, stb_name) - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") for value in [-2147483649, 2147483648]: input_json = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="int"))[0] try: @@ -896,12 +896,12 @@ class TDTestCase: tdSql.checkNotEqual(err.errno, 0) # i64 - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") for value in [-9223372036854775808]: input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="bigint", value_type=value_type)) self.resCmp(input_json, stb_name) # ! bug - # tdCom.cleanTb() + # tdCom.cleanTb(dbname="test") # for value in [-9223372036854775809, 9223372036854775808]: # print(value) # input_json = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="bigint"))[0] @@ -913,12 +913,12 @@ class TDTestCase: # tdSql.checkNotEqual(err.errno, 0) # f32 - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") for value in [-3.4028234663852885981170418348451692544*(10**38), 3.4028234663852885981170418348451692544*(10**38)]: input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="float", value_type=value_type)) self.resCmp(input_json, stb_name) # * limit set to 4028234664*(10**38) - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") for value in [-3.4028234664*(10**38), 3.4028234664*(10**38)]: input_json = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="float"))[0] try: @@ -928,12 +928,12 @@ class TDTestCase: tdSql.checkNotEqual(err.errno, 0) # f64 - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") for value in [-1.79769313486231570814527423731704356798070567525844996598917476803157260780*(10**308), 
-1.79769313486231570814527423731704356798070567525844996598917476803157260780*(10**308)]: input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="double", value_type=value_type)) self.resCmp(input_json, stb_name) # * limit set to 1.797693134862316*(10**308) - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") for value in [-1.797693134862316*(10**308), -1.797693134862316*(10**308)]: input_json = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="double", value_type=value_type))[0] try: @@ -944,12 +944,12 @@ class TDTestCase: # if value_type == "obj": # # binary - # tdCom.cleanTb() + # tdCom.cleanTb(dbname="test") # stb_name = tdCom.getLongName(7, "letters") # input_json = {"metric": stb_name, "timestamp": {'value': 1626006833639000000, 'type': 'ns'}, "value": {'value': tdCom.getLongName(16374, "letters"), 'type': 'binary'}, "tags": {"t0": {'value': True, 'type': 'bool'}}} # self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) - # tdCom.cleanTb() + # tdCom.cleanTb(dbname="test") # input_json = {"metric": stb_name, "timestamp": {'value': 1626006833639000000, 'type': 'ns'}, "value": {'value': tdCom.getLongName(16375, "letters"), 'type': 'binary'}, "tags": {"t0": {'value': True, 'type': 'bool'}}} # try: # self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) @@ -959,12 +959,12 @@ class TDTestCase: # # nchar # # * legal nchar could not be larger than 16374/4 - # tdCom.cleanTb() + # tdCom.cleanTb(dbname="test") # stb_name = tdCom.getLongName(7, "letters") # input_json = {"metric": stb_name, "timestamp": {'value': 1626006833639000000, 'type': 'ns'}, "value": {'value': tdCom.getLongName(4093, "letters"), 'type': 'nchar'}, "tags": {"t0": {'value': True, 'type': 'bool'}}} # self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) - # tdCom.cleanTb() + # tdCom.cleanTb(dbname="test") # input_json = {"metric": 
stb_name, "timestamp": {'value': 1626006833639000000, 'type': 'ns'}, "value": {'value': tdCom.getLongName(4094, "letters"), 'type': 'nchar'}, "tags": {"t0": {'value': True, 'type': 'bool'}}} # try: # self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) @@ -973,14 +973,14 @@ class TDTestCase: # tdSql.checkNotEqual(err.errno, 0) # elif value_type == "default": # # binary - # tdCom.cleanTb() + # tdCom.cleanTb(dbname="test") # stb_name = tdCom.getLongName(7, "letters") # if tdSql.getVariable("defaultJSONStrType")[0].lower() == "binary": # input_json = {"metric": stb_name, "timestamp": 1626006834, "value": tdCom.getLongName(16374, "letters"), "tags": {"t0": {'value': True, 'type': 'bool'}}} # elif tdSql.getVariable("defaultJSONStrType")[0].lower() == "nchar": # input_json = {"metric": stb_name, "timestamp": 1626006834, "value": tdCom.getLongName(4093, "letters"), "tags": {"t0": {'value': True, 'type': 'bool'}}} # self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) - # tdCom.cleanTb() + # tdCom.cleanTb(dbname="test") # if tdSql.getVariable("defaultJSONStrType")[0].lower() == "binary": # input_json = {"metric": stb_name, "timestamp": 1626006834, "value": tdCom.getLongName(16375, "letters"), "tags": {"t0": {'value': True, 'type': 'bool'}}} # elif tdSql.getVariable("defaultJSONStrType")[0].lower() == "nchar": @@ -997,7 +997,7 @@ class TDTestCase: test illegal tag col value """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") # bool for i in ["TrUe", "tRue", "trUe", "truE", "FalsE", "fAlse", "faLse", "falSe", "falsE"]: try: @@ -1046,7 +1046,7 @@ class TDTestCase: check duplicate Id Tag Col """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_json = self.genFullTypeJson(id_double_tag=True, value_type=value_type)[0] print(input_json) try: @@ -1068,7 +1068,7 
@@ class TDTestCase: case no id when stb exist """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_json, stb_name = self.genFullTypeJson(tb_name="sub_table_0123456", col_value=self.genTsColValue(value=True, t_type="bool", value_type=value_type), tag_value=self.genTagValue(t0_value=True, value_type=value_type)) self.resCmp(input_json, stb_name) input_json, stb_name = self.genFullTypeJson(stb_name=stb_name, id_noexist_tag=True, col_value=self.genTsColValue(value=True, t_type="bool", value_type=value_type), tag_value=self.genTagValue(t0_value=True, value_type=value_type)) @@ -1081,7 +1081,7 @@ class TDTestCase: check duplicate insert when stb exist """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_json, stb_name = self.genFullTypeJson(value_type=value_type) self.resCmp(input_json, stb_name) self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) @@ -1091,7 +1091,7 @@ class TDTestCase: """ check length increase """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_json, stb_name = self.genFullTypeJson(value_type=value_type) self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) self.resCmp(input_json, stb_name) @@ -1105,7 +1105,7 @@ class TDTestCase: check length increase """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") stb_name = "test_crash" input_json = self.genFullTypeJson(stb_name=stb_name)[0] self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) @@ -1128,7 +1128,7 @@ class TDTestCase: * col is added with value when update==1 """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") tb_name = tdCom.getLongName(7, "letters") for db_update_tag in [0, 1]: if 
db_update_tag == 1 : @@ -1154,7 +1154,7 @@ class TDTestCase: check tag count add """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") tb_name = tdCom.getLongName(7, "letters") input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value=True, t_type="bool", value_type=value_type), tag_value=self.genTagValue(t0_value=True, value_type=value_type)) self.resCmp(input_json, stb_name) @@ -1171,7 +1171,7 @@ class TDTestCase: insert two table, keep tag unchange, change col """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=True, t_type="bool", value_type=value_type), tag_value=self.genTagValue(t0_value=True, value_type=value_type), id_noexist_tag=True) self.resCmp(input_json, stb_name) tb_name1 = self.getNoIdTbName(stb_name) @@ -1194,7 +1194,7 @@ class TDTestCase: every binary and nchar must be length+2 """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") stb_name = tdCom.getLongName(7, "letters") tb_name = f'{stb_name}_1' tag_value = {"t0": {"value": True, "type": "bool"}} @@ -1240,7 +1240,7 @@ class TDTestCase: check nchar length limit """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") stb_name = tdCom.getLongName(7, "letters") tb_name = f'{stb_name}_1' tag_value = {"t0": True} @@ -1284,7 +1284,7 @@ class TDTestCase: test batch insert """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") stb_name = "stb_name" tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 bigint)') input_json = [{"metric": "st123456", "timestamp": {"value": 1626006833639000000, "type": "ns"}, "value": {"value": 1, "type": 
"bigint"}, "tags": {"t1": {"value": 3, "type": "bigint"}, "t2": {"value": 4, "type": "double"}, "t3": {"value": "t3", "type": "binary"}}}, @@ -1319,7 +1319,7 @@ class TDTestCase: test multi insert """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") sql_list = list() stb_name = tdCom.getLongName(8, "letters") tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 bigint)') @@ -1335,7 +1335,7 @@ class TDTestCase: test batch error insert """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_json = [{"metric": "st123456", "timestamp": {"value": 1626006833639000000, "type": "ns"}, "value": {"value": "tt", "type": "bool"}, "tags": {"t1": {"value": 3, "type": "bigint"}, "t2": {"value": 4, "type": "double"}, "t3": {"value": "t3", "type": "binary"}}}, {"metric": "st123456", "timestamp": {"value": 1626006933641000000, "type": "ns"}, "value": {"value": 9, "type": "bigint"}, "tags": {"t1": {"value": 4, "type": "bigint"}, "t3": {"value": "t4", "type": "binary"}, "t2": {"value": 5, "type": "double"}, "t4": {"value": 5, "type": "double"}}}] try: @@ -1349,7 +1349,7 @@ class TDTestCase: test multi cols insert """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_json = self.genFullTypeJson(c_multi_tag=True, value_type=value_type)[0] try: self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) @@ -1362,7 +1362,7 @@ class TDTestCase: test blank col insert """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_json = self.genFullTypeJson(c_blank_tag=True, value_type=value_type)[0] try: self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) @@ -1375,7 +1375,7 @@ class TDTestCase: test blank tag insert """ 
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_json = self.genFullTypeJson(t_blank_tag=True, value_type=value_type)[0] try: self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) @@ -1388,7 +1388,7 @@ class TDTestCase: check nchar ---> chinese """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_json, stb_name = self.genFullTypeJson(chinese_tag=True) self.resCmp(input_json, stb_name) @@ -1397,7 +1397,7 @@ class TDTestCase: multi_field ''' tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_json = self.genFullTypeJson(multi_field_tag=True, value_type=value_type)[0] try: self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) @@ -1407,7 +1407,7 @@ class TDTestCase: def spellCheckCase(self): tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") stb_name = tdCom.getLongName(8, "letters") input_json_list = [{"metric": f'{stb_name}_1', "timestamp": {"value": 1626006833639000000, "type": "Ns"}, "value": {"value": 1, "type": "Bigint"}, "tags": {"t1": {"value": 127, "type": "tinYint"}}}, {"metric": f'{stb_name}_2', "timestamp": {"value": 1626006833639000001, "type": "nS"}, "value": {"value": 32767, "type": "smallInt"}, "tags": {"t1": {"value": 32767, "type": "smallInt"}}}, @@ -1426,7 +1426,7 @@ class TDTestCase: def tbnameTagsColsNameCheckCase(self): tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_json = {'metric': 'rFa$sta', 'timestamp': {'value': 1626006834, 'type': 's'}, 'value': {'value': True, 'type': 'bool'}, 'tags': {'Tt!0': {'value': False, 'type': 'bool'}, 'tT@1': {'value': 127, 'type': 'tinyint'}, 't@2': {'value': 32767, 'type': 
'smallint'}, 't$3': {'value': 2147483647, 'type': 'int'}, 't%4': {'value': 9223372036854775807, 'type': 'bigint'}, 't^5': {'value': 11.12345027923584, 'type': 'float'}, 't&6': {'value': 22.123456789, 'type': 'double'}, 't*7': {'value': 'binaryTagValue', 'type': 'binary'}, 't!@#$%^&*()_+[];:<>?,9': {'value': 'ncharTagValue', 'type': 'nchar'}, 'id': 'rFas$ta_1'}} self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) query_sql = 'select * from `rFa$sta`' @@ -1441,7 +1441,7 @@ class TDTestCase: metric value "." trans to "_" """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_json = self.genFullTypeJson(point_trans_tag=True, value_type=value_type)[0] self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) tdSql.execute("drop table `.point.trans.test`") @@ -1509,7 +1509,7 @@ class TDTestCase: thread input different stb """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_json = self.genSqlList(value_type=value_type)[0] self.multiThreadRun(self.genMultiThreadSeq(input_json)) tdSql.query(f"show tables;") @@ -1520,7 +1520,7 @@ class TDTestCase: thread input same stb tb, different data, result keep first data """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") tb_name = tdCom.getLongName(7, "letters") input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value="binaryTagValue", t_type="binary", value_type=value_type)) self.resCmp(input_json, stb_name) @@ -1538,7 +1538,7 @@ class TDTestCase: thread input same stb tb, different data, add columes and tags, result keep first data """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") tb_name = tdCom.getLongName(7, "letters") input_json, 
stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value="binaryTagValue", t_type="binary", value_type=value_type)) self.resCmp(input_json, stb_name) @@ -1556,7 +1556,7 @@ class TDTestCase: thread input same stb tb, different data, minus columes and tags, result keep first data """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") tb_name = tdCom.getLongName(7, "letters") input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value="binaryTagValue", t_type="binary", value_type=value_type)) self.resCmp(input_json, stb_name) @@ -1574,7 +1574,7 @@ class TDTestCase: thread input same stb, different tb, different data """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value="binaryTagValue", t_type="binary", value_type=value_type)) self.resCmp(input_json, stb_name) s_stb_d_tb_list = self.genSqlList(stb_name=stb_name, value_type=value_type)[4] @@ -1587,7 +1587,7 @@ class TDTestCase: thread input same stb, different tb, different data, add col, mul tag """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value="binaryTagValue", t_type="binary")) self.resCmp(input_json, stb_name) s_stb_d_tb_m_tag_list = [({"metric": stb_name, "timestamp": {"value": 1626006833639000000, "type": "ns"}, "value": "omfdhyom", "tags": {"t0": {"value": False, "type": "bool"}, "t1": {"value": 127, "type": "tinyint"}, "t2": {"value": 32767, "type": "smallint"}, "t3": {"value": 2147483647, "type": "int"}, "t4": {"value": 9223372036854775807, "type": "bigint"}, "t5": {"value": 11.12345, "type": "float"}, "t6": {"value": 22.123456789, "type": "double"}}}, 'yzwswz'), @@ -1605,7 +1605,7 @@ 
class TDTestCase: thread input same stb, different tb, different data, add tag, mul col """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value="binaryTagValue", t_type="binary", value_type=value_type)) self.resCmp(input_json, stb_name) s_stb_d_tb_a_tag_list = self.genSqlList(stb_name=stb_name, value_type=value_type)[6] @@ -1618,7 +1618,7 @@ class TDTestCase: thread input same stb tb, different ts """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") tb_name = tdCom.getLongName(7, "letters") input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value="binaryTagValue", t_type="binary")) self.resCmp(input_json, stb_name) @@ -1638,7 +1638,7 @@ class TDTestCase: thread input same stb tb, different ts, add col, mul tag """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") tb_name = tdCom.getLongName(7, "letters") input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value="binaryTagValue", t_type="binary")) self.resCmp(input_json, stb_name) @@ -1660,7 +1660,7 @@ class TDTestCase: thread input same stb tb, different ts, add tag, mul col """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") tb_name = tdCom.getLongName(7, "letters") input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value="binaryTagValue", t_type="binary")) self.resCmp(input_json, stb_name) @@ -1683,7 +1683,7 @@ class TDTestCase: thread input same stb, different tb, data, ts """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_json, stb_name = 
self.genFullTypeJson(col_value=self.genTsColValue(value="binaryTagValue", t_type="binary", value_type=value_type)) self.resCmp(input_json, stb_name) s_stb_d_tb_d_ts_list = self.genSqlList(stb_name=stb_name, value_type=value_type)[10] @@ -1696,7 +1696,7 @@ class TDTestCase: thread input same stb, different tb, data, ts, add col, mul tag """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value="binaryTagValue", t_type="binary")) self.resCmp(input_json, stb_name) s_stb_d_tb_d_ts_m_tag_list = [({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': 'pjndapjb', 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}}}, 'punftb'), diff --git a/tests/system-test/1-insert/opentsdb_telnet_line_taosc_insert.py b/tests/system-test/1-insert/opentsdb_telnet_line_taosc_insert.py index 3c47a65746..209cfb724e 100644 --- a/tests/system-test/1-insert/opentsdb_telnet_line_taosc_insert.py +++ b/tests/system-test/1-insert/opentsdb_telnet_line_taosc_insert.py @@ -30,7 +30,7 @@ if platform.system().lower() == 'windows': class TDTestCase: def init(self, conn, logSql): tdLog.debug("start to execute %s" % __file__) - tdSql.init(conn.cursor(), logSql) + tdSql.init(conn.cursor(), False) self._conn = conn self.smlChildTableName_value = "id" @@ -351,7 +351,7 @@ class TDTestCase: normal tags and cols, one for every elm """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql(protocol=protocol) self.resCmp(input_sql, stb_name, protocol=protocol) @@ -360,7 
+360,7 @@ class TDTestCase: check all normal type """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") full_type_list = ["f", "F", "false", "False", "t", "T", "true", "True"] for t_type in full_type_list: input_sql, stb_name = self.genFullTypeSql(t0=t_type, protocol=protocol) @@ -375,7 +375,7 @@ class TDTestCase: binary_symbols = '\"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"\'\'"\"' ''' tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") binary_symbols = '"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"' nchar_symbols = f'L{binary_symbols}' input_sql1, stb_name1 = self.genFullTypeSql(value=binary_symbols, t7=binary_symbols, t8=nchar_symbols, protocol=protocol) @@ -388,7 +388,7 @@ class TDTestCase: test ts list --> ["1626006833640ms", "1626006834s", "1626006822639022"] """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql(ts=1626006833640) self.resCmp(input_sql, stb_name, ts_type=TDSmlTimestampType.MILLI_SECOND.value) input_sql, stb_name = self.genFullTypeSql(ts=1626006833640) @@ -407,7 +407,7 @@ class TDTestCase: def openTstbTelnetTsCheckCase(self): tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql = f'{tdCom.getLongName(len=10, mode="letters")} 0 127 t0=127 t1=32767I16 t2=2147483647I32 t3=9223372036854775807 t4=11.12345027923584F32 t5=22.123456789F64' stb_name = input_sql.split(" ")[0] self.resCmp(input_sql, stb_name, ts=0) @@ -431,7 +431,7 @@ class TDTestCase: eg: t0=**,id=**,t1=** """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql(id_change_tag=True, protocol=protocol) self.resCmp(input_sql, stb_name, protocol=protocol) @@ -441,7 +441,7 @@ class 
TDTestCase: eg: id and ID """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql(id_upper_tag=True, protocol=protocol) self.resCmp(input_sql, stb_name, protocol=protocol) input_sql, stb_name = self.genFullTypeSql(id_mixul_tag=True, protocol=protocol) @@ -454,7 +454,7 @@ class TDTestCase: id not exist """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql(id_noexist_tag=True, protocol=protocol) self.resCmp(input_sql, stb_name, protocol=protocol) query_sql = f"select tbname from {stb_name}" @@ -470,10 +470,10 @@ class TDTestCase: """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') for input_sql in [self.genLongSql(128)[0]]: - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) for input_sql in [self.genLongSql(129)[0]]: - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") try: self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) raise Exception("should not reach here") @@ -486,7 +486,7 @@ class TDTestCase: mix "`~!@#$¥%^&*()-+{}|[]、「」【】:;《》<>?" 
""" tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") rstr = list("~!@#$¥%^&*()-+{}|[]、「」【】:;《》<>?") for i in rstr: input_sql, stb_name = self.genFullTypeSql(tb_name=f"\"aaa{i}bbb\"", protocol=protocol) @@ -498,7 +498,7 @@ class TDTestCase: id is start with num """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql(tb_name="1aaabbb", protocol=protocol) self.resCmp(input_sql, stb_name, protocol=protocol) @@ -507,7 +507,7 @@ class TDTestCase: check now unsupported """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql = self.genFullTypeSql(ts="now")[0] try: self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) @@ -520,7 +520,7 @@ class TDTestCase: check date format ts unsupported """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql = self.genFullTypeSql(ts="2021-07-21\ 19:01:46.920")[0] try: self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) @@ -533,7 +533,7 @@ class TDTestCase: check ts format like 16260068336390us19 """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql = self.genFullTypeSql(ts="16260068336390us19")[0] try: self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) @@ -551,7 +551,7 @@ class TDTestCase: tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') stb_name_192 = tdCom.getLongName(len=192, mode="letters") tb_name_192 = tdCom.getLongName(len=192, mode="letters") - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name_192, tb_name=tb_name_192) self.resCmp(input_sql, stb_name) 
tdSql.query(f'select * from {stb_name}') @@ -581,7 +581,7 @@ class TDTestCase: check tag name limit <= 62 """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") tag_name = tdCom.getLongName(61, "letters") tag_name = f'T{tag_name}' stb_name = tdCom.getLongName(7, "letters") @@ -599,7 +599,7 @@ class TDTestCase: check full type tag value limit """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") # nchar # * legal nchar could not be larger than 16374/4 stb_name = tdCom.getLongName(7, "letters") @@ -618,12 +618,12 @@ class TDTestCase: check full type col value limit """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") # i8 for value in ["-128i8", "127i8"]: input_sql, stb_name = self.genFullTypeSql(value=value) self.resCmp(input_sql, stb_name) - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") for value in ["-129i8", "128i8"]: input_sql = self.genFullTypeSql(value=value)[0] try: @@ -632,11 +632,11 @@ class TDTestCase: except SchemalessError as err: tdSql.checkNotEqual(err.errno, 0) # i16 - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") for value in ["-32768i16"]: input_sql, stb_name = self.genFullTypeSql(value=value) self.resCmp(input_sql, stb_name) - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") for value in ["-32769i16", "32768i16"]: input_sql = self.genFullTypeSql(value=value)[0] try: @@ -646,11 +646,11 @@ class TDTestCase: tdSql.checkNotEqual(err.errno, 0) # i32 - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") for value in ["-2147483648i32"]: input_sql, stb_name = self.genFullTypeSql(value=value) self.resCmp(input_sql, stb_name) - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") for value in ["-2147483649i32", "2147483648i32"]: input_sql = self.genFullTypeSql(value=value)[0] try: @@ -660,11 +660,11 @@ class TDTestCase: tdSql.checkNotEqual(err.errno, 0) # i64 - 
tdCom.cleanTb() + tdCom.cleanTb(dbname="test") for value in ["-9223372036854775808i64"]: input_sql, stb_name = self.genFullTypeSql(value=value) self.resCmp(input_sql, stb_name) - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") for value in ["-9223372036854775809i64", "9223372036854775808i64"]: input_sql = self.genFullTypeSql(value=value)[0] try: @@ -674,12 +674,12 @@ class TDTestCase: tdSql.checkNotEqual(err.errno, 0) # f32 - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") for value in [f"{-3.4028234663852885981170418348451692544*(10**38)}f32", f"{3.4028234663852885981170418348451692544*(10**38)}f32"]: input_sql, stb_name = self.genFullTypeSql(value=value) self.resCmp(input_sql, stb_name) # * limit set to 4028234664*(10**38) - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") for value in [f"{-3.4028234664*(10**38)}f32", f"{3.4028234664*(10**38)}f32"]: input_sql = self.genFullTypeSql(value=value)[0] try: @@ -689,12 +689,12 @@ class TDTestCase: tdSql.checkNotEqual(err.errno, 0) # f64 - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") for value in [f'{-1.79769313486231570814527423731704356798070567525844996598917476803157260780*(10**308)}f64', f'{-1.79769313486231570814527423731704356798070567525844996598917476803157260780*(10**308)}f64']: input_sql, stb_name = self.genFullTypeSql(value=value) self.resCmp(input_sql, stb_name) # # * limit set to 1.797693134862316*(10**308) - # tdCom.cleanTb() + # tdCom.cleanTb(dbname="test") # for value in [f'{-1.797693134862316*(10**308)}f64', f'{-1.797693134862316*(10**308)}f64']: # input_sql = self.genFullTypeSql(value=value)[0] # try: @@ -704,12 +704,12 @@ class TDTestCase: # tdSql.checkNotEqual(err.errno, 0) # # # binary - # tdCom.cleanTb() + # tdCom.cleanTb(dbname="test") # stb_name = tdCom.getLongName(7, "letters") # input_sql = f'{stb_name} 1626006833640 "{tdCom.getLongName(16374, "letters")}" t0=t' # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) - # tdCom.cleanTb() + # tdCom.cleanTb(dbname="test") # 
input_sql = f'{stb_name} 1626006833640 "{tdCom.getLongName(16375, "letters")}" t0=t' # try: # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) @@ -719,12 +719,12 @@ class TDTestCase: # # nchar # # * legal nchar could not be larger than 16374/4 - # tdCom.cleanTb() + # tdCom.cleanTb(dbname="test") # stb_name = tdCom.getLongName(7, "letters") # input_sql = f'{stb_name} 1626006833640 L"{tdCom.getLongName(4093, "letters")}" t0=t' # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) - # tdCom.cleanTb() + # tdCom.cleanTb(dbname="test") # input_sql = f'{stb_name} 1626006833640 L"{tdCom.getLongName(4094, "letters")}" t0=t' # try: # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) @@ -738,7 +738,7 @@ class TDTestCase: test illegal tag col value """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") # bool for i in ["TrUe", "tRue", "trUe", "truE", "FalsE", "fAlse", "faLse", "falSe", "falsE"]: input_sql1, stb_name = self.genFullTypeSql(t0=i) @@ -774,7 +774,7 @@ class TDTestCase: check blank case ''' tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") # input_sql_list = [f'{tdCom.getLongName(7, "letters")} 1626006833640 "abc aaa" t0=t', # f'{tdCom.getLongName(7, "letters")} 1626006833640 t t0="abaaa"', # f'{tdCom.getLongName(7, "letters")} 1626006833640 t t0=L"abaaa"', @@ -792,7 +792,7 @@ class TDTestCase: check duplicate Id Tag Col """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql_id = self.genFullTypeSql(id_double_tag=True)[0] try: self._conn.schemaless_insert([input_sql_id], TDSmlProtocolType.TELNET.value, None) @@ -815,7 +815,7 @@ class TDTestCase: case no id when stb exist """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + 
tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql(tb_name="sub_table_0123456", t0="f", value="f") self.resCmp(input_sql, stb_name) input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, id_noexist_tag=True, t0="f", value="f") @@ -828,7 +828,7 @@ class TDTestCase: check duplicate insert when stb exist """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql() self.resCmp(input_sql, stb_name) self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) @@ -840,7 +840,7 @@ class TDTestCase: check length increase """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql() self.resCmp(input_sql, stb_name) tb_name = tdCom.getLongName(5, "letters") @@ -858,7 +858,7 @@ class TDTestCase: * col is added with value when update==1 """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") tb_name = tdCom.getLongName(7, "letters") for db_update_tag in [0, 1]: if db_update_tag == 1 : @@ -885,7 +885,7 @@ class TDTestCase: check tag count add """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") tb_name = tdCom.getLongName(7, "letters") input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, t0="f", value="f") self.resCmp(input_sql, stb_name) @@ -902,7 +902,7 @@ class TDTestCase: insert two table, keep tag unchange, change col """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql(t0="f", value="f", id_noexist_tag=True) self.resCmp(input_sql, stb_name) tb_name1 = self.getNoIdTbName(stb_name) @@ -925,7 +925,7 @@ class TDTestCase: check nchar length limit """ 
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") stb_name = tdCom.getLongName(7, "letters") input_sql = f'{stb_name} 1626006833640 f t2={tdCom.getLongName(1, "letters")}' self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) @@ -949,7 +949,7 @@ class TDTestCase: test batch insert """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") stb_name = tdCom.getLongName(8, "letters") tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 bigint)') @@ -976,7 +976,7 @@ class TDTestCase: test multi insert """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") sql_list = [] stb_name = tdCom.getLongName(8, "letters") tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 nchar(10))') @@ -992,7 +992,7 @@ class TDTestCase: test batch error insert """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") stb_name = tdCom.getLongName(8, "letters") lines = ["st123456 1626006833640 3i 64 t1=3i64 t2=4f64 t3=\"t3\"", f"{stb_name} 1626056811823316532ns tRue t2=5f64 t3=L\"ste\""] @@ -1007,7 +1007,7 @@ class TDTestCase: test multi cols insert """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql = self.genFullTypeSql(c_multi_tag=True)[0] try: self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) @@ -1020,7 +1020,7 @@ class TDTestCase: test blank col insert """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql = self.genFullTypeSql(c_blank_tag=True)[0] try: self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) @@ -1033,7 +1033,7 @@ class TDTestCase: test blank 
tag insert """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql = self.genFullTypeSql(t_blank_tag=True)[0] try: self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) @@ -1046,7 +1046,7 @@ class TDTestCase: check nchar ---> chinese """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql(chinese_tag=True) self.resCmp(input_sql, stb_name) @@ -1055,7 +1055,7 @@ class TDTestCase: multi_field ''' tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql = self.genFullTypeSql(multi_field_tag=True)[0] try: self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) @@ -1065,7 +1065,7 @@ class TDTestCase: def spellCheckCase(self): tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") stb_name = tdCom.getLongName(8, "letters") input_sql_list = [f'{stb_name}_1 1626006833640 127I8 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64', f'{stb_name}_2 1626006833640 32767I16 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64', @@ -1086,7 +1086,7 @@ class TDTestCase: metric value "." 
trans to "_" """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql = self.genFullTypeSql(point_trans_tag=True, protocol=protocol)[0] if protocol == 'telnet-tcp': stb_name = f'`{input_sql.split(" ")[1]}`' @@ -1097,7 +1097,7 @@ class TDTestCase: def defaultTypeCheckCase(self): tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") stb_name = tdCom.getLongName(8, "letters") input_sql_list = [f'{stb_name}_1 1626006833640 9223372036854775807 t0=f t1=127 t2=32767i16 t3=2147483647i32 t4=9223372036854775807 t5=11.12345f32 t6=22.123456789f64 t7="vozamcts" t8=L"ncharTagValue"', \ f'{stb_name}_2 1626006833641 22.123456789 t0=f t1=127i8 t2=32767I16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789 t7="vozamcts" t8=L"ncharTagValue"', \ @@ -1110,7 +1110,7 @@ class TDTestCase: def tbnameTagsColsNameCheckCase(self): tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") if self.smlChildTableName_value == "ID": input_sql = 'rFa$sta 1626006834 9223372036854775807 id=rFas$ta_1 Tt!0=true tT@1=127Ii8 t#2=32767i16 "t$3"=2147483647i32 t%4=9223372036854775807i64 t^5=11.12345f32 t&6=22.123456789f64 t*7=\"ddzhiksj\" t!@#$%^&*()_+[];:<>?,9=L\"ncharTagValue\"' self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) @@ -1135,7 +1135,7 @@ class TDTestCase: stb = "put" """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql = self.genFullTypeSql(tcp_keyword_tag=True, protocol=protocol)[0] stb_name = f'`{input_sql.split(" ")[1]}`' self.resCmp(input_sql, stb_name, protocol=protocol) @@ -1204,7 +1204,7 @@ class TDTestCase: thread input different stb """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") 
input_sql = self.genSqlList()[0] print(input_sql) self.multiThreadRun(self.genMultiThreadSeq(input_sql)) @@ -1216,7 +1216,7 @@ class TDTestCase: thread input same stb tb, different data, result keep first data """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") tb_name = tdCom.getLongName(7, "letters") input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"") self.resCmp(input_sql, stb_name) @@ -1235,7 +1235,7 @@ class TDTestCase: thread input same stb tb, different data, add columes and tags, result keep first data """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") tb_name = tdCom.getLongName(7, "letters") input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"") self.resCmp(input_sql, stb_name) @@ -1254,7 +1254,7 @@ class TDTestCase: thread input same stb tb, different data, minus columes and tags, result keep first data """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") tb_name = tdCom.getLongName(7, "letters") input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"") self.resCmp(input_sql, stb_name) @@ -1273,7 +1273,7 @@ class TDTestCase: thread input same stb, different tb, different data """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"") self.resCmp(input_sql, stb_name) s_stb_d_tb_list = self.genSqlList(stb_name=stb_name)[4] @@ -1286,7 +1286,7 @@ class TDTestCase: thread input same stb, different tb, different data, add col, mul tag """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"") 
self.resCmp(input_sql, stb_name) s_stb_d_tb_m_tag_list = [(f'{stb_name} 1626006833640 "omfdhyom" t0=F t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'yzwswz'), \ @@ -1303,7 +1303,7 @@ class TDTestCase: thread input same stb, different tb, different data, add tag, mul col """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"") self.resCmp(input_sql, stb_name) s_stb_d_tb_a_tag_list = self.genSqlList(stb_name=stb_name)[6] @@ -1316,7 +1316,7 @@ class TDTestCase: thread input same stb tb, different ts """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") tb_name = tdCom.getLongName(7, "letters") input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"") self.resCmp(input_sql, stb_name) @@ -1336,7 +1336,7 @@ class TDTestCase: thread input same stb tb, different ts, add col, mul tag """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") tb_name = tdCom.getLongName(7, "letters") input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"") self.resCmp(input_sql, stb_name) @@ -1354,7 +1354,7 @@ class TDTestCase: thread input same stb tb, different ts, add tag, mul col """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") tb_name = tdCom.getLongName(7, "letters") input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"") self.resCmp(input_sql, stb_name) @@ -1377,7 +1377,7 @@ class TDTestCase: thread input same stb, different tb, data, ts """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = 
self.genFullTypeSql(value="\"binaryTagValue\"") self.resCmp(input_sql, stb_name) s_stb_d_tb_d_ts_list = self.genSqlList(stb_name=stb_name)[10] @@ -1390,7 +1390,7 @@ class TDTestCase: thread input same stb, different tb, data, ts, add col, mul tag """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"") self.resCmp(input_sql, stb_name) s_stb_d_tb_d_ts_m_tag_list = [(f'{stb_name} 0 "mnpmtzul" t0=False t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'pcppkg'), \ From 07325e1b688e2ef88a3f82f109e603dbc0ac7650 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Wed, 31 Aug 2022 11:10:33 +0800 Subject: [PATCH 46/72] chore: add .gitattributes to fool github language detect (#16523) --- .gitattributes | 1 + 1 file changed, 1 insertion(+) create mode 100644 .gitattributes diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000000..912b302ad2 --- /dev/null +++ b/.gitattributes @@ -0,0 +1 @@ +*.py linguist-detectable=false From c4dcc994fbb402cf2c01c8b63814c2f450855122 Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Wed, 31 Aug 2022 11:35:25 +0800 Subject: [PATCH 47/72] remove queue scan --- include/common/tmsg.h | 1 + source/common/src/tmsg.c | 7 ++ source/dnode/mnode/impl/src/mndTopic.c | 5 +- source/dnode/vnode/src/inc/tq.h | 10 +- source/dnode/vnode/src/tq/tq.c | 51 ++++----- source/dnode/vnode/src/tq/tqExec.c | 144 ++++++++++++------------ source/libs/executor/inc/executorimpl.h | 41 +++---- source/libs/executor/src/executor.c | 30 +++-- source/libs/executor/src/executorimpl.c | 128 +-------------------- source/libs/executor/src/scanoperator.c | 131 +++++++++++---------- 10 files changed, 212 insertions(+), 336 deletions(-) diff --git a/include/common/tmsg.h b/include/common/tmsg.h index d503592361..7728b0b5eb 100644 --- a/include/common/tmsg.h +++ 
b/include/common/tmsg.h @@ -2957,6 +2957,7 @@ typedef struct { int32_t tEncodeSMqDataRsp(SEncoder* pEncoder, const SMqDataRsp* pRsp); int32_t tDecodeSMqDataRsp(SDecoder* pDecoder, SMqDataRsp* pRsp); +void tDeleteSMqDataRsp(SMqDataRsp* pRsp); typedef struct { SMqRspHead head; diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c index 2fc93cc9b5..af29ab7c50 100644 --- a/source/common/src/tmsg.c +++ b/source/common/src/tmsg.c @@ -5889,6 +5889,13 @@ int32_t tDecodeSMqDataRsp(SDecoder *pDecoder, SMqDataRsp *pRsp) { return 0; } +void tDeleteSMqDataRsp(SMqDataRsp *pRsp) { + taosArrayDestroy(pRsp->blockDataLen); + taosArrayDestroyP(pRsp->blockData, (FDelete)taosMemoryFree); + taosArrayDestroyP(pRsp->blockSchema, (FDelete)tDeleteSSchemaWrapper); + taosArrayDestroyP(pRsp->blockTbName, (FDelete)taosMemoryFree); +} + int32_t tEncodeSTaosxRsp(SEncoder *pEncoder, const STaosxRsp *pRsp) { if (tEncodeSTqOffsetVal(pEncoder, &pRsp->reqOffset) < 0) return -1; if (tEncodeSTqOffsetVal(pEncoder, &pRsp->rspOffset) < 0) return -1; diff --git a/source/dnode/mnode/impl/src/mndTopic.c b/source/dnode/mnode/impl/src/mndTopic.c index ff208eae60..eb072d013d 100644 --- a/source/dnode/mnode/impl/src/mndTopic.c +++ b/source/dnode/mnode/impl/src/mndTopic.c @@ -763,8 +763,9 @@ static int32_t mndRetrieveTopic(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBl int32_t cols = 0; char topicName[TSDB_TOPIC_NAME_LEN + VARSTR_HEADER_SIZE] = {0}; - tNameFromString(&n, pTopic->name, T_NAME_ACCT | T_NAME_DB); - tNameGetDbName(&n, varDataVal(topicName)); + strcpy(varDataVal(topicName), mndGetDbStr(pTopic->name)); + /*tNameFromString(&n, pTopic->name, T_NAME_ACCT | T_NAME_DB);*/ + /*tNameGetDbName(&n, varDataVal(topicName));*/ varDataSetLen(topicName, strlen(varDataVal(topicName))); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataAppend(pColInfo, numOfRows, (const char *)topicName, false); diff --git a/source/dnode/vnode/src/inc/tq.h b/source/dnode/vnode/src/inc/tq.h index 
a97c8ff132..753cdc603e 100644 --- a/source/dnode/vnode/src/inc/tq.h +++ b/source/dnode/vnode/src/inc/tq.h @@ -67,21 +67,21 @@ typedef struct { // tqExec typedef struct { - char* qmsg; + char* qmsg; } STqExecCol; typedef struct { - int64_t suid; + int64_t suid; } STqExecTb; typedef struct { - SHashObj* pFilterOutTbUid; + SHashObj* pFilterOutTbUid; } STqExecDb; typedef struct { int8_t subType; - STqReader* pExecReader; + STqReader* pExecReader; qTaskInfo_t task; union { STqExecCol execCol; @@ -144,7 +144,7 @@ int64_t tqScan(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, SMqMetaRsp* int64_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalCkHead** pHeadWithCkSum); // tqExec -int32_t tqLogScanExec(STQ* pTq, STqExecHandle* pExec, SSubmitReq* pReq, SMqDataRsp* pRsp); +int32_t tqLogScanExec(STQ* pTq, STqHandle* pHandle, SSubmitReq* pReq, SMqDataRsp* pRsp); int32_t tqSendDataRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq, const SMqDataRsp* pRsp); // tqMeta diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index 54f764c6b3..eed997b486 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -357,8 +357,8 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) { TD_VID(pTq->pVnode), formatBuf); } else { if (reqOffset.type == TMQ_OFFSET__RESET_EARLIEAST) { - if (pReq->useSnapshot){ - if (pHandle->fetchMeta){ + if (pReq->useSnapshot) { + if (pHandle->fetchMeta) { tqOffsetResetToMeta(&fetchOffsetNew, 0); } else { tqOffsetResetToData(&fetchOffsetNew, 0, 0); @@ -373,43 +373,47 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) { if (tqSendDataRsp(pTq, pMsg, pReq, &dataRsp) < 0) { code = -1; } - goto OVER; + tDeleteSMqDataRsp(&dataRsp); + return code; } else if (reqOffset.type == TMQ_OFFSET__RESET_NONE) { tqError("tmq poll: subkey %s, no offset committed for consumer %" PRId64 " in vg %d, subkey %s, reset none failed", pHandle->subKey, consumerId, TD_VID(pTq->pVnode), pReq->subKey); terrno = 
TSDB_CODE_TQ_NO_COMMITTED_OFFSET; code = -1; - goto OVER; + tDeleteSMqDataRsp(&dataRsp); + return code; } } } - if(pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN || fetchOffsetNew.type != TMQ_OFFSET__LOG){ + if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN || fetchOffsetNew.type != TMQ_OFFSET__LOG) { SMqMetaRsp metaRsp = {0}; tqScan(pTq, pHandle, &dataRsp, &metaRsp, &fetchOffsetNew); - if(metaRsp.metaRspLen > 0){ + if (metaRsp.metaRspLen > 0) { if (tqSendMetaPollRsp(pTq, pMsg, pReq, &metaRsp) < 0) { code = -1; } - tqDebug("tmq poll: consumer %ld, subkey %s, vg %d, send meta offset type:%d,uid:%ld,version:%ld", consumerId, pHandle->subKey, - TD_VID(pTq->pVnode), metaRsp.rspOffset.type, metaRsp.rspOffset.uid, metaRsp.rspOffset.version); + tqDebug("tmq poll: consumer %ld, subkey %s, vg %d, send meta offset type:%d,uid:%ld,version:%ld", consumerId, + pHandle->subKey, TD_VID(pTq->pVnode), metaRsp.rspOffset.type, metaRsp.rspOffset.uid, + metaRsp.rspOffset.version); taosMemoryFree(metaRsp.metaRsp); goto OVER; } - if (dataRsp.blockNum > 0){ + if (dataRsp.blockNum > 0) { if (tqSendDataRsp(pTq, pMsg, pReq, &dataRsp) < 0) { code = -1; } goto OVER; - }else{ + } else { fetchOffsetNew = dataRsp.rspOffset; } - tqDebug("tmq poll: consumer %ld, subkey %s, vg %d, send data blockNum:%d, offset type:%d,uid:%ld,version:%ld", consumerId, pHandle->subKey, - TD_VID(pTq->pVnode), dataRsp.blockNum, dataRsp.rspOffset.type, dataRsp.rspOffset.uid, dataRsp.rspOffset.version); + tqDebug("tmq poll: consumer %ld, subkey %s, vg %d, send data blockNum:%d, offset type:%d,uid:%ld,version:%ld", + consumerId, pHandle->subKey, TD_VID(pTq->pVnode), dataRsp.blockNum, dataRsp.rspOffset.type, + dataRsp.rspOffset.uid, dataRsp.rspOffset.version); } if (pHandle->execHandle.subType != TOPIC_SUB_TYPE__COLUMN && fetchOffsetNew.type == TMQ_OFFSET__LOG) { @@ -426,7 +430,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) { consumerEpoch = atomic_load_32(&pHandle->epoch); if (consumerEpoch > 
reqEpoch) { tqWarn("tmq poll: consumer %" PRId64 " (epoch %d), subkey %s, vg %d offset %" PRId64 - ", found new consumer epoch %d, discard req epoch %d", + ", found new consumer epoch %d, discard req epoch %d", consumerId, pReq->epoch, pHandle->subKey, TD_VID(pTq->pVnode), fetchVer, consumerEpoch, reqEpoch); break; } @@ -449,7 +453,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) { if (pHead->msgType == TDMT_VND_SUBMIT) { SSubmitReq* pCont = (SSubmitReq*)&pHead->body; - if (tqLogScanExec(pTq, &pHandle->execHandle, pCont, &dataRsp) < 0) { + if (tqLogScanExec(pTq, pHandle, pCont, &dataRsp) < 0) { /*ASSERT(0);*/ } // TODO batch optimization: @@ -490,18 +494,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) { OVER: if (pCkHead) taosMemoryFree(pCkHead); - // TODO wrap in destroy func - taosArrayDestroy(dataRsp.blockDataLen); - taosArrayDestroyP(dataRsp.blockData, (FDelete)taosMemoryFree); - - if (dataRsp.withSchema) { - taosArrayDestroyP(dataRsp.blockSchema, (FDelete)tDeleteSSchemaWrapper); - } - - if (dataRsp.withTbName) { - taosArrayDestroyP(dataRsp.blockTbName, (FDelete)taosMemoryFree); - } - + tDeleteSMqDataRsp(&dataRsp); return code; } @@ -629,9 +622,9 @@ int32_t tqProcessVgChangeReq(STQ* pTq, int64_t version, char* msg, int32_t msgLe tqReaderSetTbUidList(pHandle->execHandle.pExecReader, tbUidList); taosArrayDestroy(tbUidList); - buildSnapContext(handle.meta, handle.version, req.suid, pHandle->execHandle.subType, pHandle->fetchMeta, (SSnapContext **)(&handle.sContext)); - pHandle->execHandle.task = - qCreateQueueExecTaskInfo(NULL, &handle, NULL, NULL); + buildSnapContext(handle.meta, handle.version, req.suid, pHandle->execHandle.subType, pHandle->fetchMeta, + (SSnapContext**)(&handle.sContext)); + pHandle->execHandle.task = qCreateQueueExecTaskInfo(NULL, &handle, NULL, NULL); } taosHashPut(pTq->pHandle, req.subKey, strlen(req.subKey), pHandle, sizeof(STqHandle)); tqDebug("try to persist handle %s consumer %" PRId64, req.subKey, pHandle->consumerId); diff 
--git a/source/dnode/vnode/src/tq/tqExec.c b/source/dnode/vnode/src/tq/tqExec.c index a0b8141cfb..bfd23f1a1a 100644 --- a/source/dnode/vnode/src/tq/tqExec.c +++ b/source/dnode/vnode/src/tq/tqExec.c @@ -60,6 +60,46 @@ static int32_t tqAddTbNameToRsp(const STQ* pTq, int64_t uid, SMqDataRsp* pRsp) { return 0; } +int64_t tqScanData(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, STqOffsetVal* pOffset) { + const STqExecHandle* pExec = &pHandle->execHandle; + ASSERT(pExec->subType == TOPIC_SUB_TYPE__COLUMN); + + qTaskInfo_t task = pExec->task; + + if (qStreamPrepareScan(task, pOffset, pHandle->execHandle.subType) < 0) { + tqDebug("prepare scan failed, return"); + if (pOffset->type == TMQ_OFFSET__LOG) { + pRsp->rspOffset = *pOffset; + return 0; + } else { + tqOffsetResetToLog(pOffset, pHandle->snapshotVer); + if (qStreamPrepareScan(task, pOffset, pHandle->execHandle.subType) < 0) { + tqDebug("prepare scan failed, return"); + pRsp->rspOffset = *pOffset; + return 0; + } + } + } + + int32_t rowCnt = 0; + while (1) { + SSDataBlock* pDataBlock = NULL; + uint64_t ts = 0; + tqDebug("tmqsnap task start to execute"); + if (qExecTask(task, &pDataBlock, &ts) < 0) { + ASSERT(0); + } + tqDebug("tmqsnap task execute end, get %p", pDataBlock); + + if (pDataBlock) { + tqAddBlockDataToRsp(pDataBlock, pRsp, pExec->numOfCols); + pRsp->blockNum++; + } + } + + return 0; +} + int64_t tqScan(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, SMqMetaRsp* pMetaRsp, STqOffsetVal* pOffset) { const STqExecHandle* pExec = &pHandle->execHandle; qTaskInfo_t task = pExec->task; @@ -97,23 +137,20 @@ int64_t tqScan(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, SMqMetaRsp* if (tqAddTbNameToRsp(pTq, uid, pRsp) < 0) { continue; } - } else { - char* tbName = strdup(qExtractTbnameFromTask(task)); - taosArrayPush(pRsp->blockTbName, &tbName); } } - if(pRsp->withSchema){ + if (pRsp->withSchema) { if (pOffset->type == TMQ_OFFSET__LOG) { tqAddBlockSchemaToRsp(pExec, pRsp); - }else{ + } else { 
SSchemaWrapper* pSW = tCloneSSchemaWrapper(qExtractSchemaFromTask(task)); taosArrayPush(pRsp->blockSchema, &pSW); } } - if(pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN){ + if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) { tqAddBlockDataToRsp(pDataBlock, pRsp, pExec->numOfCols); - }else{ + } else { tqAddBlockDataToRsp(pDataBlock, pRsp, taosArrayGetSize(pDataBlock->pDataBlock)); } pRsp->blockNum++; @@ -125,17 +162,9 @@ int64_t tqScan(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, SMqMetaRsp* } } - if(pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN){ - if (pRsp->blockNum == 0 && pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) { - tqDebug("vgId: %d, tsdb consume over, switch to wal, ver %" PRId64, TD_VID(pTq->pVnode), - pHandle->snapshotVer + 1); - tqOffsetResetToLog(pOffset, pHandle->snapshotVer); - qStreamPrepareScan(task, pOffset, pHandle->execHandle.subType); - continue; - } - }else{ - if (pDataBlock == NULL && pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA){ - if(qStreamExtractPrepareUid(task) != 0){ + if (pHandle->execHandle.subType != TOPIC_SUB_TYPE__COLUMN) { + if (pDataBlock == NULL && pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) { + if (qStreamExtractPrepareUid(task) != 0) { continue; } tqDebug("tmqsnap vgId: %d, tsdb consume over, switch to wal, ver %" PRId64, TD_VID(pTq->pVnode), @@ -143,13 +172,13 @@ int64_t tqScan(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, SMqMetaRsp* break; } - if (pRsp->blockNum > 0){ + if (pRsp->blockNum > 0) { tqDebug("tmqsnap task exec exited, get data"); break; } SMqMetaRsp* tmp = qStreamExtractMetaMsg(task); - if(tmp->rspOffset.type == TMQ_OFFSET__SNAPSHOT_DATA){ + if (tmp->rspOffset.type == TMQ_OFFSET__SNAPSHOT_DATA) { tqOffsetResetToData(pOffset, tmp->rspOffset.uid, tmp->rspOffset.ts); qStreamPrepareScan(task, pOffset, pHandle->execHandle.subType); tmp->rspOffset.type = TMQ_OFFSET__SNAPSHOT_META; @@ -173,57 +202,8 @@ int64_t tqScan(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, 
SMqMetaRsp* return 0; } -#if 0 -int32_t tqScanSnapshot(STQ* pTq, const STqExecHandle* pExec, SMqDataRsp* pRsp, STqOffsetVal offset, int32_t workerId) { - ASSERT(pExec->subType == TOPIC_SUB_TYPE__COLUMN); - qTaskInfo_t task = pExec->execCol.task[workerId]; - - if (qStreamPrepareTsdbScan(task, offset.uid, offset.ts) < 0) { - ASSERT(0); - } - - int32_t rowCnt = 0; - while (1) { - SSDataBlock* pDataBlock = NULL; - uint64_t ts = 0; - if (qExecTask(task, &pDataBlock, &ts) < 0) { - ASSERT(0); - } - if (pDataBlock == NULL) break; - - ASSERT(pDataBlock->info.rows != 0); - ASSERT(taosArrayGetSize(pDataBlock->pDataBlock) != 0); - - tqAddBlockDataToRsp(pDataBlock, pRsp); - - if (pRsp->withTbName) { - pRsp->withTbName = 0; -#if 0 - int64_t uid; - int64_t ts; - if (qGetStreamScanStatus(task, &uid, &ts) < 0) { - ASSERT(0); - } - tqAddTbNameToRsp(pTq, uid, pRsp); -#endif - } - pRsp->blockNum++; - - rowCnt += pDataBlock->info.rows; - if (rowCnt >= 4096) break; - } - int64_t uid; - int64_t ts; - if (qGetStreamScanStatus(task, &uid, &ts) < 0) { - ASSERT(0); - } - tqOffsetResetToData(&pRsp->rspOffset, uid, ts); - - return 0; -} -#endif - -int32_t tqLogScanExec(STQ* pTq, STqExecHandle* pExec, SSubmitReq* pReq, SMqDataRsp* pRsp) { +int32_t tqLogScanExec(STQ* pTq, STqHandle* pHandle, SSubmitReq* pReq, SMqDataRsp* pRsp) { + STqExecHandle* pExec = &pHandle->execHandle; ASSERT(pExec->subType != TOPIC_SUB_TYPE__COLUMN); if (pExec->subType == TOPIC_SUB_TYPE__TABLE) { @@ -268,6 +248,28 @@ int32_t tqLogScanExec(STQ* pTq, STqExecHandle* pExec, SSubmitReq* pReq, SMqDataR tqAddBlockSchemaToRsp(pExec, pRsp); pRsp->blockNum++; } +#if 0 + if (pHandle->fetchMeta && pRsp->blockNum) { + SSubmitMsgIter iter = {0}; + tInitSubmitMsgIter(pReq, &iter); + STaosxRsp* pXrsp = (STaosxRsp*)pRsp; + while (1) { + SSubmitBlk* pBlk = NULL; + if (tGetSubmitMsgNext(&iter, &pBlk) < 0) return -1; + if (pBlk->schemaLen > 0) { + if (pXrsp->createTableNum == 0) { + pXrsp->createTableLen = taosArrayInit(0, sizeof(int32_t)); 
+ pXrsp->createTableReq = taosArrayInit(0, sizeof(void*)); + } + void* createReq = taosMemoryCalloc(1, pBlk->schemaLen); + memcpy(createReq, pBlk->data, pBlk->schemaLen); + taosArrayPush(pXrsp->createTableLen, &pBlk->schemaLen); + taosArrayPush(pXrsp->createTableReq, &createReq); + pXrsp->createTableNum++; + } + } + } +#endif } if (pRsp->blockNum == 0) { diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h index f0518a72ab..b4e2840330 100644 --- a/source/libs/executor/inc/executorimpl.h +++ b/source/libs/executor/inc/executorimpl.h @@ -143,6 +143,8 @@ typedef struct { STqOffsetVal prepareStatus; // for tmq STqOffsetVal lastStatus; // for tmq SMqMetaRsp metaRsp; // for tmq fetching meta + int64_t snapshotVer; + SSchemaWrapper *schema; char tbName[TSDB_TABLE_NAME_LEN]; SSDataBlock* pullOverBlk; // for streaming @@ -486,24 +488,23 @@ typedef struct SStreamScanInfo { STimeWindowAggSupp twAggSup; SSDataBlock* pUpdateDataRes; // status for tmq - // SSchemaWrapper schema; - SNodeList* pGroupTags; - SNode* pTagCond; - SNode* pTagIndexCond; + SNodeList* pGroupTags; + SNode* pTagCond; + SNode* pTagIndexCond; } SStreamScanInfo; -typedef struct SStreamRawScanInfo{ -// int8_t subType; -// bool withMeta; -// int64_t suid; -// int64_t snapVersion; -// void *metaInfo; -// void *dataInfo; - SVnode* vnode; - SSDataBlock pRes; // result SSDataBlock - STsdbReader* dataReader; - SSnapContext* sContext; -}SStreamRawScanInfo; +typedef struct { + // int8_t subType; + // bool withMeta; + // int64_t suid; + // int64_t snapVersion; + // void *metaInfo; + // void *dataInfo; + SVnode* vnode; + SSDataBlock pRes; // result SSDataBlock + STsdbReader* dataReader; + SSnapContext* sContext; +} SStreamRawScanInfo; typedef struct SSysTableScanInfo { SRetrieveMetaTableRsp* pRsp; @@ -528,14 +529,14 @@ typedef struct SBlockDistInfo { SSDataBlock* pResBlock; void* pHandle; SReadHandle readHandle; - uint64_t uid; // table uid + uint64_t uid; // table uid } 
SBlockDistInfo; // todo remove this typedef struct SOptrBasicInfo { - SResultRowInfo resultRowInfo; - SSDataBlock* pRes; - bool mergeResultBlock; + SResultRowInfo resultRowInfo; + SSDataBlock* pRes; + bool mergeResultBlock; } SOptrBasicInfo; typedef struct SIntervalAggOperatorInfo { diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c index 124f4b44b0..7e631ab3e9 100644 --- a/source/libs/executor/src/executor.c +++ b/source/libs/executor/src/executor.c @@ -139,7 +139,7 @@ int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numO qTaskInfo_t qCreateQueueExecTaskInfo(void* msg, SReadHandle* readers, int32_t* numOfCols, SSchemaWrapper** pSchema) { if (msg == NULL) { - // TODO create raw scan + // create raw scan SExecTaskInfo* pTaskInfo = taosMemoryCalloc(1, sizeof(SExecTaskInfo)); if (NULL == pTaskInfo) { @@ -151,7 +151,7 @@ qTaskInfo_t qCreateQueueExecTaskInfo(void* msg, SReadHandle* readers, int32_t* n pTaskInfo->cost.created = taosGetTimestampMs(); pTaskInfo->execModel = OPTR_EXEC_MODEL_QUEUE; pTaskInfo->pRoot = createRawScanOperatorInfo(readers, pTaskInfo); - if(NULL == pTaskInfo->pRoot){ + if (NULL == pTaskInfo->pRoot) { terrno = TSDB_CODE_OUT_OF_MEMORY; taosMemoryFree(pTaskInfo); return NULL; @@ -834,11 +834,11 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT } else { ASSERT(0); } - }else if (pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA){ + } else if (pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) { SStreamRawScanInfo* pInfo = pOperator->info; - SSnapContext* sContext = pInfo->sContext; - if(setForSnapShot(sContext, pOffset->uid) != 0) { - qError("setDataForSnapShot error. uid:%"PRIi64, pOffset->uid); + SSnapContext* sContext = pInfo->sContext; + if (setForSnapShot(sContext, pOffset->uid) != 0) { + qError("setDataForSnapShot error. 
uid:%" PRIi64, pOffset->uid); return -1; } @@ -847,27 +847,25 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT pInfo->dataReader = NULL; cleanupQueryTableDataCond(&pTaskInfo->streamInfo.tableCond); taosArrayDestroy(pTaskInfo->tableqinfoList.pTableList); - if(mtInfo.uid == 0) return 0; // no data + if (mtInfo.uid == 0) return 0; // no data initQueryTableDataCondForTmq(&pTaskInfo->streamInfo.tableCond, sContext, mtInfo); pTaskInfo->streamInfo.tableCond.twindows.skey = pOffset->ts; pTaskInfo->tableqinfoList.pTableList = taosArrayInit(1, sizeof(STableKeyInfo)); taosArrayPush(pTaskInfo->tableqinfoList.pTableList, &(STableKeyInfo){.uid = mtInfo.uid, .groupId = 0}); - tsdbReaderOpen(pInfo->vnode, &pTaskInfo->streamInfo.tableCond, pTaskInfo->tableqinfoList.pTableList, &pInfo->dataReader, NULL); + tsdbReaderOpen(pInfo->vnode, &pTaskInfo->streamInfo.tableCond, pTaskInfo->tableqinfoList.pTableList, + &pInfo->dataReader, NULL); - strcpy(pTaskInfo->streamInfo.tbName, mtInfo.tbName); - tDeleteSSchemaWrapper(pTaskInfo->streamInfo.schema); - pTaskInfo->streamInfo.schema = mtInfo.schema; qDebug("tmqsnap qStreamPrepareScan snapshot data uid %ld ts %ld", mtInfo.uid, pOffset->ts); - }else if(pOffset->type == TMQ_OFFSET__SNAPSHOT_META){ + } else if (pOffset->type == TMQ_OFFSET__SNAPSHOT_META) { SStreamRawScanInfo* pInfo = pOperator->info; - SSnapContext* sContext = pInfo->sContext; - if(setForSnapShot(sContext, pOffset->uid) != 0) { - qError("setForSnapShot error. uid:%"PRIi64" ,version:%"PRIi64, pOffset->uid); + SSnapContext* sContext = pInfo->sContext; + if (setForSnapShot(sContext, pOffset->uid) != 0) { + qError("setForSnapShot error. 
uid:%" PRIi64 " ,version:%" PRIi64, pOffset->uid); return -1; } qDebug("tmqsnap qStreamPrepareScan snapshot meta uid %ld ts %ld", pOffset->uid); - }else if (pOffset->type == TMQ_OFFSET__LOG) { + } else if (pOffset->type == TMQ_OFFSET__LOG) { SStreamRawScanInfo* pInfo = pOperator->info; tsdbReaderClose(pInfo->dataReader); pInfo->dataReader = NULL; diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index 4ffa80d468..f3ff13ef85 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -268,7 +268,7 @@ SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pR // add a new result set for a new group SResultRowPosition pos = {.pageId = pResult->pageId, .offset = pResult->offset}; tSimpleHashPut(pSup->pResultRowHashTable, pSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes), &pos, - sizeof(SResultRowPosition)); + sizeof(SResultRowPosition)); } // 2. set the new time window to be the new active time window @@ -2815,92 +2815,6 @@ int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t* order, int32_t* scan } } } -#if 0 -int32_t doPrepareScan(SOperatorInfo* pOperator, uint64_t uid, int64_t ts) { - uint8_t type = pOperator->operatorType; - - pOperator->status = OP_OPENED; - - if (type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) { - SStreamScanInfo* pScanInfo = pOperator->info; - pScanInfo->blockType = STREAM_INPUT__TABLE_SCAN; - - pScanInfo->pTableScanOp->status = OP_OPENED; - - STableScanInfo* pInfo = pScanInfo->pTableScanOp->info; - ASSERT(pInfo->scanMode == TABLE_SCAN__TABLE_ORDER); - - if (uid == 0) { - pInfo->noTable = 1; - return TSDB_CODE_SUCCESS; - } - - /*if (pSnapShotScanInfo->dataReader == NULL) {*/ - /*pSnapShotScanInfo->dataReader = tsdbReaderOpen(pHandle->vnode, &pSTInfo->cond, tableList, 0, 0);*/ - /*pSnapShotScanInfo->scanMode = TABLE_SCAN__TABLE_ORDER;*/ - /*}*/ - - pInfo->noTable = 0; - - if (pInfo->lastStatus.uid != uid || pInfo->lastStatus.ts != ts) { - 
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; - - int32_t tableSz = taosArrayGetSize(pTaskInfo->tableqinfoList.pTableList); - bool found = false; - for (int32_t i = 0; i < tableSz; i++) { - STableKeyInfo* pTableInfo = taosArrayGet(pTaskInfo->tableqinfoList.pTableList, i); - if (pTableInfo->uid == uid) { - found = true; - pInfo->currentTable = i; - } - } - // TODO after processing drop, found can be false - ASSERT(found); - - tsdbSetTableId(pInfo->dataReader, uid); - int64_t oldSkey = pInfo->cond.twindows.skey; - pInfo->cond.twindows.skey = ts + 1; - tsdbReaderReset(pInfo->dataReader, &pInfo->cond); - pInfo->cond.twindows.skey = oldSkey; - pInfo->scanTimes = 0; - - qDebug("tsdb reader offset seek to uid %" PRId64 " ts %" PRId64 ", table cur set to %d , all table num %d", uid, ts, - pInfo->currentTable, tableSz); - } - - return TSDB_CODE_SUCCESS; - - } else { - if (pOperator->numOfDownstream == 1) { - return doPrepareScan(pOperator->pDownstream[0], uid, ts); - } else if (pOperator->numOfDownstream == 0) { - qError("failed to find stream scan operator to set the input data block"); - return TSDB_CODE_QRY_APP_ERROR; - } else { - qError("join not supported for stream block scan"); - return TSDB_CODE_QRY_APP_ERROR; - } - } -} - -int32_t doGetScanStatus(SOperatorInfo* pOperator, uint64_t* uid, int64_t* ts) { - int32_t type = pOperator->operatorType; - if (type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) { - SStreamScanInfo* pScanInfo = pOperator->info; - STableScanInfo* pSnapShotScanInfo = pScanInfo->pTableScanOp->info; - *uid = pSnapShotScanInfo->lastStatus.uid; - *ts = pSnapShotScanInfo->lastStatus.ts; - } else { - if (pOperator->pDownstream[0] == NULL) { - return TSDB_CODE_INVALID_PARA; - } else { - doGetScanStatus(pOperator->pDownstream[0], uid, ts); - } - } - - return TSDB_CODE_SUCCESS; -} -#endif // this is a blocking operator static int32_t doOpenAggregateOptr(SOperatorInfo* pOperator) { @@ -3024,7 +2938,7 @@ int32_t aggEncodeResultRow(SOperatorInfo* pOperator, 
char** result, int32_t* len SResultRow* pRow = (SResultRow*)((char*)pPage + pos->offset); setBufPageDirty(pPage, true); releaseBufPage(pSup->pResultBuf, pPage); - + int32_t iter = 0; void* pIter = NULL; while ((pIter = tSimpleHashIterate(pSup->pResultRowHashTable, pIter, &iter))) { @@ -3434,7 +3348,7 @@ int32_t getBufferPgSize(int32_t rowSize, uint32_t* defaultPgsz, uint32_t* defaul int32_t doInitAggInfoSup(SAggSupporter* pAggSup, SqlFunctionCtx* pCtx, int32_t numOfOutput, size_t keyBufSize, const char* pKey) { - int32_t code = 0; + int32_t code = 0; _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); pAggSup->currentPageId = -1; @@ -4294,42 +4208,6 @@ SArray* extractColumnInfo(SNodeList* pNodeList) { return pList; } -#if 0 -STsdbReader* doCreateDataReader(STableScanPhysiNode* pTableScanNode, SReadHandle* pHandle, - STableListInfo* pTableListInfo, const char* idstr) { - int32_t code = getTableList(pHandle->meta, pHandle->vnode, &pTableScanNode->scan, pTableListInfo); - if (code != TSDB_CODE_SUCCESS) { - goto _error; - } - - if (taosArrayGetSize(pTableListInfo->pTableList) == 0) { - code = 0; - qDebug("no table qualified for query, %s", idstr); - goto _error; - } - - SQueryTableDataCond cond = {0}; - code = initQueryTableDataCond(&cond, pTableScanNode); - if (code != TSDB_CODE_SUCCESS) { - goto _error; - } - - STsdbReader* pReader; - code = tsdbReaderOpen(pHandle->vnode, &cond, pTableListInfo->pTableList, &pReader, idstr); - if (code != TSDB_CODE_SUCCESS) { - goto _error; - } - - cleanupQueryTableDataCond(&cond); - - return pReader; - -_error: - terrno = code; - return NULL; -} -#endif - static int32_t extractTbscanInStreamOpTree(SOperatorInfo* pOperator, STableScanInfo** ppInfo) { if (pOperator->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) { if (pOperator->numOfDownstream == 0) { diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index d4c98adb7c..b3d865f591 100644 --- 
a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -1219,7 +1219,7 @@ static void setBlockGroupId(SOperatorInfo* pOperator, SSDataBlock* pBlock, int32 static int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock) { SDataBlockInfo* pBlockInfo = &pInfo->pRes->info; SOperatorInfo* pOperator = pInfo->pStreamScanOp; - SExecTaskInfo* pTaskInfo = pInfo->pStreamScanOp->pTaskInfo; + SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; blockDataEnsureCapacity(pInfo->pRes, pBlock->info.rows); @@ -1228,7 +1228,7 @@ static int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock pInfo->pRes->info.type = STREAM_NORMAL; pInfo->pRes->info.version = pBlock->info.version; - uint64_t* groupIdPre = taosHashGet(pOperator->pTaskInfo->tableqinfoList.map, &pBlock->info.uid, sizeof(int64_t)); + uint64_t* groupIdPre = taosHashGet(pTaskInfo->tableqinfoList.map, &pBlock->info.uid, sizeof(int64_t)); if (groupIdPre) { pInfo->pRes->info.groupId = *groupIdPre; } else { @@ -1334,9 +1334,9 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) { } } else if (ret.fetchType == FETCH_TYPE__META) { ASSERT(0); -// pTaskInfo->streamInfo.lastStatus = ret.offset; -// pTaskInfo->streamInfo.metaBlk = ret.meta; -// return NULL; + // pTaskInfo->streamInfo.lastStatus = ret.offset; + // pTaskInfo->streamInfo.metaBlk = ret.meta; + // return NULL; } else if (ret.fetchType == FETCH_TYPE__NONE) { pTaskInfo->streamInfo.lastStatus = ret.offset; ASSERT(pTaskInfo->streamInfo.lastStatus.version >= pTaskInfo->streamInfo.prepareStatus.version); @@ -1554,14 +1554,14 @@ static SArray* extractTableIdList(const STableListInfo* pTableGroupInfo) { } static SSDataBlock* doRawScan(SOperatorInfo* pOperator) { -// NOTE: this operator does never check if current status is done or not - SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; + // NOTE: this operator does never check if current status is done or not + SExecTaskInfo* pTaskInfo = 
pOperator->pTaskInfo; SStreamRawScanInfo* pInfo = pOperator->info; - pTaskInfo->streamInfo.metaRsp.metaRspLen = 0; // use metaRspLen !=0 to judge if data is meta + pTaskInfo->streamInfo.metaRsp.metaRspLen = 0; // use metaRspLen !=0 to judge if data is meta pTaskInfo->streamInfo.metaRsp.metaRsp = NULL; qDebug("tmqsnap doRawScan called"); - if(pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__SNAPSHOT_DATA){ + if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__SNAPSHOT_DATA) { SSDataBlock* pBlock = &pInfo->pRes; if (pInfo->dataReader && tsdbNextDataBlock(pInfo->dataReader)) { @@ -1585,42 +1585,38 @@ static SSDataBlock* doRawScan(SOperatorInfo* pOperator) { } SMetaTableInfo mtInfo = getUidfromSnapShot(pInfo->sContext); - if (mtInfo.uid == 0){ //read snapshot done, change to get data from wal + if (mtInfo.uid == 0) { // read snapshot done, change to get data from wal qDebug("tmqsnap read snapshot done, change to get data from wal"); pTaskInfo->streamInfo.prepareStatus.uid = mtInfo.uid; pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__LOG; pTaskInfo->streamInfo.lastStatus.version = pInfo->sContext->snapVersion; - tDeleteSSchemaWrapper(pTaskInfo->streamInfo.schema); - }else{ + } else { pTaskInfo->streamInfo.prepareStatus.uid = mtInfo.uid; pTaskInfo->streamInfo.prepareStatus.ts = INT64_MIN; qDebug("tmqsnap change get data uid:%ld", mtInfo.uid); qStreamPrepareScan(pTaskInfo, &pTaskInfo->streamInfo.prepareStatus, pInfo->sContext->subType); - strcpy(pTaskInfo->streamInfo.tbName, mtInfo.tbName); - tDeleteSSchemaWrapper(pTaskInfo->streamInfo.schema); - pTaskInfo->streamInfo.schema = mtInfo.schema; } qDebug("tmqsnap stream scan tsdb return null"); return NULL; - }else if(pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__SNAPSHOT_META){ - SSnapContext *sContext = pInfo->sContext; - void* data = NULL; - int32_t dataLen = 0; - int16_t type = 0; - int64_t uid = 0; - if(getMetafromSnapShot(sContext, &data, &dataLen, &type, &uid) < 0){ + } else if 
(pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__SNAPSHOT_META) { + SSnapContext* sContext = pInfo->sContext; + void* data = NULL; + int32_t dataLen = 0; + int16_t type = 0; + int64_t uid = 0; + if (getMetafromSnapShot(sContext, &data, &dataLen, &type, &uid) < 0) { qError("tmqsnap getMetafromSnapShot error"); taosMemoryFreeClear(data); return NULL; } - if(!sContext->queryMetaOrData){ // change to get data next poll request + if (!sContext->queryMetaOrData) { // change to get data next poll request pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__SNAPSHOT_META; pTaskInfo->streamInfo.lastStatus.uid = uid; pTaskInfo->streamInfo.metaRsp.rspOffset.type = TMQ_OFFSET__SNAPSHOT_DATA; pTaskInfo->streamInfo.metaRsp.rspOffset.uid = 0; pTaskInfo->streamInfo.metaRsp.rspOffset.ts = INT64_MIN; - }else{ + } else { pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__SNAPSHOT_META; pTaskInfo->streamInfo.lastStatus.uid = uid; pTaskInfo->streamInfo.metaRsp.rspOffset = pTaskInfo->streamInfo.lastStatus; @@ -1631,44 +1627,44 @@ static SSDataBlock* doRawScan(SOperatorInfo* pOperator) { return NULL; } -// else if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__LOG) { -// int64_t fetchVer = pTaskInfo->streamInfo.prepareStatus.version + 1; -// -// while(1){ -// if (tqFetchLog(pInfo->tqReader->pWalReader, pInfo->sContext->withMeta, &fetchVer, &pInfo->pCkHead) < 0) { -// qDebug("tmqsnap tmq poll: consumer log end. 
offset %" PRId64, fetchVer); -// pTaskInfo->streamInfo.lastStatus.version = fetchVer; -// pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__LOG; -// return NULL; -// } -// SWalCont* pHead = &pInfo->pCkHead->head; -// qDebug("tmqsnap tmq poll: consumer log offset %" PRId64 " msgType %d", fetchVer, pHead->msgType); -// -// if (pHead->msgType == TDMT_VND_SUBMIT) { -// SSubmitReq* pCont = (SSubmitReq*)&pHead->body; -// tqReaderSetDataMsg(pInfo->tqReader, pCont, 0); -// SSDataBlock* block = tqLogScanExec(pInfo->sContext->subType, pInfo->tqReader, pInfo->pFilterOutTbUid, &pInfo->pRes); -// if(block){ -// pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__LOG; -// pTaskInfo->streamInfo.lastStatus.version = fetchVer; -// qDebug("tmqsnap fetch data msg, ver:%" PRId64 ", type:%d", pHead->version, pHead->msgType); -// return block; -// }else{ -// fetchVer++; -// } -// } else{ -// ASSERT(pInfo->sContext->withMeta); -// ASSERT(IS_META_MSG(pHead->msgType)); -// qDebug("tmqsnap fetch meta msg, ver:%" PRId64 ", type:%d", pHead->version, pHead->msgType); -// pTaskInfo->streamInfo.metaRsp.rspOffset.version = fetchVer; -// pTaskInfo->streamInfo.metaRsp.rspOffset.type = TMQ_OFFSET__LOG; -// pTaskInfo->streamInfo.metaRsp.resMsgType = pHead->msgType; -// pTaskInfo->streamInfo.metaRsp.metaRspLen = pHead->bodyLen; -// pTaskInfo->streamInfo.metaRsp.metaRsp = taosMemoryMalloc(pHead->bodyLen); -// memcpy(pTaskInfo->streamInfo.metaRsp.metaRsp, pHead->body, pHead->bodyLen); -// return NULL; -// } -// } + // else if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__LOG) { + // int64_t fetchVer = pTaskInfo->streamInfo.prepareStatus.version + 1; + // + // while(1){ + // if (tqFetchLog(pInfo->tqReader->pWalReader, pInfo->sContext->withMeta, &fetchVer, &pInfo->pCkHead) < 0) { + // qDebug("tmqsnap tmq poll: consumer log end. 
offset %" PRId64, fetchVer); + // pTaskInfo->streamInfo.lastStatus.version = fetchVer; + // pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__LOG; + // return NULL; + // } + // SWalCont* pHead = &pInfo->pCkHead->head; + // qDebug("tmqsnap tmq poll: consumer log offset %" PRId64 " msgType %d", fetchVer, pHead->msgType); + // + // if (pHead->msgType == TDMT_VND_SUBMIT) { + // SSubmitReq* pCont = (SSubmitReq*)&pHead->body; + // tqReaderSetDataMsg(pInfo->tqReader, pCont, 0); + // SSDataBlock* block = tqLogScanExec(pInfo->sContext->subType, pInfo->tqReader, pInfo->pFilterOutTbUid, + // &pInfo->pRes); if(block){ + // pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__LOG; + // pTaskInfo->streamInfo.lastStatus.version = fetchVer; + // qDebug("tmqsnap fetch data msg, ver:%" PRId64 ", type:%d", pHead->version, pHead->msgType); + // return block; + // }else{ + // fetchVer++; + // } + // } else{ + // ASSERT(pInfo->sContext->withMeta); + // ASSERT(IS_META_MSG(pHead->msgType)); + // qDebug("tmqsnap fetch meta msg, ver:%" PRId64 ", type:%d", pHead->version, pHead->msgType); + // pTaskInfo->streamInfo.metaRsp.rspOffset.version = fetchVer; + // pTaskInfo->streamInfo.metaRsp.rspOffset.type = TMQ_OFFSET__LOG; + // pTaskInfo->streamInfo.metaRsp.resMsgType = pHead->msgType; + // pTaskInfo->streamInfo.metaRsp.metaRspLen = pHead->bodyLen; + // pTaskInfo->streamInfo.metaRsp.metaRsp = taosMemoryMalloc(pHead->bodyLen); + // memcpy(pTaskInfo->streamInfo.metaRsp.metaRsp, pHead->body, pHead->bodyLen); + // return NULL; + // } + // } return NULL; } @@ -1689,7 +1685,7 @@ SOperatorInfo* createRawScanOperatorInfo(SReadHandle* pHandle, SExecTaskInfo* pT // create tq reader SStreamRawScanInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamRawScanInfo)); - SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); + SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pInfo == NULL || pOperator == NULL) { terrno = TSDB_CODE_QRY_OUT_OF_MEMORY; return NULL; @@ -1699,13 
+1695,12 @@ SOperatorInfo* createRawScanOperatorInfo(SReadHandle* pHandle, SExecTaskInfo* pT pInfo->sContext = pHandle->sContext; pOperator->name = "RawStreamScanOperator"; -// pOperator->blocking = false; -// pOperator->status = OP_NOT_OPENED; + // pOperator->blocking = false; + // pOperator->status = OP_NOT_OPENED; pOperator->info = pInfo; pOperator->pTaskInfo = pTaskInfo; - pOperator->fpSet = createOperatorFpSet(NULL, doRawScan, NULL, NULL, destroyRawScanOperatorInfo, - NULL, NULL, NULL); + pOperator->fpSet = createOperatorFpSet(NULL, doRawScan, NULL, NULL, destroyRawScanOperatorInfo, NULL, NULL, NULL); return pOperator; } @@ -1724,7 +1719,7 @@ static void destroyStreamScanOperatorInfo(void* param) { } if (pStreamScan->pPseudoExpr) { destroyExprInfo(pStreamScan->pPseudoExpr, pStreamScan->numOfPseudoExpr); - taosMemoryFreeClear(pStreamScan->pPseudoExpr); + taosMemoryFree(pStreamScan->pPseudoExpr); } updateInfoDestroy(pStreamScan->pUpdateInfo); From c6026f3ac58706156791e696abc574dde346e58e Mon Sep 17 00:00:00 2001 From: tomchon Date: Wed, 31 Aug 2022 11:35:27 +0800 Subject: [PATCH 48/72] test: modify checkpackages scritps --- packaging/testpackage.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/testpackage.sh b/packaging/testpackage.sh index 4b6264db2b..758c554178 100755 --- a/packaging/testpackage.sh +++ b/packaging/testpackage.sh @@ -108,7 +108,7 @@ if [[ ${packgeName} =~ "deb" ]];then echo "./debAuto.sh ${packgeName}" && chmod 755 debAuto.sh && ./debAuto.sh ${packgeName} else echo "dpkg -i ${packgeName}" && dpkg -i ${packgeName} - + fi elif [[ ${packgeName} =~ "rpm" ]];then cd ${installPath} echo "rpm ${packgeName}" && rpm -ivh ${packgeName} --quiet From 2ccc7471af6e4104a4b0ebe1ca007ab451b8e881 Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Wed, 31 Aug 2022 11:36:59 +0800 Subject: [PATCH 49/72] enh: query get dbcfg optimization --- include/libs/function/functionMgt.h | 3 ++- source/libs/function/src/functionMgt.c | 8 ++++++++ 
source/libs/parser/src/parAstParser.c | 28 +++++++++++++++++++------- source/libs/parser/src/parTranslater.c | 23 ++++++++++++--------- 4 files changed, 45 insertions(+), 17 deletions(-) diff --git a/include/libs/function/functionMgt.h b/include/libs/function/functionMgt.h index 741b0fddeb..c9c19579cb 100644 --- a/include/libs/function/functionMgt.h +++ b/include/libs/function/functionMgt.h @@ -176,7 +176,8 @@ int32_t fmGetFuncInfo(SFunctionNode* pFunc, char* pMsg, int32_t msgLen); EFuncReturnRows fmGetFuncReturnRows(SFunctionNode* pFunc); -bool fmIsBuiltinFunc(const char* pFunc); +bool fmIsBuiltinFunc(const char* pFunc); +EFunctionType fmGetFuncType(const char* pFunc); bool fmIsAggFunc(int32_t funcId); bool fmIsScalarFunc(int32_t funcId); diff --git a/source/libs/function/src/functionMgt.c b/source/libs/function/src/functionMgt.c index 152a970c48..26735fa263 100644 --- a/source/libs/function/src/functionMgt.c +++ b/source/libs/function/src/functionMgt.c @@ -101,6 +101,14 @@ bool fmIsBuiltinFunc(const char* pFunc) { return NULL != taosHashGet(gFunMgtService.pFuncNameHashTable, pFunc, strlen(pFunc)); } +EFunctionType fmGetFuncType(const char* pFunc) { + void* pVal = taosHashGet(gFunMgtService.pFuncNameHashTable, pFunc, strlen(pFunc)); + if (NULL != pVal) { + return funcMgtBuiltins[*(int32_t*)pVal].type; + } + return FUNCTION_TYPE_UDF; +} + EFuncDataRequired fmFuncDataRequired(SFunctionNode* pFunc, STimeWindow* pTimeWindow) { if (fmIsUserDefinedFunc(pFunc->funcId) || pFunc->funcId < 0 || pFunc->funcId >= funcMgtBuiltinsNum) { return FUNC_DATA_REQUIRED_DATA_LOAD; diff --git a/source/libs/parser/src/parAstParser.c b/source/libs/parser/src/parAstParser.c index 82b5842663..207be939ca 100644 --- a/source/libs/parser/src/parAstParser.c +++ b/source/libs/parser/src/parAstParser.c @@ -97,16 +97,23 @@ typedef struct SCollectMetaKeyCxt { typedef struct SCollectMetaKeyFromExprCxt { SCollectMetaKeyCxt* pComCxt; + bool hasLastRow; int32_t errCode; } SCollectMetaKeyFromExprCxt; 
static int32_t collectMetaKeyFromQuery(SCollectMetaKeyCxt* pCxt, SNode* pStmt); static EDealRes collectMetaKeyFromFunction(SCollectMetaKeyFromExprCxt* pCxt, SFunctionNode* pFunc) { - if (fmIsBuiltinFunc(pFunc->functionName)) { - return DEAL_RES_CONTINUE; + switch (fmGetFuncType(pFunc->functionName)) { + case FUNCTION_TYPE_LAST_ROW: + pCxt->hasLastRow = true; + break; + case FUNCTION_TYPE_UDF: + pCxt->errCode = reserveUdfInCache(pFunc->functionName, pCxt->pComCxt->pMetaCache); + break; + default: + break; } - pCxt->errCode = reserveUdfInCache(pFunc->functionName, pCxt->pComCxt->pMetaCache); return TSDB_CODE_SUCCESS == pCxt->errCode ? DEAL_RES_CONTINUE : DEAL_RES_ERROR; } @@ -136,9 +143,6 @@ static int32_t collectMetaKeyFromRealTableImpl(SCollectMetaKeyCxt* pCxt, const c if (TSDB_CODE_SUCCESS == code && (0 == strcmp(pTable, TSDB_INS_TABLE_DNODE_VARIABLES))) { code = reserveDnodeRequiredInCache(pCxt->pMetaCache); } - if (TSDB_CODE_SUCCESS == code) { - code = reserveDbCfgInCache(pCxt->pParseCxt->acctId, pDb, pCxt->pMetaCache); - } return code; } @@ -185,9 +189,19 @@ static int32_t collectMetaKeyFromSetOperator(SCollectMetaKeyCxt* pCxt, SSetOpera return code; } +static int32_t reserveDbCfgForLastRow(SCollectMetaKeyCxt* pCxt, SNode* pTable) { + if (NULL == pTable || QUERY_NODE_REAL_TABLE != nodeType(pTable)) { + return TSDB_CODE_SUCCESS; + } + return reserveDbCfgInCache(pCxt->pParseCxt->acctId, ((SRealTableNode*)pTable)->table.dbName, pCxt->pMetaCache); +} + static int32_t collectMetaKeyFromSelect(SCollectMetaKeyCxt* pCxt, SSelectStmt* pStmt) { - SCollectMetaKeyFromExprCxt cxt = {.pComCxt = pCxt, .errCode = TSDB_CODE_SUCCESS}; + SCollectMetaKeyFromExprCxt cxt = {.pComCxt = pCxt, .hasLastRow = false, .errCode = TSDB_CODE_SUCCESS}; nodesWalkSelectStmt(pStmt, SQL_CLAUSE_FROM, collectMetaKeyFromExprImpl, &cxt); + if (TSDB_CODE_SUCCESS == cxt.errCode && cxt.hasLastRow) { + cxt.errCode = reserveDbCfgForLastRow(pCxt, pStmt->pFromTable); + } return cxt.errCode; } diff --git 
a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 51a4295ce5..49e3bd3349 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -2160,15 +2160,16 @@ static int32_t setTableIndex(STranslateContext* pCxt, SName* pName, SRealTableNo return TSDB_CODE_SUCCESS; } -static int32_t setTableCacheLastMode(STranslateContext* pCxt, SName* pName, SRealTableNode* pRealTable) { - if (TSDB_SYSTEM_TABLE == pRealTable->pMeta->tableType) { +static int32_t setTableCacheLastMode(STranslateContext* pCxt, SSelectStmt* pSelect) { + if (!pSelect->hasLastRowFunc || QUERY_NODE_REAL_TABLE != nodeType(pSelect->pFromTable)) { return TSDB_CODE_SUCCESS; } - SDbCfgInfo dbCfg = {0}; - int32_t code = getDBCfg(pCxt, pRealTable->table.dbName, &dbCfg); + SRealTableNode* pTable = (SRealTableNode*)pSelect->pFromTable; + SDbCfgInfo dbCfg = {0}; + int32_t code = getDBCfg(pCxt, pTable->table.dbName, &dbCfg); if (TSDB_CODE_SUCCESS == code) { - pRealTable->cacheLastMode = dbCfg.cacheLast; + pTable->cacheLastMode = dbCfg.cacheLast; } return code; } @@ -2192,9 +2193,6 @@ static int32_t translateTable(STranslateContext* pCxt, SNode* pTable) { if (TSDB_CODE_SUCCESS == code) { code = setTableIndex(pCxt, &name, pRealTable); } - if (TSDB_CODE_SUCCESS == code) { - code = setTableCacheLastMode(pCxt, &name, pRealTable); - } } if (TSDB_CODE_SUCCESS == code) { pRealTable->table.precision = pRealTable->pMeta->tableInfo.precision; @@ -2273,10 +2271,14 @@ static SNode* createMultiResFunc(SFunctionNode* pSrcFunc, SExprNode* pExpr) { if (QUERY_NODE_COLUMN == nodeType(pExpr)) { SColumnNode* pCol = (SColumnNode*)pExpr; len = snprintf(buf, sizeof(buf), "%s(%s.%s)", pSrcFunc->functionName, pCol->tableAlias, pCol->colName); + strncpy(pFunc->node.aliasName, buf, TMIN(len, sizeof(pFunc->node.aliasName) - 1)); + len = snprintf(buf, sizeof(buf), "%s(%s)", pSrcFunc->functionName, pCol->colName); + strncpy(pFunc->node.userAlias, buf, TMIN(len, 
sizeof(pFunc->node.userAlias) - 1)); } else { len = snprintf(buf, sizeof(buf), "%s(%s)", pSrcFunc->functionName, pExpr->aliasName); + strncpy(pFunc->node.aliasName, buf, TMIN(len, sizeof(pFunc->node.aliasName) - 1)); + strncpy(pFunc->node.userAlias, buf, TMIN(len, sizeof(pFunc->node.userAlias) - 1)); } - strncpy(pFunc->node.aliasName, buf, TMIN(len, sizeof(pFunc->node.aliasName) - 1)); return (SNode*)pFunc; } @@ -3140,6 +3142,9 @@ static int32_t translateSelectFrom(STranslateContext* pCxt, SSelectStmt* pSelect if (TSDB_CODE_SUCCESS == code) { code = replaceOrderByAliasForSelect(pCxt, pSelect); } + if (TSDB_CODE_SUCCESS == code) { + code = setTableCacheLastMode(pCxt, pSelect); + } return code; } From 63b2135b1cad24335542226a54fa0d523c85710a Mon Sep 17 00:00:00 2001 From: tomchon Date: Wed, 31 Aug 2022 11:42:25 +0800 Subject: [PATCH 50/72] test: modify checkpackages scritps --- packaging/testpackage.sh | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/packaging/testpackage.sh b/packaging/testpackage.sh index 758c554178..b300041e29 100755 --- a/packaging/testpackage.sh +++ b/packaging/testpackage.sh @@ -130,12 +130,12 @@ elif [[ ${packgeName} =~ "tar" ]];then echo "check installPackage File" + + cd ${oriInstallPath}/${originTdpPath} && tree > ${installPath}/base_${originversion}_checkfile + cd ${installPath}/${tdPath} && tree > ${installPath}/now_${version}_checkfile + cd ${installPath} - - tree ${oriInstallPath}/${originTdpPath} > ${oriInstallPath}/${originPackageName}_checkfile - tree ${installPath}/${tdPath} > ${installPath}/${packgeName}_checkfile - - diff ${installPath}/${packgeName}_checkfile ${oriInstallPath}/${originPackageName}_checkfile > ${installPath}/diffFile.log + diff ${installPath}/base_${originversion}_checkfile ${installPath}/now_${version}_checkfile > ${installPath}/diffFile.log diffNumbers=`cat ${installPath}/diffFile.log |wc -l ` if [ ${diffNumbers} != 0 ];then echo "The number and names of files have changed from the 
previous installation package" From a76cd4601024447edfb06ad91a06421d1143171a Mon Sep 17 00:00:00 2001 From: tomchon Date: Wed, 31 Aug 2022 11:46:51 +0800 Subject: [PATCH 51/72] test: modify checkpackages scritps --- packaging/testpackage.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/testpackage.sh b/packaging/testpackage.sh index b300041e29..d2ee0cad0b 100755 --- a/packaging/testpackage.sh +++ b/packaging/testpackage.sh @@ -115,7 +115,7 @@ elif [[ ${packgeName} =~ "rpm" ]];then elif [[ ${packgeName} =~ "tar" ]];then cd ${oriInstallPath} if [ ! -f {originPackageName} ];then - sshpass -p ${password} scp 192.168.1.131:/nas/TDengine3/v${originversion}/community${originPackageName} . + sshpass -p ${password} scp 192.168.1.131:/nas/TDengine3/v${originversion}/community/${originPackageName} . fi echo "tar -xvf ${originPackageName}" && tar -xvf ${originPackageName} From d59edad61046b387f889ea0914c523d00b452c52 Mon Sep 17 00:00:00 2001 From: tomchon Date: Wed, 31 Aug 2022 11:52:14 +0800 Subject: [PATCH 52/72] test: modify checkpackages scritps --- packaging/testpackage.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/packaging/testpackage.sh b/packaging/testpackage.sh index d2ee0cad0b..5332f84ddb 100755 --- a/packaging/testpackage.sh +++ b/packaging/testpackage.sh @@ -111,7 +111,9 @@ if [[ ${packgeName} =~ "deb" ]];then fi elif [[ ${packgeName} =~ "rpm" ]];then cd ${installPath} - echo "rpm ${packgeName}" && rpm -ivh ${packgeName} --quiet + sudo rpm -e tdengine + sudo rpm -e taostools + echo "rpm -ivh ${packgeName} --quiet " && rpm -ivh ${packgeName} --quiet elif [[ ${packgeName} =~ "tar" ]];then cd ${oriInstallPath} if [ ! 
-f {originPackageName} ];then From 500cf0c758747ec5f92d52cdbcea3a3af2900273 Mon Sep 17 00:00:00 2001 From: afwerar <1296468573@qq.com> Date: Wed, 31 Aug 2022 13:30:35 +0800 Subject: [PATCH 53/72] feature: shell csv rfc 4180 --- tests/test/c/tmqSim.c | 30 +++++++++++++++++++++++++----- tools/shell/src/shellEngine.c | 5 ++++- 2 files changed, 29 insertions(+), 6 deletions(-) diff --git a/tests/test/c/tmqSim.c b/tests/test/c/tmqSim.c index d39ade7e91..71b31ba107 100644 --- a/tests/test/c/tmqSim.c +++ b/tests/test/c/tmqSim.c @@ -492,7 +492,6 @@ static char* shellFormatTimestamp(char* buf, int64_t val, int32_t precision) { static void shellDumpFieldToFile(TdFilePtr pFile, const char* val, TAOS_FIELD* field, int32_t length, int32_t precision) { if (val == NULL) { - taosFprintfFile(pFile, "%s", TSDB_DATA_NULL_STR); return; } @@ -540,13 +539,34 @@ static void shellDumpFieldToFile(TdFilePtr pFile, const char* val, TAOS_FIELD* f case TSDB_DATA_TYPE_BINARY: case TSDB_DATA_TYPE_NCHAR: case TSDB_DATA_TYPE_JSON: - memcpy(buf, val, length); - buf[length] = 0; - taosFprintfFile(pFile, "\'%s\'", buf); + { + char quotationStr[2]; + int32_t bufIndex = 0; + quotationStr[0] = 0; + quotationStr[1] = 0; + for (int32_t i = 0; i < length; i++) { + buf[bufIndex] = val[i]; + bufIndex++; + if (val[i] == '\"') { + buf[bufIndex] = val[i]; + bufIndex++; + quotationStr[0] = '\"'; + } + if (val[i] == ',') { + quotationStr[0] = '\"'; + } + } + buf[bufIndex] = 0; + if (length == 0) { + quotationStr[0] = '\"'; + } + + taosFprintfFile(pFile, "%s%s%s", quotationStr, buf, quotationStr); + } break; case TSDB_DATA_TYPE_TIMESTAMP: shellFormatTimestamp(buf, *(int64_t*)val, precision); - taosFprintfFile(pFile, "'%s'", buf); + taosFprintfFile(pFile, "%s", buf); break; default: break; diff --git a/tools/shell/src/shellEngine.c b/tools/shell/src/shellEngine.c index 3b760a097f..e54b98a0a6 100644 --- a/tools/shell/src/shellEngine.c +++ b/tools/shell/src/shellEngine.c @@ -266,7 +266,6 @@ char 
*shellFormatTimestamp(char *buf, int64_t val, int32_t precision) { void shellDumpFieldToFile(TdFilePtr pFile, const char *val, TAOS_FIELD *field, int32_t length, int32_t precision) { if (val == NULL) { - taosFprintfFile(pFile, "%s", TSDB_DATA_NULL_STR); return; } @@ -332,6 +331,10 @@ void shellDumpFieldToFile(TdFilePtr pFile, const char *val, TAOS_FIELD *field, i } } buf[bufIndex] = 0; + if (length == 0) { + quotationStr[0] = '\"'; + } + taosFprintfFile(pFile, "%s%s%s", quotationStr, buf, quotationStr); } break; From e9d1733e8c5d4ec434c2e72e47f0088202516dc9 Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Wed, 31 Aug 2022 13:27:08 +0800 Subject: [PATCH 54/72] fix tbname --- include/common/tmsg.h | 16 ++++ source/client/src/tmq.c | 13 ++- source/dnode/vnode/src/inc/tq.h | 16 ---- source/dnode/vnode/src/tq/tqExec.c | 3 + source/libs/executor/src/scanoperator.c | 116 +++++++++++++++--------- 5 files changed, 103 insertions(+), 61 deletions(-) diff --git a/include/common/tmsg.h b/include/common/tmsg.h index 7728b0b5eb..41bd11d347 100644 --- a/include/common/tmsg.h +++ b/include/common/tmsg.h @@ -2626,6 +2626,22 @@ typedef struct { }; } STqOffsetVal; +static FORCE_INLINE void tqOffsetResetToData(STqOffsetVal* pOffsetVal, int64_t uid, int64_t ts) { + pOffsetVal->type = TMQ_OFFSET__SNAPSHOT_DATA; + pOffsetVal->uid = uid; + pOffsetVal->ts = ts; +} + +static FORCE_INLINE void tqOffsetResetToMeta(STqOffsetVal* pOffsetVal, int64_t uid) { + pOffsetVal->type = TMQ_OFFSET__SNAPSHOT_META; + pOffsetVal->uid = uid; +} + +static FORCE_INLINE void tqOffsetResetToLog(STqOffsetVal* pOffsetVal, int64_t ver) { + pOffsetVal->type = TMQ_OFFSET__LOG; + pOffsetVal->version = ver; +} + int32_t tEncodeSTqOffsetVal(SEncoder* pEncoder, const STqOffsetVal* pOffsetVal); int32_t tDecodeSTqOffsetVal(SDecoder* pDecoder, STqOffsetVal* pOffsetVal); int32_t tFormatOffset(char* buf, int32_t maxLen, const STqOffsetVal* pVal); diff --git a/source/client/src/tmq.c b/source/client/src/tmq.c index 
29d509c27c..f08f54ef4b 100644 --- a/source/client/src/tmq.c +++ b/source/client/src/tmq.c @@ -811,8 +811,19 @@ int32_t tmq_subscription(tmq_t* tmq, tmq_list_t** topics) { } int32_t tmq_unsubscribe(tmq_t* tmq) { + int32_t rsp; + int32_t retryCnt = 0; tmq_list_t* lst = tmq_list_new(); - int32_t rsp = tmq_subscribe(tmq, lst); + while (1) { + rsp = tmq_subscribe(tmq, lst); + if (rsp != TSDB_CODE_MND_CONSUMER_NOT_READY || retryCnt > 5) { + break; + } else { + retryCnt++; + taosMsleep(500); + } + } + tmq_list_destroy(lst); return rsp; } diff --git a/source/dnode/vnode/src/inc/tq.h b/source/dnode/vnode/src/inc/tq.h index 753cdc603e..19dd321814 100644 --- a/source/dnode/vnode/src/inc/tq.h +++ b/source/dnode/vnode/src/inc/tq.h @@ -175,22 +175,6 @@ void tqTableSink(SStreamTask* pTask, void* vnode, int64_t ver, void* data); char* tqOffsetBuildFName(const char* path, int32_t ver); int32_t tqOffsetRestoreFromFile(STqOffsetStore* pStore, const char* fname); -static FORCE_INLINE void tqOffsetResetToData(STqOffsetVal* pOffsetVal, int64_t uid, int64_t ts) { - pOffsetVal->type = TMQ_OFFSET__SNAPSHOT_DATA; - pOffsetVal->uid = uid; - pOffsetVal->ts = ts; -} - -static FORCE_INLINE void tqOffsetResetToMeta(STqOffsetVal* pOffsetVal, int64_t uid) { - pOffsetVal->type = TMQ_OFFSET__SNAPSHOT_META; - pOffsetVal->uid = uid; -} - -static FORCE_INLINE void tqOffsetResetToLog(STqOffsetVal* pOffsetVal, int64_t ver) { - pOffsetVal->type = TMQ_OFFSET__LOG; - pOffsetVal->version = ver; -} - // tqStream int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask); diff --git a/source/dnode/vnode/src/tq/tqExec.c b/source/dnode/vnode/src/tq/tqExec.c index bfd23f1a1a..dd98fe3194 100644 --- a/source/dnode/vnode/src/tq/tqExec.c +++ b/source/dnode/vnode/src/tq/tqExec.c @@ -137,6 +137,9 @@ int64_t tqScan(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, SMqMetaRsp* if (tqAddTbNameToRsp(pTq, uid, pRsp) < 0) { continue; } + } else { + char* tbName = strdup(qExtractTbnameFromTask(task)); + 
taosArrayPush(pRsp->blockTbName, &tbName); } } if (pRsp->withSchema) { diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index b3d865f591..a6b5d68a84 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -1276,6 +1276,74 @@ static int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock return 0; } +static SSDataBlock* doQueueScan(SOperatorInfo* pOperator) { + SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; + SStreamScanInfo* pInfo = pOperator->info; + + qDebug("stream scan called"); + if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__SNAPSHOT_DATA) { + SSDataBlock* pResult = doTableScan(pInfo->pTableScanOp); + if (pResult && pResult->info.rows > 0) { + qDebug("stream scan tsdb return %d rows", pResult->info.rows); + return pResult; + } else { + STableScanInfo* pTSInfo = pInfo->pTableScanOp->info; + tsdbReaderClose(pTSInfo->dataReader); + pTSInfo->dataReader = NULL; + tqOffsetResetToLog(&pTaskInfo->streamInfo.prepareStatus, pTaskInfo->streamInfo.snapshotVer); + qDebug("stream scan tsdb over, switch to wal ver %d", pTaskInfo->streamInfo.snapshotVer + 1); + if (tqSeekVer(pInfo->tqReader, pTaskInfo->streamInfo.snapshotVer + 1) < 0) { + return NULL; + } + ASSERT(pInfo->tqReader->pWalReader->curVersion == pTaskInfo->streamInfo.snapshotVer + 1); + } + } + + if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__LOG) { + while (1) { + SFetchRet ret = {0}; + tqNextBlock(pInfo->tqReader, &ret); + if (ret.fetchType == FETCH_TYPE__DATA) { + blockDataCleanup(pInfo->pRes); + if (setBlockIntoRes(pInfo, &ret.data) < 0) { + ASSERT(0); + } + // TODO clean data block + if (pInfo->pRes->info.rows > 0) { + qDebug("stream scan log return %d rows", pInfo->pRes->info.rows); + return pInfo->pRes; + } + } else if (ret.fetchType == FETCH_TYPE__META) { + ASSERT(0); + // pTaskInfo->streamInfo.lastStatus = ret.offset; + // pTaskInfo->streamInfo.metaBlk = ret.meta; + // 
return NULL; + } else if (ret.fetchType == FETCH_TYPE__NONE) { + pTaskInfo->streamInfo.lastStatus = ret.offset; + ASSERT(pTaskInfo->streamInfo.lastStatus.version >= pTaskInfo->streamInfo.prepareStatus.version); + ASSERT(pTaskInfo->streamInfo.lastStatus.version + 1 == pInfo->tqReader->pWalReader->curVersion); + char formatBuf[80]; + tFormatOffset(formatBuf, 80, &ret.offset); + qDebug("stream scan log return null, offset %s", formatBuf); + return NULL; + } else { + ASSERT(0); + } + } + } else if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__SNAPSHOT_DATA) { + SSDataBlock* pResult = doTableScan(pInfo->pTableScanOp); + if (pResult && pResult->info.rows > 0) { + qDebug("stream scan tsdb return %d rows", pResult->info.rows); + return pResult; + } + qDebug("stream scan tsdb return null"); + return NULL; + } else { + ASSERT(0); + return NULL; + } +} + static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) { // NOTE: this operator does never check if current status is done or not SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; @@ -1317,48 +1385,6 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) { } #endif - qDebug("stream scan called"); - if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__LOG) { - while (1) { - SFetchRet ret = {0}; - tqNextBlock(pInfo->tqReader, &ret); - if (ret.fetchType == FETCH_TYPE__DATA) { - blockDataCleanup(pInfo->pRes); - if (setBlockIntoRes(pInfo, &ret.data) < 0) { - ASSERT(0); - } - // TODO clean data block - if (pInfo->pRes->info.rows > 0) { - qDebug("stream scan log return %d rows", pInfo->pRes->info.rows); - return pInfo->pRes; - } - } else if (ret.fetchType == FETCH_TYPE__META) { - ASSERT(0); - // pTaskInfo->streamInfo.lastStatus = ret.offset; - // pTaskInfo->streamInfo.metaBlk = ret.meta; - // return NULL; - } else if (ret.fetchType == FETCH_TYPE__NONE) { - pTaskInfo->streamInfo.lastStatus = ret.offset; - ASSERT(pTaskInfo->streamInfo.lastStatus.version >= pTaskInfo->streamInfo.prepareStatus.version); - 
ASSERT(pTaskInfo->streamInfo.lastStatus.version + 1 == pInfo->tqReader->pWalReader->curVersion); - char formatBuf[80]; - tFormatOffset(formatBuf, 80, &ret.offset); - qDebug("stream scan log return null, offset %s", formatBuf); - return NULL; - } else { - ASSERT(0); - } - } - } else if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__SNAPSHOT_DATA) { - SSDataBlock* pResult = doTableScan(pInfo->pTableScanOp); - if (pResult && pResult->info.rows > 0) { - qDebug("stream scan tsdb return %d rows", pResult->info.rows); - return pResult; - } - qDebug("stream scan tsdb return null"); - return NULL; - } - if (pTaskInfo->streamInfo.recoverStep == STREAM_RECOVER_STEP__PREPARE) { STableScanInfo* pTSInfo = pInfo->pTableScanOp->info; memcpy(&pTSInfo->cond, &pTaskInfo->streamInfo.tableCond, sizeof(SQueryTableDataCond)); @@ -1810,6 +1836,7 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys pInfo->readHandle = *pHandle; pInfo->tableUid = pScanPhyNode->uid; + pTaskInfo->streamInfo.snapshotVer = pHandle->version; // set the extract column id to streamHandle tqReaderSetColIdList(pInfo->tqReader, pColIds); @@ -1853,8 +1880,9 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys pOperator->exprSupp.numOfExprs = taosArrayGetSize(pInfo->pRes->pDataBlock); pOperator->pTaskInfo = pTaskInfo; - pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doStreamScan, NULL, NULL, destroyStreamScanOperatorInfo, - NULL, NULL, NULL); + __optr_fn_t nextFn = pTaskInfo->execModel == OPTR_EXEC_MODEL_STREAM ? 
doStreamScan : doQueueScan; + pOperator->fpSet = + createOperatorFpSet(operatorDummyOpenFn, nextFn, NULL, NULL, destroyStreamScanOperatorInfo, NULL, NULL, NULL); return pOperator; From 1c99eb1d4d56086622c32abefdeb809660aeb471 Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Wed, 31 Aug 2022 15:27:41 +0800 Subject: [PATCH 55/72] fix set schema and tbname --- source/dnode/vnode/src/tq/tqExec.c | 4 ++-- source/libs/executor/src/executor.c | 4 ++++ source/libs/executor/src/scanoperator.c | 11 ++++++----- 3 files changed, 12 insertions(+), 7 deletions(-) diff --git a/source/dnode/vnode/src/tq/tqExec.c b/source/dnode/vnode/src/tq/tqExec.c index dd98fe3194..da596d07f9 100644 --- a/source/dnode/vnode/src/tq/tqExec.c +++ b/source/dnode/vnode/src/tq/tqExec.c @@ -85,11 +85,11 @@ int64_t tqScanData(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, STqOffs while (1) { SSDataBlock* pDataBlock = NULL; uint64_t ts = 0; - tqDebug("tmqsnap task start to execute"); + tqDebug("tmq task start to execute"); if (qExecTask(task, &pDataBlock, &ts) < 0) { ASSERT(0); } - tqDebug("tmqsnap task execute end, get %p", pDataBlock); + tqDebug("tmq task execute end, get %p", pDataBlock); if (pDataBlock) { tqAddBlockDataToRsp(pDataBlock, pRsp, pExec->numOfCols); diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c index 7e631ab3e9..f1ac9ef8b1 100644 --- a/source/libs/executor/src/executor.c +++ b/source/libs/executor/src/executor.c @@ -856,6 +856,10 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT tsdbReaderOpen(pInfo->vnode, &pTaskInfo->streamInfo.tableCond, pTaskInfo->tableqinfoList.pTableList, &pInfo->dataReader, NULL); + strcpy(pTaskInfo->streamInfo.tbName, mtInfo.tbName); + tDeleteSSchemaWrapper(pTaskInfo->streamInfo.schema); + pTaskInfo->streamInfo.schema = mtInfo.schema; + qDebug("tmqsnap qStreamPrepareScan snapshot data uid %ld ts %ld", mtInfo.uid, pOffset->ts); } else if (pOffset->type == TMQ_OFFSET__SNAPSHOT_META) { 
SStreamRawScanInfo* pInfo = pOperator->info; diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index a6b5d68a84..de6768b83a 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -1280,18 +1280,18 @@ static SSDataBlock* doQueueScan(SOperatorInfo* pOperator) { SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; SStreamScanInfo* pInfo = pOperator->info; - qDebug("stream scan called"); + qDebug("queue scan called"); if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__SNAPSHOT_DATA) { SSDataBlock* pResult = doTableScan(pInfo->pTableScanOp); if (pResult && pResult->info.rows > 0) { - qDebug("stream scan tsdb return %d rows", pResult->info.rows); + qDebug("queue scan tsdb return %d rows", pResult->info.rows); return pResult; } else { STableScanInfo* pTSInfo = pInfo->pTableScanOp->info; tsdbReaderClose(pTSInfo->dataReader); pTSInfo->dataReader = NULL; tqOffsetResetToLog(&pTaskInfo->streamInfo.prepareStatus, pTaskInfo->streamInfo.snapshotVer); - qDebug("stream scan tsdb over, switch to wal ver %d", pTaskInfo->streamInfo.snapshotVer + 1); + qDebug("queue scan tsdb over, switch to wal ver %d", pTaskInfo->streamInfo.snapshotVer + 1); if (tqSeekVer(pInfo->tqReader, pTaskInfo->streamInfo.snapshotVer + 1) < 0) { return NULL; } @@ -1310,7 +1310,7 @@ static SSDataBlock* doQueueScan(SOperatorInfo* pOperator) { } // TODO clean data block if (pInfo->pRes->info.rows > 0) { - qDebug("stream scan log return %d rows", pInfo->pRes->info.rows); + qDebug("queue scan log return %d rows", pInfo->pRes->info.rows); return pInfo->pRes; } } else if (ret.fetchType == FETCH_TYPE__META) { @@ -1324,7 +1324,7 @@ static SSDataBlock* doQueueScan(SOperatorInfo* pOperator) { ASSERT(pTaskInfo->streamInfo.lastStatus.version + 1 == pInfo->tqReader->pWalReader->curVersion); char formatBuf[80]; tFormatOffset(formatBuf, 80, &ret.offset); - qDebug("stream scan log return null, offset %s", formatBuf); + qDebug("queue 
scan log return null, offset %s", formatBuf); return NULL; } else { ASSERT(0); @@ -1349,6 +1349,7 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) { SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; SStreamScanInfo* pInfo = pOperator->info; + qDebug("stream scan called"); #if 0 SStreamState* pState = pTaskInfo->streamInfo.pState; if (pState) { From bbe2a6541291aafb1510c2317162244821cb23bb Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Wed, 31 Aug 2022 15:29:13 +0800 Subject: [PATCH 56/72] Code format --- .clang-format | 1 - 1 file changed, 1 deletion(-) diff --git a/.clang-format b/.clang-format index e58d518b3b..56ca83e724 100644 --- a/.clang-format +++ b/.clang-format @@ -88,4 +88,3 @@ Standard: Auto TabWidth: 8 UseTab: Never ... - From cac4cb12e67659cc74f17a8e22a221f32140def6 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Wed, 31 Aug 2022 15:36:53 +0800 Subject: [PATCH 57/72] remove assert --- source/libs/index/src/indexFstFile.c | 3 ++- source/libs/transport/src/thttp.c | 25 +++++++++-------- source/libs/transport/src/transSvr.c | 40 ++++++++++++++++++---------- 3 files changed, 40 insertions(+), 28 deletions(-) diff --git a/source/libs/index/src/indexFstFile.c b/source/libs/index/src/indexFstFile.c index 4f278c7af6..1900e50973 100644 --- a/source/libs/index/src/indexFstFile.c +++ b/source/libs/index/src/indexFstFile.c @@ -39,7 +39,8 @@ static void idxGenLRUKey(char* buf, const char* path, int32_t blockId) { } static int idxFileCtxDoWrite(IFileCtx* ctx, uint8_t* buf, int len) { if (ctx->type == TFILE) { - assert(len == taosWriteFile(ctx->file.pFile, buf, len)); + int nwr = taosWriteFile(ctx->file.pFile, buf, len); + assert(nwr == len); } else { memcpy(ctx->mem.buf + ctx->offset, buf, len); } diff --git a/source/libs/transport/src/thttp.c b/source/libs/transport/src/thttp.c index e880a5abdb..275e7b42cc 100644 --- a/source/libs/transport/src/thttp.c +++ b/source/libs/transport/src/thttp.c @@ -21,6 +21,7 @@ #include "taoserror.h" #include "tlog.h" +// 
clang-format on #define HTTP_RECV_BUF_SIZE 1024 @@ -29,7 +30,7 @@ typedef struct SHttpClient { uv_tcp_t tcp; uv_write_t req; uv_buf_t* wbuf; - char *rbuf; + char* rbuf; char* addr; uint16_t port; } SHttpClient; @@ -130,37 +131,36 @@ static void destroyHttpClient(SHttpClient* cli) { taosMemoryFree(cli->rbuf); taosMemoryFree(cli->addr); taosMemoryFree(cli); - } static void clientCloseCb(uv_handle_t* handle) { SHttpClient* cli = handle->data; destroyHttpClient(cli); } -static void clientAllocBuffCb(uv_handle_t *handle, size_t suggested_size, uv_buf_t *buf) { - SHttpClient* cli = handle->data; - buf->base = cli->rbuf; - buf->len = HTTP_RECV_BUF_SIZE; +static void clientAllocBuffCb(uv_handle_t* handle, size_t suggested_size, uv_buf_t* buf) { + SHttpClient* cli = handle->data; + buf->base = cli->rbuf; + buf->len = HTTP_RECV_BUF_SIZE; } -static void clientRecvCb(uv_stream_t* handle, ssize_t nread, const uv_buf_t *buf) { - SHttpClient* cli = handle->data; +static void clientRecvCb(uv_stream_t* handle, ssize_t nread, const uv_buf_t* buf) { + SHttpClient* cli = handle->data; if (nread < 0) { uError("http-report recv error:%s", uv_err_name(nread)); } else { uTrace("http-report succ to recv %d bytes, just ignore it", nread); } uv_close((uv_handle_t*)&cli->tcp, clientCloseCb); -} +} static void clientSentCb(uv_write_t* req, int32_t status) { SHttpClient* cli = req->data; if (status != 0) { terrno = TAOS_SYSTEM_ERROR(status); uError("http-report failed to send data %s", uv_strerror(status)); uv_close((uv_handle_t*)&cli->tcp, clientCloseCb); - return; + return; } else { uTrace("http-report succ to send data"); } - uv_read_start((uv_stream_t *)&cli->tcp, clientAllocBuffCb, clientRecvCb); + uv_read_start((uv_stream_t*)&cli->tcp, clientAllocBuffCb, clientRecvCb); } static void clientConnCb(uv_connect_t* req, int32_t status) { SHttpClient* cli = req->data; @@ -212,7 +212,7 @@ int32_t taosSendHttpReport(const char* server, uint16_t port, char* pCont, int32 cli->tcp.data = cli; 
cli->req.data = cli; cli->wbuf = wb; - cli->rbuf = taosMemoryCalloc(1, HTTP_RECV_BUF_SIZE); + cli->rbuf = taosMemoryCalloc(1, HTTP_RECV_BUF_SIZE); cli->addr = tstrdup(server); cli->port = port; @@ -233,4 +233,3 @@ int32_t taosSendHttpReport(const char* server, uint16_t port, char* pCont, int32 uv_loop_close(loop); return terrno; } -// clang-format on diff --git a/source/libs/transport/src/transSvr.c b/source/libs/transport/src/transSvr.c index 46046b2a95..7007079f87 100644 --- a/source/libs/transport/src/transSvr.c +++ b/source/libs/transport/src/transSvr.c @@ -906,23 +906,30 @@ static void uvDestroyConn(uv_handle_t* handle) { } } static void uvPipeListenCb(uv_stream_t* handle, int status) { - ASSERT(status == 0); + if (status != 0) { + tError("server failed to init pipe"); + return; + } SServerObj* srv = container_of(handle, SServerObj, pipeListen); uv_pipe_t* pipe = &(srv->pipe[srv->numOfWorkerReady][0]); - ASSERT(0 == uv_pipe_init(srv->loop, pipe, 1)); - ASSERT(0 == uv_accept((uv_stream_t*)&srv->pipeListen, (uv_stream_t*)pipe)); - ASSERT(1 == uv_is_readable((uv_stream_t*)pipe)); - ASSERT(1 == uv_is_writable((uv_stream_t*)pipe)); - ASSERT(0 == uv_is_closing((uv_handle_t*)pipe)); + int ret = uv_pipe_init(srv->loop, pipe, 1); + assert(ret == 0); + + ret = uv_accept((uv_stream_t*)&srv->pipeListen, (uv_stream_t*)pipe); + assert(ret == 0); + + ret = uv_is_readable((uv_stream_t*)pipe); + assert(ret == 1); + + ret = uv_is_writable((uv_stream_t*)pipe); + assert(ret == 1); + + ret = uv_is_closing((uv_handle_t*)pipe); + assert(ret == 0); srv->numOfWorkerReady++; - - // ASSERT(0 == uv_listen((uv_stream_t*)&ctx.send.tcp, 512, uvOnAcceptCb)); - - // r = uv_read_start((uv_stream_t*)&ctx.channel, alloc_cb, read_cb); - // ASSERT(r == 0); } void* transInitServer(uint32_t ip, uint32_t port, char* label, int numOfThreads, void* fp, void* shandle) { @@ -937,7 +944,9 @@ void* transInitServer(uint32_t ip, uint32_t port, char* label, int numOfThreads, srv->port = port; 
uv_loop_init(srv->loop); - assert(0 == uv_pipe_init(srv->loop, &srv->pipeListen, 0)); + int ret = uv_pipe_init(srv->loop, &srv->pipeListen, 0); + assert(ret == 0); + #ifdef WINDOWS char pipeName[64]; snprintf(pipeName, sizeof(pipeName), "\\\\?\\pipe\\trans.rpc.%p-" PRIu64, taosSafeRand(), GetCurrentProcessId()); @@ -946,8 +955,11 @@ void* transInitServer(uint32_t ip, uint32_t port, char* label, int numOfThreads, snprintf(pipeName, sizeof(pipeName), "%s%spipe.trans.rpc.%08X-" PRIu64, tsTempDir, TD_DIRSEP, taosSafeRand(), taosGetSelfPthreadId()); #endif - assert(0 == uv_pipe_bind(&srv->pipeListen, pipeName)); - assert(0 == uv_listen((uv_stream_t*)&srv->pipeListen, SOMAXCONN, uvPipeListenCb)); + ret = uv_pipe_bind(&srv->pipeListen, pipeName); + assert(ret == 0); + + ret = uv_listen((uv_stream_t*)&srv->pipeListen, SOMAXCONN, uvPipeListenCb); + assert(ret == 0); for (int i = 0; i < srv->numOfThreads; i++) { SWorkThrd* thrd = (SWorkThrd*)taosMemoryCalloc(1, sizeof(SWorkThrd)); From 8c0e46974bc599cce58acd0d33dd6f9202233baf Mon Sep 17 00:00:00 2001 From: tomchon Date: Wed, 31 Aug 2022 17:46:57 +0800 Subject: [PATCH 58/72] test: modify checkpackages scritps of rpm --- packaging/testpackage.sh | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/packaging/testpackage.sh b/packaging/testpackage.sh index 5332f84ddb..31840f2e6b 100755 --- a/packaging/testpackage.sh +++ b/packaging/testpackage.sh @@ -1,5 +1,6 @@ #!/bin/sh +scriptDir=$(dirname $(readlink -f $0)) packgeName=$1 version=$2 @@ -86,14 +87,24 @@ echo "download installPackage" # wget https://www.taosdata.com/assets-download/3.0/${originPackageName} cd ${installPath} +cp if [ ! -f {packgeName} ];then sshpass -p ${password} scp 192.168.1.131:/nas/TDengine3/v${version}/community/${packgeName} . fi + +packageSuffix=`echo {packgeName} | awk -F '.' '{print $NF}'` + +cp -r ${scriptDir}/debAuto.sh if [ ! 
-f debAuto.sh ];then echo '#!/usr/bin/expect ' > debAuto.sh + echo 'set packgeName [lindex $argv 0]' >> debAuto.sh + echo 'set packageSuffix [lindex $argv 1]' >> debAuto.sh echo 'set timeout 3 ' >> debAuto.sh - echo 'pset packgeName [lindex $argv 0]' >> debAuto.sh - echo 'spawn dpkg -i ${packgeName}' >> debAuto.sh + echo 'if { ${packageSuffix} == "deb" } {' >> debAuto.sh + echo ' spawn dpkg -i ${packgeName} ' >> debAuto.sh + echo '} elseif { ${packageSuffix} == "rpm"} {' >> debAuto.sh + echo ' spawn rpm -ivh ${packgeName}' >> debAuto.sh + echo '}' >> debAuto.sh echo 'expect "*one:"' >> debAuto.sh echo 'send "\r"' >> debAuto.sh echo 'expect "*skip:"' >> debAuto.sh @@ -105,7 +116,7 @@ if [[ ${packgeName} =~ "deb" ]];then dpkg -r taostools dpkg -r tdengine if [[ ${packgeName} =~ "TDengine" ]];then - echo "./debAuto.sh ${packgeName}" && chmod 755 debAuto.sh && ./debAuto.sh ${packgeName} + echo "./debAuto.sh ${packgeName}" && chmod 755 debAuto.sh && ./debAuto.sh ${packgeName} ${packageSuffix} else echo "dpkg -i ${packgeName}" && dpkg -i ${packgeName} fi From fca5b0459edd40cb3b38781d5fc435ee3b21f911 Mon Sep 17 00:00:00 2001 From: tomchon Date: Wed, 31 Aug 2022 17:58:39 +0800 Subject: [PATCH 59/72] test: modify checkpackages scritps of rpm and deb --- packaging/debRpmAutoInstall.sh | 13 ++++++++++ packaging/testpackage.sh | 44 +++++++++++++++++++--------------- 2 files changed, 38 insertions(+), 19 deletions(-) create mode 100755 packaging/debRpmAutoInstall.sh diff --git a/packaging/debRpmAutoInstall.sh b/packaging/debRpmAutoInstall.sh new file mode 100755 index 0000000000..1f51378c91 --- /dev/null +++ b/packaging/debRpmAutoInstall.sh @@ -0,0 +1,13 @@ +#!/usr/bin/expect +set packgeName [lindex $argv 0] +set packageSuffix [lindex $argv 1] +set timeout 3 +if { ${packageSuffix} == "deb" } { + spawn dpkg -i ${packgeName} +} elseif { ${packageSuffix} == "rpm"} { + spawn rpm -ivh ${packgeName} +} +expect "*one:" +send "\r" +expect "*skip:" +send "\r" diff --git 
a/packaging/testpackage.sh b/packaging/testpackage.sh index 31840f2e6b..60acaf7b8d 100755 --- a/packaging/testpackage.sh +++ b/packaging/testpackage.sh @@ -87,28 +87,30 @@ echo "download installPackage" # wget https://www.taosdata.com/assets-download/3.0/${originPackageName} cd ${installPath} -cp +cp -r ${scriptDir}/debRpmAutoInstall.sh . + if [ ! -f {packgeName} ];then + echo "sshpass -p ${password} scp 192.168.1.131:/nas/TDengine3/v${version}/community/${packgeName} ." sshpass -p ${password} scp 192.168.1.131:/nas/TDengine3/v${version}/community/${packgeName} . fi -packageSuffix=`echo {packgeName} | awk -F '.' '{print $NF}'` +packageSuffix=$(echo ${packgeName} | awk -F '.' '{print $NF}') -cp -r ${scriptDir}/debAuto.sh -if [ ! -f debAuto.sh ];then - echo '#!/usr/bin/expect ' > debAuto.sh - echo 'set packgeName [lindex $argv 0]' >> debAuto.sh - echo 'set packageSuffix [lindex $argv 1]' >> debAuto.sh - echo 'set timeout 3 ' >> debAuto.sh - echo 'if { ${packageSuffix} == "deb" } {' >> debAuto.sh - echo ' spawn dpkg -i ${packgeName} ' >> debAuto.sh - echo '} elseif { ${packageSuffix} == "rpm"} {' >> debAuto.sh - echo ' spawn rpm -ivh ${packgeName}' >> debAuto.sh - echo '}' >> debAuto.sh - echo 'expect "*one:"' >> debAuto.sh - echo 'send "\r"' >> debAuto.sh - echo 'expect "*skip:"' >> debAuto.sh - echo 'send "\r" ' >> debAuto.sh + +if [ ! 
-f debRpmAutoInstall.sh ];then + echo '#!/usr/bin/expect ' > debRpmAutoInstall.sh + echo 'set packgeName [lindex $argv 0]' >> debRpmAutoInstall.sh + echo 'set packageSuffix [lindex $argv 1]' >> debRpmAutoInstall.sh + echo 'set timeout 3 ' >> debRpmAutoInstall.sh + echo 'if { ${packageSuffix} == "deb" } {' >> debRpmAutoInstall.sh + echo ' spawn dpkg -i ${packgeName} ' >> debRpmAutoInstall.sh + echo '} elseif { ${packageSuffix} == "rpm"} {' >> debRpmAutoInstall.sh + echo ' spawn rpm -ivh ${packgeName}' >> debRpmAutoInstall.sh + echo '}' >> debRpmAutoInstall.sh + echo 'expect "*one:"' >> debRpmAutoInstall.sh + echo 'send "\r"' >> debRpmAutoInstall.sh + echo 'expect "*skip:"' >> debRpmAutoInstall.sh + echo 'send "\r" ' >> debRpmAutoInstall.sh fi if [[ ${packgeName} =~ "deb" ]];then @@ -116,7 +118,7 @@ if [[ ${packgeName} =~ "deb" ]];then dpkg -r taostools dpkg -r tdengine if [[ ${packgeName} =~ "TDengine" ]];then - echo "./debAuto.sh ${packgeName}" && chmod 755 debAuto.sh && ./debAuto.sh ${packgeName} ${packageSuffix} + echo "./debRpmAutoInstall.sh ${packgeName} ${packageSuffix}" && chmod 755 debRpmAutoInstall.sh && ./debRpmAutoInstall.sh ${packgeName} ${packageSuffix} else echo "dpkg -i ${packgeName}" && dpkg -i ${packgeName} fi @@ -124,7 +126,11 @@ elif [[ ${packgeName} =~ "rpm" ]];then cd ${installPath} sudo rpm -e tdengine sudo rpm -e taostools - echo "rpm -ivh ${packgeName} --quiet " && rpm -ivh ${packgeName} --quiet + if [[ ${packgeName} =~ "TDengine" ]];then + echo "./debRpmAutoInstall.sh ${packgeName} ${packageSuffix}" && chmod 755 debRpmAutoInstall.sh && ./debRpmAutoInstall.sh ${packgeName} ${packageSuffix} + else + echo "rpm -ivh ${packgeName}" && rpm -ivh ${packgeName} + fi elif [[ ${packgeName} =~ "tar" ]];then cd ${oriInstallPath} if [ ! -f {originPackageName} ];then From 4401080328ad56532084169a69d3e64bbca98886 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Wed, 31 Aug 2022 18:06:17 +0800 Subject: [PATCH 60/72] enh: make cmake .. 
-DCMAKE_BUILD_TYPE=Release work --- source/dnode/mgmt/node_mgmt/src/dmTransport.c | 2 +- source/libs/executor/src/executil.c | 2 +- source/libs/executor/src/executorimpl.c | 4 ++-- source/libs/function/src/builtinsimpl.c | 1 + source/libs/function/src/tudf.c | 2 +- source/libs/function/src/udfd.c | 8 ++++---- source/libs/index/src/indexComm.c | 1 + source/libs/sync/src/syncIndexMgr.c | 3 +++ source/libs/transport/src/trans.c | 2 +- source/util/src/talgo.c | 1 + source/util/src/tcompare.c | 1 + source/util/src/tlog.c | 5 ++++- tests/test/c/sdbDump.c | 6 ++++++ tests/tsim/src/simExe.c | 3 +++ tools/shell/src/shellCommand.c | 3 +++ tools/shell/src/shellEngine.c | 5 ++++- 16 files changed, 37 insertions(+), 12 deletions(-) diff --git a/source/dnode/mgmt/node_mgmt/src/dmTransport.c b/source/dnode/mgmt/node_mgmt/src/dmTransport.c index a059db6b00..b91b82b72e 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmTransport.c +++ b/source/dnode/mgmt/node_mgmt/src/dmTransport.c @@ -301,7 +301,7 @@ int32_t dmInitServer(SDnode *pDnode) { SDnodeTrans *pTrans = &pDnode->trans; SRpcInit rpcInit = {0}; - strncpy(rpcInit.localFqdn, tsLocalFqdn, strlen(tsLocalFqdn)); + strncpy(rpcInit.localFqdn, tsLocalFqdn, TSDB_FQDN_LEN); rpcInit.localPort = tsServerPort; rpcInit.label = "DND-S"; rpcInit.numOfThreads = tsNumOfRpcThreads; diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index 80c1494f8d..70180d6dc0 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -1190,7 +1190,7 @@ SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput, fmGetFuncExecFuncs(pCtx->functionId, &pCtx->fpSet); } else { char* udfName = pExpr->pExpr->_function.pFunctNode->functionName; - strncpy(pCtx->udfName, udfName, strlen(udfName)); + strncpy(pCtx->udfName, udfName, TSDB_FUNC_NAME_LEN); fmGetUdafExecFuncs(pCtx->functionId, &pCtx->fpSet); } pCtx->fpSet.getEnv(pExpr->pExpr->_function.pFunctNode, &env); diff --git 
a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index f3ff13ef85..b53d35a1a1 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -3982,8 +3982,8 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo STableKeyInfo* pKeyInfo = taosArrayGet(pTableListInfo->pTableList, i); qDebug("creating stream task: add table %" PRId64, pKeyInfo->uid); } - } #endif + } pTaskInfo->schemaInfo.qsw = extractQueriedColumnSchema(&pTableScanNode->scan); pOperator = createStreamScanOperatorInfo(pHandle, pTableScanNode, pTagCond, pTaskInfo); @@ -4449,7 +4449,7 @@ _complete: return code; } -static void doDestroyTableList(STableListInfo* pTableqinfoList) { +void doDestroyTableList(STableListInfo* pTableqinfoList) { taosArrayDestroy(pTableqinfoList->pTableList); taosHashCleanup(pTableqinfoList->map); if (pTableqinfoList->needSortTableByGroupId) { diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index b71d06231e..84d3a04807 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -1160,6 +1160,7 @@ static int32_t findRowIndex(int32_t start, int32_t num, SColumnInfoData* pCol, c } ASSERT(0); + return 0; } int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { diff --git a/source/libs/function/src/tudf.c b/source/libs/function/src/tudf.c index d5a3e91eea..5de9c52cc1 100644 --- a/source/libs/function/src/tudf.c +++ b/source/libs/function/src/tudf.c @@ -81,7 +81,7 @@ static int32_t udfSpawnUdfd(SUdfdData* pData) { taosDirName(path); #endif } else { - strncpy(path, tsProcPath, strlen(tsProcPath)); + strncpy(path, tsProcPath, PATH_MAX); taosDirName(path); } #ifdef WINDOWS diff --git a/source/libs/function/src/udfd.c b/source/libs/function/src/udfd.c index 5b27e030b9..a45e4585e8 100644 --- a/source/libs/function/src/udfd.c +++ b/source/libs/function/src/udfd.c @@ -453,7 +453,7 @@ void 
udfdProcessRpcRsp(void *parent, SRpcMsg *pMsg, SEpSet *pEpSet) { goto _return; } taosCloseFile(&file); - strncpy(udf->path, path, strlen(path)); + strncpy(udf->path, path, PATH_MAX); tFreeSFuncInfo(pFuncInfo); taosArrayDestroy(retrieveRsp.pFuncInfos); msgInfo->code = 0; @@ -566,17 +566,17 @@ int32_t udfdLoadUdf(char *udfName, SUdf *udf) { uv_dlsym(&udf->lib, processFuncName, (void **)(&udf->aggProcFunc)); char startFuncName[TSDB_FUNC_NAME_LEN + 6] = {0}; char *startSuffix = "_start"; - strncpy(startFuncName, processFuncName, strlen(processFuncName)); + strncpy(startFuncName, processFuncName, sizeof(startFuncName)); strncat(startFuncName, startSuffix, strlen(startSuffix)); uv_dlsym(&udf->lib, startFuncName, (void **)(&udf->aggStartFunc)); char finishFuncName[TSDB_FUNC_NAME_LEN + 7] = {0}; char *finishSuffix = "_finish"; - strncpy(finishFuncName, processFuncName, strlen(processFuncName)); + strncpy(finishFuncName, processFuncName, sizeof(finishFuncName)); strncat(finishFuncName, finishSuffix, strlen(finishSuffix)); uv_dlsym(&udf->lib, finishFuncName, (void **)(&udf->aggFinishFunc)); char mergeFuncName[TSDB_FUNC_NAME_LEN + 6] = {0}; char *mergeSuffix = "_merge"; - strncpy(finishFuncName, processFuncName, strlen(processFuncName)); + strncpy(finishFuncName, processFuncName, sizeof(finishFuncName)); strncat(finishFuncName, mergeSuffix, strlen(mergeSuffix)); uv_dlsym(&udf->lib, finishFuncName, (void **)(&udf->aggMergeFunc)); } diff --git a/source/libs/index/src/indexComm.c b/source/libs/index/src/indexComm.c index 4f33d98f9e..cd52d122f7 100644 --- a/source/libs/index/src/indexComm.c +++ b/source/libs/index/src/indexComm.c @@ -171,6 +171,7 @@ TExeCond tCompare(__compar_fn_t func, int8_t cmptype, void* a, void* b, int8_t d return tDoCompare(func, cmptype, &va, &vb); } assert(0); + return BREAK; #endif } TExeCond tDoCompare(__compar_fn_t func, int8_t comparType, void* a, void* b) { diff --git a/source/libs/sync/src/syncIndexMgr.c b/source/libs/sync/src/syncIndexMgr.c index 
07c4fa8429..3bda9bcd51 100644 --- a/source/libs/sync/src/syncIndexMgr.c +++ b/source/libs/sync/src/syncIndexMgr.c @@ -163,6 +163,7 @@ int64_t syncIndexMgrGetStartTime(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pR } } ASSERT(0); + return -1; } void syncIndexMgrSetRecvTime(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftId, int64_t recvTime) { @@ -190,6 +191,7 @@ int64_t syncIndexMgrGetRecvTime(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRa } } ASSERT(0); + return -1; } // for debug ------------------- @@ -245,4 +247,5 @@ SyncTerm syncIndexMgrGetTerm(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftI } } ASSERT(0); + return -1; } \ No newline at end of file diff --git a/source/libs/transport/src/trans.c b/source/libs/transport/src/trans.c index 0a0dcef378..9e0a8f2a10 100644 --- a/source/libs/transport/src/trans.c +++ b/source/libs/transport/src/trans.c @@ -43,7 +43,7 @@ void* rpcOpen(const SRpcInit* pInit) { return NULL; } if (pInit->label) { - tstrncpy(pRpc->label, pInit->label, strlen(pInit->label) + 1); + tstrncpy(pRpc->label, pInit->label, TSDB_LABEL_LEN); } // register callback handle pRpc->cfp = pInit->cfp; diff --git a/source/util/src/talgo.c b/source/util/src/talgo.c index 5353cd9bfe..699f0db7a1 100644 --- a/source/util/src/talgo.c +++ b/source/util/src/talgo.c @@ -201,6 +201,7 @@ void *taosbsearch(const void *key, const void *base, int32_t nmemb, int32_t size return (c > 0) ? p : (midx > 0 ? 
p - size : NULL); } else { ASSERT(0); + return NULL; } } diff --git a/source/util/src/tcompare.c b/source/util/src/tcompare.c index 7032f39744..cbda4e4655 100644 --- a/source/util/src/tcompare.c +++ b/source/util/src/tcompare.c @@ -244,6 +244,7 @@ int32_t compareJsonVal(const void *pLeft, const void *pRight) { return 0; }else{ assert(0); + return 0; } } diff --git a/source/util/src/tlog.c b/source/util/src/tlog.c index 06ebbf27fb..d35670cb7f 100644 --- a/source/util/src/tlog.c +++ b/source/util/src/tlog.c @@ -446,7 +446,10 @@ static inline void taosPrintLogImp(ELogLevel level, int32_t dflag, const char *b } if (dflag & DEBUG_SCREEN) { - write(1, buffer, (uint32_t)len); +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-result" + write(STDOUT_FILENO, buffer, (uint32_t)len); +#pragma GCC diagnostic pop } } diff --git a/tests/test/c/sdbDump.c b/tests/test/c/sdbDump.c index aef5056031..b90b781e44 100644 --- a/tests/test/c/sdbDump.c +++ b/tests/test/c/sdbDump.c @@ -20,6 +20,9 @@ #include "tconfig.h" #include "tjson.h" +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-result" + #define TMP_DNODE_DIR TD_TMP_DIR_PATH "dumpsdb" #define TMP_MNODE_DIR TD_TMP_DIR_PATH "dumpsdb" TD_DIRSEP "mnode" #define TMP_SDB_DATA_DIR TD_TMP_DIR_PATH "dumpsdb" TD_DIRSEP "mnode" TD_DIRSEP "data" @@ -429,6 +432,7 @@ int32_t parseArgs(int32_t argc, char *argv[]) { char cmd[PATH_MAX * 2] = {0}; snprintf(cmd, sizeof(cmd), "rm -rf %s", TMP_DNODE_DIR); + system(cmd); #ifdef WINDOWS taosMulMkDir(TMP_SDB_DATA_DIR); @@ -467,3 +471,5 @@ int32_t main(int32_t argc, char *argv[]) { return dumpSdb(); } + +#pragma GCC diagnostic pop \ No newline at end of file diff --git a/tests/tsim/src/simExe.c b/tests/tsim/src/simExe.c index b993a8dbf1..16732ff9a1 100644 --- a/tests/tsim/src/simExe.c +++ b/tests/tsim/src/simExe.c @@ -464,7 +464,10 @@ void simStoreSystemContentResult(SScript *script, char *filename) { taosCloseFile(&pFile); char rmCmd[MAX_FILE_NAME_LEN] = {0}; 
sprintf(rmCmd, "rm -f %s", filename); +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-result" system(rmCmd); +#pragma GCC diagnostic pop } } diff --git a/tools/shell/src/shellCommand.c b/tools/shell/src/shellCommand.c index d87e10fd08..b73317e991 100644 --- a/tools/shell/src/shellCommand.c +++ b/tools/shell/src/shellCommand.c @@ -510,7 +510,10 @@ int32_t shellReadCommand(char *command) { shellClearLineAfter(&cmd); break; case 12: // Ctrl + L; +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-result" system("clear"); +#pragma GCC diagnostic pop shellShowOnScreen(&cmd); break; case 21: // Ctrl + U; diff --git a/tools/shell/src/shellEngine.c b/tools/shell/src/shellEngine.c index e54b98a0a6..45d5489803 100644 --- a/tools/shell/src/shellEngine.c +++ b/tools/shell/src/shellEngine.c @@ -62,7 +62,10 @@ int32_t shellRunSingleCommand(char *command) { } if (shellRegexMatch(command, "^[\t ]*clear[ \t;]*$", REG_EXTENDED | REG_ICASE)) { - system("clear"); +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-result" + system("clear"); +#pragma GCC diagnostic pop return 0; } From c5874ca892b0296bc2dc40abc76ce9789f473cb7 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Wed, 31 Aug 2022 18:32:28 +0800 Subject: [PATCH 61/72] fix: add filter logic for tmq in stable wal --- include/common/tmsg.h | 1 + source/common/src/tmsg.c | 2 + source/dnode/vnode/src/tq/tqMeta.c | 52 +++++- source/dnode/vnode/src/tq/tqRead.c | 164 ++++++++++++++++++- source/libs/parser/src/parTranslater.c | 6 +- source/libs/wal/src/walRead.c | 2 +- tests/test/c/tmq_taosx_ci.c | 216 ++++++++++++++++--------- 7 files changed, 358 insertions(+), 85 deletions(-) diff --git a/include/common/tmsg.h b/include/common/tmsg.h index 681094471a..a1a967f533 100644 --- a/include/common/tmsg.h +++ b/include/common/tmsg.h @@ -2073,6 +2073,7 @@ int32_t tDeserializeSVCreateTbBatchRsp(void* buf, int32_t bufLen, SVCreateTbBatc // TDMT_VND_DROP_TABLE ================= typedef 
struct { char* name; + uint64_t suid; // for tmq in wal format int8_t igNotExists; } SVDropTbReq; diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c index 8dc4931573..618baa5b37 100644 --- a/source/common/src/tmsg.c +++ b/source/common/src/tmsg.c @@ -5141,6 +5141,7 @@ static int32_t tEncodeSVDropTbReq(SEncoder *pCoder, const SVDropTbReq *pReq) { if (tStartEncode(pCoder) < 0) return -1; if (tEncodeCStr(pCoder, pReq->name) < 0) return -1; + if (tEncodeU64(pCoder, pReq->suid) < 0) return -1; if (tEncodeI8(pCoder, pReq->igNotExists) < 0) return -1; tEndEncode(pCoder); @@ -5151,6 +5152,7 @@ static int32_t tDecodeSVDropTbReq(SDecoder *pCoder, SVDropTbReq *pReq) { if (tStartDecode(pCoder) < 0) return -1; if (tDecodeCStr(pCoder, &pReq->name) < 0) return -1; + if (tDecodeU64(pCoder, &pReq->suid) < 0) return -1; if (tDecodeI8(pCoder, &pReq->igNotExists) < 0) return -1; tEndDecode(pCoder); diff --git a/source/dnode/vnode/src/tq/tqMeta.c b/source/dnode/vnode/src/tq/tqMeta.c index a192d1f863..62f8debccb 100644 --- a/source/dnode/vnode/src/tq/tqMeta.c +++ b/source/dnode/vnode/src/tq/tqMeta.c @@ -18,12 +18,25 @@ int32_t tEncodeSTqHandle(SEncoder* pEncoder, const STqHandle* pHandle) { if (tStartEncode(pEncoder) < 0) return -1; if (tEncodeCStr(pEncoder, pHandle->subKey) < 0) return -1; + if (tEncodeI8(pEncoder, pHandle->fetchMeta) < 0) return -1; if (tEncodeI64(pEncoder, pHandle->consumerId) < 0) return -1; if (tEncodeI64(pEncoder, pHandle->snapshotVer) < 0) return -1; if (tEncodeI32(pEncoder, pHandle->epoch) < 0) return -1; if (tEncodeI8(pEncoder, pHandle->execHandle.subType) < 0) return -1; if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) { if (tEncodeCStr(pEncoder, pHandle->execHandle.execCol.qmsg) < 0) return -1; + } else if(pHandle->execHandle.subType == TOPIC_SUB_TYPE__DB){ + int32_t size = taosHashGetSize(pHandle->execHandle.execDb.pFilterOutTbUid); + if (tEncodeI32(pEncoder, size) < 0) return -1; + void *pIter = NULL; + pIter = 
taosHashIterate(pHandle->execHandle.execDb.pFilterOutTbUid, pIter); + while(pIter){ + int64_t *tbUid = (int64_t *)taosHashGetKey(pIter, NULL); + if (tEncodeI64(pEncoder, *tbUid) < 0) return -1; + pIter = taosHashIterate(pHandle->execHandle.execDb.pFilterOutTbUid, pIter); + } + } else if(pHandle->execHandle.subType == TOPIC_SUB_TYPE__TABLE){ + if (tEncodeI64(pEncoder, pHandle->execHandle.execTb.suid) < 0) return -1; } tEndEncode(pEncoder); return pEncoder->pos; @@ -32,12 +45,25 @@ int32_t tEncodeSTqHandle(SEncoder* pEncoder, const STqHandle* pHandle) { int32_t tDecodeSTqHandle(SDecoder* pDecoder, STqHandle* pHandle) { if (tStartDecode(pDecoder) < 0) return -1; if (tDecodeCStrTo(pDecoder, pHandle->subKey) < 0) return -1; + if (tDecodeI8(pDecoder, &pHandle->fetchMeta) < 0) return -1; if (tDecodeI64(pDecoder, &pHandle->consumerId) < 0) return -1; if (tDecodeI64(pDecoder, &pHandle->snapshotVer) < 0) return -1; if (tDecodeI32(pDecoder, &pHandle->epoch) < 0) return -1; if (tDecodeI8(pDecoder, &pHandle->execHandle.subType) < 0) return -1; if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) { if (tDecodeCStrAlloc(pDecoder, &pHandle->execHandle.execCol.qmsg) < 0) return -1; + }else if(pHandle->execHandle.subType == TOPIC_SUB_TYPE__DB){ + pHandle->execHandle.execDb.pFilterOutTbUid = + taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); + int32_t size = 0; + if (tDecodeI32(pDecoder, &size) < 0) return -1; + for(int32_t i = 0; i < size; i++){ + int64_t tbUid = 0; + if (tDecodeI64(pDecoder, &tbUid) < 0) return -1; + taosHashPut(pHandle->execHandle.execDb.pFilterOutTbUid, &tbUid, sizeof(int64_t), NULL, 0); + } + } else if(pHandle->execHandle.subType == TOPIC_SUB_TYPE__TABLE){ + if (tDecodeI64(pDecoder, &pHandle->execHandle.execTb.suid) < 0) return -1; } tEndDecode(pDecoder); return 0; @@ -267,14 +293,28 @@ int32_t tqMetaRestoreHandle(STQ* pTq) { ASSERT(scanner); handle.execHandle.pExecReader = qExtractReaderFromStreamScanner(scanner); 
ASSERT(handle.execHandle.pExecReader); - } else { - + } else if (handle.execHandle.subType == TOPIC_SUB_TYPE__DB) { handle.pWalReader = walOpenReader(pTq->pVnode->pWal, NULL); - handle.execHandle.execDb.pFilterOutTbUid = - taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); -// handle.execHandle.pExecReader = tqOpenReader(pTq->pVnode); - buildSnapContext(reader.meta, reader.version, 0, handle.execHandle.subType, handle.fetchMeta, (SSnapContext **)(&reader.sContext)); + handle.execHandle.pExecReader = tqOpenReader(pTq->pVnode); + buildSnapContext(reader.meta, reader.version, 0, handle.execHandle.subType, handle.fetchMeta, (SSnapContext **)(&reader.sContext)); + handle.execHandle.task = + qCreateQueueExecTaskInfo(NULL, &reader, NULL, NULL); + } else if (handle.execHandle.subType == TOPIC_SUB_TYPE__TABLE) { + handle.pWalReader = walOpenReader(pTq->pVnode->pWal, NULL); + + SArray* tbUidList = taosArrayInit(0, sizeof(int64_t)); + vnodeGetCtbIdList(pTq->pVnode, handle.execHandle.execTb.suid, tbUidList); + tqDebug("vgId:%d, tq try to get all ctb, suid:%" PRId64, pTq->pVnode->config.vgId, handle.execHandle.execTb.suid); + for (int32_t i = 0; i < taosArrayGetSize(tbUidList); i++) { + int64_t tbUid = *(int64_t*)taosArrayGet(tbUidList, i); + tqDebug("vgId:%d, idx %d, uid:%" PRId64, TD_VID(pTq->pVnode), i, tbUid); + } + handle.execHandle.pExecReader = tqOpenReader(pTq->pVnode); + tqReaderSetTbUidList(handle.execHandle.pExecReader, tbUidList); + taosArrayDestroy(tbUidList); + + buildSnapContext(reader.meta, reader.version, handle.execHandle.execTb.suid, handle.execHandle.subType, handle.fetchMeta, (SSnapContext **)(&reader.sContext)); handle.execHandle.task = qCreateQueueExecTaskInfo(NULL, &reader, NULL, NULL); } diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c index 6e2a6fdb71..375130fa2c 100644 --- a/source/dnode/vnode/src/tq/tqRead.c +++ b/source/dnode/vnode/src/tq/tqRead.c @@ -15,6 +15,162 @@ #include 
"tq.h" + +bool isValValidForTable(STqHandle* pHandle, SWalCont *pHead){ + if(pHandle->execHandle.subType != TOPIC_SUB_TYPE__TABLE){ + return true; + } + + int16_t msgType = pHead->msgType; + char* body = pHead->body; + int32_t bodyLen = pHead->bodyLen; + + int64_t tbSuid = pHandle->execHandle.execTb.suid; + int64_t realTbSuid = 0; + SDecoder coder; + void* data = POINTER_SHIFT(body, sizeof(SMsgHead)); + int32_t len = bodyLen - sizeof(SMsgHead); + tDecoderInit(&coder, data, len); + + if (msgType == TDMT_VND_CREATE_STB || msgType == TDMT_VND_ALTER_STB) { + SVCreateStbReq req = {0}; + if (tDecodeSVCreateStbReq(&coder, &req) < 0) { + goto end; + } + realTbSuid = req.suid; + } else if (msgType == TDMT_VND_DROP_STB) { + SVDropStbReq req = {0}; + if (tDecodeSVDropStbReq(&coder, &req) < 0) { + goto end; + } + realTbSuid = req.suid; + } else if (msgType == TDMT_VND_CREATE_TABLE) { + SVCreateTbBatchReq req = {0}; + if (tDecodeSVCreateTbBatchReq(&coder, &req) < 0) { + goto end; + } + + int32_t needRebuild = 0; + SVCreateTbReq* pCreateReq = NULL; + for (int32_t iReq = 0; iReq < req.nReqs; iReq++) { + pCreateReq = req.pReqs + iReq; + if(pCreateReq->type == TSDB_CHILD_TABLE && pCreateReq->ctb.suid == tbSuid){ + needRebuild++; + } + } + if(needRebuild == 0){ + // do nothing + }else if(needRebuild == req.nReqs){ + realTbSuid = tbSuid; + }else{ + realTbSuid = tbSuid; + SVCreateTbBatchReq reqNew = {0}; + reqNew.pArray = taosArrayInit(req.nReqs, sizeof(struct SVCreateTbReq)); + for (int32_t iReq = 0; iReq < req.nReqs; iReq++) { + pCreateReq = req.pReqs + iReq; + if(pCreateReq->type == TSDB_CHILD_TABLE && pCreateReq->ctb.suid == tbSuid){ + reqNew.nReqs++; + taosArrayPush(reqNew.pArray, pCreateReq); + } + } + + int tlen; + int32_t ret = 0; + tEncodeSize(tEncodeSVCreateTbBatchReq, &reqNew, tlen, ret); + void* buf = taosMemoryMalloc(tlen); + if (NULL == buf) { + taosArrayDestroy(reqNew.pArray); + goto end; + } + SEncoder coderNew = {0}; + tEncoderInit(&coderNew, buf, tlen - 
sizeof(SMsgHead)); + tEncodeSVCreateTbBatchReq(&coderNew, &reqNew); + tEncoderClear(&coderNew); + memcpy(pHead->body + sizeof(SMsgHead), buf, tlen); + pHead->bodyLen = tlen + sizeof(SMsgHead); + taosMemoryFree(buf); + taosArrayDestroy(reqNew.pArray); + } + } else if (msgType == TDMT_VND_ALTER_TABLE) { + SVAlterTbReq req = {0}; + + if (tDecodeSVAlterTbReq(&coder, &req) < 0) { + goto end; + } + + SMetaReader mr = {0}; + metaReaderInit(&mr, pHandle->execHandle.pExecReader->pVnodeMeta, 0); + + if (metaGetTableEntryByName(&mr, req.tbName) < 0) { + metaReaderClear(&mr); + goto end; + } + realTbSuid = mr.me.ctbEntry.suid; + metaReaderClear(&mr); + } else if (msgType == TDMT_VND_DROP_TABLE) { + SVDropTbBatchReq req = {0}; + + if (tDecodeSVDropTbBatchReq(&coder, &req) < 0) { + goto end; + } + + int32_t needRebuild = 0; + SVDropTbReq* pDropReq = NULL; + for (int32_t iReq = 0; iReq < req.nReqs; iReq++) { + pDropReq = req.pReqs + iReq; + + if(pDropReq->suid == tbSuid){ + needRebuild++; + } + } + if(needRebuild == 0){ + // do nothing + }else if(needRebuild == req.nReqs){ + realTbSuid = tbSuid; + }else{ + realTbSuid = tbSuid; + SVDropTbBatchReq reqNew = {0}; + reqNew.pArray = taosArrayInit(req.nReqs, sizeof(SVDropTbReq)); + for (int32_t iReq = 0; iReq < req.nReqs; iReq++) { + pDropReq = req.pReqs + iReq; + if(pDropReq->suid == tbSuid){ + reqNew.nReqs++; + taosArrayPush(reqNew.pArray, pDropReq); + } + } + + int tlen; + int32_t ret = 0; + tEncodeSize(tEncodeSVDropTbBatchReq, &reqNew, tlen, ret); + void* buf = taosMemoryMalloc(tlen); + if (NULL == buf) { + taosArrayDestroy(reqNew.pArray); + goto end; + } + SEncoder coderNew = {0}; + tEncoderInit(&coderNew, buf, tlen - sizeof(SMsgHead)); + tEncodeSVDropTbBatchReq(&coderNew, &reqNew); + tEncoderClear(&coderNew); + memcpy(pHead->body + sizeof(SMsgHead), buf, tlen); + pHead->bodyLen = tlen + sizeof(SMsgHead); + taosMemoryFree(buf); + taosArrayDestroy(reqNew.pArray); + } + } else if (msgType == TDMT_VND_DELETE) { + SDeleteRes req = {0}; 
+ if (tDecodeDeleteRes(&coder, &req) < 0) { + goto end; + } + realTbSuid = req.suid; + } else{ + ASSERT(0); + } + + end: + tDecoderClear(&coder); + return tbSuid == realTbSuid; +} + int64_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalCkHead** ppCkHead) { int32_t code = 0; taosThreadMutexLock(&pHandle->pWalReader->mutex); @@ -53,9 +209,11 @@ int64_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalCkHea code = -1; goto END; } - *fetchOffset = offset; - code = 0; - goto END; + if(isValValidForTable(pHandle, pHead)){ + *fetchOffset = offset; + code = 0; + goto END; + } } } code = walSkipFetchBody(pHandle->pWalReader, *ppCkHead); diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index d938325ef2..54c759fa39 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -6351,8 +6351,8 @@ typedef struct SVgroupDropTableBatch { char dbName[TSDB_DB_NAME_LEN]; } SVgroupDropTableBatch; -static void addDropTbReqIntoVgroup(SHashObj* pVgroupHashmap, SDropTableClause* pClause, SVgroupInfo* pVgInfo) { - SVDropTbReq req = {.name = pClause->tableName, .igNotExists = pClause->ignoreNotExists}; +static void addDropTbReqIntoVgroup(SHashObj* pVgroupHashmap, SDropTableClause* pClause, SVgroupInfo* pVgInfo, uint64_t suid) { + SVDropTbReq req = {.name = pClause->tableName, .suid = suid, .igNotExists = pClause->ignoreNotExists}; SVgroupDropTableBatch* pTableBatch = taosHashGet(pVgroupHashmap, &pVgInfo->vgId, sizeof(pVgInfo->vgId)); if (NULL == pTableBatch) { SVgroupDropTableBatch tBatch = {0}; @@ -6393,7 +6393,7 @@ static int32_t buildDropTableVgroupHashmap(STranslateContext* pCxt, SDropTableCl code = getTableHashVgroup(pCxt, pClause->dbName, pClause->tableName, &info); } if (TSDB_CODE_SUCCESS == code) { - addDropTbReqIntoVgroup(pVgroupHashmap, pClause, &info); + addDropTbReqIntoVgroup(pVgroupHashmap, pClause, &info, pTableMeta->suid); } over: diff --git 
a/source/libs/wal/src/walRead.c b/source/libs/wal/src/walRead.c index a5b5a2b7b4..9db7d6c455 100644 --- a/source/libs/wal/src/walRead.c +++ b/source/libs/wal/src/walRead.c @@ -372,7 +372,7 @@ int32_t walFetchHead(SWalReader *pRead, int64_t ver, SWalCkHead *pHead) { int32_t walSkipFetchBody(SWalReader *pRead, const SWalCkHead *pHead) { int64_t code; - ASSERT(pRead->curVersion == pHead->head.version); +// ASSERT(pRead->curVersion == pHead->head.version); code = taosLSeekFile(pRead->pLogFile, pHead->head.bodyLen, SEEK_CUR); if (code < 0) { diff --git a/tests/test/c/tmq_taosx_ci.c b/tests/test/c/tmq_taosx_ci.c index 2afa05b012..f917b9159e 100644 --- a/tests/test/c/tmq_taosx_ci.c +++ b/tests/test/c/tmq_taosx_ci.c @@ -26,6 +26,7 @@ TdFilePtr g_fp = NULL; typedef struct{ bool snapShot; bool dropTable; + bool subTable; int srcVgroups; int dstVgroups; char dir[64]; @@ -74,57 +75,7 @@ static void msg_process(TAOS_RES* msg) { taos_close(pConn); } -int32_t init_env(Config *conf) { - TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); - if (pConn == NULL) { - return -1; - } - - TAOS_RES* pRes = taos_query(pConn, "drop database if exists db_taosx"); - if (taos_errno(pRes) != 0) { - printf("error in drop db_taosx, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - char sql[128] = {0}; - snprintf(sql, 128, "create database if not exists db_taosx vgroups %d", conf->dstVgroups); - pRes = taos_query(pConn, sql); - if (taos_errno(pRes) != 0) { - printf("error in create db_taosx, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "drop topic if exists topic_db"); - if (taos_errno(pRes) != 0) { - printf("error in drop topic, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "drop database if exists abc1"); - if (taos_errno(pRes) != 0) { - printf("error in drop db, reason:%s\n", taos_errstr(pRes)); - return -1; - } - 
taos_free_result(pRes); - - snprintf(sql, 128, "create database if not exists abc1 vgroups %d", conf->srcVgroups); - pRes = taos_query(pConn, sql); - if (taos_errno(pRes) != 0) { - printf("error in create db, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "use abc1"); - if (taos_errno(pRes) != 0) { - printf("error in use db, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - +int buildDatabase(TAOS* pConn, TAOS_RES* pRes){ pRes = taos_query(pConn, "create stable if not exists st1 (ts timestamp, c1 int, c2 float, c3 binary(16)) tags(t1 int, t3 " "nchar(8), t4 bool)"); @@ -232,7 +183,7 @@ int32_t init_env(Config *conf) { } taos_free_result(pRes); - if(conf->dropTable){ + if(g_conf.dropTable){ pRes = taos_query(pConn, "drop table ct3 ct1"); if (taos_errno(pRes) != 0) { printf("failed to drop child table ct3, reason:%s\n", taos_errstr(pRes)); @@ -297,7 +248,7 @@ int32_t init_env(Config *conf) { } taos_free_result(pRes); - if(conf->dropTable){ + if(g_conf.dropTable){ pRes = taos_query(pConn, "drop table n1"); if (taos_errno(pRes) != 0) { printf("failed to drop normal table n1, reason:%s\n", taos_errstr(pRes)); @@ -341,7 +292,7 @@ int32_t init_env(Config *conf) { } taos_free_result(pRes); - if(conf->dropTable){ + if(g_conf.dropTable){ pRes = taos_query(pConn, "create stable if not exists st1 (ts timestamp, c1 int, c2 float, c3 binary(16)) tags(t1 int, t3 " "nchar(8), t4 bool)"); @@ -358,6 +309,112 @@ int32_t init_env(Config *conf) { } taos_free_result(pRes); } + return 0; +} + +int buildStable(TAOS* pConn, TAOS_RES* pRes){ + pRes = taos_query(pConn, "CREATE STABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) TAGS (`groupid` INT, `location` VARCHAR(16))"); + if (taos_errno(pRes) != 0) { + printf("failed to create super table meters, reason:%s\n", taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); + + pRes = taos_query(pConn, "create table d0 using 
meters tags(1, 'San Francisco')"); + if (taos_errno(pRes) != 0) { + printf("failed to create child table d0, reason:%s\n", taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); + + pRes = taos_query(pConn, "create table d1 using meters tags(2, 'Beijing')"); + if (taos_errno(pRes) != 0) { + printf("failed to create child table d1, reason:%s\n", taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); + + pRes = taos_query(pConn, "create stream meters_summary_s into meters_summary as select _wstart, max(current) as current, groupid, location from meters partition by groupid, location interval(10m)"); + if (taos_errno(pRes) != 0) { + printf("failed to create super table meters_summary, reason:%s\n", taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); + + pRes = taos_query(pConn, "insert into d0 (ts, current) values (now, 120)"); + if (taos_errno(pRes) != 0) { + printf("failed to insert into table d0, reason:%s\n", taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); + + return 0; +} + +int32_t init_env() { + TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); + if (pConn == NULL) { + return -1; + } + + TAOS_RES* pRes = taos_query(pConn, "drop database if exists db_taosx"); + if (taos_errno(pRes) != 0) { + printf("error in drop db_taosx, reason:%s\n", taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); + + char sql[128] = {0}; + snprintf(sql, 128, "create database if not exists db_taosx vgroups %d", g_conf.dstVgroups); + pRes = taos_query(pConn, sql); + if (taos_errno(pRes) != 0) { + printf("error in create db_taosx, reason:%s\n", taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); + + pRes = taos_query(pConn, "drop topic if exists topic_db"); + if (taos_errno(pRes) != 0) { + printf("error in drop topic, reason:%s\n", taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); + + pRes = taos_query(pConn, "drop topic if exists meters_summary_t1"); + if (taos_errno(pRes) != 0) { + 
printf("error in drop topic, reason:%s\n", taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); + + pRes = taos_query(pConn, "drop database if exists abc1"); + if (taos_errno(pRes) != 0) { + printf("error in drop db, reason:%s\n", taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); + + snprintf(sql, 128, "create database if not exists abc1 vgroups %d", g_conf.srcVgroups); + pRes = taos_query(pConn, sql); + if (taos_errno(pRes) != 0) { + printf("error in create db, reason:%s\n", taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); + + pRes = taos_query(pConn, "use abc1"); + if (taos_errno(pRes) != 0) { + printf("error in use db, reason:%s\n", taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); + + if(g_conf.subTable){ + buildStable(pConn, pRes); + }else{ + buildDatabase(pConn, pRes); + } + taos_close(pConn); return 0; } @@ -377,12 +434,21 @@ int32_t create_topic() { } taos_free_result(pRes); - pRes = taos_query(pConn, "create topic topic_db with meta as database abc1"); - if (taos_errno(pRes) != 0) { - printf("failed to create topic topic_db, reason:%s\n", taos_errstr(pRes)); - return -1; + if(g_conf.subTable){ + pRes = taos_query(pConn, "create topic meters_summary_t1 with meta as stable meters_summary"); + if (taos_errno(pRes) != 0) { + printf("failed to create topic meters_summary_t1, reason:%s\n", taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); + }else{ + pRes = taos_query(pConn, "create topic topic_db with meta as database abc1"); + if (taos_errno(pRes) != 0) { + printf("failed to create topic topic_db, reason:%s\n", taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); } - taos_free_result(pRes); taos_close(pConn); return 0; @@ -392,7 +458,7 @@ void tmq_commit_cb_print(tmq_t* tmq, int32_t code, void* param) { printf("commit %d tmq %p param %p\n", code, tmq, param); } -tmq_t* build_consumer(Config *config) { +tmq_t* build_consumer() { tmq_conf_t* conf = tmq_conf_new(); tmq_conf_set(conf, 
"group.id", "tg2"); tmq_conf_set(conf, "client.id", "my app 1"); @@ -402,7 +468,7 @@ tmq_t* build_consumer(Config *config) { tmq_conf_set(conf, "enable.auto.commit", "true"); tmq_conf_set(conf, "enable.heartbeat.background", "true"); - if(config->snapShot){ + if(g_conf.snapShot){ tmq_conf_set(conf, "experimental.snapshot.enable", "true"); } @@ -415,7 +481,11 @@ tmq_t* build_consumer(Config *config) { tmq_list_t* build_topic_list() { tmq_list_t* topic_list = tmq_list_new(); - tmq_list_append(topic_list, "topic_db"); + if(g_conf.subTable){ + tmq_list_append(topic_list, "meters_summary_t1"); + }else{ + tmq_list_append(topic_list, "topic_db"); + } return topic_list; } @@ -446,16 +516,16 @@ void basic_consume_loop(tmq_t* tmq, tmq_list_t* topics) { fprintf(stderr, "%% Consumer closed\n"); } -void initLogFile(Config *conf) { +void initLogFile() { char f1[256] = {0}; char f2[256] = {0}; - if(conf->snapShot){ - sprintf(f1, "%s/../log/tmq_taosx_tmp_snapshot.source", conf->dir); - sprintf(f2, "%s/../log/tmq_taosx_tmp_snapshot.result", conf->dir); + if(g_conf.snapShot){ + sprintf(f1, "%s/../log/tmq_taosx_tmp_snapshot.source", g_conf.dir); + sprintf(f2, "%s/../log/tmq_taosx_tmp_snapshot.result", g_conf.dir); }else{ - sprintf(f1, "%s/../log/tmq_taosx_tmp.source", conf->dir); - sprintf(f2, "%s/../log/tmq_taosx_tmp.result", conf->dir); + sprintf(f1, "%s/../log/tmq_taosx_tmp.source", g_conf.dir); + sprintf(f2, "%s/../log/tmq_taosx_tmp.result", g_conf.dir); } TdFilePtr pFile = taosOpenFile(f1, TD_FILE_TEXT | TD_FILE_TRUNC | TD_FILE_STREAM); @@ -471,7 +541,7 @@ void initLogFile(Config *conf) { exit(-1); } - if(conf->snapShot){ + if(g_conf.snapShot){ char *result[] = { 
"{\"type\":\"create\",\"tableName\":\"st1\",\"tableType\":\"super\",\"columns\":[{\"name\":\"ts\",\"type\":9},{\"name\":\"c1\",\"type\":4},{\"name\":\"c2\",\"type\":6},{\"name\":\"c3\",\"type\":8,\"length\":64},{\"name\":\"c4\",\"type\":5}],\"tags\":[{\"name\":\"t1\",\"type\":4},{\"name\":\"t3\",\"type\":10,\"length\":8},{\"name\":\"t4\",\"type\":1},{\"name\":\"t2\",\"type\":8,\"length\":64}]}", "{\"type\":\"create\",\"tableName\":\"ct0\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":4,\"tags\":[{\"name\":\"t1\",\"type\":4,\"value\":1000},{\"name\":\"t3\",\"type\":10,\"value\":\"\\\"ttt\\\"\"},{\"name\":\"t4\",\"type\":1,\"value\":1}]}", @@ -531,20 +601,22 @@ int main(int argc, char* argv[]) { g_conf.srcVgroups = atol(argv[++i]); }else if(strcmp(argv[i], "-dv") == 0){ g_conf.dstVgroups = atol(argv[++i]); + }else if(strcmp(argv[i], "-t") == 0){ + g_conf.subTable = true; } } printf("env init\n"); if(strlen(g_conf.dir) != 0){ - initLogFile(&g_conf); + initLogFile(); } - if (init_env(&g_conf) < 0) { + if (init_env() < 0) { return -1; } create_topic(); - tmq_t* tmq = build_consumer(&g_conf); + tmq_t* tmq = build_consumer(); tmq_list_t* topic_list = build_topic_list(); basic_consume_loop(tmq, topic_list); taosCloseFile(&g_fp); From f778854302baeffb4a72af9d4b3b606de30af46a Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Wed, 31 Aug 2022 18:42:06 +0800 Subject: [PATCH 62/72] fix:memory leak --- source/client/src/clientSml.c | 1 + 1 file changed, 1 insertion(+) diff --git a/source/client/src/clientSml.c b/source/client/src/clientSml.c index c5ce2f632c..23dc0c0864 100644 --- a/source/client/src/clientSml.c +++ b/source/client/src/clientSml.c @@ -356,6 +356,7 @@ static int32_t smlSendMetaMsg(SSmlHandle *info, SName *pName, SArray* pColumns, goto end; } + pRequest->syncQuery = true; if (!pRequest->pDb) { code = TSDB_CODE_PAR_DB_NOT_SPECIFIED; goto end; From 7b3e10034c8f03226a48b795dacc4c6c3a357e06 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Wed, 31 Aug 2022 
18:54:08 +0800 Subject: [PATCH 63/72] fix: compile error in windows --- source/util/src/tlog.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/util/src/tlog.c b/source/util/src/tlog.c index d35670cb7f..46203658f1 100644 --- a/source/util/src/tlog.c +++ b/source/util/src/tlog.c @@ -448,7 +448,7 @@ static inline void taosPrintLogImp(ELogLevel level, int32_t dflag, const char *b if (dflag & DEBUG_SCREEN) { #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-result" - write(STDOUT_FILENO, buffer, (uint32_t)len); + write(1, buffer, (uint32_t)len); #pragma GCC diagnostic pop } } From c4bcb4e1e36517f6dde815af7e2d83429114c40d Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Wed, 31 Aug 2022 19:06:03 +0800 Subject: [PATCH 64/72] other: adjust rsma duration for data files --- source/dnode/vnode/src/sma/smaOpen.c | 99 +++++++++++++++++--------- source/dnode/vnode/src/sma/smaRollup.c | 3 - 2 files changed, 64 insertions(+), 38 deletions(-) diff --git a/source/dnode/vnode/src/sma/smaOpen.c b/source/dnode/vnode/src/sma/smaOpen.c index 235fb1f941..456ad7109f 100644 --- a/source/dnode/vnode/src/sma/smaOpen.c +++ b/source/dnode/vnode/src/sma/smaOpen.c @@ -16,17 +16,19 @@ #include "sma.h" #include "tsdb.h" -static int32_t smaEvalDays(SRetention *r, int8_t precision); -static int32_t smaSetKeepCfg(STsdbKeepCfg *pKeepCfg, STsdbCfg *pCfg, int type); +#define RETENTION_DAYS_SPLIT_MAX (365 * 1440) + +static int32_t smaEvalDays(SVnode *pVnode, SRetention *r, int8_t level, int8_t precision, int32_t duration); +static int32_t smaSetKeepCfg(SVnode *pVnode, STsdbKeepCfg *pKeepCfg, STsdbCfg *pCfg, int type); static int32_t rsmaRestore(SSma *pSma); -#define SMA_SET_KEEP_CFG(l) \ +#define SMA_SET_KEEP_CFG(v, l) \ do { \ SRetention *r = &pCfg->retentions[l]; \ pKeepCfg->keep2 = convertTimeFromPrecisionToUnit(r->keep, pCfg->precision, TIME_UNIT_MINUTE); \ pKeepCfg->keep0 = pKeepCfg->keep2; \ pKeepCfg->keep1 = pKeepCfg->keep2; \ - pKeepCfg->days = 
smaEvalDays(r, pCfg->precision); \ + pKeepCfg->days = smaEvalDays(v, pCfg->retentions, l, pCfg->precision, pCfg->days); \ } while (0) #define SMA_OPEN_RSMA_IMPL(v, l) \ @@ -38,51 +40,78 @@ static int32_t rsmaRestore(SSma *pSma); } \ break; \ } \ - smaSetKeepCfg(&keepCfg, pCfg, TSDB_TYPE_RSMA_L##l); \ + smaSetKeepCfg(v, &keepCfg, pCfg, TSDB_TYPE_RSMA_L##l); \ if (tsdbOpen(v, &SMA_RSMA_TSDB##l(pSma), VNODE_RSMA##l##_DIR, &keepCfg) < 0) { \ goto _err; \ } \ } while (0) -#define RETENTION_DAYS_SPLIT_RATIO 10 -#define RETENTION_DAYS_SPLIT_MIN 1 -#define RETENTION_DAYS_SPLIT_MAX 30 +/** + * @brief Evaluate days(duration) for rsma level 1/2/3. + * 1) level 1: duration from "create database" + * 2) level 2/3: duration * (freq/freqL1) + * @param pVnode + * @param r + * @param level + * @param precision + * @param duration + * @return int32_t + */ +static int32_t smaEvalDays(SVnode *pVnode, SRetention *r, int8_t level, int8_t precision, int32_t duration) { + int32_t freqDuration = convertTimeFromPrecisionToUnit((r + TSDB_RETENTION_L0)->freq, precision, TIME_UNIT_MINUTE); + int32_t keepDuration = convertTimeFromPrecisionToUnit((r + TSDB_RETENTION_L0)->keep, precision, TIME_UNIT_MINUTE); + int32_t days = duration; // min -static int32_t smaEvalDays(SRetention *r, int8_t precision) { - int32_t keepDays = convertTimeFromPrecisionToUnit(r->keep, precision, TIME_UNIT_DAY); - int32_t freqDays = convertTimeFromPrecisionToUnit(r->freq, precision, TIME_UNIT_DAY); - - int32_t days = keepDays / RETENTION_DAYS_SPLIT_RATIO; - if (days <= RETENTION_DAYS_SPLIT_MIN) { - days = RETENTION_DAYS_SPLIT_MIN; - if (days < freqDays) { - days = freqDays + 1; - } - } else { - if (days > RETENTION_DAYS_SPLIT_MAX) { - days = RETENTION_DAYS_SPLIT_MAX; - } - if (days < freqDays) { - days = freqDays + 1; - } + if (days < freqDuration) { + days = freqDuration; } - return days * 1440; + + if (days > keepDuration) { + days = keepDuration; + } + + if (level == TSDB_RETENTION_L0) { + goto end; + } + + 
ASSERT(level >= TSDB_RETENTION_L1 && level <= TSDB_RETENTION_L2); + + freqDuration = convertTimeFromPrecisionToUnit((r + level)->freq, precision, TIME_UNIT_MINUTE); + keepDuration = convertTimeFromPrecisionToUnit((r + level)->keep, precision, TIME_UNIT_MINUTE); + + int32_t nFreqTimes = (r + level)->freq / (r + TSDB_RETENTION_L0)->freq; + days *= (nFreqTimes > 1 ? nFreqTimes : 1); + + if (days > keepDuration) { + days = keepDuration; + } + + if (days > RETENTION_DAYS_SPLIT_MAX) { + days = RETENTION_DAYS_SPLIT_MAX; + } + + if (days < freqDuration) { + days = freqDuration; + } +end: + smaInfo("vgId:%d, evaluated duration for level %" PRIi8 " is %d, raw val:%d", TD_VID(pVnode), level + 1, days, duration); + return days; } -int smaSetKeepCfg(STsdbKeepCfg *pKeepCfg, STsdbCfg *pCfg, int type) { +int smaSetKeepCfg(SVnode *pVnode, STsdbKeepCfg *pKeepCfg, STsdbCfg *pCfg, int type) { pKeepCfg->precision = pCfg->precision; switch (type) { case TSDB_TYPE_TSMA: ASSERT(0); break; case TSDB_TYPE_RSMA_L0: - SMA_SET_KEEP_CFG(0); + SMA_SET_KEEP_CFG(pVnode, 0); break; case TSDB_TYPE_RSMA_L1: - SMA_SET_KEEP_CFG(1); + SMA_SET_KEEP_CFG(pVnode, 1); break; case TSDB_TYPE_RSMA_L2: - SMA_SET_KEEP_CFG(2); + SMA_SET_KEEP_CFG(pVnode, 2); break; default: ASSERT(0); @@ -148,11 +177,11 @@ int32_t smaClose(SSma *pSma) { /** * @brief rsma env restore - * - * @param pSma - * @param type - * @param committedVer - * @return int32_t + * + * @param pSma + * @param type + * @param committedVer + * @return int32_t */ int32_t tdRsmaRestore(SSma *pSma, int8_t type, int64_t committedVer) { ASSERT(VND_IS_RSMA(pSma->pVnode)); diff --git a/source/dnode/vnode/src/sma/smaRollup.c b/source/dnode/vnode/src/sma/smaRollup.c index f33d8dc2d0..f2063e3067 100644 --- a/source/dnode/vnode/src/sma/smaRollup.c +++ b/source/dnode/vnode/src/sma/smaRollup.c @@ -1078,9 +1078,6 @@ static int32_t tdRSmaRestoreQTaskInfoReload(SSma *pSma, int8_t type, int64_t qTa goto _err; } - SSmaEnv *pRSmaEnv = pSma->pRSmaEnv; - SRSmaStat 
*pRSmaStat = (SRSmaStat *)SMA_ENV_STAT(pRSmaEnv); - SRSmaQTaskInfoIter fIter = {0}; if (tdRSmaQTaskInfoIterInit(&fIter, &tFile) < 0) { tdRSmaQTaskInfoIterDestroy(&fIter); From f3e2bef13efb3c334e6076292c6f3827c99cecbd Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Wed, 31 Aug 2022 19:12:06 +0800 Subject: [PATCH 65/72] other: adjust max duration for rsma --- source/dnode/vnode/src/sma/smaOpen.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/source/dnode/vnode/src/sma/smaOpen.c b/source/dnode/vnode/src/sma/smaOpen.c index 456ad7109f..3c3097bb2f 100644 --- a/source/dnode/vnode/src/sma/smaOpen.c +++ b/source/dnode/vnode/src/sma/smaOpen.c @@ -16,8 +16,6 @@ #include "sma.h" #include "tsdb.h" -#define RETENTION_DAYS_SPLIT_MAX (365 * 1440) - static int32_t smaEvalDays(SVnode *pVnode, SRetention *r, int8_t level, int8_t precision, int32_t duration); static int32_t smaSetKeepCfg(SVnode *pVnode, STsdbKeepCfg *pKeepCfg, STsdbCfg *pCfg, int type); static int32_t rsmaRestore(SSma *pSma); @@ -86,8 +84,8 @@ static int32_t smaEvalDays(SVnode *pVnode, SRetention *r, int8_t level, int8_t p days = keepDuration; } - if (days > RETENTION_DAYS_SPLIT_MAX) { - days = RETENTION_DAYS_SPLIT_MAX; + if (days > TSDB_MAX_DURATION_PER_FILE) { + days = TSDB_MAX_DURATION_PER_FILE; } if (days < freqDuration) { From f6c2cce5d023274dbaad34d2af38737e764c401c Mon Sep 17 00:00:00 2001 From: tomchon Date: Wed, 31 Aug 2022 19:53:40 +0800 Subject: [PATCH 66/72] test: modify checkpackages scritps of client --- packaging/MPtestJenkinsfile | 60 ++++++++++--- packaging/testpackage.sh | 173 +++++++++++++++++++++++------------- 2 files changed, 157 insertions(+), 76 deletions(-) diff --git a/packaging/MPtestJenkinsfile b/packaging/MPtestJenkinsfile index 45c8d8abf2..be49fb44be 100644 --- a/packaging/MPtestJenkinsfile +++ b/packaging/MPtestJenkinsfile @@ -64,6 +64,11 @@ pipeline { defaultValue:'2.1.2', description: 'This number of baseVerison is generally not modified.Now it is 3.0.0.1' ) + 
string ( + name:'nasPassword', + defaultValue:'password', + description: 'the pasword of the NAS server which has installPackage-192.168.1.131' + ) } environment{ WORK_DIR = '/var/lib/jenkins/workspace' @@ -111,17 +116,17 @@ pipeline { sync_source("${BRANCH_NAME}") sh ''' cd ${TDENGINE_ROOT_DIR}/packaging - bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server + bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server ${nasPassword} python3 checkPackageRuning.py ''' sh ''' cd ${TDENGINE_ROOT_DIR}/packaging - bash testpackage.sh ${TD_SERVER_LITE_TAR} ${version} ${BASE_TD_SERVER_LITE_TAR} ${baseVersion} server + bash testpackage.sh ${TD_SERVER_LITE_TAR} ${version} ${BASE_TD_SERVER_LITE_TAR} ${baseVersion} server ${nasPassword} python3 checkPackageRuning.py ''' sh ''' cd ${TDENGINE_ROOT_DIR}/packaging - bash testpackage.sh ${TD_SERVER_DEB} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server + bash testpackage.sh ${TD_SERVER_DEB} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server ${nasPassword} python3 checkPackageRuning.py ''' } @@ -134,17 +139,22 @@ pipeline { sync_source("${BRANCH_NAME}") sh ''' cd ${TDENGINE_ROOT_DIR}/packaging - bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server + bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server ${nasPassword} python3 checkPackageRuning.py ''' sh ''' cd ${TDENGINE_ROOT_DIR}/packaging - bash testpackage.sh ${TD_SERVER_LITE_TAR} ${version} ${BASE_TD_SERVER_LITE_TAR} ${baseVersion} server + bash testpackage.sh ${TD_SERVER_LITE_TAR} ${version} ${BASE_TD_SERVER_LITE_TAR} ${baseVersion} server ${nasPassword} python3 checkPackageRuning.py ''' sh ''' cd ${TDENGINE_ROOT_DIR}/packaging - bash testpackage.sh ${TD_SERVER_DEB} ${version} ${BASE_TD_SERVER_DEB} ${baseVersion} server + bash testpackage.sh ${TD_SERVER_DEB} ${version} ${BASE_TD_SERVER_DEB} ${baseVersion} server 
${nasPassword} + python3 checkPackageRuning.py + ''' + sh ''' + cd ${TDENGINE_ROOT_DIR}/packaging + bash testpackage.sh ${TD_CLIENT_TAR} ${version} ${BASE_TD_CLIENT_TAR} ${baseVersion} client ${nasPassword} python3 checkPackageRuning.py ''' } @@ -157,17 +167,17 @@ pipeline { sync_source("${BRANCH_NAME}") sh ''' cd ${TDENGINE_ROOT_DIR}/packaging - bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server + bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server ${nasPassword} python3 checkPackageRuning.py ''' sh ''' cd ${TDENGINE_ROOT_DIR}/packaging - bash testpackage.sh ${TD_SERVER_LITE_TAR} ${version} ${BASE_TD_SERVER_LITE_TAR} ${baseVersion} server + bash testpackage.sh ${TD_SERVER_LITE_TAR} ${version} ${BASE_TD_SERVER_LITE_TAR} ${baseVersion} server ${nasPassword} python3 checkPackageRuning.py ''' sh ''' cd ${TDENGINE_ROOT_DIR}/packaging - bash testpackage.sh ${TD_SERVER_RPM} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server + bash testpackage.sh ${TD_SERVER_RPM} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server ${nasPassword} python3 checkPackageRuning.py ''' } @@ -179,18 +189,42 @@ pipeline { timeout(time: 30, unit: 'MINUTES'){ sync_source("${BRANCH_NAME}") sh ''' - cd ${TDENGINE_ROOT_DIR}/packaging - bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server + cd ${TDENGINE_ROOT_DIR}/packaging + bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server ${nasPassword} python3 checkPackageRuning.py ''' sh ''' cd ${TDENGINE_ROOT_DIR}/packaging - bash testpackage.sh ${TD_SERVER_LITE_TAR} ${version} ${BASE_TD_SERVER_LITE_TAR} ${baseVersion} server + bash testpackage.sh ${TD_SERVER_LITE_TAR} ${version} ${BASE_TD_SERVER_LITE_TAR} ${baseVersion} server ${nasPassword} python3 checkPackageRuning.py ''' sh ''' cd ${TDENGINE_ROOT_DIR}/packaging - bash testpackage.sh ${TD_SERVER_RPM} ${version} ${BASE_TD_SERVER_TAR} 
${baseVersion} server + bash testpackage.sh ${TD_SERVER_RPM} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server ${nasPassword} + python3 checkPackageRuning.py + ''' + sh ''' + cd ${TDENGINE_ROOT_DIR}/packaging + bash testpackage.sh ${TD_CLIENT_LITE_TAR} ${version} ${BASE_TD_CLIENT_LITE_TAR} ${baseVersion} client ${nasPassword} + python3 checkPackageRuning.py + ''' + } + } + } + + stage('arm64') { + agent{label 'linux_arm64'} + steps { + timeout(time: 30, unit: 'MINUTES'){ + sync_source("${BRANCH_NAME}") + sh ''' + cd ${TDENGINE_ROOT_DIR}/packaging + bash testpackage.sh ${TD_SERVER_ARM_TAR} ${version} ${BASE_TD_SERVER_ARM_TAR} ${baseVersion} server ${nasPassword} + python3 checkPackageRuning.py + ''' + sh ''' + cd ${TDENGINE_ROOT_DIR}/packaging + bash testpackage.sh ${TD_CLIENT_ARM_TAR} ${version} ${BASE_TD_CLIENT_ARM_TAR} ${baseVersion} client ${nasPassword} python3 checkPackageRuning.py ''' } diff --git a/packaging/testpackage.sh b/packaging/testpackage.sh index 60acaf7b8d..512e7a7e4e 100755 --- a/packaging/testpackage.sh +++ b/packaging/testpackage.sh @@ -1,7 +1,6 @@ #!/bin/sh - +#parameter scriptDir=$(dirname $(readlink -f $0)) - packgeName=$1 version=$2 originPackageName=$3 @@ -10,6 +9,17 @@ testFile=$5 subFile="taos.tar.gz" password=$6 +# Color setting +RED='\033[41;30m' +GREEN='\033[1;32m' +YELLOW='\033[1;33m' +BLUE='\033[1;34m' +GREEN_DARK='\033[0;32m' +YELLOW_DARK='\033[0;33m' +BLUE_DARK='\033[0;34m' +GREEN_UNDERLINE='\033[4;32m' +NC='\033[0m' + if [ ${testFile} = "server" ];then tdPath="TDengine-server-${version}" originTdpPath="TDengine-server-${originversion}" @@ -25,62 +35,87 @@ elif [ ${testFile} = "tools" ];then fi function cmdInstall { -comd=$1 -if command -v ${comd} ;then - echo "${comd} is already installed" +command=$1 +if command -v ${command} ;then + echoColor YD "${command} is already installed" else if command -v apt ;then - apt-get install ${comd} -y + apt-get install ${command} -y elif command -v yum ;then - yum -y install ${comd} - echo 
"you should install ${comd} manually" + yum -y install ${command} + echoColor YD "you should install ${command} manually" fi fi } +function echoColor { +color=$1 +command=$2 -echo "Uninstall all components of TDeingne" - -if command -v rmtaos ;then - echo "uninstall all components of TDeingne:rmtaos" - rmtaos -else - echo "os doesn't include TDengine " +if [ ${color} = 'Y' ];then + echo -e "${YELLOW}${command}${NC}" +elif [ ${color} = 'YD' ];then + echo -e "${YELLOW_DARK}${command}${NC}" +elif [ ${color} = 'R' ];then + echo -e "${RED}${command}${NC}" +elif [ ${color} = 'G' ];then + echo -e "${GREEN}${command}${NC}\r\n" +elif [ ${color} = 'B' ];then + echo -e "${BLUE}${command}${NC}" +elif [ ${color} = 'BD' ];then + echo -e "${BLUE_DARK}${command}${NC}" fi +} -if command -v rmtaostools ;then - echo "uninstall all components of TDeingne:rmtaostools" - rmtaostools -else - echo "os doesn't include rmtaostools " -fi +echoColor G "===== install basesoft =====" cmdInstall tree cmdInstall wget cmdInstall sshpass -echo "new workroom path" +echoColor G "===== Uninstall all components of TDeingne =====" + +if command -v rmtaos ;then + echoColor YD "uninstall all components of TDeingne:rmtaos" + rmtaos +else + echoColor YD "os doesn't include TDengine" +fi + +if command -v rmtaostools ;then + echoColor YD "uninstall all components of TDeingne:rmtaostools" + rmtaostools +else + echoColor YD "os doesn't include rmtaostools " +fi + + + + +echoColor G "===== new workroom path =====" installPath="/usr/local/src/packageTest" oriInstallPath="/usr/local/src/packageTest/3.1" if [ ! -d ${installPath} ] ;then + echoColor BD "mkdir -p ${installPath}" mkdir -p ${installPath} else - echo "${installPath} already exists" + echoColor YD "${installPath} already exists" fi if [ ! 
-d ${oriInstallPath} ] ;then + echoColor BD "mkdir -p ${oriInstallPath}" mkdir -p ${oriInstallPath} else - echo "${oriInstallPath} already exists" + echoColor YD "${oriInstallPath} already exists" fi -echo "download installPackage" +echoColor G "===== download installPackage =====" # cd ${installPath} # wget https://www.taosdata.com/assets-download/3.0/${packgeName} # cd ${oriInstallPath} @@ -90,7 +125,7 @@ cd ${installPath} cp -r ${scriptDir}/debRpmAutoInstall.sh . if [ ! -f {packgeName} ];then - echo "sshpass -p ${password} scp 192.168.1.131:/nas/TDengine3/v${version}/community/${packgeName} ." + echoColor BD "sshpass -p ${password} scp 192.168.1.131:/nas/TDengine3/v${version}/community/${packgeName} ." sshpass -p ${password} scp 192.168.1.131:/nas/TDengine3/v${version}/community/${packgeName} . fi @@ -113,43 +148,57 @@ if [ ! -f debRpmAutoInstall.sh ];then echo 'send "\r" ' >> debRpmAutoInstall.sh fi + +echoColor G "===== instal Package =====" + if [[ ${packgeName} =~ "deb" ]];then cd ${installPath} dpkg -r taostools dpkg -r tdengine if [[ ${packgeName} =~ "TDengine" ]];then - echo "./debRpmAutoInstall.sh ${packgeName} ${packageSuffix}" && chmod 755 debRpmAutoInstall.sh && ./debRpmAutoInstall.sh ${packgeName} ${packageSuffix} + echoColor BD "./debRpmAutoInstall.sh ${packgeName} ${packageSuffix}" && chmod 755 debRpmAutoInstall.sh && ./debRpmAutoInstall.sh ${packgeName} ${packageSuffix} else - echo "dpkg -i ${packgeName}" && dpkg -i ${packgeName} + echoColor BD "dpkg -i ${packgeName}" && dpkg -i ${packgeName} fi elif [[ ${packgeName} =~ "rpm" ]];then cd ${installPath} sudo rpm -e tdengine sudo rpm -e taostools if [[ ${packgeName} =~ "TDengine" ]];then - echo "./debRpmAutoInstall.sh ${packgeName} ${packageSuffix}" && chmod 755 debRpmAutoInstall.sh && ./debRpmAutoInstall.sh ${packgeName} ${packageSuffix} + echoColor BD "./debRpmAutoInstall.sh ${packgeName} ${packageSuffix}" && chmod 755 debRpmAutoInstall.sh && ./debRpmAutoInstall.sh ${packgeName} ${packageSuffix} 
else - echo "rpm -ivh ${packgeName}" && rpm -ivh ${packgeName} + echoColor BD "rpm -ivh ${packgeName}" && rpm -ivh ${packgeName} fi elif [[ ${packgeName} =~ "tar" ]];then + cd ${installPath}/${tdPath} + if [ ${testFile} = "server" ];then + echoColor BD "bash ${installCmd} -e no " + bash ${installCmd} -e no + else + echoColor BD "bash ${installCmd} " + bash ${installCmd} + fi + + echoColor G "===== check installPackage File of tar =====" + cd ${oriInstallPath} if [ ! -f {originPackageName} ];then + echoColor YD "download base installPackage" + echoColor BD "sshpass -p ${password} scp 192.168.1.131:/nas/TDengine3/v${originversion}/community/${originPackageName} ." sshpass -p ${password} scp 192.168.1.131:/nas/TDengine3/v${originversion}/community/${originPackageName} . fi - echo "tar -xvf ${originPackageName}" && tar -xvf ${originPackageName} + echoColor YD "unzip the base installation package" + echoColor BD "tar -xf ${originPackageName}" && tar -xf ${originPackageName} cd ${installPath} - echo "tar -xvf ${packgeName}" && tar -xvf ${packgeName} - + echoColor YD "unzip the new installation package" + echoColor BD "tar -xf ${packgeName}" && tar -xf ${packgeName} if [ ${testFile} != "tools" ] ;then - cd ${installPath}/${tdPath} && tar vxf ${subFile} - cd ${oriInstallPath}/${originTdpPath} && tar vxf ${subFile} + cd ${installPath}/${tdPath} && tar xf ${subFile} + cd ${oriInstallPath}/${originTdpPath} && tar xf ${subFile} fi - echo "check installPackage File" - - cd ${oriInstallPath}/${originTdpPath} && tree > ${installPath}/base_${originversion}_checkfile cd ${installPath}/${tdPath} && tree > ${installPath}/now_${version}_checkfile @@ -157,32 +206,30 @@ elif [[ ${packgeName} =~ "tar" ]];then diff ${installPath}/base_${originversion}_checkfile ${installPath}/now_${version}_checkfile > ${installPath}/diffFile.log diffNumbers=`cat ${installPath}/diffFile.log |wc -l ` if [ ${diffNumbers} != 0 ];then - echo "The number and names of files have changed from the previous 
installation package" - echo `cat ${installPath}/diffFile.log` + echoColor R "The number and names of files have changed from the previous installation package" + echoColor Y `cat ${installPath}/diffFile.log` exit -1 + else + echoColor G "The number and names of files are the same as previous installation packages" fi - - cd ${installPath}/${tdPath} - if [ ${testFile} = "server" ];then - bash ${installCmd} -e no - else - bash ${installCmd} - fi - if [[ ${packgeName} =~ "Lite" ]] && [[ ${packgeName} =~ "tar" ]] ;then - cd ${installPath} - sshpass -p ${password} scp 192.168.1.131:/nas/TDengine3/v${version}/community/taosTools-2.1.2-Linux-x64.tar.gz . - # wget https://www.taosdata.com/assets-download/3.0/taosTools-2.1.2-Linux-x64.tar.gz - tar xvf taosTools-2.1.2-Linux-x64.tar.gz - cd taosTools-2.1.2 && bash install-taostools.sh - elif [[ ${packgeName} =~ "Lite" ]] && [[ ${packgeName} =~ "deb" ]] ;then - cd ${installPath} - sshpass -p ${password} scp 192.168.1.131:/nas/TDengine3/v${version}/community/taosTools-2.1.2-Linux-x64.deb . - dpkg -i taosTools-2.1.2-Linux-x64.deb - elif [[ ${packgeName} =~ "Lite" ]] && [[ ${packgeName} =~ "rpm" ]] ;then - cd ${installPath} - sshpass -p ${password} scp 192.168.1.131:/nas/TDengine3/v${version}/community/taosTools-2.1.2-Linux-x64.rpm . - rpm -ivh taosTools-2.1.2-Linux-x64.rpm --quiet - fi - fi +echoColor G "===== install taos-tools when package is lite or client =====" + + +if ([[ ${packgeName} =~ "Lite" ]] && [[ ${packgeName} =~ "tar" ]]) || [[ ${packgeName} =~ "client" ]] ;then + cd ${installPath} + sshpass -p ${password} scp 192.168.1.131:/nas/TDengine3/v${version}/community/taosTools-2.1.2-Linux-x64.tar.gz . 
+ # wget https://www.taosdata.com/assets-download/3.0/taosTools-2.1.2-Linux-x64.tar.gz + tar xf taosTools-2.1.2-Linux-x64.tar.gz + cd taosTools-2.1.2 && bash install-taostools.sh +elif [[ ${packgeName} =~ "Lite" ]] && [[ ${packgeName} =~ "deb" ]] ;then + cd ${installPath} + sshpass -p ${password} scp 192.168.1.131:/nas/TDengine3/v${version}/community/taosTools-2.1.2-Linux-x64.deb . + dpkg -i taosTools-2.1.2-Linux-x64.deb +elif [[ ${packgeName} =~ "Lite" ]] && [[ ${packgeName} =~ "rpm" ]] ;then + cd ${installPath} + sshpass -p ${password} scp 192.168.1.131:/nas/TDengine3/v${version}/community/taosTools-2.1.2-Linux-x64.rpm . + rpm -ivh taosTools-2.1.2-Linux-x64.rpm --quiet +fi + From 78705c3168c8364c6b513bfe4081c9ff87c32fb7 Mon Sep 17 00:00:00 2001 From: tomchon Date: Wed, 31 Aug 2022 20:23:19 +0800 Subject: [PATCH 67/72] test: modify checkpackages scritps of client --- packaging/testpackage.sh | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/packaging/testpackage.sh b/packaging/testpackage.sh index 512e7a7e4e..45d5c933aa 100755 --- a/packaging/testpackage.sh +++ b/packaging/testpackage.sh @@ -104,6 +104,10 @@ else echoColor YD "${installPath} already exists" fi +if [ -d ${installPath}/${tdPath} ] ;then + echoColor BD "rm -rf ${installPath}/${tdPath} " + rm -rf ${installPath}/${tdPath} +fi if [ ! 
-d ${oriInstallPath} ] ;then echoColor BD "mkdir -p ${oriInstallPath}" @@ -112,7 +116,10 @@ else echoColor YD "${oriInstallPath} already exists" fi - +if [ -d ${oriInstallPath}/${originTdpPath} ] ;then + echoColor BD "rm -rf ${oriInstallPath}/${originTdpPath}" + rm -rf ${oriInstallPath} +fi echoColor G "===== download installPackage =====" @@ -206,7 +213,7 @@ elif [[ ${packgeName} =~ "tar" ]];then diff ${installPath}/base_${originversion}_checkfile ${installPath}/now_${version}_checkfile > ${installPath}/diffFile.log diffNumbers=`cat ${installPath}/diffFile.log |wc -l ` if [ ${diffNumbers} != 0 ];then - echoColor R "The number and names of files have changed from the previous installation package" + echoColor R "The number and names of files is different from the previous installation package" echoColor Y `cat ${installPath}/diffFile.log` exit -1 else @@ -214,20 +221,22 @@ elif [[ ${packgeName} =~ "tar" ]];then fi fi -echoColor G "===== install taos-tools when package is lite or client =====" if ([[ ${packgeName} =~ "Lite" ]] && [[ ${packgeName} =~ "tar" ]]) || [[ ${packgeName} =~ "client" ]] ;then + echoColor G "===== install taos-tools when package is lite or client =====" cd ${installPath} sshpass -p ${password} scp 192.168.1.131:/nas/TDengine3/v${version}/community/taosTools-2.1.2-Linux-x64.tar.gz . # wget https://www.taosdata.com/assets-download/3.0/taosTools-2.1.2-Linux-x64.tar.gz tar xf taosTools-2.1.2-Linux-x64.tar.gz cd taosTools-2.1.2 && bash install-taostools.sh elif [[ ${packgeName} =~ "Lite" ]] && [[ ${packgeName} =~ "deb" ]] ;then + echoColor G "===== install taos-tools when package is lite or client =====" cd ${installPath} sshpass -p ${password} scp 192.168.1.131:/nas/TDengine3/v${version}/community/taosTools-2.1.2-Linux-x64.deb . 
dpkg -i taosTools-2.1.2-Linux-x64.deb elif [[ ${packgeName} =~ "Lite" ]] && [[ ${packgeName} =~ "rpm" ]] ;then + echoColor G "===== install taos-tools when package is lite or client =====" cd ${installPath} sshpass -p ${password} scp 192.168.1.131:/nas/TDengine3/v${version}/community/taosTools-2.1.2-Linux-x64.rpm . rpm -ivh taosTools-2.1.2-Linux-x64.rpm --quiet From 9d00afd826efa933881ab62863001eb2324340fb Mon Sep 17 00:00:00 2001 From: tomchon Date: Wed, 31 Aug 2022 20:31:46 +0800 Subject: [PATCH 68/72] test: modify checkpackages scritps --- packaging/testpackage.sh | 29 ++++++++++++++--------------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/packaging/testpackage.sh b/packaging/testpackage.sh index 45d5c933aa..0b04749a35 100755 --- a/packaging/testpackage.sh +++ b/packaging/testpackage.sh @@ -105,8 +105,8 @@ else fi if [ -d ${installPath}/${tdPath} ] ;then - echoColor BD "rm -rf ${installPath}/${tdPath} " - rm -rf ${installPath}/${tdPath} + echoColor BD "rm -rf ${installPath}/${tdPath}/*" + rm -rf ${installPath}/${tdPath}/* fi if [ ! -d ${oriInstallPath} ] ;then @@ -117,8 +117,8 @@ else fi if [ -d ${oriInstallPath}/${originTdpPath} ] ;then - echoColor BD "rm -rf ${oriInstallPath}/${originTdpPath}" - rm -rf ${oriInstallPath} + echoColor BD "rm -rf ${oriInstallPath}/${originTdpPath}/*" + rm -rf ${oriInstallPath}/${originTdpPath}/* fi @@ -177,24 +177,13 @@ elif [[ ${packgeName} =~ "rpm" ]];then echoColor BD "rpm -ivh ${packgeName}" && rpm -ivh ${packgeName} fi elif [[ ${packgeName} =~ "tar" ]];then - cd ${installPath}/${tdPath} - if [ ${testFile} = "server" ];then - echoColor BD "bash ${installCmd} -e no " - bash ${installCmd} -e no - else - echoColor BD "bash ${installCmd} " - bash ${installCmd} - fi - echoColor G "===== check installPackage File of tar =====" - cd ${oriInstallPath} if [ ! 
-f {originPackageName} ];then echoColor YD "download base installPackage" echoColor BD "sshpass -p ${password} scp 192.168.1.131:/nas/TDengine3/v${originversion}/community/${originPackageName} ." sshpass -p ${password} scp 192.168.1.131:/nas/TDengine3/v${originversion}/community/${originPackageName} . fi - echoColor YD "unzip the base installation package" echoColor BD "tar -xf ${originPackageName}" && tar -xf ${originPackageName} cd ${installPath} @@ -212,6 +201,7 @@ elif [[ ${packgeName} =~ "tar" ]];then cd ${installPath} diff ${installPath}/base_${originversion}_checkfile ${installPath}/now_${version}_checkfile > ${installPath}/diffFile.log diffNumbers=`cat ${installPath}/diffFile.log |wc -l ` + if [ ${diffNumbers} != 0 ];then echoColor R "The number and names of files is different from the previous installation package" echoColor Y `cat ${installPath}/diffFile.log` @@ -219,6 +209,15 @@ elif [[ ${packgeName} =~ "tar" ]];then else echoColor G "The number and names of files are the same as previous installation packages" fi + echoColor YD "===== install Package of tar =====" + cd ${installPath}/${tdPath} + if [ ${testFile} = "server" ];then + echoColor BD "bash ${installCmd} -e no " + bash ${installCmd} -e no + else + echoColor BD "bash ${installCmd} " + bash ${installCmd} + fi fi From c85cf0141a63b5f145b3685ad56d80268ef5aa44 Mon Sep 17 00:00:00 2001 From: tomchon Date: Wed, 31 Aug 2022 20:51:41 +0800 Subject: [PATCH 69/72] test: modify checkpackages scritps of client --- packaging/testpackage.sh | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/packaging/testpackage.sh b/packaging/testpackage.sh index 0b04749a35..2bac1c827b 100755 --- a/packaging/testpackage.sh +++ b/packaging/testpackage.sh @@ -220,7 +220,7 @@ elif [[ ${packgeName} =~ "tar" ]];then fi fi - +cd ${installPath} if ([[ ${packgeName} =~ "Lite" ]] && [[ ${packgeName} =~ "tar" ]]) || [[ ${packgeName} =~ "client" ]] ;then echoColor G "===== install taos-tools when package is 
lite or client =====" @@ -232,12 +232,14 @@ if ([[ ${packgeName} =~ "Lite" ]] && [[ ${packgeName} =~ "tar" ]]) || [[ ${pa elif [[ ${packgeName} =~ "Lite" ]] && [[ ${packgeName} =~ "deb" ]] ;then echoColor G "===== install taos-tools when package is lite or client =====" cd ${installPath} - sshpass -p ${password} scp 192.168.1.131:/nas/TDengine3/v${version}/community/taosTools-2.1.2-Linux-x64.deb . - dpkg -i taosTools-2.1.2-Linux-x64.deb + sshpass -p ${password} scp 192.168.1.131:/nas/TDengine3/v${version}/community/taosTools-2.1.2-Linux-x64.tar.gz . + tar xf taosTools-2.1.2-Linux-x64.tar.gz + cd taosTools-2.1.2 && bash install-taostools.sh elif [[ ${packgeName} =~ "Lite" ]] && [[ ${packgeName} =~ "rpm" ]] ;then echoColor G "===== install taos-tools when package is lite or client =====" cd ${installPath} - sshpass -p ${password} scp 192.168.1.131:/nas/TDengine3/v${version}/community/taosTools-2.1.2-Linux-x64.rpm . - rpm -ivh taosTools-2.1.2-Linux-x64.rpm --quiet + sshpass -p ${password} scp 192.168.1.131:/nas/TDengine3/v${version}/community/taosTools-2.1.2-Linux-x64.tar.gz . 
+ tar xf taosTools-2.1.2-Linux-x64.tar.gz + cd taosTools-2.1.2 && bash install-taostools.sh fi From d10484050bf0bdf630c056a89f1f0b8fd594224f Mon Sep 17 00:00:00 2001 From: tomchon Date: Wed, 31 Aug 2022 21:30:18 +0800 Subject: [PATCH 70/72] test: modify checkpackages scritps --- packaging/MPtestJenkinsfile | 2 +- packaging/testpackage.sh | 14 +++++++------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/packaging/MPtestJenkinsfile b/packaging/MPtestJenkinsfile index be49fb44be..77f642180a 100644 --- a/packaging/MPtestJenkinsfile +++ b/packaging/MPtestJenkinsfile @@ -149,7 +149,7 @@ pipeline { ''' sh ''' cd ${TDENGINE_ROOT_DIR}/packaging - bash testpackage.sh ${TD_SERVER_DEB} ${version} ${BASE_TD_SERVER_DEB} ${baseVersion} server ${nasPassword} + bash testpackage.sh ${TD_SERVER_DEB} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server ${nasPassword} python3 checkPackageRuning.py ''' sh ''' diff --git a/packaging/testpackage.sh b/packaging/testpackage.sh index 2bac1c827b..56da9e59be 100755 --- a/packaging/testpackage.sh +++ b/packaging/testpackage.sh @@ -132,8 +132,8 @@ cd ${installPath} cp -r ${scriptDir}/debRpmAutoInstall.sh . if [ ! -f {packgeName} ];then - echoColor BD "sshpass -p ${password} scp 192.168.1.131:/nas/TDengine3/v${version}/community/${packgeName} ." - sshpass -p ${password} scp 192.168.1.131:/nas/TDengine3/v${version}/community/${packgeName} . + echoColor BD "sshpass -p ${password} scp -oStrictHostKeyChecking=no 192.168.1.131:/nas/TDengine3/v${version}/community/${packgeName} ." + sshpass -p ${password} scp -oStrictHostKeyChecking=no -oStrictHostKeyChecking=no 192.168.1.131:/nas/TDengine3/v${version}/community/${packgeName} . fi packageSuffix=$(echo ${packgeName} | awk -F '.' '{print $NF}') @@ -181,8 +181,8 @@ elif [[ ${packgeName} =~ "tar" ]];then cd ${oriInstallPath} if [ ! 
-f {originPackageName} ];then echoColor YD "download base installPackage" - echoColor BD "sshpass -p ${password} scp 192.168.1.131:/nas/TDengine3/v${originversion}/community/${originPackageName} ." - sshpass -p ${password} scp 192.168.1.131:/nas/TDengine3/v${originversion}/community/${originPackageName} . + echoColor BD "sshpass -p ${password} scp -oStrictHostKeyChecking=no 192.168.1.131:/nas/TDengine3/v${originversion}/community/${originPackageName} ." + sshpass -p ${password} scp -oStrictHostKeyChecking=no 192.168.1.131:/nas/TDengine3/v${originversion}/community/${originPackageName} . fi echoColor YD "unzip the base installation package" echoColor BD "tar -xf ${originPackageName}" && tar -xf ${originPackageName} @@ -225,20 +225,20 @@ cd ${installPath} if ([[ ${packgeName} =~ "Lite" ]] && [[ ${packgeName} =~ "tar" ]]) || [[ ${packgeName} =~ "client" ]] ;then echoColor G "===== install taos-tools when package is lite or client =====" cd ${installPath} - sshpass -p ${password} scp 192.168.1.131:/nas/TDengine3/v${version}/community/taosTools-2.1.2-Linux-x64.tar.gz . + sshpass -p ${password} scp -oStrictHostKeyChecking=no 192.168.1.131:/nas/TDengine3/v${version}/community/taosTools-2.1.2-Linux-x64.tar.gz . # wget https://www.taosdata.com/assets-download/3.0/taosTools-2.1.2-Linux-x64.tar.gz tar xf taosTools-2.1.2-Linux-x64.tar.gz cd taosTools-2.1.2 && bash install-taostools.sh elif [[ ${packgeName} =~ "Lite" ]] && [[ ${packgeName} =~ "deb" ]] ;then echoColor G "===== install taos-tools when package is lite or client =====" cd ${installPath} - sshpass -p ${password} scp 192.168.1.131:/nas/TDengine3/v${version}/community/taosTools-2.1.2-Linux-x64.tar.gz . + sshpass -p ${password} scp -oStrictHostKeyChecking=no 192.168.1.131:/nas/TDengine3/v${version}/community/taosTools-2.1.2-Linux-x64.tar.gz . 
tar xf taosTools-2.1.2-Linux-x64.tar.gz cd taosTools-2.1.2 && bash install-taostools.sh elif [[ ${packgeName} =~ "Lite" ]] && [[ ${packgeName} =~ "rpm" ]] ;then echoColor G "===== install taos-tools when package is lite or client =====" cd ${installPath} - sshpass -p ${password} scp 192.168.1.131:/nas/TDengine3/v${version}/community/taosTools-2.1.2-Linux-x64.tar.gz . + sshpass -p ${password} scp -oStrictHostKeyChecking=no -oStrictHostKeyChecking=no 192.168.1.131:/nas/TDengine3/v${version}/community/taosTools-2.1.2-Linux-x64.tar.gz . tar xf taosTools-2.1.2-Linux-x64.tar.gz cd taosTools-2.1.2 && bash install-taostools.sh fi From e765adcd01b911f7aa28fcdd092de9f03fe82f13 Mon Sep 17 00:00:00 2001 From: Sean Ely <105326513+sean-tdengine@users.noreply.github.com> Date: Wed, 31 Aug 2022 09:38:35 -0700 Subject: [PATCH 71/72] docs: Python Connector - missing quote (#16547) Missing a quote in the first word --- docs/en/14-reference/03-connector/07-python.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/14-reference/03-connector/07-python.mdx b/docs/en/14-reference/03-connector/07-python.mdx index fc95033baa..d92a93fd4f 100644 --- a/docs/en/14-reference/03-connector/07-python.mdx +++ b/docs/en/14-reference/03-connector/07-python.mdx @@ -7,7 +7,7 @@ description: "taospy is the official Python connector for TDengine. taospy provi import Tabs from "@theme/Tabs"; import TabItem from "@theme/TabItem"; -`taospy is the official Python connector for TDengine. taospy provides a rich API that makes it easy for Python applications to use TDengine. `taospy` wraps both the [native interface](/reference/connector/cpp) and [REST interface](/reference/rest-api) of TDengine, which correspond to the `taos` and `taosrest` modules of the `taospy` package, respectively. +`taospy` is the official Python connector for TDengine. taospy provides a rich API that makes it easy for Python applications to use TDengine. 
`taospy` wraps both the [native interface](/reference/connector/cpp) and [REST interface](/reference/rest-api) of TDengine, which correspond to the `taos` and `taosrest` modules of the `taospy` package, respectively. In addition to wrapping the native and REST interfaces, `taospy` also provides a set of programming interfaces that conforms to the [Python Data Access Specification (PEP 249)](https://peps.python.org/pep-0249/). It is easy to integrate `taospy` with many third-party tools, such as [SQLAlchemy](https://www.sqlalchemy.org/) and [pandas](https://pandas.pydata.org/). The direct connection to the server using the native interface provided by the client driver is referred to hereinafter as a "native connection"; the connection to the server using the REST interface provided by taosAdapter is referred to hereinafter as a "REST connection". From 0e2d4346d80e12d1846594167933a35db2341ca0 Mon Sep 17 00:00:00 2001 From: Pan YANG Date: Thu, 1 Sep 2022 09:27:27 +0800 Subject: [PATCH 72/72] docs: fix minor punctuation typos --- docs/zh/02-intro.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/zh/02-intro.md b/docs/zh/02-intro.md index 012c49d2c3..9a0a6fb547 100644 --- a/docs/zh/02-intro.md +++ b/docs/zh/02-intro.md @@ -26,7 +26,7 @@ TDengine 的主要功能如下: - [Icinga2](../third-party/icinga2) - [TCollector](../third-party/tcollector) - [EMQX](../third-party/emq-broker) - - [HiveMQ](../third-party/hive-mq-broker) ; + - [HiveMQ](../third-party/hive-mq-broker) 2. 查询数据,支持 - [标准 SQL](../taos-sql),含嵌套查询 - [时序数据特色函数](../taos-sql/function/#time-series-extensions) @@ -85,14 +85,14 @@ TDengine 的主要功能如下: ![TDengine Database 技术生态图](eco_system.webp) -
图 1. TDengine技术生态图
+
图 1. TDengine 技术生态图
上图中,左侧是各种数据采集或消息队列,包括 OPC-UA、MQTT、Telegraf、也包括 Kafka,他们的数据将被源源不断的写入到 TDengine。右侧则是可视化、BI 工具、组态软件、应用程序。下侧则是 TDengine 自身提供的命令行程序(CLI)以及可视化管理工具。 ## 典型适用场景 -作为一个高性能、分布式、支持 SQL 的时序数据库(Database),TDengine 的典型适用场景包括但不限于 IoT、工业互联网、车联网、IT 运维、能源、金融证券等领域。需要指出的是,TDengine 是针对时序数据场景设计的专用数据库和专用大数据处理工具,因其充分利用了时序大数据的特点,它无法用来处理网络爬虫、微博、微信、电商、ERP、CRM 等通用型数据。下面本文将对适用场景做更多详细的分析。 +作为一个高性能、分布式、支持 SQL 的时序数据库(Database),TDengine 的典型适用场景包括但不限于 IoT、工业互联网、车联网、IT 运维、能源、金融证券等领域。需要指出的是,TDengine 是针对时序数据场景设计的专用数据库和专用大数据处理工具,因其充分利用了时序大数据的特点,它无法用来处理网络爬虫、微博、微信、电商、ERP、CRM 等通用型数据。下面本文将对适用场景做更多详细的分析。 ### 数据源特点和需求