From 767a97bcbe1fac80bcb592d1054e6dc29b71be82 Mon Sep 17 00:00:00 2001 From: cpwu Date: Sat, 6 Aug 2022 16:27:20 +0800 Subject: [PATCH 01/36] fix case --- .../2-query/distribute_agg_spread.py | 135 +++++++++--------- 1 file changed, 65 insertions(+), 70 deletions(-) diff --git a/tests/system-test/2-query/distribute_agg_spread.py b/tests/system-test/2-query/distribute_agg_spread.py index 842a74628d..9d878cc62f 100644 --- a/tests/system-test/2-query/distribute_agg_spread.py +++ b/tests/system-test/2-query/distribute_agg_spread.py @@ -6,13 +6,9 @@ import random class TDTestCase: - updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , - "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143, - "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143, - "maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 } def init(self, conn, logSql): - tdLog.debug("start to execute %s" % __file__) + tdLog.debug(f"start to execute {__file__}") tdSql.init(conn.cursor()) self.vnode_disbutes = None self.ts = 1537146000000 @@ -31,60 +27,61 @@ class TDTestCase: same_result = tdSql.queryResult if spread_result !=same_result: - tdLog.exit(" max function work not as expected, sql : %s "% spread_sql) + tdLog.exit(f" max function work not as expected, sql : {spread_sql} ") else: - tdLog.info(" max function work as expected, sql : %s "% spread_sql) + tdLog.info(f" max function work as expected, sql : {spread_sql} ") - def prepare_datas_of_distribute(self): + def prepare_datas_of_distribute(self, dbname="testdb"): # prepate datas for 20 tables distributed at different vgroups - tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5") - tdSql.execute(" use testdb ") + tdSql.execute(f"create database if not exists {dbname} keep 3650 duration 1000 vgroups 5") + tdSql.execute(f" use {dbname}") tdSql.execute( - '''create table stb1 + f'''create table {dbname}.stb1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32)) ''' ) tdSql.execute( - ''' - create table t1 + f''' + create table {dbname}.t1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) ''' ) for i in range(20): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') + tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') for i in range(9): tdSql.execute( - f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" ) tdSql.execute( - f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', 
now()+{1*i}a )" ) for i in range(1,21): if i ==1 or i == 4: continue else: - tbname = "ct"+f'{i}' + tbname = f"ct{i}" for j in range(9): tdSql.execute( - f"insert into {tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )" + f"insert into {dbname}.{tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )" ) - tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") - tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + + tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") tdSql.execute( - f'''insert into t1 values + f'''insert into {dbname}.t1 values ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a ) ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a ) @@ -100,11 +97,11 @@ class TDTestCase: ''' ) - tdLog.info(" prepare data for distributed_aggregate done! ") + tdLog.info(f" prepare data for distributed_aggregate done! 
") - def check_distribute_datas(self): + def check_distribute_datas(self, dbname="testdb"): # get vgroup_ids of all - tdSql.query("show vgroups ") + tdSql.query(f"show {dbname}.vgroups ") vgroups = tdSql.queryResult vnode_tables={} @@ -112,9 +109,8 @@ class TDTestCase: for vgroup_id in vgroups: vnode_tables[vgroup_id[0]]=[] - # check sub_table of per vnode ,make sure sub_table has been distributed - tdSql.query("show tables like 'ct%'") + tdSql.query(f"show {dbname}.tables like 'ct%'") table_names = tdSql.queryResult tablenames = [] for table_name in table_names: @@ -126,9 +122,9 @@ class TDTestCase: if len(v)>=2: count+=1 if count < 2: - tdLog.exit(" the datas of all not satisfy sub_table has been distributed ") + tdLog.exit(f" the datas of all not satisfy sub_table has been distributed ") - def check_spread_distribute_diff_vnode(self,col_name): + def check_spread_distribute_diff_vnode(self,col_name, dbname="testdb"): vgroup_ids = [] for k ,v in self.vnode_disbutes.items(): @@ -142,13 +138,13 @@ class TDTestCase: distribute_tbnames.append(random.sample(vnode_tables,1)[0]) tbname_ins = "" for tbname in distribute_tbnames: - tbname_ins += "'%s' ,"%tbname + tbname_ins += f"'{tbname}' ," tbname_filters = tbname_ins[:-1] - spread_sql = f"select spread({col_name}) from stb1 where tbname in ({tbname_filters})" + spread_sql = f"select spread({col_name}) from {dbname}.stb1 where tbname in ({tbname_filters})" - same_sql = f"select max({col_name}) - min({col_name}) from stb1 where tbname in ({tbname_filters})" + same_sql = f"select max({col_name}) - min({col_name}) from {dbname}.stb1 where tbname in ({tbname_filters})" tdSql.query(spread_sql) spread_result = tdSql.queryResult @@ -157,20 +153,20 @@ class TDTestCase: same_result = tdSql.queryResult if spread_result !=same_result: - tdLog.exit(" spread function work not as expected, sql : %s "% spread_sql) + tdLog.exit(f" spread function work not as expected, sql : {spread_sql} ") else: - tdLog.info(" spread function work as expected, sql : %s "% spread_sql) + tdLog.info(f" spread function work as expected, sql : {spread_sql} ") - def check_spread_status(self): + def check_spread_status(self, dbname="testdb"): # check max function work status - tdSql.query("show tables like 'ct%'") + tdSql.query(f"show {dbname}.tables like 'ct%'") table_names = tdSql.queryResult tablenames = [] for table_name in table_names: - tablenames.append(table_name[0]) + tablenames.append(f"{dbname}.{table_name[0]}") - tdSql.query("desc stb1") + tdSql.query(f"desc {dbname}.stb1") col_names = tdSql.queryResult colnames = [] @@ -185,80 +181,79 @@ class TDTestCase: # check max function for different vnode for colname in colnames: - if colname.startswith("c"): + if colname.startswith(f"c"): self.check_spread_distribute_diff_vnode(colname) else: # self.check_spread_distribute_diff_vnode(colname) # bug for tag pass - - def distribute_agg_query(self): + def distribute_agg_query(self, dbname="testdb"): # basic filter - tdSql.query("select spread(c1) from stb1 where c1 is null") + tdSql.query(f"select spread(c1) from {dbname}.stb1 where c1 is null") tdSql.checkRows(0) - tdSql.query("select spread(c1) from stb1 where t1=1") + tdSql.query(f"select spread(c1) from {dbname}.stb1 where t1=1") tdSql.checkData(0,0,8.000000000) - tdSql.query("select spread(c1+c2) from stb1 where c1 =1 ") + tdSql.query(f"select spread(c1+c2) from {dbname}.stb1 where c1 =1 ") tdSql.checkData(0,0,0.000000000) - tdSql.query("select spread(c1) from stb1 where tbname=\"ct2\"") + tdSql.query(f"select spread(c1) from 
{dbname}.stb1 where tbname=\"ct2\"") tdSql.checkData(0,0,8.000000000) - tdSql.query("select spread(c1) from stb1 partition by tbname") + tdSql.query(f"select spread(c1) from {dbname}.stb1 partition by tbname") tdSql.checkRows(20) - tdSql.query("select spread(c1) from stb1 where t1> 4 partition by tbname") + tdSql.query(f"select spread(c1) from {dbname}.stb1 where t1> 4 partition by tbname") tdSql.checkRows(15) # union all - tdSql.query("select spread(c1) from stb1 union all select max(c1)-min(c1) from stb1 ") + tdSql.query(f"select spread(c1) from {dbname}.stb1 union all select max(c1)-min(c1) from {dbname}.stb1 ") tdSql.checkRows(2) tdSql.checkData(0,0,28.000000000) # join - tdSql.execute(" create database if not exists db ") - tdSql.execute(" use db ") - tdSql.execute(" create stable st (ts timestamp , c1 int ,c2 float) tags(t1 int) ") - tdSql.execute(" create table tb1 using st tags(1) ") - tdSql.execute(" create table tb2 using st tags(2) ") + tdSql.execute(f" create database if not exists db ") + tdSql.execute(f" use db ") + tdSql.execute(f" create stable db.st (ts timestamp , c1 int ,c2 float) tags(t1 int) ") + tdSql.execute(f" create table db.tb1 using db.st tags(1) ") + tdSql.execute(f" create table db.tb2 using db.st tags(2) ") for i in range(10): ts = i*10 + self.ts - tdSql.execute(f" insert into tb1 values({ts},{i},{i}.0)") - tdSql.execute(f" insert into tb2 values({ts},{i},{i}.0)") + tdSql.execute(f" insert into db.tb1 values({ts},{i},{i}.0)") + tdSql.execute(f" insert into db.tb2 values({ts},{i},{i}.0)") - tdSql.query("select spread(tb1.c1), spread(tb2.c2) from tb1, tb2 where tb1.ts=tb2.ts") + tdSql.query(f"select spread(tb1.c1), spread(tb2.c2) from db.tb1 tb1, db.tb2 tb2 where tb1.ts=tb2.ts") tdSql.checkRows(1) tdSql.checkData(0,0,9.000000000) tdSql.checkData(0,0,9.00000) # group by - tdSql.execute(" use testdb ") - tdSql.query(" select max(c1),c1 from stb1 group by t1 ") + tdSql.execute(f" use {dbname} ") + tdSql.query(f" select max(c1),c1 from {dbname}.stb1 group by t1 ") tdSql.checkRows(20) - tdSql.query(" select max(c1),c1 from stb1 group by c1 ") + tdSql.query(f" select max(c1),c1 from {dbname}.stb1 group by c1 ") tdSql.checkRows(30) - tdSql.query(" select max(c1),c2 from stb1 group by c2 ") + tdSql.query(f" select max(c1),c2 from {dbname}.stb1 group by c2 ") tdSql.checkRows(31) # partition by tbname or partition by tag - tdSql.query("select spread(c1) from stb1 partition by tbname") + tdSql.query(f"select spread(c1) from {dbname}.stb1 partition by tbname") query_data = tdSql.queryResult # nest query for support max - tdSql.query("select spread(c2+2)+1 from (select max(c1) c2 from stb1)") + tdSql.query(f"select spread(c2+2)+1 from (select max(c1) c2 from {dbname}.stb1)") tdSql.checkData(0,0,1.000000000) - tdSql.query("select spread(c1+2)+1 as c2 from (select ts ,c1 ,c2 from stb1)") + tdSql.query(f"select spread(c1+2)+1 as c2 from (select ts ,c1 ,c2 from {dbname}.stb1)") tdSql.checkData(0,0,29.000000000) - tdSql.query("select spread(a+2)+1 as c2 from (select ts ,abs(c1) a ,c2 from stb1)") + tdSql.query(f"select spread(a+2)+1 as c2 from (select ts ,abs(c1) a ,c2 from {dbname}.stb1)") tdSql.checkData(0,0,29.000000000) # mixup with other functions - tdSql.query("select max(c1),count(c1),last(c2,c3),spread(c1) from stb1") + tdSql.query(f"select max(c1),count(c1),last(c2,c3),spread(c1) from {dbname}.stb1") tdSql.checkData(0,0,28) tdSql.checkData(0,1,184) tdSql.checkData(0,2,-99999) @@ -275,7 +270,7 @@ class TDTestCase: def stop(self): tdSql.close() - tdLog.success("%s 
successfully executed" % __file__) + tdLog.success(f"{__file__} successfully executed") tdCases.addWindows(__file__, TDTestCase()) tdCases.addLinux(__file__, TDTestCase()) From ca55fc525bc6918a3ec080fb7b9a45afa4ea78e0 Mon Sep 17 00:00:00 2001 From: cpwu Date: Sat, 6 Aug 2022 17:14:13 +0800 Subject: [PATCH 02/36] add case for rest API --- .../2-query/distribute_agg_spread.py | 4 +- .../2-query/distribute_agg_stddev.py | 122 ++++++++---------- .../system-test/2-query/distribute_agg_sum.py | 119 ++++++++--------- tests/system-test/fulltest.sh | 15 ++- 4 files changed, 121 insertions(+), 139 deletions(-) diff --git a/tests/system-test/2-query/distribute_agg_spread.py b/tests/system-test/2-query/distribute_agg_spread.py index 9d878cc62f..318f31f9a1 100644 --- a/tests/system-test/2-query/distribute_agg_spread.py +++ b/tests/system-test/2-query/distribute_agg_spread.py @@ -6,6 +6,7 @@ import random class TDTestCase: + updatecfgDict = {"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 } def init(self, conn, logSql): tdLog.debug(f"start to execute {__file__}") @@ -183,9 +184,6 @@ class TDTestCase: for colname in colnames: if colname.startswith(f"c"): self.check_spread_distribute_diff_vnode(colname) - else: - # self.check_spread_distribute_diff_vnode(colname) # bug for tag - pass def distribute_agg_query(self, dbname="testdb"): # basic filter diff --git a/tests/system-test/2-query/distribute_agg_stddev.py b/tests/system-test/2-query/distribute_agg_stddev.py index 22c7c598b4..56768d3be4 100644 --- a/tests/system-test/2-query/distribute_agg_stddev.py +++ b/tests/system-test/2-query/distribute_agg_stddev.py @@ -7,10 +7,7 @@ import platform import math class TDTestCase: - updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , - "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143, - "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143, - "maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 } + updatecfgDict = {"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 } def init(self, conn, logSql): tdLog.debug("start to execute %s" % __file__) @@ -45,55 +42,56 @@ class TDTestCase: else: tdLog.exit(" sql:%s; row:0 col:0 data:%d , expect:%d"%(stddev_sql,tdSql.queryResult[0][0],stddev_result)) - def prepare_datas_of_distribute(self): + def prepare_datas_of_distribute(self, dbname="testdb"): # prepate datas for 20 tables distributed at different vgroups - tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5") - tdSql.execute(" use testdb ") + tdSql.execute(f"create database if not exists {dbname} keep 3650 duration 1000 vgroups 5") + tdSql.execute(f" use {dbname}") tdSql.execute( - '''create table stb1 + f'''create table {dbname}.stb1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32)) ''' ) tdSql.execute( - ''' - create table t1 + f''' + create table {dbname}.t1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) ''' ) for i in range(20): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, 
"binary{i}", "nchar{i}" )') + tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') for i in range(9): tdSql.execute( - f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" ) tdSql.execute( - f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" ) for i in range(1,21): if i ==1 or i == 4: continue else: - tbname = "ct"+f'{i}' + tbname = f"ct{i}" for j in range(9): tdSql.execute( - f"insert into {tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )" + f"insert into {dbname}.{tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )" ) - tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") - tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + + tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") tdSql.execute( - f'''insert into t1 values + f'''insert into {dbname}.t1 values ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a ) ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a ) @@ -109,11 +107,11 @@ class TDTestCase: ''' ) - tdLog.info(" prepare data for 
distributed_aggregate done! ") + tdLog.info(f" prepare data for distributed_aggregate done! ") - def check_distribute_datas(self): + def check_distribute_datas(self, dbname="testdb"): # get vgroup_ids of all - tdSql.query("show vgroups ") + tdSql.query(f"show {dbname}.vgroups ") vgroups = tdSql.queryResult vnode_tables={} @@ -121,9 +119,8 @@ class TDTestCase: for vgroup_id in vgroups: vnode_tables[vgroup_id[0]]=[] - # check sub_table of per vnode ,make sure sub_table has been distributed - tdSql.query("show tables like 'ct%'") + tdSql.query(f"show {dbname}.tables like 'ct%'") table_names = tdSql.queryResult tablenames = [] for table_name in table_names: @@ -135,9 +132,9 @@ class TDTestCase: if len(v)>=2: count+=1 if count < 2: - tdLog.exit(" the datas of all not satisfy sub_table has been distributed ") + tdLog.exit(f" the datas of all not satisfy sub_table has been distributed ") - def check_stddev_distribute_diff_vnode(self,col_name): + def check_stddev_distribute_diff_vnode(self,col_name, dbname="testdb"): vgroup_ids = [] for k ,v in self.vnode_disbutes.items(): @@ -155,9 +152,9 @@ class TDTestCase: tbname_filters = tbname_ins[:-1] - stddev_sql = f"select stddev({col_name}) from stb1 where tbname in ({tbname_filters});" + stddev_sql = f"select stddev({col_name}) from {dbname}.stb1 where tbname in ({tbname_filters});" - same_sql = f"select {col_name} from stb1 where tbname in ({tbname_filters}) and {col_name} is not null " + same_sql = f"select {col_name} from {dbname}.stb1 where tbname in ({tbname_filters}) and {col_name} is not null " tdSql.query(same_sql) pre_data = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None] @@ -175,17 +172,16 @@ class TDTestCase: tdSql.query(stddev_sql) tdSql.checkData(0,0,stddev_result) - - def check_stddev_status(self): + def check_stddev_status(self, dbname="testdb"): # check max function work status - tdSql.query("show tables like 'ct%'") + tdSql.query(f"show {dbname}.tables like 'ct%'") table_names = tdSql.queryResult tablenames = [] for table_name in table_names: - tablenames.append(table_name[0]) + tablenames.append(f"{dbname}.{table_name[0]}") - tdSql.query("desc stb1") + tdSql.query(f"desc {dbname}.stb1") col_names = tdSql.queryResult colnames = [] @@ -197,50 +193,42 @@ class TDTestCase: for colname in colnames: if colname.startswith("c"): self.check_stddev_functions(tablename,colname) - else: - # self.check_stddev_functions(tablename,colname) - pass - # check max function for different vnode for colname in colnames: if colname.startswith("c"): self.check_stddev_distribute_diff_vnode(colname) - else: - # self.check_stddev_distribute_diff_vnode(colname) # bug for tag - pass - - def distribute_agg_query(self): + def distribute_agg_query(self, dbname="testdb"): # basic filter - tdSql.query(" select stddev(c1) from stb1 ") + tdSql.query(f"select stddev(c1) from {dbname}.stb1 ") tdSql.checkData(0,0,6.694663959) - tdSql.query(" select stddev(a) from (select stddev(c1) a from stb1 partition by tbname) ") + tdSql.query(f"select stddev(a) from (select stddev(c1) a from {dbname}.stb1 partition by tbname) ") tdSql.checkData(0,0,0.156797505) - tdSql.query(" select stddev(c1) from stb1 where t1=1") + tdSql.query(f"select stddev(c1) from {dbname}.stb1 where t1=1") tdSql.checkData(0,0,2.581988897) - tdSql.query("select stddev(c1+c2) from stb1 where c1 =1 ") + tdSql.query(f"select stddev(c1+c2) from {dbname}.stb1 where c1 =1 ") tdSql.checkData(0,0,0.000000000) - tdSql.query("select stddev(c1) from stb1 where tbname=\"ct2\"") + tdSql.query(f"select 
stddev(c1) from {dbname}.stb1 where tbname=\"ct2\"") tdSql.checkData(0,0,2.581988897) - tdSql.query("select stddev(c1) from stb1 partition by tbname") + tdSql.query(f"select stddev(c1) from {dbname}.stb1 partition by tbname") tdSql.checkRows(20) - tdSql.query("select stddev(c1) from stb1 where t1> 4 partition by tbname") + tdSql.query(f"select stddev(c1) from {dbname}.stb1 where t1> 4 partition by tbname") tdSql.checkRows(15) # union all - tdSql.query("select stddev(c1) from stb1 union all select stddev(c1) from stb1 ") + tdSql.query(f"select stddev(c1) from {dbname}.stb1 union all select stddev(c1) from {dbname}.stb1 ") tdSql.checkRows(2) tdSql.checkData(0,0,6.694663959) - tdSql.query("select stddev(a) from (select stddev(c1) a from stb1 union all select stddev(c1) a from stb1)") + tdSql.query(f"select stddev(a) from (select stddev(c1) a from {dbname}.stb1 union all select stddev(c1) a from {dbname}.stb1)") tdSql.checkRows(1) tdSql.checkData(0,0,0.000000000) @@ -248,38 +236,38 @@ class TDTestCase: tdSql.execute(" create database if not exists db ") tdSql.execute(" use db ") - tdSql.execute(" create stable st (ts timestamp , c1 int ,c2 float) tags(t1 int) ") - tdSql.execute(" create table tb1 using st tags(1) ") - tdSql.execute(" create table tb2 using st tags(2) ") + tdSql.execute(" create stable db.st (ts timestamp , c1 int ,c2 float) tags(t1 int) ") + tdSql.execute(" create table db.tb1 using db.st tags(1) ") + tdSql.execute(" create table db.tb2 using db.st tags(2) ") for i in range(10): ts = i*10 + self.ts - tdSql.execute(f" insert into tb1 values({ts},{i},{i}.0)") - tdSql.execute(f" insert into tb2 values({ts},{i},{i}.0)") + tdSql.execute(f" insert into db.tb1 values({ts},{i},{i}.0)") + tdSql.execute(f" insert into db.tb2 values({ts},{i},{i}.0)") - tdSql.query("select stddev(tb1.c1), stddev(tb2.c2) from tb1, tb2 where tb1.ts=tb2.ts") + tdSql.query("select stddev(tb1.c1), stddev(tb2.c2) from db.tb1 tb1, db.tb2 tb2 where tb1.ts=tb2.ts") tdSql.checkRows(1) tdSql.checkData(0,0,2.872281323) tdSql.checkData(0,1,2.872281323) # group by - tdSql.execute(" use testdb ") + tdSql.execute(f" use {dbname} ") # partition by tbname or partition by tag - tdSql.query("select stddev(c1) from stb1 partition by tbname") + tdSql.query(f"select stddev(c1) from {dbname}.stb1 partition by tbname") tdSql.checkRows(20) # nest query for support max - tdSql.query("select stddev(c2+2)+1 from (select stddev(c1) c2 from stb1)") + tdSql.query(f"select stddev(c2+2)+1 from (select stddev(c1) c2 from {dbname}.stb1)") tdSql.checkData(0,0,1.000000000) - tdSql.query("select stddev(c1+2) as c2 from (select ts ,c1 ,c2 from stb1)") + tdSql.query(f"select stddev(c1+2) as c2 from (select ts ,c1 ,c2 from {dbname}.stb1)") tdSql.checkData(0,0,6.694663959) - tdSql.query("select stddev(a+2) as c2 from (select ts ,abs(c1) a ,c2 from stb1)") + tdSql.query(f"select stddev(a+2) as c2 from (select ts ,abs(c1) a ,c2 from {dbname}.stb1)") tdSql.checkData(0,0,6.694663959) # mixup with other functions - tdSql.query("select max(c1),count(c1),last(c2,c3),sum(c1+c2),avg(c1),stddev(c1) from stb1") + tdSql.query(f"select max(c1),count(c1),last(c2,c3),sum(c1+c2),avg(c1),stddev(c1) from {dbname}.stb1") tdSql.checkData(0,0,28) tdSql.checkData(0,1,184) tdSql.checkData(0,2,-99999) diff --git a/tests/system-test/2-query/distribute_agg_sum.py b/tests/system-test/2-query/distribute_agg_sum.py index 90d1edca90..90946f388d 100644 --- a/tests/system-test/2-query/distribute_agg_sum.py +++ b/tests/system-test/2-query/distribute_agg_sum.py @@ -7,10 +7,7 @@ 
import platform class TDTestCase: - updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , - "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143, - "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143, - "maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 } + updatecfgDict = {"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 } def init(self, conn, logSql): tdLog.debug("start to execute %s" % __file__) @@ -34,55 +31,56 @@ class TDTestCase: tdSql.query(sum_sql) tdSql.checkData(0,0,pre_sum) - def prepare_datas_of_distribute(self): + def prepare_datas_of_distribute(self, dbname="testdb"): # prepate datas for 20 tables distributed at different vgroups - tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5") - tdSql.execute(" use testdb ") + tdSql.execute(f"create database if not exists {dbname} keep 3650 duration 1000 vgroups 5") + tdSql.execute(f" use {dbname}") tdSql.execute( - '''create table stb1 + f'''create table {dbname}.stb1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32)) ''' ) tdSql.execute( - ''' - create table t1 + f''' + create table {dbname}.t1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) ''' ) for i in range(20): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') + tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') for i in range(9): tdSql.execute( - f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" ) tdSql.execute( - f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" ) for i in range(1,21): if i ==1 or i == 4: continue else: - tbname = "ct"+f'{i}' + tbname = f"ct{i}" for j in range(9): tdSql.execute( - f"insert into {tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )" + f"insert into {dbname}.{tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )" ) - tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") - tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") - 
tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + + tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") tdSql.execute( - f'''insert into t1 values + f'''insert into {dbname}.t1 values ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a ) ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a ) @@ -98,11 +96,11 @@ class TDTestCase: ''' ) - tdLog.info(" prepare data for distributed_aggregate done! ") + tdLog.info(f" prepare data for distributed_aggregate done! 
") - def check_distribute_datas(self): + def check_distribute_datas(self, dbname="testdb"): # get vgroup_ids of all - tdSql.query("show vgroups ") + tdSql.query(f"show {dbname}.vgroups ") vgroups = tdSql.queryResult vnode_tables={} @@ -110,9 +108,8 @@ class TDTestCase: for vgroup_id in vgroups: vnode_tables[vgroup_id[0]]=[] - # check sub_table of per vnode ,make sure sub_table has been distributed - tdSql.query("show tables like 'ct%'") + tdSql.query(f"show {dbname}.tables like 'ct%'") table_names = tdSql.queryResult tablenames = [] for table_name in table_names: @@ -124,9 +121,9 @@ class TDTestCase: if len(v)>=2: count+=1 if count < 2: - tdLog.exit(" the datas of all not satisfy sub_table has been distributed ") + tdLog.exit(f" the datas of all not satisfy sub_table has been distributed ") - def check_sum_distribute_diff_vnode(self,col_name): + def check_sum_distribute_diff_vnode(self,col_name, dbname="testdb"): vgroup_ids = [] for k ,v in self.vnode_disbutes.items(): @@ -144,9 +141,9 @@ class TDTestCase: tbname_filters = tbname_ins[:-1] - sum_sql = f"select sum({col_name}) from stb1 where tbname in ({tbname_filters});" + sum_sql = f"select sum({col_name}) from {dbname}.stb1 where tbname in ({tbname_filters});" - same_sql = f"select {col_name} from stb1 where tbname in ({tbname_filters}) and {col_name} is not null " + same_sql = f"select {col_name} from {dbname}.stb1 where tbname in ({tbname_filters}) and {col_name} is not null " tdSql.query(same_sql) pre_data = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None] @@ -157,16 +154,16 @@ class TDTestCase: tdSql.query(sum_sql) tdSql.checkData(0,0,pre_sum) - def check_sum_status(self): + def check_sum_status(self, dbname="testdb"): # check max function work status - tdSql.query("show tables like 'ct%'") + tdSql.query(f"show {dbname}.tables like 'ct%'") table_names = tdSql.queryResult tablenames = [] for table_name in table_names: - tablenames.append(table_name[0]) + tablenames.append(f"{dbname}.{table_name[0]}") - tdSql.query("desc stb1") + tdSql.query(f"desc {dbname}.stb1") col_names = tdSql.queryResult colnames = [] @@ -183,79 +180,75 @@ class TDTestCase: for colname in colnames: if colname.startswith("c"): self.check_sum_distribute_diff_vnode(colname) - else: - # self.check_sum_distribute_diff_vnode(colname) # bug for tag - pass - - def distribute_agg_query(self): + def distribute_agg_query(self, dbname="testdb"): # basic filter - tdSql.query(" select sum(c1) from stb1 ") + tdSql.query(f"select sum(c1) from {dbname}.stb1 ") tdSql.checkData(0,0,2592) - tdSql.query(" select sum(a) from (select sum(c1) a from stb1 partition by tbname) ") + tdSql.query(f"select sum(a) from (select sum(c1) a from {dbname}.stb1 partition by tbname) ") tdSql.checkData(0,0,2592) - tdSql.query(" select sum(c1) from stb1 where t1=1") + tdSql.query(f"select sum(c1) from {dbname}.stb1 where t1=1") tdSql.checkData(0,0,54) - tdSql.query("select sum(c1+c2) from stb1 where c1 =1 ") + tdSql.query(f"select sum(c1+c2) from {dbname}.stb1 where c1 =1 ") tdSql.checkData(0,0,22224.000000000) - tdSql.query("select sum(c1) from stb1 where tbname=\"ct2\"") + tdSql.query(f"select sum(c1) from {dbname}.stb1 where tbname=\"ct2\"") tdSql.checkData(0,0,54) - tdSql.query("select sum(c1) from stb1 partition by tbname") + tdSql.query(f"select sum(c1) from {dbname}.stb1 partition by tbname") tdSql.checkRows(20) - tdSql.query("select sum(c1) from stb1 where t1> 4 partition by tbname") + tdSql.query(f"select sum(c1) from {dbname}.stb1 where t1> 4 partition by tbname") 
tdSql.checkRows(15) # union all - tdSql.query("select sum(c1) from stb1 union all select sum(c1) from stb1 ") + tdSql.query(f"select sum(c1) from {dbname}.stb1 union all select sum(c1) from {dbname}.stb1 ") tdSql.checkRows(2) tdSql.checkData(0,0,2592) - tdSql.query("select sum(a) from (select sum(c1) a from stb1 union all select sum(c1) a from stb1)") + tdSql.query(f"select sum(a) from (select sum(c1) a from {dbname}.stb1 union all select sum(c1) a from {dbname}.stb1)") tdSql.checkRows(1) tdSql.checkData(0,0,5184) # join - tdSql.execute(" create database if not exists db ") tdSql.execute(" use db ") - tdSql.execute(" create stable st (ts timestamp , c1 int ,c2 float) tags(t1 int) ") - tdSql.execute(" create table tb1 using st tags(1) ") - tdSql.execute(" create table tb2 using st tags(2) ") + tdSql.execute(" create stable db.st (ts timestamp , c1 int ,c2 float) tags(t1 int) ") + tdSql.execute(" create table db.tb1 using db.st tags(1) ") + tdSql.execute(" create table db.tb2 using db.st tags(2) ") for i in range(10): ts = i*10 + self.ts - tdSql.execute(f" insert into tb1 values({ts},{i},{i}.0)") - tdSql.execute(f" insert into tb2 values({ts},{i},{i}.0)") + tdSql.execute(f" insert into db.tb1 values({ts},{i},{i}.0)") + tdSql.execute(f" insert into db.tb2 values({ts},{i},{i}.0)") - tdSql.query("select sum(tb1.c1), sum(tb2.c2) from tb1, tb2 where tb1.ts=tb2.ts") + + tdSql.query("select sum(tb1.c1), sum(tb2.c2) from db.tb1 tb1, db.tb2 tb2 where tb1.ts=tb2.ts") tdSql.checkRows(1) tdSql.checkData(0,0,45) tdSql.checkData(0,1,45.000000000) # group by - tdSql.execute(" use testdb ") + tdSql.execute(f"use {dbname} ") # partition by tbname or partition by tag - tdSql.query("select sum(c1) from stb1 partition by tbname") + tdSql.query(f"select sum(c1) from {dbname}.stb1 partition by tbname") tdSql.checkRows(20) # nest query for support max - tdSql.query("select abs(c2+2)+1 from (select sum(c1) c2 from stb1)") + tdSql.query(f"select abs(c2+2)+1 from (select sum(c1) c2 from {dbname}.stb1)") tdSql.checkData(0,0,2595.000000000) - tdSql.query("select sum(c1+2) as c2 from (select ts ,c1 ,c2 from stb1)") + tdSql.query(f"select sum(c1+2) as c2 from (select ts ,c1 ,c2 from {dbname}.stb1)") tdSql.checkData(0,0,2960.000000000) - tdSql.query("select sum(a+2) as c2 from (select ts ,abs(c1) a ,c2 from stb1)") + tdSql.query(f"select sum(a+2) as c2 from (select ts ,abs(c1) a ,c2 from {dbname}.stb1)") tdSql.checkData(0,0,2960.000000000) # mixup with other functions - tdSql.query("select max(c1),count(c1),last(c2,c3),sum(c1+c2) from stb1") + tdSql.query(f"select max(c1),count(c1),last(c2,c3),sum(c1+c2) from {dbname}.stb1") tdSql.checkData(0,0,28) tdSql.checkData(0,1,184) tdSql.checkData(0,2,-99999) diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh index 1f6e8ce1f5..1f0d82754b 100755 --- a/tests/system-test/fulltest.sh +++ b/tests/system-test/fulltest.sh @@ -90,6 +90,12 @@ python3 ./test.py -f 2-query/distribute_agg_max.py python3 ./test.py -f 2-query/distribute_agg_max.py -R python3 ./test.py -f 2-query/distribute_agg_min.py python3 ./test.py -f 2-query/distribute_agg_min.py -R +python3 ./test.py -f 2-query/distribute_agg_spread.py +python3 ./test.py -f 2-query/distribute_agg_spread.py -R +python3 ./test.py -f 2-query/distribute_agg_stddev.py +python3 ./test.py -f 2-query/distribute_agg_stddev.py -R +python3 ./test.py -f 2-query/distribute_agg_sum.py +python3 ./test.py -f 2-query/distribute_agg_sum.py -R @@ -156,9 +162,6 @@ python3 ./test.py -f 2-query/function_stateduration.py python3 ./test.py 
-f 2-query/statecount.py python3 ./test.py -f 2-query/tail.py python3 ./test.py -f 2-query/ttl_comment.py -python3 ./test.py -f 2-query/distribute_agg_sum.py -python3 ./test.py -f 2-query/distribute_agg_spread.py -python3 ./test.py -f 2-query/distribute_agg_stddev.py python3 ./test.py -f 2-query/twa.py python3 ./test.py -f 2-query/irate.py python3 ./test.py -f 2-query/function_null.py @@ -195,7 +198,7 @@ python3 ./test.py -f 6-cluster/5dnode3mnodeRecreateMnode.py -N 5 -M 3 python3 ./test.py -f 6-cluster/5dnode3mnodeStopFollowerLeader.py -N 5 -M 3 python3 ./test.py -f 6-cluster/5dnode3mnodeStop2Follower.py -N 5 -M 3 -# vnode case +# vnode case python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_createDb_replica1.py -N 4 -M 1 python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas.py -N 4 -M 1 python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas_querys.py -N 4 -M 1 @@ -214,8 +217,8 @@ python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_query # python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_mnode3_insertdatas_querys.py -N 4 -M 1 # python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower_force_stop.py -N 4 -M 1 # python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower.py -N 4 -M 1 -# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader_force_stop.py -N 4 -M 1 -# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader.py -N 4 -M 1 +# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader_force_stop.py -N 4 -M 1 +# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader.py -N 4 -M 1 python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups.py -N 4 -M 1 # python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups_stopOne.py -N 4 -M 1 From f83ca89ea22746528b26d0caf502b6140a7ebab1 Mon Sep 17 00:00:00 2001 From: Minghao Li Date: Sat, 6 Aug 2022 19:40:10 +0800 Subject: [PATCH 03/36] refactor(sync): make leader life longer --- source/common/src/tglobal.c | 3 +- source/libs/sync/inc/syncRaftLog.h | 2 + source/libs/sync/src/syncAppendEntries.c | 112 ++++++++++++++--------- source/libs/sync/src/syncMain.c | 3 + source/libs/sync/src/syncRaftLog.c | 18 +++- 5 files changed, 91 insertions(+), 47 deletions(-) diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index f836cd76ac..a0f02d96f9 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -401,7 +401,8 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { tsNumOfVnodeWriteThreads = TMAX(tsNumOfVnodeWriteThreads, 1); if (cfgAddInt32(pCfg, "numOfVnodeWriteThreads", tsNumOfVnodeWriteThreads, 1, 1024, 0) != 0) return -1; - tsNumOfVnodeSyncThreads = tsNumOfCores; + // tsNumOfVnodeSyncThreads = tsNumOfCores; + tsNumOfVnodeSyncThreads = 32; tsNumOfVnodeSyncThreads = TMAX(tsNumOfVnodeSyncThreads, 1); if (cfgAddInt32(pCfg, "numOfVnodeSyncThreads", tsNumOfVnodeSyncThreads, 1, 1024, 0) != 0) return -1; diff --git a/source/libs/sync/inc/syncRaftLog.h b/source/libs/sync/inc/syncRaftLog.h index 65ec77e38f..ff59189a9d 100644 --- a/source/libs/sync/inc/syncRaftLog.h +++ b/source/libs/sync/inc/syncRaftLog.h @@ -47,6 +47,8 @@ char* logStoreSimple2Str(SSyncLogStore* pLogStore); SyncIndex logStoreFirstIndex(SSyncLogStore* pLogStore); +SyncIndex logStoreWalCommitVer(SSyncLogStore* pLogStore); + // for debug void logStorePrint(SSyncLogStore* 
pLogStore); void logStorePrint2(char* s, SSyncLogStore* pLogStore); diff --git a/source/libs/sync/src/syncAppendEntries.c b/source/libs/sync/src/syncAppendEntries.c index f31f3dd1ae..85d1fa6d31 100644 --- a/source/libs/sync/src/syncAppendEntries.c +++ b/source/libs/sync/src/syncAppendEntries.c @@ -357,16 +357,14 @@ static int32_t syncNodeMakeLogSame(SSyncNode* ths, SyncAppendEntries* pMsg) { code = ths->pLogStore->syncLogTruncate(ths->pLogStore, delBegin); ASSERT(code == 0); - char eventLog[128]; - snprintf(eventLog, sizeof(eventLog), "log truncate, from %" PRId64 " to %" PRId64, delBegin, delEnd); - syncNodeEventLog(ths, eventLog); - logStoreSimpleLog2("after syncNodeMakeLogSame", ths->pLogStore); - return code; } +// if FromIndex > walCommitVer, return 0 +// else return num of pass entries static int32_t syncNodeDoMakeLogSame(SSyncNode* ths, SyncIndex FromIndex) { - int32_t code; + int32_t code = 0; + int32_t pass = 0; SyncIndex delBegin = FromIndex; SyncIndex delEnd = ths->pLogStore->syncLogLastIndex(ths->pLogStore); @@ -398,16 +396,31 @@ static int32_t syncNodeDoMakeLogSame(SSyncNode* ths, SyncIndex FromIndex) { } } + // update delete begin + SyncIndex walCommitVer = logStoreWalCommitVer(ths->pLogStore); + + if (delBegin <= walCommitVer) { + delBegin = walCommitVer + 1; + pass = walCommitVer - delBegin + 1; + + do { + char logBuf[128]; + snprintf(logBuf, sizeof(logBuf), "update delete begin to %ld", delBegin); + syncNodeEventLog(ths, logBuf); + } while (0); + } + // delete confict entries code = ths->pLogStore->syncLogTruncate(ths->pLogStore, delBegin); ASSERT(code == 0); - char eventLog[128]; - snprintf(eventLog, sizeof(eventLog), "log truncate, from %" PRId64 " to %" PRId64, delBegin, delEnd); - syncNodeEventLog(ths, eventLog); - logStoreSimpleLog2("after syncNodeMakeLogSame", ths->pLogStore); + do { + char logBuf[128]; + snprintf(logBuf, sizeof(logBuf), "make log same from:%ld, delbegin:%ld, pass:%d", FromIndex, delBegin, pass); + syncNodeEventLog(ths, logBuf); + } while (0); - return code; + return pass; } int32_t syncNodePreCommit(SSyncNode* ths, SSyncRaftEntry* pEntry, int32_t code) { @@ -543,31 +556,34 @@ int32_t syncNodeOnAppendEntriesSnapshot2Cb(SSyncNode* ths, SyncAppendEntriesBatc SOffsetAndContLen* metaTableArr = syncAppendEntriesBatchMetaTableArray(pMsg); if (hasAppendEntries && pMsg->prevLogIndex == ths->commitIndex) { + int32_t pass = 0; + SyncIndex logLastIndex = ths->pLogStore->syncLogLastIndex(ths->pLogStore); + bool hasExtraEntries = logLastIndex > pMsg->prevLogIndex; + // make log same - do { - SyncIndex logLastIndex = ths->pLogStore->syncLogLastIndex(ths->pLogStore); - bool hasExtraEntries = logLastIndex > pMsg->prevLogIndex; - - if (hasExtraEntries) { - // make log same, rollback deleted entries - code = syncNodeDoMakeLogSame(ths, pMsg->prevLogIndex + 1); - ASSERT(code == 0); - } - - } while (0); + if (hasExtraEntries) { + // make log same, rollback deleted entries + pass = syncNodeDoMakeLogSame(ths, pMsg->prevLogIndex + 1); + ASSERT(pass >= 0); + } // append entry batch - for (int32_t i = 0; i < pMsg->dataCount; ++i) { - SSyncRaftEntry* pAppendEntry = (SSyncRaftEntry*)(pMsg->data + metaTableArr[i].offset); - code = ths->pLogStore->syncLogAppendEntry(ths->pLogStore, pAppendEntry); - if (code != 0) { - return -1; + if (pass == 0) { + // assert! 
no batch + ASSERT(pMsg->dataCount == 1); + + for (int32_t i = 0; i < pMsg->dataCount; ++i) { + SSyncRaftEntry* pAppendEntry = (SSyncRaftEntry*)(pMsg->data + metaTableArr[i].offset); + code = ths->pLogStore->syncLogAppendEntry(ths->pLogStore, pAppendEntry); + if (code != 0) { + return -1; + } + + code = syncNodePreCommit(ths, pAppendEntry, 0); + ASSERT(code == 0); + + // syncEntryDestory(pAppendEntry); } - - code = syncNodePreCommit(ths, pAppendEntry, 0); - ASSERT(code == 0); - - // syncEntryDestory(pAppendEntry); } // fsync once @@ -670,25 +686,33 @@ int32_t syncNodeOnAppendEntriesSnapshot2Cb(SSyncNode* ths, SyncAppendEntriesBatc syncLogRecvAppendEntriesBatch(ths, pMsg, "really match"); + int32_t pass = 0; + if (hasExtraEntries) { // make log same, rollback deleted entries - code = syncNodeDoMakeLogSame(ths, pMsg->prevLogIndex + 1); - ASSERT(code == 0); + pass = syncNodeDoMakeLogSame(ths, pMsg->prevLogIndex + 1); + ASSERT(pass >= 0); } if (hasAppendEntries) { // append entry batch - for (int32_t i = 0; i < pMsg->dataCount; ++i) { - SSyncRaftEntry* pAppendEntry = (SSyncRaftEntry*)(pMsg->data + metaTableArr[i].offset); - code = ths->pLogStore->syncLogAppendEntry(ths->pLogStore, pAppendEntry); - if (code != 0) { - return -1; + if (pass == 0) { + // assert! no batch + ASSERT(pMsg->dataCount == 1); + + // append entry batch + for (int32_t i = 0; i < pMsg->dataCount; ++i) { + SSyncRaftEntry* pAppendEntry = (SSyncRaftEntry*)(pMsg->data + metaTableArr[i].offset); + code = ths->pLogStore->syncLogAppendEntry(ths->pLogStore, pAppendEntry); + if (code != 0) { + return -1; + } + + code = syncNodePreCommit(ths, pAppendEntry, 0); + ASSERT(code == 0); + + // syncEntryDestory(pAppendEntry); } - - code = syncNodePreCommit(ths, pAppendEntry, 0); - ASSERT(code == 0); - - // syncEntryDestory(pAppendEntry); } // fsync once diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c index daa65add3c..389eda0f8b 100644 --- a/source/libs/sync/src/syncMain.c +++ b/source/libs/sync/src/syncMain.c @@ -2409,6 +2409,9 @@ static void syncNodeEqElectTimer(void* param, void* tmrId) { static void syncNodeEqHeartbeatTimer(void* param, void* tmrId) { SSyncNode* pSyncNode = (SSyncNode*)param; + + syncNodeEventLog(pSyncNode, "eq hb timer"); + if (pSyncNode->replicaNum > 1) { if (atomic_load_64(&pSyncNode->heartbeatTimerLogicClockUser) <= atomic_load_64(&pSyncNode->heartbeatTimerLogicClock)) { diff --git a/source/libs/sync/src/syncRaftLog.c b/source/libs/sync/src/syncRaftLog.c index b575e40d86..0649e064e4 100644 --- a/source/libs/sync/src/syncRaftLog.c +++ b/source/libs/sync/src/syncRaftLog.c @@ -305,10 +305,18 @@ static int32_t raftLogGetEntry(struct SSyncLogStore* pLogStore, SyncIndex index, return code; } +// truncate semantic static int32_t raftLogTruncate(struct SSyncLogStore* pLogStore, SyncIndex fromIndex) { SSyncLogStoreData* pData = pLogStore->data; SWal* pWal = pData->pWal; - int32_t code = walRollback(pWal, fromIndex); + + // need not truncate + SyncIndex wallastVer = walGetLastVer(pWal); + if (fromIndex > wallastVer) { + return 0; + } + + int32_t code = walRollback(pWal, fromIndex); if (code != 0) { int32_t err = terrno; const char* errStr = tstrerror(err); @@ -323,7 +331,7 @@ static int32_t raftLogTruncate(struct SSyncLogStore* pLogStore, SyncIndex fromIn // event log do { char logBuf[128]; - snprintf(logBuf, sizeof(logBuf), "wal truncate, from-index:%" PRId64, fromIndex); + snprintf(logBuf, sizeof(logBuf), "log truncate, from-index:%" PRId64, fromIndex); syncNodeEventLog(pData->pSyncNode, 
logBuf); } while (0); @@ -637,6 +645,12 @@ SyncIndex logStoreFirstIndex(SSyncLogStore* pLogStore) { return walGetFirstVer(pWal); } +SyncIndex logStoreWalCommitVer(SSyncLogStore* pLogStore) { + SSyncLogStoreData* pData = pLogStore->data; + SWal* pWal = pData->pWal; + return walGetCommittedVer(pWal); +} + // for debug ----------------- void logStorePrint(SSyncLogStore* pLogStore) { char* serialized = logStore2Str(pLogStore); From f8b0c98a71eaca40d3841565d3c8554648b6f42c Mon Sep 17 00:00:00 2001 From: Minghao Li Date: Sat, 6 Aug 2022 20:44:14 +0800 Subject: [PATCH 04/36] refactor(sync): make leader life longer --- source/libs/sync/src/syncCommit.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/source/libs/sync/src/syncCommit.c b/source/libs/sync/src/syncCommit.c index a603cfff27..3a94ed9713 100644 --- a/source/libs/sync/src/syncCommit.c +++ b/source/libs/sync/src/syncCommit.c @@ -92,6 +92,12 @@ void syncMaybeAdvanceCommitIndex(SSyncNode* pSyncNode) { } } + // advance commit index as large as possible + SyncIndex walCommitVer = logStoreWalCommitVer(pSyncNode->pLogStore); + if (walCommitVer > newCommitIndex) { + newCommitIndex = walCommitVer; + } + // maybe execute fsm if (newCommitIndex > pSyncNode->commitIndex) { SyncIndex beginIndex = pSyncNode->commitIndex + 1; From 2f1bf1eba911a1d79ebe174e631f9b0d69a4a585 Mon Sep 17 00:00:00 2001 From: Minghao Li Date: Sat, 6 Aug 2022 21:41:03 +0800 Subject: [PATCH 05/36] refactor(sync): make leader life longer --- source/libs/sync/src/syncAppendEntries.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/libs/sync/src/syncAppendEntries.c b/source/libs/sync/src/syncAppendEntries.c index 85d1fa6d31..4f93d8197d 100644 --- a/source/libs/sync/src/syncAppendEntries.c +++ b/source/libs/sync/src/syncAppendEntries.c @@ -570,7 +570,7 @@ int32_t syncNodeOnAppendEntriesSnapshot2Cb(SSyncNode* ths, SyncAppendEntriesBatc // append entry batch if (pass == 0) { // assert! no batch - ASSERT(pMsg->dataCount == 1); + ASSERT(pMsg->dataCount <= 1); for (int32_t i = 0; i < pMsg->dataCount; ++i) { SSyncRaftEntry* pAppendEntry = (SSyncRaftEntry*)(pMsg->data + metaTableArr[i].offset); @@ -698,7 +698,7 @@ int32_t syncNodeOnAppendEntriesSnapshot2Cb(SSyncNode* ths, SyncAppendEntriesBatc // append entry batch if (pass == 0) { // assert! 
no batch - ASSERT(pMsg->dataCount == 1); + ASSERT(pMsg->dataCount <= 1); // append entry batch for (int32_t i = 0; i < pMsg->dataCount; ++i) { From bfb6bce0073e2461a7be08e0b7242d069cc46872 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Sun, 7 Aug 2022 19:39:24 +0800 Subject: [PATCH 06/36] fix read error --- source/libs/transport/src/transCli.c | 1 + 1 file changed, 1 insertion(+) diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index dc7ffa9b13..0b8829a0c0 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -482,6 +482,7 @@ void cliReadTimeoutCb(uv_timer_t* handle) { // set up timeout cb SCliConn* conn = handle->data; tTrace("%s conn %p timeout, ref:%d", CONN_GET_INST_LABEL(conn), conn, T_REF_VAL_GET(conn)); + uv_read_stop(conn->stream); cliHandleExceptImpl(conn, TSDB_CODE_RPC_TIMEOUT); } From 9c38e704d9f6550d0cc9e8b39e03604bb4336bbb Mon Sep 17 00:00:00 2001 From: afwerar <1296468573@qq.com> Date: Mon, 8 Aug 2022 09:34:12 +0800 Subject: [PATCH 07/36] build: add win package --- cmake/cmake.install | 4 +- contrib/CMakeLists.txt | 17 ++++ include/os/osDef.h | 4 +- packaging/release.bat | 109 ++++++++++++--------- packaging/release.sh | 2 +- packaging/tools/favicon.ico | Bin 0 -> 4286 bytes packaging/tools/install.sh | 1 - packaging/tools/makepkg.sh | 3 +- packaging/tools/taos.bat | 6 ++ packaging/tools/tdengine.iss | 81 +++++++++++++++ packaging/tools/windows_before_install.txt | 3 + source/os/src/osAtomic.c | 22 ++++- source/os/src/osString.c | 4 +- 13 files changed, 198 insertions(+), 58 deletions(-) create mode 100644 packaging/tools/favicon.ico create mode 100644 packaging/tools/taos.bat create mode 100644 packaging/tools/tdengine.iss create mode 100644 packaging/tools/windows_before_install.txt diff --git a/cmake/cmake.install b/cmake/cmake.install index 27edd8e27a..d9f759217a 100644 --- a/cmake/cmake.install +++ b/cmake/cmake.install @@ -10,7 +10,9 @@ ELSEIF (TD_WINDOWS) # INSTALL(DIRECTORY ${TD_SOURCE_DIR}/src/connector/python DESTINATION connector) # INSTALL(DIRECTORY ${TD_SOURCE_DIR}/src/connector/C\# DESTINATION connector) # INSTALL(DIRECTORY ${TD_SOURCE_DIR}/examples DESTINATION .) 
- INSTALL(FILES ${TD_SOURCE_DIR}/packaging/cfg/taos.cfg DESTINATION cfg) + INSTALL(CODE "IF (NOT EXISTS ${CMAKE_INSTALL_PREFIX}/cfg/taos.cfg) + execute_process(COMMAND ${CMAKE_COMMAND} -E copy ${TD_SOURCE_DIR}/packaging/cfg/taos.cfg ${CMAKE_INSTALL_PREFIX}/cfg/taos.cfg) + ENDIF ()") INSTALL(FILES ${TD_SOURCE_DIR}/include/client/taos.h DESTINATION include) INSTALL(FILES ${TD_SOURCE_DIR}/include/util/taoserror.h DESTINATION include) INSTALL(FILES ${TD_SOURCE_DIR}/include/libs/function/taosudf.h DESTINATION include) diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index b4e8825431..de7b75a245 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -27,6 +27,10 @@ else () cat("${TD_SUPPORT_DIR}/taosadapter_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) endif() +if(TD_LINUX_64 AND JEMALLOC_ENABLED) + cat("${TD_SUPPORT_DIR}/jemalloc_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) +endif() + # pthread if(${BUILD_PTHREAD}) cat("${TD_SUPPORT_DIR}/pthread_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) @@ -392,6 +396,19 @@ if(${BUILD_WITH_SQLITE}) endif(NOT TD_WINDOWS) endif(${BUILD_WITH_SQLITE}) +# jemalloc +IF (TD_LINUX_64 AND JEMALLOC_ENABLED) + include(ExternalProject) + ExternalProject_Add(jemalloc + PREFIX "jemalloc" + SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/jemalloc + BUILD_IN_SOURCE 1 + CONFIGURE_COMMAND ./autogen.sh COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ + BUILD_COMMAND ${MAKE} + ) + INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/build/include) +ENDIF () + # addr2line if(${BUILD_ADDR2LINE}) if(NOT ${TD_WINDOWS}) diff --git a/include/os/osDef.h b/include/os/osDef.h index 14f38eb7ff..be8689398d 100644 --- a/include/os/osDef.h +++ b/include/os/osDef.h @@ -57,7 +57,7 @@ extern "C" { #if defined(WINDOWS) char *stpcpy (char *dest, const char *src); - char *stpncpy (char *dest, const char *src, size_t n); + char *stpncpy (char *dest, const char *src, int n); // specific #ifndef __COMPAR_FN_T @@ -77,7 +77,7 @@ extern "C" { char * strsep(char **stringp, const char *delim); char * getpass(const char *prefix); - char * strndup(const char *s, size_t n); + char * strndup(const char *s, int n); // for send function in tsocket.c #define MSG_NOSIGNAL 0 diff --git a/packaging/release.bat b/packaging/release.bat index c1cf7875a5..d58e19cece 100644 --- a/packaging/release.bat +++ b/packaging/release.bat @@ -2,61 +2,78 @@ set internal_dir=%~dp0\..\..\ set community_dir=%~dp0\.. -cd %community_dir% -git checkout -- . -cd %community_dir%\packaging +set package_dir=%cd% :: %1 name %2 version if !%1==! GOTO USAGE if !%2==! 
GOTO USAGE -if %1 == taos GOTO TAOS -if %1 == power GOTO POWER -if %1 == tq GOTO TQ -if %1 == pro GOTO PRO -if %1 == kh GOTO KH -if %1 == jh GOTO JH -GOTO USAGE -:TAOS -goto RELEASE - -:POWER -call sed_power.bat %community_dir% -goto RELEASE - -:TQ -call sed_tq.bat %community_dir% -goto RELEASE - -:PRO -call sed_pro.bat %community_dir% -goto RELEASE - -:KH -call sed_kh.bat %community_dir% -goto RELEASE - -:JH -call sed_jh.bat %community_dir% -goto RELEASE - -:RELEASE -echo release windows-client-64 for %1, version: %2 -if not exist %internal_dir%\debug\ver-%2-64bit-%1 ( - md %internal_dir%\debug\ver-%2-64bit-%1 +if "%1" == "cluster" ( + set work_dir=%internal_dir% + set packagServerName_x64=TDengine-enterprise-server-%2-beta-Windows-x64 + set packagServerName_x86=TDengine-enterprise-server-%2-beta-Windows-x86 + set packagClientName_x64=TDengine-enterprise-client-%2-beta-Windows-x64 + set packagClientName_x86=TDengine-enterprise-client-%2-beta-Windows-x86 ) else ( - rd /S /Q %internal_dir%\debug\ver-%2-64bit-%1 - md %internal_dir%\debug\ver-%2-64bit-%1 + set work_dir=%community_dir% + set packagServerName_x64=TDengine-server-%2-Windows-x64 + set packagServerName_x86=TDengine-server-%2-Windows-x86 + set packagClientName_x64=TDengine-client-%2-Windows-x64 + set packagClientName_x86=TDengine-client-%2-Windows-x86 ) -cd %internal_dir%\debug\ver-%2-64bit-%1 -call "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" amd64 -cmake ../../ -G "NMake Makefiles" -DVERNUMBER=%2 -DCPUTYPE=x64 -set CL=/MP4 -nmake install + +echo release windows-client for %1, version: %2 +if not exist %work_dir%\debug ( + md %work_dir%\debug +) +if not exist %work_dir%\debug\ver-%2-x64 ( + md %work_dir%\debug\ver-%2-x64 +) else ( + rd /S /Q %work_dir%\debug\ver-%2-x64 + md %work_dir%\debug\ver-%2-x64 +) +if not exist %work_dir%\debug\ver-%2-x86 ( + md %work_dir%\debug\ver-%2-x86 +) else ( + rd /S /Q %work_dir%\debug\ver-%2-x86 + md %work_dir%\debug\ver-%2-x86 +) +cd %work_dir%\debug\ver-%2-x64 +call vcvarsall.bat x64 +cmake ../../ -G "NMake Makefiles JOM" -DCMAKE_MAKE_PROGRAM=jom -DBUILD_TOOLS=true -DBUILD_HTTP=false -DVERNUMBER=%2 -DCPUTYPE=x64 +cmake --build . +rd /s /Q C:\TDengine +cmake --install . +if not %errorlevel% == 0 ( call :RUNFAILED build x64 failed & exit /b 1) +cd %package_dir% +iscc /DMyAppInstallName="%packagServerName_x64%" /DMyAppVersion="%2" /DMyAppExcludeSource="" tools\tdengine.iss /O..\release +if not %errorlevel% == 0 ( call :RUNFAILED package %packagServerName_x64% failed & exit /b 1) +iscc /DMyAppInstallName="%packagClientName_x64%" /DMyAppVersion="%2" /DMyAppExcludeSource="taosd.exe" tools\tdengine.iss /O..\release +if not %errorlevel% == 0 ( call :RUNFAILED package %packagClientName_x64% failed & exit /b 1) + +cd %work_dir%\debug\ver-%2-x86 +call vcvarsall.bat x86 +cmake ../../ -G "NMake Makefiles JOM" -DCMAKE_MAKE_PROGRAM=jom -DBUILD_TOOLS=true -DBUILD_HTTP=false -DVERNUMBER=%2 -DCPUTYPE=x86 +cmake --build . +rd /s /Q C:\TDengine +cmake --install . 
+if not %errorlevel% == 0 ( call :RUNFAILED build x86 failed & exit /b 1)
+cd %package_dir%
+iscc /DMyAppInstallName="%packagServerName_x86%" /DMyAppVersion="%2" /DMyAppExcludeSource="" tools\tdengine.iss /O..\release
+if not %errorlevel% == 0 ( call :RUNFAILED package %packagServerName_x86% failed & exit /b 1)
+iscc /DMyAppInstallName="%packagClientName_x86%" /DMyAppVersion="%2" /DMyAppExcludeSource="taosd.exe" tools\tdengine.iss /O..\release
+if not %errorlevel% == 0 ( call :RUNFAILED package %packagClientName_x86% failed & exit /b 1)
+
 goto EXIT0

 :USAGE
-echo Usage: release.bat $productName $version
+echo Usage: release.bat $verMode $version
 goto EXIT0

-:EXIT0
\ No newline at end of file
+:EXIT0
+exit /b
+
+:RUNFAILED
+echo %*
+cd %package_dir%
+goto :eof
\ No newline at end of file
diff --git a/packaging/release.sh b/packaging/release.sh
index 3426c2856d..09781dbe8e 100755
--- a/packaging/release.sh
+++ b/packaging/release.sh
@@ -26,7 +26,7 @@ soMode=dynamic  # [static | dynamic]
 dbName=taos     # [taos | ...]
 allocator=glibc # [glibc | jemalloc]
 verNumber=""
-verNumberComp="2.0.0.0"
+verNumberComp="3.0.0.0"
 httpdBuild=false

 while getopts "hv:V:c:o:l:s:d:a:n:m:H:" arg; do
diff --git a/packaging/tools/favicon.ico b/packaging/tools/favicon.ico
new file mode 100644
index 0000000000000000000000000000000000000000..20b8026d1d2efd6d77b013a1b94cee7aaed11772
GIT binary patch
literal 4286
[4286 bytes of base85-encoded icon data omitted; the tail of this binary patch and the opening mail-header line of PATCH 08/36 were lost in extraction]
Date: Mon, 8 Aug 2022 09:57:34 +0800
Subject: [PATCH 08/36] build: add win package

---
 source/os/src/osAtomic.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/source/os/src/osAtomic.c b/source/os/src/osAtomic.c
index a65801df49..a9eb23abba 100644
--- a/source/os/src/osAtomic.c
+++ b/source/os/src/osAtomic.c
@@ -385,7 +385,8 @@ int64_t atomic_exchange_64(int64_t volatile* ptr, int64_t val) {
   return _InterlockedExchange((int32_t volatile*)(ptr), (int32_t)(val));
 #else
   return _InterlockedExchange64((int64_t volatile*)(ptr), (int64_t)(val));
-#endif#elif defined(_TD_NINGSI_60)
+#endif
+#elif defined(_TD_NINGSI_60)
   return atomic_exchange_64_impl((int64_t*)ptr, (int64_t)val);
 #else
   return __atomic_exchange_n((ptr), (val), __ATOMIC_SEQ_CST);
@@ -542,7 +543,8 @@ int64_t atomic_fetch_add_64(int64_t volatile* ptr, int64_t val) {
   return _InterlockedExchangeAdd((int32_t volatile*)(ptr), (int32_t)(val));
 #else
   return _InterlockedExchangeAdd64((int64_t volatile*)(ptr), (int64_t)(val));
-#endif#elif defined(_TD_NINGSI_60)
+#endif
+#elif defined(_TD_NINGSI_60)
   return __sync_fetch_and_add((ptr), (val));
 #else
   return __atomic_fetch_add((ptr), (val), __ATOMIC_SEQ_CST);

From 0aac37a45c5b262cd32ad622ee042217c1352bd4 Mon Sep 17 00:00:00 2001
From: Cary Xu
Date: Mon, 8 Aug 2022 11:25:32 +0800
Subject: [PATCH 09/36] enh: rsma fetch logic optimization

---
 include/common/tdatablock.h                   |   2 +-
 include/common/tmsgdef.h                      |   1 +
 source/common/src/tdatablock.c                |  16 +-
 source/common/src/tglobal.c                   |   3 +-
 source/dnode/mgmt/mgmt_vnode/src/vmHandle.c   |   1 +
 source/dnode/vnode/src/inc/vnodeInt.h         |   1 +
 source/dnode/vnode/src/sma/smaRollup.c        | 174 ++++++++++++------
 source/dnode/vnode/src/vnd/vnodeSvr.c         |   2 +
 source/libs/sync/inc/syncRaftLog.h            |   2 +
 source/libs/sync/src/syncAppendEntries.c      | 112 ++++++-----
 source/libs/sync/src/syncCommit.c             |   6 +
 source/libs/sync/src/syncMain.c               |   3 +
 source/libs/sync/src/syncRaftLog.c            |  18 +-
 .../script/tsim/sma/rsmaCreateInsertQuery.sim |   4 +-
 .../tsim/sma/rsmaPersistenceRecovery.sim      |   8 +-
 15 files changed, 234 insertions(+), 119 deletions(-)

diff --git a/include/common/tdatablock.h b/include/common/tdatablock.h
index 7839859e8b..9ca67056c6 100644
--- a/include/common/tdatablock.h
+++ b/include/common/tdatablock.h
@@ -246,7 +246,7 @@ void blockDebugShowDataBlocks(const SArray* dataBlocks, const char* flag);
 // for debug
 char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** dumpBuf);

-int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks, STSchema* pTSchema, int32_t vgId,
+int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SSDataBlock* pDataBlocks, STSchema* pTSchema, int32_t vgId,
                                     tb_uid_t suid);

 char* buildCtbNameByGroupId(const char* stbName, uint64_t groupId);
diff --git a/include/common/tmsgdef.h b/include/common/tmsgdef.h
index 20dc04631e..9ddb872007 100644
--- a/include/common/tmsgdef.h
+++ b/include/common/tmsgdef.h
@@ -200,6 +200,7 @@ enum {
   TD_DEF_MSG_TYPE(TDMT_VND_CANCEL_SMA, "vnode-cancel-sma", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_VND_DROP_SMA, "vnode-drop-sma", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_VND_SUBMIT_RSMA, "vnode-submit-rsma",
SSubmitReq, SSubmitRsp) + TD_DEF_MSG_TYPE(TDMT_VND_FETCH_RSMA, "vnode-fetch-rsma", SRSmaFetchMsg, NULL) TD_DEF_MSG_TYPE(TDMT_VND_DELETE, "delete-data", SVDeleteReq, SVDeleteRsp) TD_DEF_MSG_TYPE(TDMT_VND_ALTER_CONFIG, "alter-config", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_ALTER_REPLICA, "alter-replica", NULL, NULL) diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 302874962e..bf33976c08 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -1874,21 +1874,20 @@ char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf) * @brief TODO: Assume that the final generated result it less than 3M * * @param pReq - * @param pDataBlocks + * @param pDataBlock * @param vgId - * @param suid // TODO: check with Liao whether suid response is reasonable + * @param suid * - * TODO: colId should be set */ -int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks, STSchema* pTSchema, int32_t vgId, +int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SSDataBlock* pDataBlock, STSchema* pTSchema, int32_t vgId, tb_uid_t suid) { - int32_t sz = taosArrayGetSize(pDataBlocks); int32_t bufSize = sizeof(SSubmitReq); + int32_t sz = 1; for (int32_t i = 0; i < sz; ++i) { - SDataBlockInfo* pBlkInfo = &((SSDataBlock*)taosArrayGet(pDataBlocks, i))->info; + const SDataBlockInfo* pBlkInfo = &pDataBlock->info; - int32_t numOfCols = taosArrayGetSize(pDataBlocks); - bufSize += pBlkInfo->rows * (TD_ROW_HEAD_LEN + pBlkInfo->rowSize + BitmapLen(numOfCols)); + int32_t colNum = taosArrayGetSize(pDataBlock->pDataBlock); + bufSize += pBlkInfo->rows * (TD_ROW_HEAD_LEN + pBlkInfo->rowSize + BitmapLen(colNum)); bufSize += sizeof(SSubmitBlk); } @@ -1905,7 +1904,6 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks tdSRowInit(&rb, pTSchema->version); for (int32_t i = 0; i < sz; ++i) { - SSDataBlock* pDataBlock = taosArrayGet(pDataBlocks, i); int32_t colNum = taosArrayGetSize(pDataBlock->pDataBlock); int32_t rows = pDataBlock->info.rows; // int32_t rowSize = pDataBlock->info.rowSize; diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index f836cd76ac..a0f02d96f9 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -401,7 +401,8 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { tsNumOfVnodeWriteThreads = TMAX(tsNumOfVnodeWriteThreads, 1); if (cfgAddInt32(pCfg, "numOfVnodeWriteThreads", tsNumOfVnodeWriteThreads, 1, 1024, 0) != 0) return -1; - tsNumOfVnodeSyncThreads = tsNumOfCores; + // tsNumOfVnodeSyncThreads = tsNumOfCores; + tsNumOfVnodeSyncThreads = 32; tsNumOfVnodeSyncThreads = TMAX(tsNumOfVnodeSyncThreads, 1); if (cfgAddInt32(pCfg, "numOfVnodeSyncThreads", tsNumOfVnodeSyncThreads, 1, 1024, 0) != 0) return -1; diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c index eca61dd960..1b799b1e5e 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c @@ -347,6 +347,7 @@ SArray *vmGetMsgHandles() { if (dmSetMgmtHandle(pArray, TDMT_VND_TABLES_META, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_SCH_CANCEL_TASK, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_SCH_DROP_TASK, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER; + if (dmSetMgmtHandle(pArray, TDMT_VND_FETCH_RSMA, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_CREATE_STB, vmPutMsgToWriteQueue, 0) == NULL) 
goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_DROP_TTL_TABLE, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_STB, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER; diff --git a/source/dnode/vnode/src/inc/vnodeInt.h b/source/dnode/vnode/src/inc/vnodeInt.h index 4c6320ecb5..47f7d209b3 100644 --- a/source/dnode/vnode/src/inc/vnodeInt.h +++ b/source/dnode/vnode/src/inc/vnodeInt.h @@ -187,6 +187,7 @@ int32_t smaAsyncPreCommit(SSma* pSma); int32_t smaAsyncCommit(SSma* pSma); int32_t smaAsyncPostCommit(SSma* pSma); int32_t smaDoRetention(SSma* pSma, int64_t now); +int32_t smaProcessFetch(SSma *pSma, void* pMsg); int32_t tdProcessTSmaCreate(SSma* pSma, int64_t version, const char* msg); int32_t tdProcessTSmaInsert(SSma* pSma, int64_t indexUid, const char* msg); diff --git a/source/dnode/vnode/src/sma/smaRollup.c b/source/dnode/vnode/src/sma/smaRollup.c index fd2222c5e4..6b882251f4 100644 --- a/source/dnode/vnode/src/sma/smaRollup.c +++ b/source/dnode/vnode/src/sma/smaRollup.c @@ -36,19 +36,17 @@ static int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int32_t inputT int8_t level); static SRSmaInfo *tdAcquireRSmaInfoBySuid(SSma *pSma, int64_t suid); static void tdReleaseRSmaInfo(SSma *pSma, SRSmaInfo *pInfo); - -static int32_t tdRSmaFetchAndSubmitResult(qTaskInfo_t taskInfo, SRSmaInfoItem *pItem, STSchema *pTSchema, int64_t suid, - SRSmaStat *pStat, int8_t blkType); -static void tdRSmaFetchTrigger(void *param, void *tmrId); - -static int32_t tdRSmaQTaskInfoIterInit(SRSmaQTaskInfoIter *pIter, STFile *pTFile); -static int32_t tdRSmaQTaskInfoIterNextBlock(SRSmaQTaskInfoIter *pIter, bool *isFinish); -static int32_t tdRSmaQTaskInfoRestore(SSma *pSma, int8_t type, SRSmaQTaskInfoIter *pIter); -static int32_t tdRSmaQTaskInfoItemRestore(SSma *pSma, const SRSmaQTaskInfoItem *infoItem); - -static int32_t tdRSmaRestoreQTaskInfoInit(SSma *pSma, int64_t *nTables); -static int32_t tdRSmaRestoreQTaskInfoReload(SSma *pSma, int8_t type, int64_t qTaskFileVer); -static int32_t tdRSmaRestoreTSDataReload(SSma *pSma); +static int32_t tdRSmaFetchAndSubmitResult(SSma *pSma, qTaskInfo_t taskInfo, SRSmaInfoItem *pItem, STSchema *pTSchema, + int64_t suid, int8_t blkType); +static void tdRSmaFetchTrigger(void *param, void *tmrId); +static int32_t tdRSmaFetchSend(SSma *pSma, SRSmaInfo *pInfo, int8_t level); +static int32_t tdRSmaQTaskInfoIterInit(SRSmaQTaskInfoIter *pIter, STFile *pTFile); +static int32_t tdRSmaQTaskInfoIterNextBlock(SRSmaQTaskInfoIter *pIter, bool *isFinish); +static int32_t tdRSmaQTaskInfoRestore(SSma *pSma, int8_t type, SRSmaQTaskInfoIter *pIter); +static int32_t tdRSmaQTaskInfoItemRestore(SSma *pSma, const SRSmaQTaskInfoItem *infoItem); +static int32_t tdRSmaRestoreQTaskInfoInit(SSma *pSma, int64_t *nTables); +static int32_t tdRSmaRestoreQTaskInfoReload(SSma *pSma, int8_t type, int64_t qTaskFileVer); +static int32_t tdRSmaRestoreTSDataReload(SSma *pSma); static SRSmaInfo *tdGetRSmaInfoByItem(SRSmaInfoItem *pItem) { // adapt accordingly if definition of SRSmaInfo update @@ -604,11 +602,8 @@ _end: return code; } -static int32_t tdRSmaFetchAndSubmitResult(qTaskInfo_t taskInfo, SRSmaInfoItem *pItem, STSchema *pTSchema, int64_t suid, - SRSmaStat *pStat, int8_t blkType) { - SArray *pResult = NULL; - SSma *pSma = pStat->pSma; - +static int32_t tdRSmaFetchAndSubmitResult(SSma *pSma, qTaskInfo_t taskInfo, SRSmaInfoItem *pItem, STSchema *pTSchema, + int64_t suid, int8_t blkType) { while (1) { SSDataBlock *output = NULL; uint64_t ts; @@ -619,30 +614,20 @@ static int32_t 
tdRSmaFetchAndSubmitResult(qTaskInfo_t taskInfo, SRSmaInfoItem *p pItem->level, terrstr(code)); goto _err; } - if (!output) { - break; - } - if (!pResult) { - pResult = taosArrayInit(1, sizeof(SSDataBlock)); - if (!pResult) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - goto _err; - } - } - - taosArrayPush(pResult, output); - - if (taosArrayGetSize(pResult) > 0) { -#if 1 + if (output) { +#if 0 char flag[10] = {0}; snprintf(flag, 10, "level %" PRIi8, pItem->level); + SArray *pResult = taosArrayInit(1, sizeof(SSDataBlock)); + taosArrayPush(pResult, output); blockDebugShowDataBlocks(pResult, flag); + taosArrayDestroy(pResult); #endif STsdb *sinkTsdb = (pItem->level == TSDB_RETENTION_L1 ? pSma->pRSmaTsdb[0] : pSma->pRSmaTsdb[1]); SSubmitReq *pReq = NULL; // TODO: the schema update should be handled later(TD-17965) - if (buildSubmitReqFromDataBlock(&pReq, pResult, pTSchema, SMA_VID(pSma), suid) < 0) { + if (buildSubmitReqFromDataBlock(&pReq, output, pTSchema, SMA_VID(pSma), suid) < 0) { smaError("vgId:%d, build submit req for rsma stable %" PRIi64 " level %" PRIi8 " failed since %s", SMA_VID(pSma), suid, pItem->level, terrstr()); goto _err; @@ -659,18 +644,17 @@ static int32_t tdRSmaFetchAndSubmitResult(qTaskInfo_t taskInfo, SRSmaInfoItem *p SMA_VID(pSma), suid, pItem->level, output->info.version); taosMemoryFreeClear(pReq); - taosArrayClear(pResult); } else if (terrno == 0) { smaDebug("vgId:%d, no rsma %" PRIi8 " data fetched yet", SMA_VID(pSma), pItem->level); + break; } else { smaDebug("vgId:%d, no rsma %" PRIi8 " data fetched since %s", SMA_VID(pSma), pItem->level, terrstr()); + goto _err; } } - tdDestroySDataBlockArray(pResult); return TSDB_CODE_SUCCESS; _err: - tdDestroySDataBlockArray(pResult); return TSDB_CODE_FAILED; } @@ -694,11 +678,9 @@ static int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int32_t inputType return TSDB_CODE_FAILED; } - SSmaEnv *pEnv = SMA_RSMA_ENV(pSma); - SRSmaStat *pStat = SMA_RSMA_STAT(pEnv->pStat); SRSmaInfoItem *pItem = RSMA_INFO_ITEM(pInfo, idx); - tdRSmaFetchAndSubmitResult(RSMA_INFO_QTASK(pInfo, idx), pItem, pInfo->pTSchema, suid, pStat, + tdRSmaFetchAndSubmitResult(pSma, RSMA_INFO_QTASK(pInfo, idx), pItem, pInfo->pTSchema, suid, STREAM_INPUT__DATA_SUBMIT); atomic_store_8(&pItem->triggerStat, TASK_TRIGGER_STAT_ACTIVE); @@ -724,11 +706,13 @@ static SRSmaInfo *tdAcquireRSmaInfoBySuid(SSma *pSma, int64_t suid) { SRSmaInfo *pRSmaInfo = NULL; if (!pEnv) { + terrno = TSDB_CODE_RSMA_INVALID_ENV; return NULL; } pStat = (SRSmaStat *)SMA_ENV_STAT(pEnv); if (!pStat || !RSMA_INFO_HASH(pStat)) { + terrno = TSDB_CODE_RSMA_INVALID_STAT; return NULL; } @@ -743,12 +727,12 @@ static SRSmaInfo *tdAcquireRSmaInfoBySuid(SSma *pSma, int64_t suid) { taosRUnLockLatch(SMA_ENV_LOCK(pEnv)); return pRSmaInfo; } + taosRUnLockLatch(SMA_ENV_LOCK(pEnv)); if (RSMA_COMMIT_STAT(pStat) == 0) { // return NULL if not in committing stat - taosRUnLockLatch(SMA_ENV_LOCK(pEnv)); return NULL; } - taosRUnLockLatch(SMA_ENV_LOCK(pEnv)); + // clone the SRSmaInfo from iRsmaInfoHash to rsmaInfoHash if in committing stat SRSmaInfo *pCowRSmaInfo = NULL; @@ -779,7 +763,7 @@ static SRSmaInfo *tdAcquireRSmaInfoBySuid(SSma *pSma, int64_t suid) { ASSERT(!pCowRSmaInfo); } - if(pCowRSmaInfo) { + if (pCowRSmaInfo) { tdRefRSmaInfo(pSma, pCowRSmaInfo); } // unlock @@ -1323,7 +1307,7 @@ _err: } /** - * @brief trigger to get rsma result + * @brief trigger to get rsma result in async mode * * @param param * @param tmrId @@ -1357,8 +1341,7 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) { " refId:%d", 
             SMA_VID(pSma), pItem->level, rsmaTriggerStat, smaMgmt.rsetId, pRSmaInfo->refId);
     if (rsmaTriggerStat == TASK_TRIGGER_STAT_PAUSED) {
-      taosTmrReset(tdRSmaFetchTrigger, pItem->maxDelay > 5000 ? 5000 : pItem->maxDelay, pItem, smaMgmt.tmrHandle,
-                   &pItem->tmrId);
+      taosTmrReset(tdRSmaFetchTrigger, 5000, pItem, smaMgmt.tmrHandle, &pItem->tmrId);
     }
     return;
   }
@@ -1372,16 +1355,8 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) {
     case TASK_TRIGGER_STAT_ACTIVE: {
       smaDebug("vgId:%d, fetch rsma level %" PRIi8 " data for table:%" PRIi64 " since stat is active",
                SMA_VID(pSma), pItem->level, pRSmaInfo->suid);
-
-      // sync procedure => async process
-
-      SSDataBlock dataBlock = {.info.type = STREAM_GET_ALL};
-      qTaskInfo_t taskInfo = pRSmaInfo->taskInfo[pItem->level - 1];
-      qSetMultiStreamInput(taskInfo, &dataBlock, 1, STREAM_INPUT__DATA_BLOCK);
-      tdRSmaFetchAndSubmitResult(taskInfo, pItem, pRSmaInfo->pTSchema, pRSmaInfo->suid, pStat,
-                                 STREAM_INPUT__DATA_BLOCK);
-      tdCleanupStreamInputDataBlock(taskInfo);
-
+      // async process
+      tdRSmaFetchSend(pSma, pRSmaInfo, pItem->level);
     } break;
     case TASK_TRIGGER_STAT_PAUSED: {
       smaDebug("vgId:%d, not fetch rsma level %" PRIi8 " data for table:%" PRIi64 " since stat is paused",
@@ -1404,3 +1379,90 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) {
 _end:
   tdReleaseSmaRef(smaMgmt.rsetId, pRSmaInfo->refId);
 }
+
+/**
+ * @brief put rsma fetch msg to fetch queue
+ *
+ * @param pSma
+ * @param pInfo
+ * @param level
+ * @return int32_t
+ */
+int32_t tdRSmaFetchSend(SSma *pSma, SRSmaInfo *pInfo, int8_t level) {
+  SRSmaFetchMsg fetchMsg = {.refId = pInfo->refId, .suid = pInfo->suid, .level = level};
+  int32_t       ret = 0;
+  int32_t       contLen = 0;
+  SEncoder      encoder = {0};
+  tEncodeSize(tEncodeSRSmaFetchMsg, &fetchMsg, contLen, ret);
+  if (ret < 0) {
+    terrno = TSDB_CODE_OUT_OF_MEMORY;
+    tEncoderClear(&encoder);
+    goto _err;
+  }
+
+  void *pBuf = rpcMallocCont(contLen);
+  if (!pBuf) {
+    terrno = TSDB_CODE_OUT_OF_MEMORY;
+    goto _err;
+  }
+  tEncoderInit(&encoder, pBuf, contLen);
+  if (tEncodeSRSmaFetchMsg(&encoder, &fetchMsg) < 0) {
+    terrno = TSDB_CODE_OUT_OF_MEMORY;
+    tEncoderClear(&encoder);
+    rpcFreeCont(pBuf);
+    goto _err;
+  }
+  tEncoderClear(&encoder);
+
+  SRpcMsg rpcMsg = {
+      .code = 0,
+      .msgType = TDMT_VND_FETCH_RSMA,
+      .pCont = pBuf,
+      .contLen = contLen,
+  };
+
+  if ((terrno = tmsgPutToQueue(&pSma->pVnode->msgCb, FETCH_QUEUE, &rpcMsg)) != 0) {
+    smaError("vgId:%d, failed to put rsma fetch msg into fetch-queue for suid:%" PRIi64 " level:%" PRIi8 " since %s",
+             SMA_VID(pSma), pInfo->suid, level, terrstr());
+    goto _err;
+  }
+
+  return TSDB_CODE_SUCCESS;
+_err:
+  return TSDB_CODE_FAILED;
+}
+
+int32_t smaProcessFetch(SSma *pSma, void *pMsg) {
+  SRpcMsg       *pRpcMsg = (SRpcMsg *)pMsg;
+  SRSmaFetchMsg  req = {0};
+  SDecoder       decoder = {0};
+  SRSmaInfo     *pInfo = NULL;
+  SRSmaInfoItem *pItem = NULL;
+
+  tDecoderInit(&decoder, pRpcMsg->pCont, pRpcMsg->contLen);
+  if (tDecodeSRSmaFetchMsg(&decoder, &req) < 0) {
+    terrno = TSDB_CODE_INVALID_MSG;
+    goto _err;
+  }
+
+  pInfo = tdAcquireRSmaInfoBySuid(pSma, req.suid);
+  if (!pInfo) {
+    smaDebug("vgId:%d, failed to process rsma fetch msg since empty rsma info", SMA_VID(pSma));
+    goto _err;
+  }
+
+  pItem = RSMA_INFO_ITEM(pInfo, req.level - 1);
+
+  SSDataBlock dataBlock = {.info.type = STREAM_GET_ALL};
+  qTaskInfo_t taskInfo = RSMA_INFO_QTASK(pInfo, req.level - 1);
+  if ((terrno = qSetMultiStreamInput(taskInfo, &dataBlock, 1, STREAM_INPUT__DATA_BLOCK)) < 0) {
+    goto _err;
+  }
+  if (tdRSmaFetchAndSubmitResult(pSma, taskInfo, pItem, pInfo->pTSchema, pInfo->suid, STREAM_INPUT__DATA_BLOCK) < 0) {
+    goto _err;
+  }
+
+  tdCleanupStreamInputDataBlock(taskInfo);
+
+  tdReleaseRSmaInfo(pSma, pInfo);
+  tDecoderClear(&decoder);
+  return TSDB_CODE_SUCCESS;
+_err:
+  tdReleaseRSmaInfo(pSma, pInfo);
+  tDecoderClear(&decoder);
+  smaError("vgId:%d, failed to process rsma fetch msg since %s", SMA_VID(pSma), terrstr());
+  return TSDB_CODE_FAILED;
+}
diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c
index 15cf183b2a..0f8ec07016 100644
--- a/source/dnode/vnode/src/vnd/vnodeSvr.c
+++ b/source/dnode/vnode/src/vnd/vnodeSvr.c
@@ -325,6 +325,8 @@ int32_t vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) {
       return vnodeGetTableCfg(pVnode, pMsg, true);
     case TDMT_VND_BATCH_META:
       return vnodeGetBatchMeta(pVnode, pMsg);
+    case TDMT_VND_FETCH_RSMA:
+      return smaProcessFetch(pVnode->pSma, pMsg);
     case TDMT_VND_CONSUME:
       return tqProcessPollReq(pVnode->pTq, pMsg);
     case TDMT_STREAM_TASK_RUN:
diff --git a/source/libs/sync/inc/syncRaftLog.h b/source/libs/sync/inc/syncRaftLog.h
index 65ec77e38f..ff59189a9d 100644
--- a/source/libs/sync/inc/syncRaftLog.h
+++ b/source/libs/sync/inc/syncRaftLog.h
@@ -47,6 +47,8 @@ char* logStoreSimple2Str(SSyncLogStore* pLogStore);

 SyncIndex logStoreFirstIndex(SSyncLogStore* pLogStore);

+SyncIndex logStoreWalCommitVer(SSyncLogStore* pLogStore);
+
 // for debug
 void logStorePrint(SSyncLogStore* pLogStore);
 void logStorePrint2(char* s, SSyncLogStore* pLogStore);
diff --git a/source/libs/sync/src/syncAppendEntries.c b/source/libs/sync/src/syncAppendEntries.c
index f31f3dd1ae..4f93d8197d 100644
--- a/source/libs/sync/src/syncAppendEntries.c
+++ b/source/libs/sync/src/syncAppendEntries.c
@@ -357,16 +357,14 @@ static int32_t syncNodeMakeLogSame(SSyncNode* ths, SyncAppendEntries* pMsg) {
   code = ths->pLogStore->syncLogTruncate(ths->pLogStore, delBegin);
   ASSERT(code == 0);

-  char eventLog[128];
-  snprintf(eventLog, sizeof(eventLog), "log truncate, from %" PRId64 " to %" PRId64, delBegin, delEnd);
-  syncNodeEventLog(ths, eventLog);
-  logStoreSimpleLog2("after syncNodeMakeLogSame", ths->pLogStore);
-
   return code;
 }

+// if FromIndex > walCommitVer, return 0
+// else return the number of entries already committed in the wal (passed over, not truncated)
 static int32_t syncNodeDoMakeLogSame(SSyncNode* ths, SyncIndex FromIndex) {
-  int32_t code;
+  int32_t code = 0;
+  int32_t pass = 0;

   SyncIndex delBegin = FromIndex;
   SyncIndex delEnd = ths->pLogStore->syncLogLastIndex(ths->pLogStore);
@@ -398,16 +396,31 @@ static int32_t syncNodeDoMakeLogSame(SSyncNode* ths, SyncIndex FromIndex) {
     }
   }

+  // never truncate entries the wal has already committed; count them as passed
+  SyncIndex walCommitVer = logStoreWalCommitVer(ths->pLogStore);
+
+  if (delBegin <= walCommitVer) {
+    pass = walCommitVer - delBegin + 1;
+    delBegin = walCommitVer + 1;
+
+    do {
+      char logBuf[128];
+      snprintf(logBuf, sizeof(logBuf), "update delete begin to %" PRId64, delBegin);
+      syncNodeEventLog(ths, logBuf);
+    } while (0);
+  }
+
   // delete conflict entries
   code = ths->pLogStore->syncLogTruncate(ths->pLogStore, delBegin);
   ASSERT(code == 0);

-  char eventLog[128];
-  snprintf(eventLog, sizeof(eventLog), "log truncate, from %" PRId64 " to %" PRId64, delBegin, delEnd);
-  syncNodeEventLog(ths, eventLog);
-  logStoreSimpleLog2("after syncNodeMakeLogSame", ths->pLogStore);
+  do {
+    char logBuf[128];
+    snprintf(logBuf, sizeof(logBuf), "make log same from:%" PRId64 ", delbegin:%" PRId64 ", pass:%d", FromIndex,
+             delBegin, pass);
+    syncNodeEventLog(ths, logBuf);
+  } while (0);

-  return code;
+  return pass;
 }

 int32_t syncNodePreCommit(SSyncNode* ths, SSyncRaftEntry* pEntry, int32_t code) {
@@ -543,31 +556,34 @@ int32_t
syncNodeOnAppendEntriesSnapshot2Cb(SSyncNode* ths, SyncAppendEntriesBatc SOffsetAndContLen* metaTableArr = syncAppendEntriesBatchMetaTableArray(pMsg); if (hasAppendEntries && pMsg->prevLogIndex == ths->commitIndex) { + int32_t pass = 0; + SyncIndex logLastIndex = ths->pLogStore->syncLogLastIndex(ths->pLogStore); + bool hasExtraEntries = logLastIndex > pMsg->prevLogIndex; + // make log same - do { - SyncIndex logLastIndex = ths->pLogStore->syncLogLastIndex(ths->pLogStore); - bool hasExtraEntries = logLastIndex > pMsg->prevLogIndex; - - if (hasExtraEntries) { - // make log same, rollback deleted entries - code = syncNodeDoMakeLogSame(ths, pMsg->prevLogIndex + 1); - ASSERT(code == 0); - } - - } while (0); + if (hasExtraEntries) { + // make log same, rollback deleted entries + pass = syncNodeDoMakeLogSame(ths, pMsg->prevLogIndex + 1); + ASSERT(pass >= 0); + } // append entry batch - for (int32_t i = 0; i < pMsg->dataCount; ++i) { - SSyncRaftEntry* pAppendEntry = (SSyncRaftEntry*)(pMsg->data + metaTableArr[i].offset); - code = ths->pLogStore->syncLogAppendEntry(ths->pLogStore, pAppendEntry); - if (code != 0) { - return -1; + if (pass == 0) { + // assert! no batch + ASSERT(pMsg->dataCount <= 1); + + for (int32_t i = 0; i < pMsg->dataCount; ++i) { + SSyncRaftEntry* pAppendEntry = (SSyncRaftEntry*)(pMsg->data + metaTableArr[i].offset); + code = ths->pLogStore->syncLogAppendEntry(ths->pLogStore, pAppendEntry); + if (code != 0) { + return -1; + } + + code = syncNodePreCommit(ths, pAppendEntry, 0); + ASSERT(code == 0); + + // syncEntryDestory(pAppendEntry); } - - code = syncNodePreCommit(ths, pAppendEntry, 0); - ASSERT(code == 0); - - // syncEntryDestory(pAppendEntry); } // fsync once @@ -670,25 +686,33 @@ int32_t syncNodeOnAppendEntriesSnapshot2Cb(SSyncNode* ths, SyncAppendEntriesBatc syncLogRecvAppendEntriesBatch(ths, pMsg, "really match"); + int32_t pass = 0; + if (hasExtraEntries) { // make log same, rollback deleted entries - code = syncNodeDoMakeLogSame(ths, pMsg->prevLogIndex + 1); - ASSERT(code == 0); + pass = syncNodeDoMakeLogSame(ths, pMsg->prevLogIndex + 1); + ASSERT(pass >= 0); } if (hasAppendEntries) { // append entry batch - for (int32_t i = 0; i < pMsg->dataCount; ++i) { - SSyncRaftEntry* pAppendEntry = (SSyncRaftEntry*)(pMsg->data + metaTableArr[i].offset); - code = ths->pLogStore->syncLogAppendEntry(ths->pLogStore, pAppendEntry); - if (code != 0) { - return -1; + if (pass == 0) { + // assert! 
no batch + ASSERT(pMsg->dataCount <= 1); + + // append entry batch + for (int32_t i = 0; i < pMsg->dataCount; ++i) { + SSyncRaftEntry* pAppendEntry = (SSyncRaftEntry*)(pMsg->data + metaTableArr[i].offset); + code = ths->pLogStore->syncLogAppendEntry(ths->pLogStore, pAppendEntry); + if (code != 0) { + return -1; + } + + code = syncNodePreCommit(ths, pAppendEntry, 0); + ASSERT(code == 0); + + // syncEntryDestory(pAppendEntry); } - - code = syncNodePreCommit(ths, pAppendEntry, 0); - ASSERT(code == 0); - - // syncEntryDestory(pAppendEntry); } // fsync once diff --git a/source/libs/sync/src/syncCommit.c b/source/libs/sync/src/syncCommit.c index a603cfff27..3a94ed9713 100644 --- a/source/libs/sync/src/syncCommit.c +++ b/source/libs/sync/src/syncCommit.c @@ -92,6 +92,12 @@ void syncMaybeAdvanceCommitIndex(SSyncNode* pSyncNode) { } } + // advance commit index as large as possible + SyncIndex walCommitVer = logStoreWalCommitVer(pSyncNode->pLogStore); + if (walCommitVer > newCommitIndex) { + newCommitIndex = walCommitVer; + } + // maybe execute fsm if (newCommitIndex > pSyncNode->commitIndex) { SyncIndex beginIndex = pSyncNode->commitIndex + 1; diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c index 78004c0ad6..c17d91182e 100644 --- a/source/libs/sync/src/syncMain.c +++ b/source/libs/sync/src/syncMain.c @@ -2409,6 +2409,9 @@ static void syncNodeEqElectTimer(void* param, void* tmrId) { static void syncNodeEqHeartbeatTimer(void* param, void* tmrId) { SSyncNode* pSyncNode = (SSyncNode*)param; + + syncNodeEventLog(pSyncNode, "eq hb timer"); + if (pSyncNode->replicaNum > 1) { if (atomic_load_64(&pSyncNode->heartbeatTimerLogicClockUser) <= atomic_load_64(&pSyncNode->heartbeatTimerLogicClock)) { diff --git a/source/libs/sync/src/syncRaftLog.c b/source/libs/sync/src/syncRaftLog.c index b575e40d86..0649e064e4 100644 --- a/source/libs/sync/src/syncRaftLog.c +++ b/source/libs/sync/src/syncRaftLog.c @@ -305,10 +305,18 @@ static int32_t raftLogGetEntry(struct SSyncLogStore* pLogStore, SyncIndex index, return code; } +// truncate semantic static int32_t raftLogTruncate(struct SSyncLogStore* pLogStore, SyncIndex fromIndex) { SSyncLogStoreData* pData = pLogStore->data; SWal* pWal = pData->pWal; - int32_t code = walRollback(pWal, fromIndex); + + // need not truncate + SyncIndex wallastVer = walGetLastVer(pWal); + if (fromIndex > wallastVer) { + return 0; + } + + int32_t code = walRollback(pWal, fromIndex); if (code != 0) { int32_t err = terrno; const char* errStr = tstrerror(err); @@ -323,7 +331,7 @@ static int32_t raftLogTruncate(struct SSyncLogStore* pLogStore, SyncIndex fromIn // event log do { char logBuf[128]; - snprintf(logBuf, sizeof(logBuf), "wal truncate, from-index:%" PRId64, fromIndex); + snprintf(logBuf, sizeof(logBuf), "log truncate, from-index:%" PRId64, fromIndex); syncNodeEventLog(pData->pSyncNode, logBuf); } while (0); @@ -637,6 +645,12 @@ SyncIndex logStoreFirstIndex(SSyncLogStore* pLogStore) { return walGetFirstVer(pWal); } +SyncIndex logStoreWalCommitVer(SSyncLogStore* pLogStore) { + SSyncLogStoreData* pData = pLogStore->data; + SWal* pWal = pData->pWal; + return walGetCommittedVer(pWal); +} + // for debug ----------------- void logStorePrint(SSyncLogStore* pLogStore) { char* serialized = logStore2Str(pLogStore); diff --git a/tests/script/tsim/sma/rsmaCreateInsertQuery.sim b/tests/script/tsim/sma/rsmaCreateInsertQuery.sim index bde56cb862..86bdbdcded 100644 --- a/tests/script/tsim/sma/rsmaCreateInsertQuery.sim +++ b/tests/script/tsim/sma/rsmaCreateInsertQuery.sim @@ 
-29,8 +29,8 @@ sql insert into ct1 values(now, 10);
 sql insert into ct1 values(now+1s, 1);
 sql insert into ct1 values(now+2s, 100);

-print =============== wait maxdelay 15+1 seconds for results
-sleep 16000
+print =============== wait maxdelay 15+2 seconds for results
+sleep 17000

 print =============== select * from retention level 2 from memory
 sql select * from ct1;
diff --git a/tests/script/tsim/sma/rsmaPersistenceRecovery.sim b/tests/script/tsim/sma/rsmaPersistenceRecovery.sim
index 1b54e5a47d..405d22ebdd 100644
--- a/tests/script/tsim/sma/rsmaPersistenceRecovery.sim
+++ b/tests/script/tsim/sma/rsmaPersistenceRecovery.sim
@@ -29,8 +29,8 @@ sql insert into ct1 values(now, 10, 10.0);
 sql insert into ct1 values(now+1s, 1, 1.0);
 sql insert into ct1 values(now+2s, 100, 100.0);

-print =============== wait maxdelay 5+1 seconds for results
-sleep 6000
+print =============== wait maxdelay 5+2 seconds for results
+sleep 7000

 print =============== select * from retention level 2 from memory
 sql select * from ct1;
@@ -135,8 +135,8 @@ print =============== insert after rsma qtaskinfo recovery
 sql insert into ct1 values(now, 50, 500.0);
 sql insert into ct1 values(now+1s, 40, 40.0);

-print =============== wait maxdelay 5+1 seconds for results
-sleep 6000
+print =============== wait maxdelay 5+2 seconds for results
+sleep 7000

 print =============== select * from retention level 2 from file and memory after rsma qtaskinfo recovery
 sql select * from ct1;

From 43026ac31dec1ad19083e047db29c9287aed34d3 Mon Sep 17 00:00:00 2001
From: Liu Jicong
Date: Mon, 8 Aug 2022 11:09:39 +0800
Subject: [PATCH 10/36] refactor(stream)

---
 docs/zh/07-develop/07-tmq.md               |   6 +-
 include/common/tcommon.h                   |   1 +
 include/common/tmsg.h                      |   2 +-
 include/libs/stream/tstream.h              |  29 +++--
 source/client/src/tmq.c                    |   3 +-
 source/dnode/mnode/impl/inc/mndDef.h       |   3 +-
 source/dnode/mnode/impl/src/mndDef.c       |   4 +-
 source/dnode/mnode/impl/src/mndScheduler.c |  21 ++--
 source/dnode/mnode/impl/src/mndStream.c    |   8 +-
 source/libs/stream/CMakeLists.txt          |   3 +-
 source/libs/stream/src/streamMeta.c        |  16 +--
 source/libs/stream/src/streamRecover.c     | 121 +++++++++++++--------
 source/libs/stream/src/streamTask.c        |   4 +-
 13 files changed, 123 insertions(+), 98 deletions(-)

diff --git a/docs/zh/07-develop/07-tmq.md b/docs/zh/07-develop/07-tmq.md
index 7faccdcec1..459b88085f 100644
--- a/docs/zh/07-develop/07-tmq.md
+++ b/docs/zh/07-develop/07-tmq.md
@@ -6,11 +6,11 @@ title: Data Subscription

 To help applications obtain data written to TDengine in real time, or process data in the order events arrive, TDengine offers subscription and consumption interfaces similar to those of message queue products. In many scenarios a time-series system built on TDengine therefore no longer needs to integrate a message queue product such as kafka, which simplifies system design and lowers operation and maintenance costs.

-As with kafka, you need to define a topic, but a TDengine topic can be a supertable or a subtable. Beyond that, you can filter the data you need by tags, table names, columns, expressions and other means, and apply function transforms and preprocessing (including scalar UDF computation). Compared with other message queue software, this is the biggest advantage of TDengine's data subscription: it offers greater flexibility, the granularity of the data can be adjusted by the application at any time, and filtering is done by TDengine rather than the application, which effectively reduces the volume of data transmitted.
+As with kafka, you need to define a topic, but a TDengine topic is based on a query condition over an existing supertable, subtable or regular table, that is, a SELECT statement. With SQL you can filter by tags, table names, columns, expressions and so on, and perform scalar-function and UDF computation on the data (excluding aggregation). Compared with other message queue software, this is the biggest advantage of TDengine's data subscription: it offers greater flexibility, the granularity of the data can be adjusted by the application at any time, and filtering and preprocessing are done by TDengine rather than the application, which effectively reduces both the volume of data transmitted and the complexity of the application.

-After subscribing to a topic, a consumer receives the latest data in real time. Multiple consumers can form a consumer group and share consumption progress, which enables multi-threaded, distributed consumption and raises data throughput. Consumers in different consumer groups do not share progress even when consuming the same topic. A consumer group can subscribe to multiple topics. If the topic is a supertable, the data may be spread over several vnodes, i.e. several shards, so a group with several consumers consumes more efficiently. TDengine's message queue provides an ACK mechanism to guarantee at-least-once consumption through crashes, restarts and other complex conditions.
+After subscribing to a topic, a consumer receives the latest data in real time. Multiple consumers can form a consumer group and share consumption progress, which enables multi-threaded, distributed consumption and raises consumption speed. Consumers in different consumer groups do not share progress even when consuming the same topic. A consumer can subscribe to multiple topics. If the topic is a supertable, the data may be spread over several vnodes, i.e. several shards, so a group with several consumers consumes more efficiently. TDengine's message queue provides an ACK mechanism to guarantee at-least-once consumption through crashes, restarts and other complex conditions.

-To implement these features, TDengine uses a flexible WAL (Write-Ahead-Log) file switching and retention mechanism: WAL files can be retained by time or by size (see the create database statement). During consumption, TDengine reads the data from the WAL, applies filtering, transformation and other operations, and pushes the data to the consumer.
+To implement these features, TDengine automatically builds indexes on the WAL (Write-Ahead-Log) files to support fast random access, and provides a flexible, configurable file switching and retention mechanism: users can specify how long and how large the WAL files are kept as needed (see the create database statement). This turns the WAL into a persistent storage engine that preserves event arrival order (since TSDB compresses far better than the WAL, however, long retention is not recommended, generally no more than a few days). For queries created as topics, TDengine uses the WAL rather than TSDB as the storage engine. During consumption, TDengine reads data directly from the WAL according to the current consumption progress, applies filtering, transformation and other operations through the unified query engine, and pushes the data to the consumer.

 This document does not cover the basics of message queues themselves; if needed, please look them up yourself.
diff --git a/include/common/tcommon.h b/include/common/tcommon.h
index 7d78c2dc2f..be18ef1fc0 100644
--- a/include/common/tcommon.h
+++ b/include/common/tcommon.h
@@ -104,6 +104,7 @@ typedef struct SDataBlockInfo {
   uint32_t capacity;
   // TODO: optimize and remove following
   int64_t     version;    // used for stream, and need serialization
+  int64_t     ts;         // used for stream, and need serialization
   int32_t     childId;    // used for stream, do not serialize
   EStreamType type;       // used for stream, do not serialize
   STimeWindow calWin;     // used for stream, do not serialize
diff --git a/include/common/tmsg.h b/include/common/tmsg.h
index 3c3071c8df..9eabeb4f58 100644
--- a/include/common/tmsg.h
+++ b/include/common/tmsg.h
@@ -3075,7 +3075,7 @@ typedef struct {
   void* msg;
 } SBatchRsp;

-static FORCE_INLINE void tFreeSBatchRsp(void *p) {
+static FORCE_INLINE void tFreeSBatchRsp(void* p) {
   if (NULL == p) {
     return;
   }
diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h
index 103ca6a4f0..239fcdad8d 100644
--- a/include/libs/stream/tstream.h
+++ b/include/libs/stream/tstream.h
@@ -17,6 +17,7 @@
 #include "os.h"
 #include "query.h"
 #include "tdatablock.h"
+#include "tdbInt.h"
 #include "tmsg.h"
 #include "tmsgcb.h"
 #include "tqueue.h"
@@ -85,6 +86,12 @@ enum {
   TASK_OUTPUT__FETCH,
 };

+enum {
+  STREAM_QUEUE__SUCESS = 1,
+  STREAM_QUEUE__FAILED,
+  STREAM_QUEUE__PROCESSING,
+};
+
 typedef struct {
   int8_t type;
 } SStreamQueueItem;
@@ -123,12 +130,6 @@ typedef struct {
   SSDataBlock* pBlock;
 } SStreamTrigger;

-enum {
-  STREAM_QUEUE__SUCESS = 1,
-  STREAM_QUEUE__FAILED,
-  STREAM_QUEUE__PROCESSING,
-};
-
 typedef struct {
   STaosQueue* queue;
   STaosQall*  qall;
@@ -233,6 +234,7 @@ typedef struct {
 typedef struct SStreamTask {
   int64_t streamId;
   int32_t taskId;
+  int32_t totalLevel;
   int8_t  taskLevel;
   int8_t  outputType;
   int16_t dispatchMsgType;
@@ -458,9 +460,20 @@ int32_t streamProcessRetrieveRsp(SStreamTask* pTask, SStreamRetrieveRsp* pRsp);
 int32_t streamTryExec(SStreamTask* pTask);
 int32_t streamSchedExec(SStreamTask* pTask);

-typedef struct SStreamMeta SStreamMeta;
+typedef int32_t FTaskExpand(void* ahandle, SStreamTask* pTask);

-SStreamMeta* streamMetaOpen();
+typedef struct SStreamMeta {
+  char*        path;
+  TDB*         db;
+  TTB*         pTaskDb;
+  TTB*         pStateDb;
+  SHashObj*    pTasks;
+  void*        ahandle;
+  TXN          txn;
+  FTaskExpand* expandFunc;
+} SStreamMeta;
+
+SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandFunc);
 void         streamMetaClose(SStreamMeta* streamMeta);

 int32_t streamMetaAddTask(SStreamMeta* pMeta, SStreamTask* pTask);
diff --git a/source/client/src/tmq.c b/source/client/src/tmq.c
index f7d45dc6ff..073f772ee4 100644
--- a/source/client/src/tmq.c
+++ b/source/client/src/tmq.c
@@ -1699,7 +1699,8 @@ int32_t tmq_consumer_close(tmq_t* tmq) {

     tmq_list_destroy(lst);

-    return rsp;
+    /*return rsp;*/
+    return 0;
   }
   // TODO: free resources
   return 0;
diff --git a/source/dnode/mnode/impl/inc/mndDef.h
b/source/dnode/mnode/impl/inc/mndDef.h index 06c64dcea6..c4da9b5c3d 100644 --- a/source/dnode/mnode/impl/inc/mndDef.h +++ b/source/dnode/mnode/impl/inc/mndDef.h @@ -604,11 +604,11 @@ typedef struct { int64_t createTime; int64_t updateTime; int32_t version; + int32_t totalLevel; int64_t smaId; // 0 for unused // info int64_t uid; int8_t status; - int8_t isDistributed; // config int8_t igExpired; int8_t trigger; @@ -647,7 +647,6 @@ typedef struct { typedef struct { int64_t uid; int64_t streamId; - int8_t isDistributed; int8_t status; int8_t stage; } SStreamRecoverObj; diff --git a/source/dnode/mnode/impl/src/mndDef.c b/source/dnode/mnode/impl/src/mndDef.c index abac0573da..08ce161409 100644 --- a/source/dnode/mnode/impl/src/mndDef.c +++ b/source/dnode/mnode/impl/src/mndDef.c @@ -23,11 +23,11 @@ int32_t tEncodeSStreamObj(SEncoder *pEncoder, const SStreamObj *pObj) { if (tEncodeI64(pEncoder, pObj->createTime) < 0) return -1; if (tEncodeI64(pEncoder, pObj->updateTime) < 0) return -1; if (tEncodeI32(pEncoder, pObj->version) < 0) return -1; + if (tEncodeI32(pEncoder, pObj->totalLevel) < 0) return -1; if (tEncodeI64(pEncoder, pObj->smaId) < 0) return -1; if (tEncodeI64(pEncoder, pObj->uid) < 0) return -1; if (tEncodeI8(pEncoder, pObj->status) < 0) return -1; - if (tEncodeI8(pEncoder, pObj->isDistributed) < 0) return -1; if (tEncodeI8(pEncoder, pObj->igExpired) < 0) return -1; if (tEncodeI8(pEncoder, pObj->trigger) < 0) return -1; @@ -69,11 +69,11 @@ int32_t tDecodeSStreamObj(SDecoder *pDecoder, SStreamObj *pObj) { if (tDecodeI64(pDecoder, &pObj->createTime) < 0) return -1; if (tDecodeI64(pDecoder, &pObj->updateTime) < 0) return -1; if (tDecodeI32(pDecoder, &pObj->version) < 0) return -1; + if (tDecodeI32(pDecoder, &pObj->totalLevel) < 0) return -1; if (tDecodeI64(pDecoder, &pObj->smaId) < 0) return -1; if (tDecodeI64(pDecoder, &pObj->uid) < 0) return -1; if (tDecodeI8(pDecoder, &pObj->status) < 0) return -1; - if (tDecodeI8(pDecoder, &pObj->isDistributed) < 0) return -1; if (tDecodeI8(pDecoder, &pObj->igExpired) < 0) return -1; if (tDecodeI8(pDecoder, &pObj->trigger) < 0) return -1; diff --git a/source/dnode/mnode/impl/src/mndScheduler.c b/source/dnode/mnode/impl/src/mndScheduler.c index 218f82df18..a24b7ef459 100644 --- a/source/dnode/mnode/impl/src/mndScheduler.c +++ b/source/dnode/mnode/impl/src/mndScheduler.c @@ -307,10 +307,9 @@ int32_t mndScheduleStream(SMnode* pMnode, SStreamObj* pStream) { terrno = TSDB_CODE_QRY_INVALID_INPUT; return -1; } - int32_t totLevel = LIST_LENGTH(pPlan->pSubplans); - ASSERT(totLevel <= 2); - pStream->tasks = taosArrayInit(totLevel, sizeof(void*)); - pStream->isDistributed = totLevel == 2; + int32_t planTotLevel = LIST_LENGTH(pPlan->pSubplans); + ASSERT(planTotLevel <= 2); + pStream->tasks = taosArrayInit(planTotLevel, sizeof(void*)); bool hasExtraSink = false; bool externalTargetDB = strcmp(pStream->sourceDb, pStream->targetDb) != 0; @@ -320,7 +319,7 @@ int32_t mndScheduleStream(SMnode* pMnode, SStreamObj* pStream) { bool multiTarget = pDbObj->cfg.numOfVgroups > 1; - if (totLevel == 2 || externalTargetDB || multiTarget) { + if (planTotLevel == 2 || externalTargetDB || multiTarget) { /*if (true) {*/ SArray* taskOneLevel = taosArrayInit(0, sizeof(void*)); taosArrayPush(pStream->tasks, &taskOneLevel); @@ -338,8 +337,9 @@ int32_t mndScheduleStream(SMnode* pMnode, SStreamObj* pStream) { } } } + pStream->totalLevel = planTotLevel + hasExtraSink; - if (totLevel > 1) { + if (planTotLevel > 1) { SStreamTask* pInnerTask; // inner level { @@ -371,13 +371,6 @@ int32_t 
mndScheduleStream(SMnode* pMnode, SStreamObj* pStream) { return -1; } -#if 0 - SDbObj* pSourceDb = mndAcquireDb(pMnode, pStream->sourceDb); - ASSERT(pDbObj != NULL); - sdbRelease(pSdb, pSourceDb); - pInnerTask->numOfVgroups = pSourceDb->cfg.numOfVgroups; -#endif - if (tsSchedStreamToSnode) { SSnodeObj* pSnode = mndSchedFetchOneSnode(pMnode); if (pSnode == NULL) { @@ -464,7 +457,7 @@ int32_t mndScheduleStream(SMnode* pMnode, SStreamObj* pStream) { } } - if (totLevel == 1) { + if (planTotLevel == 1) { SArray* taskOneLevel = taosArrayInit(0, sizeof(void*)); taosArrayPush(pStream->tasks, &taskOneLevel); diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index 902879701c..ba9bb2982f 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -36,7 +36,7 @@ static int32_t mndStreamActionDelete(SSdb *pSdb, SStreamObj *pStream); static int32_t mndStreamActionUpdate(SSdb *pSdb, SStreamObj *pStream, SStreamObj *pNewStream); static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq); static int32_t mndProcessDropStreamReq(SRpcMsg *pReq); -static int32_t mndProcessRecoverStreamReq(SRpcMsg *pReq); +/*static int32_t mndProcessRecoverStreamReq(SRpcMsg *pReq);*/ static int32_t mndProcessStreamMetaReq(SRpcMsg *pReq); static int32_t mndGetStreamMeta(SRpcMsg *pReq, SShowObj *pShow, STableMetaRsp *pMeta); static int32_t mndRetrieveStream(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows); @@ -55,7 +55,7 @@ int32_t mndInitStream(SMnode *pMnode) { mndSetMsgHandle(pMnode, TDMT_MND_CREATE_STREAM, mndProcessCreateStreamReq); mndSetMsgHandle(pMnode, TDMT_MND_DROP_STREAM, mndProcessDropStreamReq); - mndSetMsgHandle(pMnode, TDMT_MND_RECOVER_STREAM, mndProcessRecoverStreamReq); + /*mndSetMsgHandle(pMnode, TDMT_MND_RECOVER_STREAM, mndProcessRecoverStreamReq);*/ mndSetMsgHandle(pMnode, TDMT_STREAM_TASK_DEPLOY_RSP, mndTransProcessRsp); mndSetMsgHandle(pMnode, TDMT_STREAM_TASK_DROP_RSP, mndTransProcessRsp); @@ -540,6 +540,7 @@ static int32_t mndPersistTaskRecoverReq(STrans *pTrans, SStreamTask *pTask) { return 0; } +#if 0 int32_t mndRecoverStreamTasks(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream) { if (pStream->isDistributed) { int32_t lv = taosArrayGetSize(pStream->tasks); @@ -573,6 +574,7 @@ int32_t mndRecoverStreamTasks(SMnode *pMnode, STrans *pTrans, SStreamObj *pStrea } return 0; } +#endif int32_t mndDropStreamTasks(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream) { int32_t lv = taosArrayGetSize(pStream->tasks); @@ -755,6 +757,7 @@ static int32_t mndProcessDropStreamReq(SRpcMsg *pReq) { return TSDB_CODE_ACTION_IN_PROGRESS; } +#if 0 static int32_t mndProcessRecoverStreamReq(SRpcMsg *pReq) { SMnode *pMnode = pReq->info.node; SStreamObj *pStream = NULL; @@ -817,6 +820,7 @@ static int32_t mndProcessRecoverStreamReq(SRpcMsg *pReq) { return TSDB_CODE_ACTION_IN_PROGRESS; } +#endif int32_t mndDropStreamByDb(SMnode *pMnode, STrans *pTrans, SDbObj *pDb) { SSdb *pSdb = pMnode->pSdb; diff --git a/source/libs/stream/CMakeLists.txt b/source/libs/stream/CMakeLists.txt index 33e864158a..ceddf4f215 100644 --- a/source/libs/stream/CMakeLists.txt +++ b/source/libs/stream/CMakeLists.txt @@ -8,7 +8,8 @@ target_include_directories( target_link_libraries( stream - PRIVATE os util transport qcom executor tdb + PUBLIC tdb + PRIVATE os util transport qcom executor ) if(${BUILD_TEST}) diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index 7dfeefb261..085a0e4ce7 100644 --- 
a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -14,22 +14,8 @@ */ #include "executor.h" -#include "tdbInt.h" #include "tstream.h" -typedef int32_t FTaskExpand(void* ahandle, SStreamTask* pTask); - -typedef struct SStreamMeta { - char* path; - TDB* db; - TTB* pTaskDb; - TTB* pStateDb; - SHashObj* pTasks; - void* ahandle; - TXN txn; - FTaskExpand* expandFunc; -} SStreamMeta; - SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandFunc) { SStreamMeta* pMeta = taosMemoryCalloc(1, sizeof(SStreamMeta)); if (pMeta == NULL) { @@ -150,7 +136,7 @@ int32_t streamMetaAbort(SStreamMeta* pMeta) { return 0; } -int32_t streamRestoreTask(SStreamMeta* pMeta) { +int32_t streamLoadTasks(SStreamMeta* pMeta) { TBC* pCur = NULL; if (tdbTbcOpen(pMeta->pTaskDb, &pCur, NULL) < 0) { ASSERT(0); diff --git a/source/libs/stream/src/streamRecover.c b/source/libs/stream/src/streamRecover.c index 3530c05688..2a77ce8a91 100644 --- a/source/libs/stream/src/streamRecover.c +++ b/source/libs/stream/src/streamRecover.c @@ -87,53 +87,80 @@ int32_t tDecodeSMStreamTaskRecoverRsp(SDecoder* pDecoder, SMStreamTaskRecoverRsp return 0; } -int32_t streamProcessFailRecoverReq(SStreamTask* pTask, SMStreamTaskRecoverReq* pReq, SRpcMsg* pRsp) { -#if 0 - if (pTask->taskStatus != TASK_STATUS__FAIL) { - return 0; - } +typedef struct { + int32_t vgId; + int32_t childId; + int64_t ver; +} SStreamVgVerCheckpoint; - if (pTask->isStreamDistributed) { - if (pTask->taskType == TASK_TYPE__SOURCE) { - pTask->taskStatus = TASK_STATUS__PREPARE_RECOVER; - } else if (pTask->taskType != TASK_TYPE__SINK) { - pTask->taskStatus = TASK_STATUS__PREPARE_RECOVER; - bool hasCheckpoint = false; - int32_t childSz = taosArrayGetSize(pTask->childEpInfo); - for (int32_t i = 0; i < childSz; i++) { - SStreamChildEpInfo* pEpInfo = taosArrayGetP(pTask->childEpInfo, i); - if (pEpInfo->checkpointVer == -1) { - hasCheckpoint = true; - break; - } - } - if (hasCheckpoint) { - // load from checkpoint - } else { - // recover child - } - } - } else { - if (pTask->taskType == TASK_TYPE__SOURCE) { - if (pTask->checkpointVer != -1) { - // load from checkpoint - } else { - // reset stream query task info - // TODO get snapshot ver - pTask->recoverSnapVer = -1; - qStreamPrepareRecover(pTask->exec.executor, pTask->startVer, pTask->recoverSnapVer); - pTask->taskStatus = TASK_STATUS__RECOVERING; - } - } - } - - if (pTask->taskStatus == TASK_STATUS__RECOVERING) { - if (streamPipelineExec(pTask, 100) < 0) { - // set fail - return -1; - } - } - -#endif +int32_t tEncodeSStreamVgVerCheckpoint(SEncoder* pEncoder, const SStreamVgVerCheckpoint* pCheckpoint) { + if (tEncodeI32(pEncoder, pCheckpoint->vgId) < 0) return -1; + if (tEncodeI32(pEncoder, pCheckpoint->childId) < 0) return -1; + if (tEncodeI64(pEncoder, pCheckpoint->ver) < 0) return -1; + return 0; +} + +int32_t tDecodeSStreamVgVerCheckpoint(SDecoder* pDecoder, SStreamVgVerCheckpoint* pCheckpoint) { + if (tDecodeI32(pDecoder, &pCheckpoint->vgId) < 0) return -1; + if (tDecodeI32(pDecoder, &pCheckpoint->childId) < 0) return -1; + if (tDecodeI64(pDecoder, &pCheckpoint->ver) < 0) return -1; + return 0; +} + +typedef struct { + int64_t streamId; + int64_t checkTs; + int64_t checkpointId; + int32_t taskId; + SArray* checkpointVer; // SArray +} SStreamAggVerCheckpoint; + +int32_t tEncodeSStreamAggVerCheckpoint(SEncoder* pEncoder, const SStreamAggVerCheckpoint* pCheckpoint) { + if (tEncodeI64(pEncoder, pCheckpoint->streamId) < 0) return -1; + if (tEncodeI64(pEncoder, 
pCheckpoint->checkTs) < 0) return -1;
+  if (tEncodeI64(pEncoder, pCheckpoint->checkpointId) < 0) return -1;
+  if (tEncodeI32(pEncoder, pCheckpoint->taskId) < 0) return -1;
+  int32_t sz = taosArrayGetSize(pCheckpoint->checkpointVer);
+  if (tEncodeI32(pEncoder, sz) < 0) return -1;
+  for (int32_t i = 0; i < sz; i++) {
+    SStreamVgVerCheckpoint* pOneVgCkpoint = taosArrayGet(pCheckpoint->checkpointVer, i);
+    if (tEncodeSStreamVgVerCheckpoint(pEncoder, pOneVgCkpoint) < 0) return -1;
+  }
+  return 0;
+}
+
+int32_t tDecodeSStreamAggVerCheckpoint(SDecoder* pDecoder, SStreamAggVerCheckpoint* pCheckpoint) {
+  if (tDecodeI64(pDecoder, &pCheckpoint->streamId) < 0) return -1;
+  if (tDecodeI64(pDecoder, &pCheckpoint->checkTs) < 0) return -1;
+  if (tDecodeI64(pDecoder, &pCheckpoint->checkpointId) < 0) return -1;
+  if (tDecodeI32(pDecoder, &pCheckpoint->taskId) < 0) return -1;
+  int32_t sz;
+  if (tDecodeI32(pDecoder, &sz) < 0) return -1;
+  if (pCheckpoint->checkpointVer == NULL) {
+    pCheckpoint->checkpointVer = taosArrayInit(sz, sizeof(SStreamVgVerCheckpoint));
+    if (pCheckpoint->checkpointVer == NULL) return -1;
+  }
+  for (int32_t i = 0; i < sz; i++) {
+    SStreamVgVerCheckpoint oneVgCheckpoint;
+    if (tDecodeSStreamVgVerCheckpoint(pDecoder, &oneVgCheckpoint) < 0) return -1;
+    taosArrayPush(pCheckpoint->checkpointVer, &oneVgCheckpoint);
+  }
+  return 0;
+}
+
+int32_t streamRecoverSinkLevel(SStreamMeta* pMeta, SStreamTask* pTask) {
+  ASSERT(pTask->taskLevel == TASK_LEVEL__SINK);
+  // load status
+  void*   pVal = NULL;
+  int32_t vLen = 0;
+  if (tdbTbGet(pMeta->pStateDb, &pTask->taskId, sizeof(pTask->taskId), &pVal, &vLen) < 0) {
+    return -1;
+  }
+  SDecoder decoder;
+  tDecoderInit(&decoder, pVal, vLen);
+  SStreamAggVerCheckpoint aggCheckpoint = {0};
+  tDecodeSStreamAggVerCheckpoint(&decoder, &aggCheckpoint);
+  /*pTask->*/
+  return 0;
+}
+
+int32_t streamRecoverTask(SStreamTask* pTask) {
+  //
+  return 0;
+}
diff --git a/source/libs/stream/src/streamTask.c b/source/libs/stream/src/streamTask.c
index 3a54981989..8b5bd849f6 100644
--- a/source/libs/stream/src/streamTask.c
+++ b/source/libs/stream/src/streamTask.c
@@ -52,6 +52,7 @@ int32_t tEncodeSStreamTask(SEncoder* pEncoder, const SStreamTask* pTask) {
   /*if (tStartEncode(pEncoder) < 0) return -1;*/
   if (tEncodeI64(pEncoder, pTask->streamId) < 0) return -1;
   if (tEncodeI32(pEncoder, pTask->taskId) < 0) return -1;
+  if (tEncodeI32(pEncoder, pTask->totalLevel) < 0) return -1;
   if (tEncodeI8(pEncoder, pTask->taskLevel) < 0) return -1;
   if (tEncodeI8(pEncoder, pTask->outputType) < 0) return -1;
   if (tEncodeI16(pEncoder, pTask->dispatchMsgType) < 0) return -1;
@@ -62,7 +63,6 @@ int32_t tEncodeSStreamTask(SEncoder* pEncoder, const SStreamTask* pTask) {
   if (tEncodeI32(pEncoder, pTask->selfChildId) < 0) return -1;
   if (tEncodeI32(pEncoder, pTask->nodeId) < 0) return -1;
   if (tEncodeSEpSet(pEncoder, &pTask->epSet) < 0) return -1;
-  /*if (tEncodeI32(pEncoder, pTask->numOfVgroups) < 0) return -1;*/

   int32_t epSz = taosArrayGetSize(pTask->childEpInfo);
   if (tEncodeI32(pEncoder, epSz) < 0) return -1;
@@ -101,6 +101,7 @@ int32_t tDecodeSStreamTask(SDecoder* pDecoder, SStreamTask* pTask) {
   /*if (tStartDecode(pDecoder) < 0) return -1;*/
   if (tDecodeI64(pDecoder, &pTask->streamId) < 0) return -1;
   if (tDecodeI32(pDecoder, &pTask->taskId) < 0) return -1;
+  if (tDecodeI32(pDecoder, &pTask->totalLevel) < 0) return -1;
   if (tDecodeI8(pDecoder, &pTask->taskLevel) < 0) return -1;
   if (tDecodeI8(pDecoder, &pTask->outputType) < 0) return -1;
   if (tDecodeI16(pDecoder, &pTask->dispatchMsgType) < 0) return -1;
@@ -111,7 +112,6 @@ int32_t tDecodeSStreamTask(SDecoder* pDecoder, SStreamTask* pTask) {
   if (tDecodeI32(pDecoder, &pTask->selfChildId) < 0) return -1;
   if (tDecodeI32(pDecoder, &pTask->nodeId) < 0)
return -1; if (tDecodeSEpSet(pDecoder, &pTask->epSet) < 0) return -1; - /*if (tDecodeI32(pDecoder, &pTask->numOfVgroups) < 0) return -1;*/ int32_t epSz; if (tDecodeI32(pDecoder, &epSz) < 0) return -1; From e8ead35bd81dda99df2c7163c880c9ff6dda4688 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Mon, 8 Aug 2022 11:34:29 +0800 Subject: [PATCH 11/36] fix multi process problem --- source/libs/transport/src/transCli.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index 0b8829a0c0..e1df181329 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -994,6 +994,8 @@ static void cliAsyncCb(uv_async_t* handle) { if (count >= 2) { tTrace("cli process batch size:%d", count); } + // if (!uv_is_active((uv_handle_t*)pThrd->prepare)) uv_prepare_start(pThrd->prepare, cliPrepareCb); + if (pThrd->stopMsg != NULL) cliHandleQuit(pThrd->stopMsg, pThrd); } static void cliPrepareCb(uv_prepare_t* handle) { @@ -1089,7 +1091,7 @@ static SCliThrd* createThrdObj() { pThrd->prepare = taosMemoryCalloc(1, sizeof(uv_prepare_t)); uv_prepare_init(pThrd->loop, pThrd->prepare); pThrd->prepare->data = pThrd; - uv_prepare_start(pThrd->prepare, cliPrepareCb); + // uv_prepare_start(pThrd->prepare, cliPrepareCb); int32_t timerSize = 512; pThrd->timerList = taosArrayInit(timerSize, sizeof(void*)); @@ -1126,7 +1128,6 @@ static void destroyThrdObj(SCliThrd* pThrd) { taosMemoryFree(timer); } taosArrayDestroy(pThrd->timerList); - taosMemoryFree(pThrd->prepare); taosMemoryFree(pThrd->loop); taosMemoryFree(pThrd); From 536c6bdbc5e732ffba754e322e12c587ae251762 Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Mon, 8 Aug 2022 11:40:48 +0800 Subject: [PATCH 12/36] fix: add checks for stream query --- source/common/src/systable.c | 8 ++++---- source/libs/parser/src/parTranslater.c | 23 +++++++++++++++++------ source/libs/planner/src/planSpliter.c | 2 +- 3 files changed, 22 insertions(+), 11 deletions(-) diff --git a/source/common/src/systable.c b/source/common/src/systable.c index a79082ab23..b779f21cf5 100644 --- a/source/common/src/systable.c +++ b/source/common/src/systable.c @@ -16,8 +16,8 @@ #include "systable.h" #include "taos.h" #include "tdef.h" -#include "types.h" #include "tgrant.h" +#include "types.h" #define SYSTABLE_SCH_TABLE_NAME_LEN ((TSDB_TABLE_NAME_LEN - 1) + VARSTR_HEADER_SIZE) #define SYSTABLE_SCH_DB_NAME_LEN ((TSDB_DB_NAME_LEN - 1) + VARSTR_HEADER_SIZE) @@ -97,7 +97,7 @@ static const SSysDbTableSchema userDBSchema[] = { {.name = "wal_retention_period", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, {.name = "wal_retention_size", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT}, {.name = "wal_roll_period", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "wal_seg_size", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT}, + {.name = "wal_segment_size", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT}, }; static const SSysDbTableSchema userFuncSchema[] = { @@ -243,8 +243,8 @@ static const SSysTableMeta infosMeta[] = { {TSDB_INS_TABLE_MNODES, mnodesSchema, tListLen(mnodesSchema)}, {TSDB_INS_TABLE_MODULES, modulesSchema, tListLen(modulesSchema)}, {TSDB_INS_TABLE_QNODES, qnodesSchema, tListLen(qnodesSchema)}, -// {TSDB_INS_TABLE_SNODES, snodesSchema, tListLen(snodesSchema)}, -// {TSDB_INS_TABLE_BNODES, bnodesSchema, tListLen(bnodesSchema)}, + // {TSDB_INS_TABLE_SNODES, snodesSchema, tListLen(snodesSchema)}, + // {TSDB_INS_TABLE_BNODES, bnodesSchema, tListLen(bnodesSchema)}, {TSDB_INS_TABLE_CLUSTER, clusterSchema, 
tListLen(clusterSchema)}, {TSDB_INS_TABLE_DATABASES, userDBSchema, tListLen(userDBSchema)}, {TSDB_INS_TABLE_FUNCTIONS, userFuncSchema, tListLen(userFuncSchema)}, diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 9971f20d3d..be16289595 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -4708,6 +4708,12 @@ static int32_t translateKillTransaction(STranslateContext* pCxt, SKillStmt* pStm return buildCmdMsg(pCxt, TDMT_MND_KILL_TRANS, (FSerializeFunc)tSerializeSKillTransReq, &killReq); } +static bool crossTableWithoutAggOper(SSelectStmt* pSelect) { + return NULL == pSelect->pWindow && !pSelect->hasAggFuncs && !pSelect->hasIndefiniteRowsFunc && + !pSelect->hasInterpFunc && TSDB_SUPER_TABLE == ((SRealTableNode*)pSelect->pFromTable)->pMeta->tableType && + !isPartitionByTbname(pSelect->pPartitionByList); +} + static int32_t checkCreateStream(STranslateContext* pCxt, SCreateStreamStmt* pStmt) { if (NULL != pStmt->pOptions->pWatermark && (DEAL_RES_ERROR == translateValue(pCxt, (SValueNode*)pStmt->pOptions->pWatermark))) { @@ -4723,14 +4729,19 @@ static int32_t checkCreateStream(STranslateContext* pCxt, SCreateStreamStmt* pSt return TSDB_CODE_SUCCESS; } - if (QUERY_NODE_SELECT_STMT == nodeType(pStmt->pQuery)) { - SSelectStmt* pSelect = (SSelectStmt*)pStmt->pQuery; - if (QUERY_NODE_REAL_TABLE == nodeType(pSelect->pFromTable)) { - return TSDB_CODE_SUCCESS; - } + if (QUERY_NODE_SELECT_STMT != nodeType(pStmt->pQuery)) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, "Unsupported stream query"); } - return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, "Unsupported stream query"); + SSelectStmt* pSelect = (SSelectStmt*)pStmt->pQuery; + + if (QUERY_NODE_REAL_TABLE != nodeType(pSelect->pFromTable) || + TSDB_DATA_TYPE_TIMESTAMP != ((SExprNode*)nodesListGetNode(pSelect->pProjectionList, 0))->resType.type || + !pSelect->isTimeLineResult || crossTableWithoutAggOper(pSelect)) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, "Unsupported stream query"); + } + + return TSDB_CODE_SUCCESS; } static void getSourceDatabase(SNode* pStmt, int32_t acctId, char* pDbFName) { diff --git a/source/libs/planner/src/planSpliter.c b/source/libs/planner/src/planSpliter.c index e10f0586ca..97977878ad 100644 --- a/source/libs/planner/src/planSpliter.c +++ b/source/libs/planner/src/planSpliter.c @@ -268,7 +268,7 @@ static bool stbSplNeedSplit(bool streamQuery, SLogicNode* pNode) { case QUERY_NODE_LOGIC_PLAN_JOIN: return stbSplNeedSplitJoin(streamQuery, (SJoinLogicNode*)pNode); case QUERY_NODE_LOGIC_PLAN_PARTITION: - return stbSplIsMultiTbScanChild(streamQuery, pNode); + return streamQuery ? 
false : stbSplIsMultiTbScanChild(streamQuery, pNode);
     case QUERY_NODE_LOGIC_PLAN_AGG:
       return !stbSplHasGatherExecFunc(((SAggLogicNode*)pNode)->pAggFuncs) && stbSplHasMultiTbScan(streamQuery, pNode);
     case QUERY_NODE_LOGIC_PLAN_WINDOW:
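A note on the shape of the checkCreateStream() rework in this patch: the nested success path is replaced with early-return guard clauses, so each unsupported case rejects immediately and the success path stays flat. Below is a minimal, self-contained sketch of that shape; DemoQuery and validateStreamQuery are illustrative stand-ins, not TDengine APIs.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the parser's checks; the real code walks
 * SSelectStmt nodes. Illustrative only. */
typedef struct {
  bool isSelect;
  bool fromRealTable;
  bool firstColIsTimestamp;
} DemoQuery;

/* Guard-clause style: reject as soon as one precondition fails. */
static int validateStreamQuery(const DemoQuery *q) {
  if (!q->isSelect) return -1;            /* must be a SELECT statement */
  if (!q->fromRealTable) return -1;       /* FROM must be a real table */
  if (!q->firstColIsTimestamp) return -1; /* first result column must be ts */
  return 0;
}

int main(void) {
  DemoQuery ok = {true, true, true};
  DemoQuery bad = {true, false, true};
  printf("ok=%d bad=%d\n", validateStreamQuery(&ok), validateStreamQuery(&bad));
  return 0;
}

The same early-return structure reappears in patch 17 below, where the projection and timeline checks are split out into a separate checkStreamQuery() helper.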
From 819a48a4646f8136065de66a9ab2ea179a818db4 Mon Sep 17 00:00:00 2001
From: Minglei Jin
Date: Mon, 8 Aug 2022 11:46:31 +0800
Subject: [PATCH 13/36] fix: skip 0 row blocks to dismiss crashing

---
 source/dnode/vnode/src/tsdb/tsdbSnapshot.c | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/source/dnode/vnode/src/tsdb/tsdbSnapshot.c b/source/dnode/vnode/src/tsdb/tsdbSnapshot.c
index a9f07cbf24..c40fb98d62 100644
--- a/source/dnode/vnode/src/tsdb/tsdbSnapshot.c
+++ b/source/dnode/vnode/src/tsdb/tsdbSnapshot.c
@@ -115,12 +115,19 @@ static int32_t tsdbSnapReadData(STsdbSnapReader* pReader, uint8_t** ppData) {
       TSDBROW row = tsdbRowFromBlockData(&pReader->oBlockData, iRow);
       int64_t version = TSDBROW_VERSION(&row);
+      tsdbTrace("vgId:%d, vnode snapshot tsdb read for %s, %" PRId64 "(%" PRId64 " , %" PRId64 ")",
+                TD_VID(pReader->pTsdb->pVnode), pReader->pTsdb->path, version, pReader->sver, pReader->ever);
+
       if (version < pReader->sver || version > pReader->ever) continue;
       code = tBlockDataAppendRow(&pReader->nBlockData, &row, NULL);
       if (code) goto _err;
     }
+    if (pReader->nBlockData.nRow <= 0) {
+      continue;
+    }
+
     // org data
     // compress data (todo)
     int32_t size = sizeof(TABLEID) + tPutBlockData(NULL, &pReader->nBlockData);
@@ -808,7 +815,8 @@ static int32_t tsdbSnapWriteTableData(STsdbSnapWriter* pWriter, TABLEID id) {
   if (code) goto _err;

 _exit:
-  tsdbDebug("vgId:%d, vnode snapshot tsdb write data impl for %s", TD_VID(pWriter->pTsdb->pVnode), pWriter->pTsdb->path);
+  tsdbDebug("vgId:%d, vnode snapshot tsdb write data impl for %s", TD_VID(pWriter->pTsdb->pVnode),
+            pWriter->pTsdb->path);
   return code;

 _err:
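The nRow guard added in tsdbSnapReadData() above is the usual filter-then-skip-empty shape: rows outside [sver, ever] are dropped, and a block left empty afterwards must not be serialized. A self-contained sketch of the same pattern, with illustrative names (DemoBlock, demoFilterBlock) rather than TDengine types:

#include <stdint.h>
#include <stdio.h>

typedef struct {
  int64_t version[4];
  int32_t nRow;
} DemoBlock;

/* Keep only rows whose version lies in [sver, ever]; return how many
 * rows survived so the caller can skip an empty result. */
static int32_t demoFilterBlock(const DemoBlock *in, DemoBlock *out, int64_t sver, int64_t ever) {
  out->nRow = 0;
  for (int32_t i = 0; i < in->nRow; i++) {
    if (in->version[i] < sver || in->version[i] > ever) continue;
    out->version[out->nRow++] = in->version[i];
  }
  return out->nRow;
}

int main(void) {
  DemoBlock in = {{1, 5, 9, 12}, 4}, out;
  if (demoFilterBlock(&in, &out, 6, 8) <= 0) {
    puts("empty block, skip serialization"); /* the crash-avoiding branch */
  }
  return 0;
}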
zvFhH!Fe#hgwT%1{6sxG)r6qz_5a~q)3zE&Y;dHES5Diqe=$aU)r1CBQCVK)XE=(T& zWFWd$*9gQoxNS(DJh_T3LIRh;ME;1cp66jPOj>1LSjUEr+VQjLR%M$5@frvvro67hU!$OVy?g1i;C3mHcn zh+QL+2YS_}kko!Fq&B*>6-AwS^+*|t%&sY|cSrvps`pgpmXGvru<-z=U8zh^J<)86 zwu>5@IqWR2v=Ob#u25O1h$Y9$nwdD_S&IgOKE)9qiHKtIB$A zp{MymfX3K@T^t_LjEt#Fbs)y-qD~q-9w8(B60Pa&kaLfsa*0%cqD<3F*w92YYd)Va z%8|&j)&mTB@(e&kWD14JQqqIbHysJd`=#SGEt*6zAeWSAt6(qV#TXF=T4w_zze0j7 zTG%!__4oH@y6r&VT#SJp*t}a-#!#Y=Vkx!Kf_>Xl{FUyWGTM~AQM-cFgyPjQv#mC^ zj+B2a0pBnw^T?yjzo;7!Dtaq_>Gi^^lFB*k&aXt5av$eqVPA_Gg;XIS3Uq z6MH85=Q;iOMD=K?KCp95yS3cpku=d-<;IEF+Z8sSPG|muIxSXev7L3zi_YYihKJ&# z?Rt&1x>Ty8IHZzg?zuv$bb3skYI+*`Pk+d=zj%EBAu|EVZ;H{HmJ`sRhtSH}_u`uf zG``q}B;vT;pCdZG5786*xj>jQC zDh-o6gCLqjiT!2As_hf0k3>YIdn@Q;#zRx*T}FBrKN7N0v^@Ds=t2a2x}gwi5DPRH zbgiV-sFbS5}Kh`y#hZ+&fxYhwSaVl<40MjPx`kG@Jqi>{}cUCN9u4ZtmqR$u!c{3GJ4Aj zUW3%Q(jG8&03NB;i3Ue7H{jj!CpC-6B4_cOGziri%e-^mrOa2c5}($7}kZv3?f0sur5gZ;&33TbIMC z*_vehRE)XC*8eft`#v7*|7x&Dwf6s9a5cCJd|Bj9H6dhl#84}J;Ef^qOL@HsMj zJva=m1{=WH;CAf)Uj;7$F9er@3&A76_ptqM1HTVm1l7zY4q<41rDHQQ%?V zr@$T9{NDrL1+NCr2Q_dx=m-B3oBtN@WRQSIfCqseVC#Pud>gz9{0h+i`zL`Xg0sOR zz#Z89p9Y@W^0TicjKDZnB1N;NW z!FAwLa1Pi2ZpSb1QE)T(4e)GmHJAi@!KL6F@Ccyx`pZuLfttHL)@;_BnZ}*PLfJ0k zV6<;+g^5kt)NG~MoUP%r?RSOR6^4QZXR6gf_EhKW4^0*}(a;O~T+-s-?24|hB*_e? z;d!Mhl&1^DOxBu>>;6(9&c4{=8O+Y{4AGxVw@BS!16DCxq9$}LE^K)hYoQ54UtfF_ z-lkffR^AkcmZLxDpO)7o3T6oQ2zeC}W^7X4Q2~!r^C^Nua`Yq^IWtlMv|aYj#d;KS zbNx*Y{g^3gmXnoT^7%uAJFES^?|*dD$?v%~5u;t?q98N=w`H1qCi!~fuCiTp+g42A zoAPWEANg5GoR_Eh*s}Mf`7CNo=C79#?L{|^gTa`m*@-+I>B!2?MLUcwHQFaS1t2R2 zb8 zG;&~P${*(@Q(h}pNf~3!tw3#b)$+sDU{OVAxC-!8yLRc(R-dV`cXW#Iy?*1s zbk8*%mXNCT9V4K0X89{2+V%e;4R#Htx^F%z^JeJP8|{pOpu+b)c_EgK?!9^U9C4#+ zrF69Dibd2?*#H~&EIB5SHFW&JqubM(o7O!_!(4jT4_$yrjXXa3xSBgRYydpKPs#A!c_Qk{-!Au`Np%x-4gK`qxY z&L~4<#5RyyUtTspke+6K#gM)z`sUh)O3hlqLu>b$t?NxZvB>jkL>Fgv%UTmVqeLTsW zDV|W(;EttAy&dGKyUJ~`y7B;dSI(M=<0n&N>LQ^RHBrfem&XqrkWl+-cZX*MMou-# zL`$>RKqEDS>$a_YT!%wZT4PkKwLepBv|rUd3SKxsR`nb;48G}vqZsdr-XhH<|9~>EIrW@g%zdZ`h*%UmzO^_t_8FH=!qjF z*a=9V^e?meV?A*iZpNY!lK7QOHf76BS`xyo9R^5KYuF&)aB0!aY_y4dEe6iB+XG@H z(828(*Y;T4Q5HCzz!#hi?GIGwLbcdAgA zH1yH(2y@NarBv(xn7V!EVpD4U|JlL*{|9XTKL@t~odfuM&;+}H_W%Dow*EhZkAhpl zyMWFBXo5+w704g(O>F@;7ZodpA(&_C9Pck*Of8xq9kLvSIZ0ke##x;pw zoOr6eoTt*i>A3Gv3NGWb{nq&O?AZRvnXBw}7%`-G`+|ub6LqkLgZ#SwO&tEEy+iFA zY<%;_t@Lo`dg{fth)_&NCi{A1!FV@UYL|(UXx7V>R!fRb$qW|0sLUfRL%}e+uDW3 zB}S7DBBL{tiejTvEPDWA@%jfx-cc8R8}WDgf( zrGO|ewHm6PJqHOg0OIkOAsvRC>XTdZ45gu9Om+zxJ=$R%->gY(UxTu6>mcdG%J4-J1_bNQ+cJOTz3;0@GCN#I?3~VQ<;d0*ZPFk&dDmh# zWZ2D4KIY}TgL~=vNtlZUi!y(F&WtcJ3@g&QFz1dO2savo-o{T~!S==R=)Ot9M`Cqm zg2F_KF)L=BMKxultUc~)HJd>pTX{vftO1Qd4(ADnZ{e-9J;1BQgF&mse!2McaHLp+ zsT>)r(uDINNBh&Au1J+`s!wEAZDzmHKtU|BYt}f}&UP3lbVL~@(|sQKqQSDiVTDjL z#lsk1Jz_mw6!1MW#@c*)hq#>(3K231fr6nO()|dbTW2n4->2{dwkhnCvzl7@K;{24 zsiy%LYDg&W->QMzX}2A6%Drs5`V`Rk2YVK~o|+Lgy$VN!0SUTHSz zOUXW3YK=9fD;9EPEpH}oysA0|+Je|hJhOB;Sq{l^fw;28wc&dp_dqTl*M*7=mbqsLZ zHDTqZnSNAKsh; z{glz@a4j-rV$G*Y!xD`oKAQCE&#H!z9hc*xc4ErOE?fUkFz~ddEc-vcUr*=%eF(e{ zyaqf2=={Hnz!q>{@YCSS*!bETumCOscL#roU4JWh1JId&$HArGEO1|NFK`d=aqRoI zgO`FB8!*fFtAXAD5YPU*Ki_p`z@LCu0-aa*a&R>m1NQ`X10TcQe>YIv!0&_Sfd!y_ z1K+{cza4xHd=>mIcp11BTm^Q5v%!zB^*;vQ4Q>U$51tORpWtdBpTNVx!@%dT`TrYu zH+U9!BKSJC{D;7+!HdE3z)e6l{{Z*~cKp}DZ-cjh5ukX1Phzj%23`kV1%4Ae2@HWp zf*)b8e+>K?xEVCTG#CcjZ=iGi?*SA;@FDPe@GS65Fb7ItH&8o29GvRoBf#18DLekS zp@Uv(M`p`9Jhfk!f=fImKzr0x4Jz5C0nA&ZGiSt_fkdLvCGB=ccc#C)=4E0V+<)oC z!&|S_{OGH|{KzNr&=eQ)%ORoCRu?aFO~>W zy$Qi2ELkdXW-Wefj({D?rWj_Ijq~iXaSArOYe8NFBQd2bmeO%qU9IyHis3-AF=Swe z#JeMmv!;zuSVQfwy7-^x!%r<@#vS9{XE>X|!-(@;2zOP}XHp)89hwX?`_qG=MO5TJ 
zR|c{^)EyV?Z%PPjl5q}8W9%&R6?vdAVs&0<7&?;F!gJ&);gi_FR zmpbK^`r5YU7+yho9jS3;xsY22$diKPe)lD$BIk)0~3%~3HC zWL`PJ&gw{KmJXpY2{!^dN7SOuv)NX6(gDM_0C&EpB9?%jj5ylMP)?x%#(t>bN!DwO zVF?ae8k4q5^+BVP!PO4gURbV`?K~?I(O0d*ily+9BrJ<@+%iI9$?OST?ici#}dd>rRS=PVjz((*WEgRT9IIN8^i^eXqJ+dL*+^Ejy(6>6=nQnbaTs@UC3+c&C{l)uC+K#c&Qq4+ymQq@6SxX~O z{UHO?(+-po`e*o#-RfZ0-3`MtKu<1p5(yDHeB&-cvNpV-IJ2M@+7tFPIzw4Sm& zqmj&UZrkEfd2zam^qBEx(KPW+>YJG+la5H#J4Fkk6hZh)iSdiN3|q zEGUimYZ&6I+Lj%AOg9-((^;;ChOC!ukoI?CnJh_Z*ZTisFa{qYn=ohpe+AqAv*1(U zli-!$M({K+4W_^Vco6toY<|51;Qipu;1=*Ap!Wox1MUUx3BG{M|9PPO0eV;9m0%WdFY%`(A7RQ(zC+4YdCMEo}R5g0BGC{4WK+3SIP#{0Rx4~Dy{{lCI7lLPir-2>dtJwB`3-oTkYry$n1JHSdj|Jbs&Q~13d%?}% z_24<+N^lTd4$cLGK=A;I0T=>$hu@>Yqrf+@`M(T42>uk@3XX#v;1@sweja=s+h6Pa zPX*&(FX#t9#MXZUSOSN^As`>XXJ|*AHSluqQlLEq@&OzMJAvBy!C%{tnH=Hsumq^?xD4{ix#uj?kkAr&P$TwExzjokY($GA4x znRjtwZzdX@*fTaeH90yvIWaXeON79lsqq86NhPEWR&X|v^Og@nn7e0jGIbAu`e8r6 zD1ZsZlXvVA-DJ~gwBsJPZM(0gMfDYzNYvI=sYMrMbJZH5gPq#N3UoZ8GdO-gG~T?&ND)2B(aalYN^->`Mq{^b=08N%^d8|ypr)@zm01U7uS zu8~t4Qe%-jY8Hxg9m32OOYT-n*OTIFE~DVF?MkM>JYB@?3bIa%YkLFRSQ?H4=EW*T zik%iAH?Q{aMfHid?UeNv8IRZSIk<;7b~U7`70mvS)oC?rCs*<0=#TlMLaiYywu96`fi1k%X`bIajO&Jgla<~JhblVOE@mEkw#Vbc`yR&otGo}oN4U73dT*2;SAn`A}t zDyGxv!YI{fSN!DPs9x~)lx&r?tES7q8$B-vcpAGtar^Kpd zlqfePIhzPNFP=dbCYYaq%=YkU)BKrxB*<>=Xe^6}w^s%t&(;!o-l}E9RaKd{&&7Ep zb&+(+xOf@LkQJ2{S2X=qLg>OCQO0jEgH64;;kR>VFNwN`6rxY!8#M=4RRBun_w=!_N?f(+OITx#h_?A1tJ~5 z@^3u#@x}w)y??|W68Wrl)aO)HAVruz)O1D1&=J;~wLHP$NyJbyDVvFCBI805yPlK* z5^K7*?VFYk=k`n)gH0Z~4rL(Rx4cJ)^k$A%z&9Zip}!Jt?Z4@*`KWY_kIQsplXt3q=E35OTM*fThu8#=$c0hFYoehQ37f3V#ak%e zE107vn`ZQEm@@QSj~PQLT;QzMxm?0{AIi(rw61jkOOa}5DnmYNMI1S|Y!dWx;^{Et zU6=yXV(~3DhkKmZ0te-!)w@8MhYOQlaes-`Vhe$^MuvK(J*O8l#u%I|?kJKW2E+}s zHfeT8skyYM7GAnDJuGg9#Z}q<48=2}lSTzJM{!YPP?|^sxJ}@3O;*81z9icG%H-~yoc0DceK{=4AA z;LpHo!E3+~Z~$m8z!vaB?0mfw@GroxgKL1!3D^eo?tp*B&e!<>Hv*j*cmvQm0Ye}G zj{%PcKfvz49o!0@1D*)v6SxG(FYo~HkJ$RRf!_r>NAMPK99#nK3qFgT|61@oa0KWK z!GFQF{~Pev;8Q?*1LOnfT>szBwQb;EvF~pK9|Eri*MP&|=YZk>be3Q|JK!yRe-?Nq zm;-x&;sJgZoCW>{TmOCFW#A>?*T5vW5Ii2-fzAKN;00hOxDeb6d&YQuV`}9%_vaa1_fn_R#`nLre`NTL*_i}Y2-IA14nX}^l zB=cZLZEjI-74b!l!irns87}age`|Svk?Lv23)%;-FCLqk=|?}-GK#tb7lXFEq8YQP z+`eHLO%iK_axGL~wmhemBkZ2?ocG-ZRNGY^ft4MEg*mD? zLAent()O7HrgH-8B05&nhBzCpbDF9yvNrFvjN>5cZ;UQCD&VOcw7 zlNQd_p&FUjUVgW5WI~GUM z;6kixChx4^UT1$)ZMe`2$+a&x(JK1ZUHrKy_uMq6j7K_6EK6{yEbwM6?M+cFBf_z#Sc&5Z73GY*1Ck+PUugQ&XOvYF}&d zF<6|leMJpKd6KA}slB`z-1gLO38GSINV3&D^ABW-lHcptxm4^-K{`gIMCAv*jwQrk z@49Qu35&}_EJua^ajsd-BV&^+6%r7m3BiD;1c! 
From 0f63837e69b3f620196be6ebdfe583d02a37bc7e Mon Sep 17 00:00:00 2001
From: Minghao Li
Date: Mon, 8 Aug 2022 13:25:02 +0800
Subject: [PATCH 16/36] refactor(sync): make leader life longer

---
 source/dnode/vnode/src/vnd/vnodeSync.c | 2 ++
 source/libs/sync/src/syncMain.c        | 6 ++++++
 2 files changed, 8 insertions(+)

diff --git a/source/dnode/vnode/src/vnd/vnodeSync.c b/source/dnode/vnode/src/vnd/vnodeSync.c
index a269f81ddd..50ca81e58b 100644
--- a/source/dnode/vnode/src/vnd/vnodeSync.c
+++ b/source/dnode/vnode/src/vnd/vnodeSync.c
@@ -722,10 +722,12 @@ void vnodeSyncClose(SVnode *pVnode) { syncStop(pVnode->sync); }
 bool vnodeIsLeader(SVnode *pVnode) {
   if (!syncIsReady(pVnode->sync)) {
+    vDebug("vgId:%d, vnode not ready", pVnode->config.vgId);
     return false;
   }
   if (!pVnode->restored) {
+    vDebug("vgId:%d, vnode not restored", pVnode->config.vgId);
     terrno = TSDB_CODE_APP_NOT_READY;
     return false;
   }
diff --git a/source/libs/sync/src/syncMain.c
b/source/libs/sync/src/syncMain.c index 389eda0f8b..3d9b34c34c 100644 --- a/source/libs/sync/src/syncMain.c +++ b/source/libs/sync/src/syncMain.c @@ -2668,6 +2668,12 @@ int32_t syncDoLeaderTransfer(SSyncNode* ths, SRpcMsg* pRpcMsg, SSyncRaftEntry* p syncNodeEventLog(ths, "I am not follower, can not do leader transfer"); return 0; } + + if (!ths->restoreFinish) { + syncNodeEventLog(ths, "restore not finish, can not do leader transfer"); + return 0; + } + syncNodeEventLog(ths, "do leader transfer"); bool sameId = syncUtilSameId(&(pSyncLeaderTransfer->newLeaderId), &(ths->myRaftId)); From f15517b1d7f4decd06be411466346c685da39258 Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Mon, 8 Aug 2022 13:28:49 +0800 Subject: [PATCH 17/36] fix: add checks for stream query --- source/libs/parser/src/parTranslater.c | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index be16289595..c12c4aa9c6 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -4729,15 +4729,8 @@ static int32_t checkCreateStream(STranslateContext* pCxt, SCreateStreamStmt* pSt return TSDB_CODE_SUCCESS; } - if (QUERY_NODE_SELECT_STMT != nodeType(pStmt->pQuery)) { - return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, "Unsupported stream query"); - } - - SSelectStmt* pSelect = (SSelectStmt*)pStmt->pQuery; - - if (QUERY_NODE_REAL_TABLE != nodeType(pSelect->pFromTable) || - TSDB_DATA_TYPE_TIMESTAMP != ((SExprNode*)nodesListGetNode(pSelect->pProjectionList, 0))->resType.type || - !pSelect->isTimeLineResult || crossTableWithoutAggOper(pSelect)) { + if (QUERY_NODE_SELECT_STMT != nodeType(pStmt->pQuery) || + QUERY_NODE_REAL_TABLE != nodeType(((SSelectStmt*)pStmt->pQuery)->pFromTable)) { return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, "Unsupported stream query"); } @@ -4770,12 +4763,23 @@ static int32_t addWstartTsToCreateStreamQuery(SNode* pStmt) { return code; } +static int32_t checkStreamQuery(STranslateContext* pCxt, SSelectStmt* pSelect) { + if (TSDB_DATA_TYPE_TIMESTAMP != ((SExprNode*)nodesListGetNode(pSelect->pProjectionList, 0))->resType.type || + !pSelect->isTimeLineResult || crossTableWithoutAggOper(pSelect)) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, "Unsupported stream query"); + } + return TSDB_CODE_SUCCESS; +} + static int32_t buildCreateStreamQuery(STranslateContext* pCxt, SNode* pStmt, SCMCreateStreamReq* pReq) { pCxt->createStream = true; int32_t code = addWstartTsToCreateStreamQuery(pStmt); if (TSDB_CODE_SUCCESS == code) { code = translateQuery(pCxt, pStmt); } + if (TSDB_CODE_SUCCESS == code) { + code = checkStreamQuery(pCxt, (SSelectStmt*)pStmt); + } if (TSDB_CODE_SUCCESS == code) { getSourceDatabase(pStmt, pCxt->pParseCxt->acctId, pReq->sourceDB); code = nodesNodeToString(pStmt, false, &pReq->ast, NULL); From 020a656063452290abf5caeeb0587715d418e694 Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Mon, 8 Aug 2022 13:32:20 +0800 Subject: [PATCH 18/36] fix: fix memory double free issue --- source/client/src/clientHb.c | 1 + source/client/src/tmq.c | 2 +- source/libs/catalog/src/ctgRemote.c | 4 +++- source/libs/catalog/src/ctgUtil.c | 10 +++++++++- 4 files changed, 14 insertions(+), 3 deletions(-) diff --git a/source/client/src/clientHb.c b/source/client/src/clientHb.c index 06bd3f3887..7031a1ebca 100644 --- a/source/client/src/clientHb.c +++ 
b/source/client/src/clientHb.c @@ -286,6 +286,7 @@ static int32_t hbAsyncCallBack(void *param, SDataBuf *pMsg, int32_t code) { if (pInst == NULL || NULL == *pInst) { taosThreadMutexUnlock(&appInfo.mutex); tscError("cluster not exist, key:%s", key); + taosMemoryFree(pMsg->pData); tFreeClientHbBatchRsp(&pRsp); return -1; } diff --git a/source/client/src/tmq.c b/source/client/src/tmq.c index f7d45dc6ff..f7d04e67ee 100644 --- a/source/client/src/tmq.c +++ b/source/client/src/tmq.c @@ -1007,7 +1007,7 @@ int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) { taosMemoryFree(pParam); if (code != 0) { tscWarn("msg discard from vgId:%d, epoch %d, code:%x", vgId, epoch, code); - if (pMsg->pData) taosMemoryFree(pMsg->pData); + if (pMsg->pData) taosMemoryFreeClear(pMsg->pData); if (code == TSDB_CODE_TQ_NO_COMMITTED_OFFSET) { SMqPollRspWrapper* pRspWrapper = taosAllocateQitem(sizeof(SMqPollRspWrapper), DEF_QITEM); if (pRspWrapper == NULL) { diff --git a/source/libs/catalog/src/ctgRemote.c b/source/libs/catalog/src/ctgRemote.c index a9f2d426bc..45f97865ce 100644 --- a/source/libs/catalog/src/ctgRemote.c +++ b/source/libs/catalog/src/ctgRemote.c @@ -467,6 +467,7 @@ int32_t ctgAddBatch(SCatalog* pCtg, int32_t vgId, SRequestConnInfo* pConn, SCtgT if (NULL == taosArrayPush(newBatch.pMsgs, &req)) { CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY); } + msg = NULL; if (NULL == taosArrayPush(newBatch.pTaskIds, &pTask->taskId)) { CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY); } @@ -517,6 +518,7 @@ int32_t ctgAddBatch(SCatalog* pCtg, int32_t vgId, SRequestConnInfo* pConn, SCtgT if (NULL == taosArrayPush(pBatch->pMsgs, &req)) { CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY); } + msg = NULL; if (NULL == taosArrayPush(pBatch->pTaskIds, &pTask->taskId)) { CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY); } @@ -545,7 +547,7 @@ int32_t ctgAddBatch(SCatalog* pCtg, int32_t vgId, SRequestConnInfo* pConn, SCtgT CTG_ERR_JRET(TSDB_CODE_APP_ERROR); } - tNameGetFullDbName(pName, newBatch.dbFName); + tNameGetFullDbName(pName, pBatch->dbFName); } ctgDebug("task %d %s req added to batch %d, target vgId %d", pTask->taskId, TMSG_INFO(msgType), pBatch->batchId, diff --git a/source/libs/catalog/src/ctgUtil.c b/source/libs/catalog/src/ctgUtil.c index 8e5fb90f1a..1ca60c89cd 100644 --- a/source/libs/catalog/src/ctgUtil.c +++ b/source/libs/catalog/src/ctgUtil.c @@ -438,6 +438,14 @@ void ctgFreeMsgCtx(SCtgMsgCtx* pCtx) { } } +void ctgFreeTbMetasMsgCtx(SCtgMsgCtx* pCtx) { + ctgFreeMsgCtx(pCtx); + if (pCtx->lastOut) { + ctgFreeSTableMetaOutput((STableMetaOutput*)pCtx->lastOut); + pCtx->lastOut = NULL; + } +} + void ctgFreeSTableMetaOutput(STableMetaOutput* pOutput) { if (NULL == pOutput) { return; @@ -641,7 +649,7 @@ void ctgFreeTaskCtx(SCtgTask* pTask) { taosArrayDestroy(taskCtx->pFetchs); // NO NEED TO FREE pNames - taosArrayDestroyEx(pTask->msgCtxs, (FDelete)ctgFreeMsgCtx); + taosArrayDestroyEx(pTask->msgCtxs, (FDelete)ctgFreeTbMetasMsgCtx); if (pTask->msgCtx.lastOut) { ctgFreeSTableMetaOutput((STableMetaOutput*)pTask->msgCtx.lastOut); From 07b88a35d515d04abaac1f95deb76c9f2c792694 Mon Sep 17 00:00:00 2001 From: slzhou Date: Mon, 8 Aug 2022 13:44:23 +0800 Subject: [PATCH 19/36] fix: ensure capacity before join left and right --- source/libs/executor/src/joinoperator.c | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/source/libs/executor/src/joinoperator.c b/source/libs/executor/src/joinoperator.c index e96844f1e3..2f9eff50b6 100644 --- a/source/libs/executor/src/joinoperator.c +++ b/source/libs/executor/src/joinoperator.c @@ 
-256,7 +256,7 @@ static int32_t mergeJoinJoinDownstreamTsRanges(SOperatorInfo* pOperator, int64_t SArray* rightRowLocations = taosArrayInit(8, sizeof(SRowLocation)); SArray* rightCreatedBlocks = taosArrayInit(8, POINTER_BYTES); - + int32_t code = TSDB_CODE_SUCCESS; mergeJoinGetDownStreamRowsEqualTimeStamp(pOperator, 0, pJoinInfo->leftCol.slotId, pJoinInfo->pLeft, pJoinInfo->leftPos, timestamp, leftRowLocations, leftCreatedBlocks); mergeJoinGetDownStreamRowsEqualTimeStamp(pOperator, 1, pJoinInfo->rightCol.slotId, pJoinInfo->pRight, @@ -264,14 +264,20 @@ static int32_t mergeJoinJoinDownstreamTsRanges(SOperatorInfo* pOperator, int64_t size_t leftNumJoin = taosArrayGetSize(leftRowLocations); size_t rightNumJoin = taosArrayGetSize(rightRowLocations); - for (int32_t i = 0; i < leftNumJoin; ++i) { - for (int32_t j = 0; j < rightNumJoin; ++j) { - SRowLocation* leftRow = taosArrayGet(leftRowLocations, i); - SRowLocation* rightRow = taosArrayGet(rightRowLocations, j); - mergeJoinJoinLeftRight(pOperator, pRes, *nRows, leftRow->pDataBlock, leftRow->pos, rightRow->pDataBlock, - rightRow->pos); - ++*nRows; - } + code = blockDataEnsureCapacity(pRes, *nRows + leftNumJoin * rightNumJoin); + if (code != TSDB_CODE_SUCCESS) { + qError("%s can not ensure block capacity for join. left: %zu, right: %zu", GET_TASKID(pOperator->pTaskInfo), leftNumJoin, rightNumJoin); + } + if (code == TSDB_CODE_SUCCESS) { + for (int32_t i = 0; i < leftNumJoin; ++i) { + for (int32_t j = 0; j < rightNumJoin; ++j) { + SRowLocation *leftRow = taosArrayGet(leftRowLocations, i); + SRowLocation *rightRow = taosArrayGet(rightRowLocations, j); + mergeJoinJoinLeftRight(pOperator, pRes, *nRows, leftRow->pDataBlock, leftRow->pos, rightRow->pDataBlock, + rightRow->pos); + ++*nRows; + } + } } for (int i = 0; i < taosArrayGetSize(rightCreatedBlocks); ++i) { From b23fe53327ae95879d99cc888b88abede1aa594b Mon Sep 17 00:00:00 2001 From: wade zhang <95411902+gccgdb1234@users.noreply.github.com> Date: Mon, 8 Aug 2022 13:56:30 +0800 Subject: [PATCH 20/36] Update 03-package.md --- docs/zh/05-get-started/03-package.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/zh/05-get-started/03-package.md b/docs/zh/05-get-started/03-package.md index 6dbf74f8bc..1cdfe20c8d 100644 --- a/docs/zh/05-get-started/03-package.md +++ b/docs/zh/05-get-started/03-package.md @@ -11,7 +11,7 @@ import TabItem from "@theme/TabItem"; ::: -TDengine 开源版本提供 deb 和 rpm 格式安装包,用户可以根据自己的运行环境选择合适的安装包。其中 deb 支持 Debian/Ubuntu 及衍生系统,rpm 支持 CentOS/RHEL/SUSE 及衍生系统。同时我们也为企业用户提供 tar.gz 格式安装包。 +TDengine 开源版本提供 deb 和 rpm 格式安装包,用户可以根据自己的运行环境选择合适的安装包。其中 deb 支持 Debian/Ubuntu 及衍生系统,rpm 支持 CentOS/RHEL/SUSE 及衍生系统。同时我们也为企业用户提供 tar.gz 格式安装包。也支持通过 `apt-get` 工具从线上进行安装。 ## 安装 @@ -293,4 +293,4 @@ taos> select avg(current), max(voltage), min(phase) from test.meters where group ```sql taos> select avg(current), max(voltage), min(phase) from test.d10 interval(10s); -``` \ No newline at end of file +``` From d9b8417d7b0791080096f684bfe381b9cc3c97d3 Mon Sep 17 00:00:00 2001 From: Minghao Li Date: Mon, 8 Aug 2022 14:01:31 +0800 Subject: [PATCH 21/36] refactor(sync): make leader life longer --- source/libs/sync/src/syncMain.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c index 3d9b34c34c..cdd729999f 100644 --- a/source/libs/sync/src/syncMain.c +++ b/source/libs/sync/src/syncMain.c @@ -2674,7 +2674,16 @@ int32_t syncDoLeaderTransfer(SSyncNode* ths, SRpcMsg* pRpcMsg, SSyncRaftEntry* p return 0; 
} - syncNodeEventLog(ths, "do leader transfer"); + if (ths->vgId > 1) { + syncNodeEventLog(ths, "I am vnode, can not do leader transfer"); + return 0; + } + + do { + char logBuf[128]; + snprintf(logBuf, sizeof(logBuf), "do leader transfer, index:%ld", pEntry->index); + syncNodeEventLog(ths, logBuf); + } while (0); bool sameId = syncUtilSameId(&(pSyncLeaderTransfer->newLeaderId), &(ths->myRaftId)); bool sameNodeInfo = strcmp(pSyncLeaderTransfer->newNodeInfo.nodeFqdn, ths->myNodeInfo.nodeFqdn) == 0 && From f8a233442effb622cb531e2ce3e4c6f08e4723f8 Mon Sep 17 00:00:00 2001 From: cpwu Date: Mon, 8 Aug 2022 14:30:54 +0800 Subject: [PATCH 22/36] fix case --- tests/system-test/2-query/distribute_agg_spread.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/system-test/2-query/distribute_agg_spread.py b/tests/system-test/2-query/distribute_agg_spread.py index 318f31f9a1..c8957f216d 100644 --- a/tests/system-test/2-query/distribute_agg_spread.py +++ b/tests/system-test/2-query/distribute_agg_spread.py @@ -188,7 +188,7 @@ class TDTestCase: def distribute_agg_query(self, dbname="testdb"): # basic filter tdSql.query(f"select spread(c1) from {dbname}.stb1 where c1 is null") - tdSql.checkRows(0) + tdSql.checkRows(1) tdSql.query(f"select spread(c1) from {dbname}.stb1 where t1=1") tdSql.checkData(0,0,8.000000000) From 7a559084ae1cdb33bbbf60531fc066f908b37e58 Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Mon, 8 Aug 2022 14:38:04 +0800 Subject: [PATCH 23/36] fix: add checks for stream query --- source/libs/parser/src/parTranslater.c | 4 +- tests/script/tsim/stream/drop_stream.sim | 88 ++++++++++++------------ 2 files changed, 45 insertions(+), 47 deletions(-) diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index c12c4aa9c6..5c49a6e0ab 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -2515,7 +2515,7 @@ static bool isPartitionByTbname(SNodeList* pPartitionByList) { return false; } SNode* pPartKey = nodesListGetNode(pPartitionByList, 0); - return QUERY_NODE_FUNCTION != nodeType(pPartKey) || FUNCTION_TYPE_TBNAME != ((SFunctionNode*)pPartKey)->funcType; + return QUERY_NODE_FUNCTION == nodeType(pPartKey) && FUNCTION_TYPE_TBNAME == ((SFunctionNode*)pPartKey)->funcType; } static int32_t checkStateWindowForStream(STranslateContext* pCxt, SSelectStmt* pSelect) { @@ -2566,7 +2566,6 @@ static int32_t translateWindow(STranslateContext* pCxt, SSelectStmt* pSelect) { if (NULL == pSelect->pWindow) { return TSDB_CODE_SUCCESS; } - pSelect->isTimeLineResult = true; pCxt->currClause = SQL_CLAUSE_WINDOW; int32_t code = translateExpr(pCxt, &pSelect->pWindow); if (TSDB_CODE_SUCCESS == code) { @@ -2637,7 +2636,6 @@ static int32_t translatePartitionBy(STranslateContext* pCxt, SSelectStmt* pSelec if (NULL == pSelect->pPartitionByList) { return TSDB_CODE_SUCCESS; } - pSelect->isTimeLineResult = false; pCxt->currClause = SQL_CLAUSE_PARTITION_BY; return translateExprList(pCxt, pSelect->pPartitionByList); } diff --git a/tests/script/tsim/stream/drop_stream.sim b/tests/script/tsim/stream/drop_stream.sim index bdd88bf780..747f59fe85 100644 --- a/tests/script/tsim/stream/drop_stream.sim +++ b/tests/script/tsim/stream/drop_stream.sim @@ -45,70 +45,70 @@ sql create table test.scalar_function_tb1 (ts timestamp, c1 tinyint, c2 smallint sql create table if not exists scalar_stb (ts timestamp, c1 int, c2 double, c3 binary(20), c4 binary(20), c5 nchar(20)) tags (t1 int); sql create table scalar_ct1 using scalar_stb tags(10); 
sql create table if not exists scalar_tb (ts timestamp, c1 int, c2 double, c3 binary(20), c4 binary(20), c5 nchar(20)); -sql create stream stb_abs_stream trigger at_once into output_abs_stb as select ts, abs(c1), abs(c2), c3 from scalar_stb; +sql create stream stb_abs_stream trigger at_once into output_abs_stb as select ts, abs(c1), abs(c2), c3 from scalar_stb partition by tbname; sql create stream ctb_abs_stream trigger at_once into output_abs_ctb as select ts, abs(c1), abs(c2), c3 from scalar_ct1; sql create stream tb_abs_stream trigger at_once into output_abs_tb as select ts, abs(c1), abs(c2), c3 from scalar_tb; -sql create stream stb_acos_stream trigger at_once into output_acos_stb as select ts, acos(c1), acos(c2), c3 from scalar_stb; +sql create stream stb_acos_stream trigger at_once into output_acos_stb as select ts, acos(c1), acos(c2), c3 from scalar_stb partition by tbname; sql create stream ctb_acos_stream trigger at_once into output_acos_ctb as select ts, acos(c1), acos(c2), c3 from scalar_ct1; sql create stream tb_acos_stream trigger at_once into output_acos_tb as select ts, acos(c1), acos(c2), c3 from scalar_tb; -sql create stream stb_asin_stream trigger at_once into output_asin_stb as select ts, asin(c1), asin(c2), c3 from scalar_stb; +sql create stream stb_asin_stream trigger at_once into output_asin_stb as select ts, asin(c1), asin(c2), c3 from scalar_stb partition by tbname; sql create stream ctb_asin_stream trigger at_once into output_asin_ctb as select ts, asin(c1), asin(c2), c3 from scalar_ct1; sql create stream tb_asin_stream trigger at_once into output_asin_tb as select ts, asin(c1), asin(c2), c3 from scalar_tb; -sql create stream stb_atan_stream trigger at_once into output_atan_stb as select ts, atan(c1), atan(c2), c3 from scalar_stb; +sql create stream stb_atan_stream trigger at_once into output_atan_stb as select ts, atan(c1), atan(c2), c3 from scalar_stb partition by tbname; sql create stream ctb_atan_stream trigger at_once into output_atan_ctb as select ts, atan(c1), atan(c2), c3 from scalar_ct1; sql create stream tb_atan_stream trigger at_once into output_atan_tb as select ts, atan(c1), atan(c2), c3 from scalar_tb; -sql create stream stb_ceil_stream trigger at_once into output_ceil_stb as select ts, ceil(c1), ceil(c2), c3 from scalar_stb; +sql create stream stb_ceil_stream trigger at_once into output_ceil_stb as select ts, ceil(c1), ceil(c2), c3 from scalar_stb partition by tbname; sql create stream ctb_ceil_stream trigger at_once into output_ceil_ctb as select ts, ceil(c1), ceil(c2), c3 from scalar_ct1; sql create stream tb_ceil_stream trigger at_once into output_ceil_tb as select ts, ceil(c1), ceil(c2), c3 from scalar_tb; -sql create stream stb_cos_stream trigger at_once into output_cos_stb as select ts, cos(c1), cos(c2), c3 from scalar_stb; +sql create stream stb_cos_stream trigger at_once into output_cos_stb as select ts, cos(c1), cos(c2), c3 from scalar_stb partition by tbname; sql create stream ctb_cos_stream trigger at_once into output_cos_ctb as select ts, cos(c1), cos(c2), c3 from scalar_ct1; sql create stream tb_cos_stream trigger at_once into output_cos_tb as select ts, cos(c1), cos(c2), c3 from scalar_tb; -sql create stream stb_floor_stream trigger at_once into output_floor_stb as select ts, floor(c1), floor(c2), c3 from scalar_stb; +sql create stream stb_floor_stream trigger at_once into output_floor_stb as select ts, floor(c1), floor(c2), c3 from scalar_stb partition by tbname; sql create stream ctb_floor_stream trigger at_once into output_floor_ctb 
as select ts, floor(c1), floor(c2), c3 from scalar_ct1; sql create stream tb_floor_stream trigger at_once into output_floor_tb as select ts, floor(c1), floor(c2), c3 from scalar_tb; -sql create stream stb_log_stream trigger at_once into output_log_stb as select ts, log(c1, 2), log(c2, 2), c3 from scalar_stb; +sql create stream stb_log_stream trigger at_once into output_log_stb as select ts, log(c1, 2), log(c2, 2), c3 from scalar_stb partition by tbname; sql create stream ctb_log_stream trigger at_once into output_log_ctb as select ts, log(c1, 2), log(c2, 2), c3 from scalar_ct1; sql create stream tb_log_stream trigger at_once into output_log_tb as select ts, log(c1, 2), log(c2, 2), c3 from scalar_tb; -sql create stream stb_pow_stream trigger at_once into output_pow_stb as select ts, pow(c1, 2), pow(c2, 2), c3 from scalar_stb; +sql create stream stb_pow_stream trigger at_once into output_pow_stb as select ts, pow(c1, 2), pow(c2, 2), c3 from scalar_stb partition by tbname; sql create stream ctb_pow_stream trigger at_once into output_pow_ctb as select ts, pow(c1, 2), pow(c2, 2), c3 from scalar_ct1; sql create stream tb_pow_stream trigger at_once into output_pow_tb as select ts, pow(c1, 2), pow(c2, 2), c3 from scalar_tb; -sql create stream stb_round_stream trigger at_once into output_round_stb as select ts, round(c1), round(c2), c3 from scalar_stb; +sql create stream stb_round_stream trigger at_once into output_round_stb as select ts, round(c1), round(c2), c3 from scalar_stb partition by tbname; sql create stream ctb_round_stream trigger at_once into output_round_ctb as select ts, round(c1), round(c2), c3 from scalar_ct1; sql create stream tb_round_stream trigger at_once into output_round_tb as select ts, round(c1), round(c2), c3 from scalar_tb; -sql create stream stb_sin_stream trigger at_once into output_sin_stb as select ts, sin(c1), sin(c2), c3 from scalar_stb; +sql create stream stb_sin_stream trigger at_once into output_sin_stb as select ts, sin(c1), sin(c2), c3 from scalar_stb partition by tbname; sql create stream ctb_sin_stream trigger at_once into output_sin_ctb as select ts, sin(c1), sin(c2), c3 from scalar_ct1; sql create stream tb_sin_stream trigger at_once into output_sin_tb as select ts, sin(c1), sin(c2), c3 from scalar_tb; -sql create stream stb_sqrt_stream trigger at_once into output_sqrt_stb as select ts, sqrt(c1), sqrt(c2), c3 from scalar_stb; +sql create stream stb_sqrt_stream trigger at_once into output_sqrt_stb as select ts, sqrt(c1), sqrt(c2), c3 from scalar_stb partition by tbname; sql create stream ctb_sqrt_stream trigger at_once into output_sqrt_ctb as select ts, sqrt(c1), sqrt(c2), c3 from scalar_ct1; sql create stream tb_sqrt_stream trigger at_once into output_sqrt_tb as select ts, sqrt(c1), sqrt(c2), c3 from scalar_tb; -sql create stream stb_tan_stream trigger at_once into output_tan_stb as select ts, tan(c1), tan(c2), c3 from scalar_stb; +sql create stream stb_tan_stream trigger at_once into output_tan_stb as select ts, tan(c1), tan(c2), c3 from scalar_stb partition by tbname; sql create stream ctb_tan_stream trigger at_once into output_tan_ctb as select ts, tan(c1), tan(c2), c3 from scalar_ct1; sql create stream tb_tan_stream trigger at_once into output_tan_tb as select ts, tan(c1), tan(c2), c3 from scalar_tb; -sql create stream stb_char_length_stream into output_char_length_stb as select ts, char_length(c3), char_length(c4), char_length(c5) from scalar_stb; +sql create stream stb_char_length_stream into output_char_length_stb as select ts, char_length(c3), 
char_length(c4), char_length(c5) from scalar_stb partition by tbname; sql create stream ctb_char_length_stream into output_char_length_ctb as select ts, char_length(c3), char_length(c4), char_length(c5) from scalar_ct1; sql create stream tb_char_length_stream into output_char_length_tb as select ts, char_length(c3), char_length(c4), char_length(c5) from scalar_tb; -sql create stream stb_concat_stream into output_concat_stb as select ts, concat(c3, c4), concat(c3, c5), concat(c4, c5), concat(c3, c4, c5) from scalar_stb; +sql create stream stb_concat_stream into output_concat_stb as select ts, concat(c3, c4), concat(c3, c5), concat(c4, c5), concat(c3, c4, c5) from scalar_stb partition by tbname; sql create stream ctb_concat_stream into output_concat_ctb as select ts, concat(c3, c4), concat(c3, c5), concat(c4, c5), concat(c3, c4, c5) from scalar_ct1; sql create stream tb_concat_stream into output_concat_tb as select ts, concat(c3, c4), concat(c3, c5), concat(c4, c5), concat(c3, c4, c5) from scalar_tb; -sql create stream stb_concat_ws_stream into output_concat_ws_stb as select ts, concat_ws("aND", c3, c4), concat_ws("and", c3, c5), concat_ws("And", c4, c5), concat_ws("AND", c3, c4, c5) from scalar_stb; +sql create stream stb_concat_ws_stream into output_concat_ws_stb as select ts, concat_ws("aND", c3, c4), concat_ws("and", c3, c5), concat_ws("And", c4, c5), concat_ws("AND", c3, c4, c5) from scalar_stb partition by tbname; sql create stream ctb_concat_ws_stream into output_concat_ws_ctb as select ts, concat_ws("aND", c3, c4), concat_ws("and", c3, c5), concat_ws("And", c4, c5), concat_ws("AND", c3, c4, c5) from scalar_ct1; sql create stream tb_concat_ws_stream into output_concat_ws_tb as select ts, concat_ws("aND", c3, c4), concat_ws("and", c3, c5), concat_ws("And", c4, c5), concat_ws("AND", c3, c4, c5) from scalar_tb; -sql create stream stb_length_stream into output_length_stb as select ts, length(c3), length(c4), length(c5) from scalar_stb; +sql create stream stb_length_stream into output_length_stb as select ts, length(c3), length(c4), length(c5) from scalar_stb partition by tbname; sql create stream ctb_length_stream into output_length_ctb as select ts, length(c3), length(c4), length(c5) from scalar_ct1; sql create stream tb_length_stream into output_length_tb as select ts, length(c3), length(c4), length(c5) from scalar_tb; -sql create stream stb_lower_stream into output_lower_stb as select ts, lower(c3), lower(c4), lower(c5) from scalar_stb; +sql create stream stb_lower_stream into output_lower_stb as select ts, lower(c3), lower(c4), lower(c5) from scalar_stb partition by tbname; sql create stream ctb_lower_stream into output_lower_ctb as select ts, lower(c3), lower(c4), lower(c5) from scalar_ct1; sql create stream tb_lower_stream into output_lower_tb as select ts, lower(c3), lower(c4), lower(c5) from scalar_tb; -sql create stream stb_ltrim_stream into output_ltrim_stb as select ts, ltrim(c3), ltrim(c4), ltrim(c5) from scalar_stb; +sql create stream stb_ltrim_stream into output_ltrim_stb as select ts, ltrim(c3), ltrim(c4), ltrim(c5) from scalar_stb partition by tbname; sql create stream ctb_ltrim_stream into output_ltrim_ctb as select ts, ltrim(c3), ltrim(c4), ltrim(c5) from scalar_ct1; sql create stream tb_ltrim_stream into output_ltrim_tb as select ts, ltrim(c3), ltrim(c4), ltrim(c5) from scalar_tb; -sql create stream stb_rtrim_stream into output_rtrim_stb as select ts, rtrim(c3), rtrim(c4), rtrim(c5) from scalar_stb; +sql create stream stb_rtrim_stream into output_rtrim_stb as select ts, 
rtrim(c3), rtrim(c4), rtrim(c5) from scalar_stb partition by tbname; sql create stream ctb_rtrim_stream into output_rtrim_ctb as select ts, rtrim(c3), rtrim(c4), rtrim(c5) from scalar_ct1; sql create stream tb_rtrim_stream into output_rtrim_tb as select ts, rtrim(c3), rtrim(c4), rtrim(c5) from scalar_tb; -sql create stream stb_substr_stream into output_substr_stb as select ts, substr(c3, 2), substr(c3, 2, 2), substr(c4, 5, 1), substr(c5, 3, 4) from scalar_stb; +sql create stream stb_substr_stream into output_substr_stb as select ts, substr(c3, 2), substr(c3, 2, 2), substr(c4, 5, 1), substr(c5, 3, 4) from scalar_stb partition by tbname; sql create stream ctb_substr_stream into output_substr_ctb as select ts, substr(c3, 2), substr(c3, 2, 2), substr(c4, 5, 1), substr(c5, 3, 4) from scalar_ct1; sql create stream tb_substr_stream into output_substr_tb as select ts, substr(c3, 2), substr(c3, 2, 2), substr(c4, 5, 1), substr(c5, 3, 4) from scalar_tb; -sql create stream stb_upper_stream into output_upper_stb as select ts, upper(c3), upper(c4), upper(c5) from scalar_stb; +sql create stream stb_upper_stream into output_upper_stb as select ts, upper(c3), upper(c4), upper(c5) from scalar_stb partition by tbname; sql create stream ctb_upper_stream into output_upper_ctb as select ts, upper(c3), upper(c4), upper(c5) from scalar_ct1; sql create stream tb_upper_stream into output_upper_tb as select ts, upper(c3), upper(c4), upper(c5) from scalar_tb; sql insert into scalar_ct1 values (1656668180503, 100, 100.1, "beijing", "taos", "Taos"); @@ -136,70 +136,70 @@ sql create table test.scalar_function_tb1 (ts timestamp, c1 tinyint, c2 smallint sql create table if not exists scalar_stb (ts timestamp, c1 int, c2 double, c3 binary(20), c4 binary(20), c5 nchar(20)) tags (t1 int); sql create table scalar_ct1 using scalar_stb tags(10); sql create table if not exists scalar_tb (ts timestamp, c1 int, c2 double, c3 binary(20), c4 binary(20), c5 nchar(20)); -sql create stream stb_abs_stream trigger at_once into output_abs_stb as select ts, abs(c1), abs(c2), c3 from scalar_stb; +sql create stream stb_abs_stream trigger at_once into output_abs_stb as select ts, abs(c1), abs(c2), c3 from scalar_stb partition by tbname; sql create stream ctb_abs_stream trigger at_once into output_abs_ctb as select ts, abs(c1), abs(c2), c3 from scalar_ct1; sql create stream tb_abs_stream trigger at_once into output_abs_tb as select ts, abs(c1), abs(c2), c3 from scalar_tb; -sql create stream stb_acos_stream trigger at_once into output_acos_stb as select ts, acos(c1), acos(c2), c3 from scalar_stb; +sql create stream stb_acos_stream trigger at_once into output_acos_stb as select ts, acos(c1), acos(c2), c3 from scalar_stb partition by tbname; sql create stream ctb_acos_stream trigger at_once into output_acos_ctb as select ts, acos(c1), acos(c2), c3 from scalar_ct1; sql create stream tb_acos_stream trigger at_once into output_acos_tb as select ts, acos(c1), acos(c2), c3 from scalar_tb; -sql create stream stb_asin_stream trigger at_once into output_asin_stb as select ts, asin(c1), asin(c2), c3 from scalar_stb; +sql create stream stb_asin_stream trigger at_once into output_asin_stb as select ts, asin(c1), asin(c2), c3 from scalar_stb partition by tbname; sql create stream ctb_asin_stream trigger at_once into output_asin_ctb as select ts, asin(c1), asin(c2), c3 from scalar_ct1; sql create stream tb_asin_stream trigger at_once into output_asin_tb as select ts, asin(c1), asin(c2), c3 from scalar_tb; -sql create stream stb_atan_stream trigger at_once 
into output_atan_stb as select ts, atan(c1), atan(c2), c3 from scalar_stb; +sql create stream stb_atan_stream trigger at_once into output_atan_stb as select ts, atan(c1), atan(c2), c3 from scalar_stb partition by tbname; sql create stream ctb_atan_stream trigger at_once into output_atan_ctb as select ts, atan(c1), atan(c2), c3 from scalar_ct1; sql create stream tb_atan_stream trigger at_once into output_atan_tb as select ts, atan(c1), atan(c2), c3 from scalar_tb; -sql create stream stb_ceil_stream trigger at_once into output_ceil_stb as select ts, ceil(c1), ceil(c2), c3 from scalar_stb; +sql create stream stb_ceil_stream trigger at_once into output_ceil_stb as select ts, ceil(c1), ceil(c2), c3 from scalar_stb partition by tbname; sql create stream ctb_ceil_stream trigger at_once into output_ceil_ctb as select ts, ceil(c1), ceil(c2), c3 from scalar_ct1; sql create stream tb_ceil_stream trigger at_once into output_ceil_tb as select ts, ceil(c1), ceil(c2), c3 from scalar_tb; -sql create stream stb_cos_stream trigger at_once into output_cos_stb as select ts, cos(c1), cos(c2), c3 from scalar_stb; +sql create stream stb_cos_stream trigger at_once into output_cos_stb as select ts, cos(c1), cos(c2), c3 from scalar_stb partition by tbname; sql create stream ctb_cos_stream trigger at_once into output_cos_ctb as select ts, cos(c1), cos(c2), c3 from scalar_ct1; sql create stream tb_cos_stream trigger at_once into output_cos_tb as select ts, cos(c1), cos(c2), c3 from scalar_tb; -sql create stream stb_floor_stream trigger at_once into output_floor_stb as select ts, floor(c1), floor(c2), c3 from scalar_stb; +sql create stream stb_floor_stream trigger at_once into output_floor_stb as select ts, floor(c1), floor(c2), c3 from scalar_stb partition by tbname; sql create stream ctb_floor_stream trigger at_once into output_floor_ctb as select ts, floor(c1), floor(c2), c3 from scalar_ct1; sql create stream tb_floor_stream trigger at_once into output_floor_tb as select ts, floor(c1), floor(c2), c3 from scalar_tb; -sql create stream stb_log_stream trigger at_once into output_log_stb as select ts, log(c1, 2), log(c2, 2), c3 from scalar_stb; +sql create stream stb_log_stream trigger at_once into output_log_stb as select ts, log(c1, 2), log(c2, 2), c3 from scalar_stb partition by tbname; sql create stream ctb_log_stream trigger at_once into output_log_ctb as select ts, log(c1, 2), log(c2, 2), c3 from scalar_ct1; sql create stream tb_log_stream trigger at_once into output_log_tb as select ts, log(c1, 2), log(c2, 2), c3 from scalar_tb; -sql create stream stb_pow_stream trigger at_once into output_pow_stb as select ts, pow(c1, 2), pow(c2, 2), c3 from scalar_stb; +sql create stream stb_pow_stream trigger at_once into output_pow_stb as select ts, pow(c1, 2), pow(c2, 2), c3 from scalar_stb partition by tbname; sql create stream ctb_pow_stream trigger at_once into output_pow_ctb as select ts, pow(c1, 2), pow(c2, 2), c3 from scalar_ct1; sql create stream tb_pow_stream trigger at_once into output_pow_tb as select ts, pow(c1, 2), pow(c2, 2), c3 from scalar_tb; -sql create stream stb_round_stream trigger at_once into output_round_stb as select ts, round(c1), round(c2), c3 from scalar_stb; +sql create stream stb_round_stream trigger at_once into output_round_stb as select ts, round(c1), round(c2), c3 from scalar_stb partition by tbname; sql create stream ctb_round_stream trigger at_once into output_round_ctb as select ts, round(c1), round(c2), c3 from scalar_ct1; sql create stream tb_round_stream trigger at_once into 
output_round_tb as select ts, round(c1), round(c2), c3 from scalar_tb; -sql create stream stb_sin_stream trigger at_once into output_sin_stb as select ts, sin(c1), sin(c2), c3 from scalar_stb; +sql create stream stb_sin_stream trigger at_once into output_sin_stb as select ts, sin(c1), sin(c2), c3 from scalar_stb partition by tbname; sql create stream ctb_sin_stream trigger at_once into output_sin_ctb as select ts, sin(c1), sin(c2), c3 from scalar_ct1; sql create stream tb_sin_stream trigger at_once into output_sin_tb as select ts, sin(c1), sin(c2), c3 from scalar_tb; -sql create stream stb_sqrt_stream trigger at_once into output_sqrt_stb as select ts, sqrt(c1), sqrt(c2), c3 from scalar_stb; +sql create stream stb_sqrt_stream trigger at_once into output_sqrt_stb as select ts, sqrt(c1), sqrt(c2), c3 from scalar_stb partition by tbname; sql create stream ctb_sqrt_stream trigger at_once into output_sqrt_ctb as select ts, sqrt(c1), sqrt(c2), c3 from scalar_ct1; sql create stream tb_sqrt_stream trigger at_once into output_sqrt_tb as select ts, sqrt(c1), sqrt(c2), c3 from scalar_tb; -sql create stream stb_tan_stream trigger at_once into output_tan_stb as select ts, tan(c1), tan(c2), c3 from scalar_stb; +sql create stream stb_tan_stream trigger at_once into output_tan_stb as select ts, tan(c1), tan(c2), c3 from scalar_stb partition by tbname; sql create stream ctb_tan_stream trigger at_once into output_tan_ctb as select ts, tan(c1), tan(c2), c3 from scalar_ct1; sql create stream tb_tan_stream trigger at_once into output_tan_tb as select ts, tan(c1), tan(c2), c3 from scalar_tb; -sql create stream stb_char_length_stream into output_char_length_stb as select ts, char_length(c3), char_length(c4), char_length(c5) from scalar_stb; +sql create stream stb_char_length_stream into output_char_length_stb as select ts, char_length(c3), char_length(c4), char_length(c5) from scalar_stb partition by tbname; sql create stream ctb_char_length_stream into output_char_length_ctb as select ts, char_length(c3), char_length(c4), char_length(c5) from scalar_ct1; sql create stream tb_char_length_stream into output_char_length_tb as select ts, char_length(c3), char_length(c4), char_length(c5) from scalar_tb; -sql create stream stb_concat_stream into output_concat_stb as select ts, concat(c3, c4), concat(c3, c5), concat(c4, c5), concat(c3, c4, c5) from scalar_stb; +sql create stream stb_concat_stream into output_concat_stb as select ts, concat(c3, c4), concat(c3, c5), concat(c4, c5), concat(c3, c4, c5) from scalar_stb partition by tbname; sql create stream ctb_concat_stream into output_concat_ctb as select ts, concat(c3, c4), concat(c3, c5), concat(c4, c5), concat(c3, c4, c5) from scalar_ct1; sql create stream tb_concat_stream into output_concat_tb as select ts, concat(c3, c4), concat(c3, c5), concat(c4, c5), concat(c3, c4, c5) from scalar_tb; -sql create stream stb_concat_ws_stream into output_concat_ws_stb as select ts, concat_ws("aND", c3, c4), concat_ws("and", c3, c5), concat_ws("And", c4, c5), concat_ws("AND", c3, c4, c5) from scalar_stb; +sql create stream stb_concat_ws_stream into output_concat_ws_stb as select ts, concat_ws("aND", c3, c4), concat_ws("and", c3, c5), concat_ws("And", c4, c5), concat_ws("AND", c3, c4, c5) from scalar_stb partition by tbname; sql create stream ctb_concat_ws_stream into output_concat_ws_ctb as select ts, concat_ws("aND", c3, c4), concat_ws("and", c3, c5), concat_ws("And", c4, c5), concat_ws("AND", c3, c4, c5) from scalar_ct1; sql create stream tb_concat_ws_stream into 
output_concat_ws_tb as select ts, concat_ws("aND", c3, c4), concat_ws("and", c3, c5), concat_ws("And", c4, c5), concat_ws("AND", c3, c4, c5) from scalar_tb; -sql create stream stb_length_stream into output_length_stb as select ts, length(c3), length(c4), length(c5) from scalar_stb; +sql create stream stb_length_stream into output_length_stb as select ts, length(c3), length(c4), length(c5) from scalar_stb partition by tbname; sql create stream ctb_length_stream into output_length_ctb as select ts, length(c3), length(c4), length(c5) from scalar_ct1; sql create stream tb_length_stream into output_length_tb as select ts, length(c3), length(c4), length(c5) from scalar_tb; -sql create stream stb_lower_stream into output_lower_stb as select ts, lower(c3), lower(c4), lower(c5) from scalar_stb; +sql create stream stb_lower_stream into output_lower_stb as select ts, lower(c3), lower(c4), lower(c5) from scalar_stb partition by tbname; sql create stream ctb_lower_stream into output_lower_ctb as select ts, lower(c3), lower(c4), lower(c5) from scalar_ct1; sql create stream tb_lower_stream into output_lower_tb as select ts, lower(c3), lower(c4), lower(c5) from scalar_tb; -sql create stream stb_ltrim_stream into output_ltrim_stb as select ts, ltrim(c3), ltrim(c4), ltrim(c5) from scalar_stb; +sql create stream stb_ltrim_stream into output_ltrim_stb as select ts, ltrim(c3), ltrim(c4), ltrim(c5) from scalar_stb partition by tbname; sql create stream ctb_ltrim_stream into output_ltrim_ctb as select ts, ltrim(c3), ltrim(c4), ltrim(c5) from scalar_ct1; sql create stream tb_ltrim_stream into output_ltrim_tb as select ts, ltrim(c3), ltrim(c4), ltrim(c5) from scalar_tb; -sql create stream stb_rtrim_stream into output_rtrim_stb as select ts, rtrim(c3), rtrim(c4), rtrim(c5) from scalar_stb; +sql create stream stb_rtrim_stream into output_rtrim_stb as select ts, rtrim(c3), rtrim(c4), rtrim(c5) from scalar_stb partition by tbname; sql create stream ctb_rtrim_stream into output_rtrim_ctb as select ts, rtrim(c3), rtrim(c4), rtrim(c5) from scalar_ct1; sql create stream tb_rtrim_stream into output_rtrim_tb as select ts, rtrim(c3), rtrim(c4), rtrim(c5) from scalar_tb; -sql create stream stb_substr_stream into output_substr_stb as select ts, substr(c3, 2), substr(c3, 2, 2), substr(c4, 5, 1), substr(c5, 3, 4) from scalar_stb; +sql create stream stb_substr_stream into output_substr_stb as select ts, substr(c3, 2), substr(c3, 2, 2), substr(c4, 5, 1), substr(c5, 3, 4) from scalar_stb partition by tbname; sql create stream ctb_substr_stream into output_substr_ctb as select ts, substr(c3, 2), substr(c3, 2, 2), substr(c4, 5, 1), substr(c5, 3, 4) from scalar_ct1; sql create stream tb_substr_stream into output_substr_tb as select ts, substr(c3, 2), substr(c3, 2, 2), substr(c4, 5, 1), substr(c5, 3, 4) from scalar_tb; -sql create stream stb_upper_stream into output_upper_stb as select ts, upper(c3), upper(c4), upper(c5) from scalar_stb; +sql create stream stb_upper_stream into output_upper_stb as select ts, upper(c3), upper(c4), upper(c5) from scalar_stb partition by tbname; sql create stream ctb_upper_stream into output_upper_ctb as select ts, upper(c3), upper(c4), upper(c5) from scalar_ct1; sql create stream tb_upper_stream into output_upper_tb as select ts, upper(c3), upper(c4), upper(c5) from scalar_tb; sql insert into scalar_ct1 values (1656668180503, 100, 100.1, "beijing", "taos", "Taos"); From 40d1ff775aa8aee8545be45b84a163ffdfd7469f Mon Sep 17 00:00:00 2001 From: afwerar <1296468573@qq.com> Date: Mon, 8 Aug 2022 15:04:02 +0800 
Subject: [PATCH 24/36] build: add win package

---
 packaging/release.sh         | 2 +-
 source/common/src/systable.c | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/packaging/release.sh b/packaging/release.sh
index 09781dbe8e..2452ee1813 100755
--- a/packaging/release.sh
+++ b/packaging/release.sh
@@ -216,7 +216,7 @@ else
 fi

 # check support cpu type
-if [[ "$cpuType" == "x64" ]] || [[ "$cpuType" == "aarch64" ]] || [[ "$cpuType" == "aarch32" ]] || [[ "$cpuType" == "mips64" ]]; then
+if [[ "$cpuType" == "x64" ]] || [[ "$cpuType" == "aarch64" ]] || [[ "$cpuType" == "aarch32" ]] || [[ "$cpuType" == "arm64" ]] || [[ "$cpuType" == "arm32" ]] || [[ "$cpuType" == "mips64" ]]; then
   if [ "$verMode" != "cluster" ]; then
     # community-version compile
     cmake ../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DPAGMODE=${pagMode} -DBUILD_HTTP=${BUILD_HTTP} -DBUILD_TOOLS=${BUILD_TOOLS} ${allocator_macro}
diff --git a/source/common/src/systable.c b/source/common/src/systable.c
index a79082ab23..871a6e5210 100644
--- a/source/common/src/systable.c
+++ b/source/common/src/systable.c
@@ -24,7 +24,7 @@
 #define SYSTABLE_SCH_COL_NAME_LEN ((TSDB_COL_NAME_LEN - 1) + VARSTR_HEADER_SIZE)

 static const SSysDbTableSchema dnodesSchema[] = {
-    {.name = "id", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT},
+    {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
     {.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
     {.name = "vnodes", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT},
     {.name = "support_vnodes", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT},
@@ -66,7 +66,7 @@ static const SSysDbTableSchema bnodesSchema[] = {
 };

 static const SSysDbTableSchema clusterSchema[] = {
-    {.name = "id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
+    {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_BIGINT},
     {.name = "name", .bytes = TSDB_CLUSTER_ID_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
     {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
 };

From bb0ce39511c8a4eb2ce1b4a03fb16ac3c3ffe3b2 Mon Sep 17 00:00:00 2001
From: Liu Jicong
Date: Mon, 8 Aug 2022 13:48:09 +0800
Subject: [PATCH 25/36] refactor(stream): add stream meta

---
 include/libs/stream/tstream.h       |  6 +-
 source/dnode/vnode/src/inc/tq.h     | 14 ++---
 source/dnode/vnode/src/tq/tq.c      | 89 +++++++++++++----------------
 source/dnode/vnode/src/tq/tqPush.c  |  2 +-
 source/dnode/vnode/src/tq/tqRead.c  |  2 +-
 source/libs/stream/src/streamMeta.c | 62 ++++++++++++++++++++
 6 files changed, 116 insertions(+), 59 deletions(-)

diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h
index 239fcdad8d..842e656b9d 100644
--- a/include/libs/stream/tstream.h
+++ b/include/libs/stream/tstream.h
@@ -476,8 +476,10 @@ typedef struct SStreamMeta {
 SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandFunc);
 void         streamMetaClose(SStreamMeta* streamMeta);

-int32_t streamMetaAddTask(SStreamMeta* pMeta, SStreamTask* pTask);
-int32_t streamMetaRemoveTask(SStreamMeta* pMeta, int32_t taskId);
+int32_t      streamMetaAddTask(SStreamMeta* pMeta, SStreamTask* pTask);
+int32_t      streamMetaAddSerializedTask(SStreamMeta* pMeta, char* msg, int32_t msgLen);
+int32_t      streamMetaRemoveTask(SStreamMeta* pMeta, int32_t taskId);
+SStreamTask* streamMetaGetTask(SStreamMeta* pMeta, int32_t taskId);

 int32_t
streamMetaBegin(SStreamMeta* pMeta); int32_t streamMetaCommit(SStreamMeta* pMeta); diff --git a/source/dnode/vnode/src/inc/tq.h b/source/dnode/vnode/src/inc/tq.h index c093b2cd5d..a1dba41c94 100644 --- a/source/dnode/vnode/src/inc/tq.h +++ b/source/dnode/vnode/src/inc/tq.h @@ -117,10 +117,9 @@ typedef struct { struct STQ { SVnode* pVnode; char* path; - SHashObj* pushMgr; // consumerId -> STqHandle* - SHashObj* handles; // subKey -> STqHandle - SHashObj* pStreamTasks; // taksId -> SStreamTask - SHashObj* pAlterInfo; // topic -> SAlterCheckInfo + SHashObj* pushMgr; // consumerId -> STqHandle* + SHashObj* handles; // subKey -> STqHandle + SHashObj* pAlterInfo; // topic -> SAlterCheckInfo STqOffsetStore* pOffsetStore; @@ -129,9 +128,7 @@ struct STQ { TTB* pAlterInfoStore; - TDB* pStreamStore; - TTB* pTaskDb; - TTB* pTaskState; + SStreamMeta* pStreamMeta; }; typedef struct { @@ -188,6 +185,9 @@ static FORCE_INLINE void tqOffsetResetToLog(STqOffsetVal* pOffsetVal, int64_t ve pOffsetVal->version = ver; } +// tqStream +int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask); + #ifdef __cplusplus } #endif diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index 62e37f048e..c1c680fc56 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -62,8 +62,6 @@ STQ* tqOpen(const char* path, SVnode* pVnode) { pTq->handles = taosHashInit(64, MurmurHash3_32, true, HASH_ENTRY_LOCK); - pTq->pStreamTasks = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK); - pTq->pushMgr = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_ENTRY_LOCK); pTq->pAlterInfo = taosHashInit(64, MurmurHash3_32, true, HASH_ENTRY_LOCK); @@ -76,6 +74,11 @@ STQ* tqOpen(const char* path, SVnode* pVnode) { ASSERT(0); } + pTq->pStreamMeta = streamMetaOpen(path, pTq, (FTaskExpand*)tqExpandTask); + if (pTq->pStreamMeta == NULL) { + ASSERT(0); + } + return pTq; } @@ -83,18 +86,11 @@ void tqClose(STQ* pTq) { if (pTq) { tqOffsetClose(pTq->pOffsetStore); taosHashCleanup(pTq->handles); - void* pIter = NULL; - while (1) { - pIter = taosHashIterate(pTq->pStreamTasks, pIter); - if (pIter == NULL) break; - SStreamTask* pTask = *(SStreamTask**)pIter; - tFreeSStreamTask(pTask); - } - taosHashCleanup(pTq->pStreamTasks); taosHashCleanup(pTq->pushMgr); taosHashCleanup(pTq->pAlterInfo); taosMemoryFree(pTq->path); tqMetaClose(pTq); + streamMetaClose(pTq->pStreamMeta); taosMemoryFree(pTq); } } @@ -672,6 +668,9 @@ FAIL: } int32_t tqProcessTaskDeployReq(STQ* pTq, char* msg, int32_t msgLen) { + // + return streamMetaAddSerializedTask(pTq->pStreamMeta, msg, msgLen); +#if 0 SStreamTask* pTask = taosMemoryCalloc(1, sizeof(SStreamTask)); if (pTask == NULL) { return -1; @@ -695,6 +694,7 @@ int32_t tqProcessTaskDeployReq(STQ* pTq, char* msg, int32_t msgLen) { FAIL: if (pTask) taosMemoryFree(pTask); return -1; +#endif } int32_t tqProcessStreamTrigger(STQ* pTq, SSubmitReq* pReq, int64_t ver) { @@ -710,7 +710,7 @@ int32_t tqProcessStreamTrigger(STQ* pTq, SSubmitReq* pReq, int64_t ver) { } while (1) { - pIter = taosHashIterate(pTq->pStreamTasks, pIter); + pIter = taosHashIterate(pTq->pStreamMeta->pTasks, pIter); if (pIter == NULL) break; SStreamTask* pTask = *(SStreamTask**)pIter; if (pTask->taskLevel != TASK_LEVEL__SOURCE) continue; @@ -744,9 +744,9 @@ int32_t tqProcessTaskRunReq(STQ* pTq, SRpcMsg* pMsg) { // SStreamTaskRunReq* pReq = pMsg->pCont; int32_t taskId = pReq->taskId; - SStreamTask** ppTask = (SStreamTask**)taosHashGet(pTq->pStreamTasks, &taskId, 
sizeof(int32_t)); - if (ppTask) { - streamProcessRunReq(*ppTask); + SStreamTask* pTask = streamMetaGetTask(pTq->pStreamMeta, taskId); + if (pTask) { + streamProcessRunReq(pTask); return 0; } else { return -1; @@ -762,14 +762,15 @@ int32_t tqProcessTaskDispatchReq(STQ* pTq, SRpcMsg* pMsg, bool exec) { SDecoder decoder; tDecoderInit(&decoder, (uint8_t*)msgBody, msgLen); tDecodeStreamDispatchReq(&decoder, &req); - int32_t taskId = req.taskId; - SStreamTask** ppTask = (SStreamTask**)taosHashGet(pTq->pStreamTasks, &taskId, sizeof(int32_t)); - if (ppTask) { + int32_t taskId = req.taskId; + + SStreamTask* pTask = streamMetaGetTask(pTq->pStreamMeta, taskId); + if (pTask) { SRpcMsg rsp = { .info = pMsg->info, .code = 0, }; - streamProcessDispatchReq(*ppTask, &req, &rsp, exec); + streamProcessDispatchReq(pTask, &req, &rsp, exec); return 0; } else { return -1; @@ -779,9 +780,9 @@ int32_t tqProcessTaskDispatchReq(STQ* pTq, SRpcMsg* pMsg, bool exec) { int32_t tqProcessTaskRecoverReq(STQ* pTq, SRpcMsg* pMsg) { SStreamTaskRecoverReq* pReq = pMsg->pCont; int32_t taskId = pReq->taskId; - SStreamTask** ppTask = (SStreamTask**)taosHashGet(pTq->pStreamTasks, &taskId, sizeof(int32_t)); - if (ppTask) { - streamProcessRecoverReq(*ppTask, pReq, pMsg); + SStreamTask* pTask = streamMetaGetTask(pTq->pStreamMeta, taskId); + if (pTask) { + streamProcessRecoverReq(pTask, pReq, pMsg); return 0; } else { return -1; @@ -791,9 +792,9 @@ int32_t tqProcessTaskRecoverReq(STQ* pTq, SRpcMsg* pMsg) { int32_t tqProcessTaskDispatchRsp(STQ* pTq, SRpcMsg* pMsg) { SStreamDispatchRsp* pRsp = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)); int32_t taskId = pRsp->taskId; - SStreamTask** ppTask = (SStreamTask**)taosHashGet(pTq->pStreamTasks, &taskId, sizeof(int32_t)); - if (ppTask) { - streamProcessDispatchRsp(*ppTask, pRsp); + SStreamTask* pTask = streamMetaGetTask(pTq->pStreamMeta, taskId); + if (pTask) { + streamProcessDispatchRsp(pTask, pRsp); return 0; } else { return -1; @@ -803,9 +804,10 @@ int32_t tqProcessTaskDispatchRsp(STQ* pTq, SRpcMsg* pMsg) { int32_t tqProcessTaskRecoverRsp(STQ* pTq, SRpcMsg* pMsg) { SStreamTaskRecoverRsp* pRsp = pMsg->pCont; int32_t taskId = pRsp->rspTaskId; - SStreamTask** ppTask = (SStreamTask**)taosHashGet(pTq->pStreamTasks, &taskId, sizeof(int32_t)); - if (ppTask) { - streamProcessRecoverRsp(*ppTask, pRsp); + + SStreamTask* pTask = streamMetaGetTask(pTq->pStreamMeta, taskId); + if (pTask) { + streamProcessRecoverRsp(pTask, pRsp); return 0; } else { return -1; @@ -815,18 +817,7 @@ int32_t tqProcessTaskRecoverRsp(STQ* pTq, SRpcMsg* pMsg) { int32_t tqProcessTaskDropReq(STQ* pTq, char* msg, int32_t msgLen) { SVDropStreamTaskReq* pReq = (SVDropStreamTaskReq*)msg; - SStreamTask** ppTask = (SStreamTask**)taosHashGet(pTq->pStreamTasks, &pReq->taskId, sizeof(int32_t)); - if (ppTask) { - SStreamTask* pTask = *ppTask; - taosHashRemove(pTq->pStreamTasks, &pReq->taskId, sizeof(int32_t)); - atomic_store_8(&pTask->taskStatus, TASK_STATUS__DROPPING); - } - // todo - // clear queue - // push drop req into queue - // launch exec to free memory - // remove from hash - return 0; + return streamMetaRemoveTask(pTq->pStreamMeta, pReq->taskId); } int32_t tqProcessTaskRetrieveReq(STQ* pTq, SRpcMsg* pMsg) { @@ -837,18 +828,18 @@ int32_t tqProcessTaskRetrieveReq(STQ* pTq, SRpcMsg* pMsg) { SDecoder decoder; tDecoderInit(&decoder, msgBody, msgLen); tDecodeStreamRetrieveReq(&decoder, &req); - int32_t taskId = req.dstTaskId; - SStreamTask** ppTask = (SStreamTask**)taosHashGet(pTq->pStreamTasks, &taskId, sizeof(int32_t)); - if (ppTask) 
{ + int32_t taskId = req.dstTaskId; + SStreamTask* pTask = streamMetaGetTask(pTq->pStreamMeta, taskId); + if (pTask) { SRpcMsg rsp = { .info = pMsg->info, .code = 0, }; - streamProcessRetrieveReq(*ppTask, &req, &rsp); + streamProcessRetrieveReq(pTask, &req, &rsp); + return 0; } else { return -1; } - return 0; } int32_t tqProcessTaskRetrieveRsp(STQ* pTq, SRpcMsg* pMsg) { @@ -871,16 +862,18 @@ void vnodeEnqueueStreamMsg(SVnode* pVnode, SRpcMsg* pMsg) { goto FAIL; } - int32_t taskId = req.taskId; - SStreamTask** ppTask = (SStreamTask**)taosHashGet(pTq->pStreamTasks, &taskId, sizeof(int32_t)); - if (ppTask) { + int32_t taskId = req.taskId; + + SStreamTask* pTask = streamMetaGetTask(pTq->pStreamMeta, taskId); + if (pTask) { SRpcMsg rsp = { .info = pMsg->info, .code = 0, }; - streamProcessDispatchReq(*ppTask, &req, &rsp, false); + streamProcessDispatchReq(pTask, &req, &rsp, false); return; } + FAIL: if (pMsg->info.handle == NULL) return; SRpcMsg rsp = { diff --git a/source/dnode/vnode/src/tq/tqPush.c b/source/dnode/vnode/src/tq/tqPush.c index 1c48ef7535..ae3fef9b4b 100644 --- a/source/dnode/vnode/src/tq/tqPush.c +++ b/source/dnode/vnode/src/tq/tqPush.c @@ -215,7 +215,7 @@ int tqPushMsg(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver) walApplyVer(pTq->pVnode->pWal, ver); if (msgType == TDMT_VND_SUBMIT) { - if (taosHashGetSize(pTq->pStreamTasks) == 0) return 0; + if (taosHashGetSize(pTq->pStreamMeta->pTasks) == 0) return 0; void* data = taosMemoryMalloc(msgLen); if (data == NULL) { diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c index 5017893853..6ce8dbe5d9 100644 --- a/source/dnode/vnode/src/tq/tqRead.c +++ b/source/dnode/vnode/src/tq/tqRead.c @@ -413,7 +413,7 @@ int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd) { } } while (1) { - pIter = taosHashIterate(pTq->pStreamTasks, pIter); + pIter = taosHashIterate(pTq->pStreamMeta->pTasks, pIter); if (pIter == NULL) break; SStreamTask* pTask = *(SStreamTask**)pIter; if (pTask->taskLevel == TASK_LEVEL__SOURCE) { diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index 085a0e4ce7..8faa22d643 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -36,8 +36,18 @@ SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandF goto _err; } + pMeta->pTasks = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK); + if (pMeta->pTasks == NULL) { + goto _err; + } + + if (streamMetaBegin(pMeta) < 0) { + goto _err; + } + pMeta->ahandle = ahandle; pMeta->expandFunc = expandFunc; + return pMeta; _err: return NULL; @@ -48,6 +58,48 @@ void streamMetaClose(SStreamMeta* pMeta) { tdbTbClose(pMeta->pTaskDb); tdbTbClose(pMeta->pStateDb); tdbClose(pMeta->db); + + void* pIter = NULL; + while (1) { + pIter = taosHashIterate(pMeta->pTasks, pIter); + if (pIter == NULL) break; + SStreamTask* pTask = *(SStreamTask**)pIter; + tFreeSStreamTask(pTask); + } + taosHashCleanup(pMeta->pTasks); + taosMemoryFree(pMeta->path); + taosMemoryFree(pMeta); +} + +int32_t streamMetaAddSerializedTask(SStreamMeta* pMeta, char* msg, int32_t msgLen) { + SStreamTask* pTask = taosMemoryCalloc(1, sizeof(SStreamTask)); + if (pTask == NULL) { + return -1; + } + SDecoder decoder; + tDecoderInit(&decoder, (uint8_t*)msg, msgLen); + if (tDecodeSStreamTask(&decoder, pTask) < 0) { + ASSERT(0); + goto FAIL; + } + tDecoderClear(&decoder); + + if (pMeta->expandFunc(pMeta->ahandle, pTask) < 0) { + ASSERT(0); + goto 
FAIL; + } + + taosHashPut(pMeta->pTasks, &pTask->taskId, sizeof(int32_t), &pTask, sizeof(void*)); + + if (tdbTbUpsert(pMeta->pTaskDb, &pTask->taskId, sizeof(int32_t), msg, msgLen, &pMeta->txn) < 0) { + ASSERT(0); + return -1; + } + return 0; + +FAIL: + if (pTask) taosMemoryFree(pTask); + return -1; } int32_t streamMetaAddTask(SStreamMeta* pMeta, SStreamTask* pTask) { @@ -80,6 +132,16 @@ int32_t streamMetaAddTask(SStreamMeta* pMeta, SStreamTask* pTask) { return 0; } +SStreamTask* streamMetaGetTask(SStreamMeta* pMeta, int32_t taskId) { + SStreamTask** ppTask = (SStreamTask**)taosHashGet(pMeta->pTasks, &taskId, sizeof(int32_t)); + if (ppTask) { + ASSERT((*ppTask)->taskId == taskId); + return *ppTask; + } else { + return NULL; + } +} + int32_t streamMetaRemoveTask(SStreamMeta* pMeta, int32_t taskId) { SStreamTask** ppTask = (SStreamTask**)taosHashGet(pMeta->pTasks, &taskId, sizeof(int32_t)); if (ppTask) { From 62b07dbe5d8724294bc8116a269cb47247c4938a Mon Sep 17 00:00:00 2001 From: Minghao Li Date: Mon, 8 Aug 2022 15:10:32 +0800 Subject: [PATCH 26/36] refactor(sync): speed up replicate --- source/libs/sync/inc/syncInt.h | 2 ++ source/libs/sync/src/syncMain.c | 17 ++++++++++++++--- source/libs/sync/src/syncReplication.c | 25 +++++++++++++++++++++++-- 3 files changed, 39 insertions(+), 5 deletions(-) diff --git a/source/libs/sync/inc/syncInt.h b/source/libs/sync/inc/syncInt.h index 82399f52b9..250b294c19 100644 --- a/source/libs/sync/inc/syncInt.h +++ b/source/libs/sync/inc/syncInt.h @@ -192,9 +192,11 @@ int32_t syncNodeRestartElectTimer(SSyncNode* pSyncNode, int32_t ms); int32_t syncNodeResetElectTimer(SSyncNode* pSyncNode); int32_t syncNodeStartHeartbeatTimer(SSyncNode* pSyncNode); int32_t syncNodeStartNowHeartbeatTimer(SSyncNode* pSyncNode); +int32_t syncNodeStartHeartbeatTimerMS(SSyncNode* pSyncNode, int32_t ms); int32_t syncNodeStopHeartbeatTimer(SSyncNode* pSyncNode); int32_t syncNodeRestartHeartbeatTimer(SSyncNode* pSyncNode); int32_t syncNodeRestartNowHeartbeatTimer(SSyncNode* pSyncNode); +int32_t syncNodeRestartNowHeartbeatTimerMS(SSyncNode* pSyncNode, int32_t ms); // utils -------------- int32_t syncNodeSendMsgById(const SRaftId* destRaftId, SSyncNode* pSyncNode, SRpcMsg* pMsg); diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c index cdd729999f..5395b72e27 100644 --- a/source/libs/sync/src/syncMain.c +++ b/source/libs/sync/src/syncMain.c @@ -1322,10 +1322,10 @@ int32_t syncNodeStartHeartbeatTimer(SSyncNode* pSyncNode) { return ret; } -int32_t syncNodeStartNowHeartbeatTimer(SSyncNode* pSyncNode) { +int32_t syncNodeStartHeartbeatTimerMS(SSyncNode* pSyncNode, int32_t ms) { int32_t ret = 0; if (syncEnvIsStart()) { - taosTmrReset(pSyncNode->FpHeartbeatTimerCB, 1, pSyncNode, gSyncEnv->pTimerManager, &pSyncNode->pHeartbeatTimer); + taosTmrReset(pSyncNode->FpHeartbeatTimerCB, ms, pSyncNode, gSyncEnv->pTimerManager, &pSyncNode->pHeartbeatTimer); atomic_store_64(&pSyncNode->heartbeatTimerLogicClock, pSyncNode->heartbeatTimerLogicClockUser); } else { sError("vgId:%d, start heartbeat timer error, sync env is stop", pSyncNode->vgId); @@ -1333,13 +1333,18 @@ int32_t syncNodeStartNowHeartbeatTimer(SSyncNode* pSyncNode) { do { char logBuf[128]; - snprintf(logBuf, sizeof(logBuf), "start heartbeat timer, ms:%d", 1); + snprintf(logBuf, sizeof(logBuf), "start heartbeat timer, ms:%d", ms); syncNodeEventLog(pSyncNode, logBuf); } while (0); return ret; } +int32_t syncNodeStartNowHeartbeatTimer(SSyncNode* pSyncNode) { + int32_t ret = syncNodeStartHeartbeatTimerMS(pSyncNode, 1); + 
return ret;
+}
+
 int32_t syncNodeStopHeartbeatTimer(SSyncNode* pSyncNode) {
   int32_t ret = 0;
   atomic_add_fetch_64(&pSyncNode->heartbeatTimerLogicClockUser, 1);
@@ -1363,6 +1368,12 @@ int32_t syncNodeRestartNowHeartbeatTimer(SSyncNode* pSyncNode) {
   return 0;
 }

+int32_t syncNodeRestartNowHeartbeatTimerMS(SSyncNode* pSyncNode, int32_t ms) {
+  syncNodeStopHeartbeatTimer(pSyncNode);
+  syncNodeStartHeartbeatTimerMS(pSyncNode, ms);
+  return 0;
+}
+
 // utils --------------
 int32_t syncNodeSendMsgById(const SRaftId* destRaftId, SSyncNode* pSyncNode, SRpcMsg* pMsg) {
   SEpSet epSet;
diff --git a/source/libs/sync/src/syncReplication.c b/source/libs/sync/src/syncReplication.c
index bc703e519c..1a147e391d 100644
--- a/source/libs/sync/src/syncReplication.c
+++ b/source/libs/sync/src/syncReplication.c
@@ -200,9 +200,23 @@ int32_t syncNodeAppendEntriesPeersSnapshot2(SSyncNode* pSyncNode) {
     // send msg
     syncNodeAppendEntriesBatch(pSyncNode, pDestId, pMsg);
+
+    // speed up; pMsg must be inspected before it is destroyed below
+    if (pMsg->dataCount > 0 && pMsg->prevLogIndex < pSyncNode->commitIndex) {
+      ret = 1;
+
+      do {
+        char     logBuf[128];
+        char     host[64];
+        uint16_t port;
+        syncUtilU642Addr(pDestId->addr, host, sizeof(host), &port);
+        snprintf(logBuf, sizeof(logBuf), "speed up for %s:%d, pre-index:%ld", host, port, pMsg->prevLogIndex);
+        syncNodeEventLog(pSyncNode, logBuf);
+      } while (0);
+    }
     syncAppendEntriesBatchDestroy(pMsg);
   }

-  return 0;
+  return ret;
 }

 int32_t syncNodeAppendEntriesPeersSnapshot(SSyncNode* pSyncNode) {
@@ -309,7 +323,14 @@ int32_t syncNodeReplicate(SSyncNode* pSyncNode) {
       break;
   }

-  syncNodeRestartHeartbeatTimer(pSyncNode);
+  if (ret > 0) {
+    // speed up replicate
+    int32_t ms = pSyncNode->heartbeatTimerMS < 50 ? pSyncNode->heartbeatTimerMS : 50;
+    syncNodeRestartNowHeartbeatTimerMS(pSyncNode, ms);
+
+  } else {
+    syncNodeRestartHeartbeatTimer(pSyncNode);
+  }

   return ret;
 }

From 51f0cd3952ee2fea43215b8351d354f1ba93085c Mon Sep 17 00:00:00 2001
From: Xiaoyu Wang
Date: Mon, 8 Aug 2022 15:21:47 +0800
Subject: [PATCH 27/36] enh: adjust the content of the query data chapter

---
 docs/zh/07-develop/04-query-data/index.mdx | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/docs/zh/07-develop/04-query-data/index.mdx b/docs/zh/07-develop/04-query-data/index.mdx
index 824f36ef2f..eecda92744 100644
--- a/docs/zh/07-develop/04-query-data/index.mdx
+++ b/docs/zh/07-develop/04-query-data/index.mdx
@@ -25,6 +25,7 @@ TDengine 采用 SQL 作为查询语言。应用程序可以通过 REST API 或
 - 单列、多列数据查询
 - 标签和数值的多种过滤条件:>, <, =, <\>, like 等
 - 聚合结果的分组(Group by)、排序(Order by)、约束输出(Limit/Offset)
+- 时间窗口(Interval)、会话窗口(Session)和状态窗口(State_window)等窗口切分聚合查询
 - 数值列及聚合结果的四则运算
 - 时间戳对齐的连接查询(Join Query: 隐式连接)操作
 - 多种聚合/计算函数: count, max, min, avg, sum, twa, stddev, leastsquares, top, bottom, first, last, percentile, apercentile, last_row, spread, diff 等
@@ -40,7 +41,7 @@ taos> select * from d1001 where voltage > 215 order by ts desc limit 2;
 Query OK, 2 row(s) in set (0.001100s)
 ```

-为满足物联网场景的需求,TDengine 支持几个特殊的函数,比如 twa(时间加权平均),spread (最大值与最小值的差),last_row(最后一条记录)等,更多与物联网场景相关的函数将添加进来。TDengine 还支持连续查询。
+为满足物联网场景的需求,TDengine 支持几个特殊的函数,比如 twa(时间加权平均),spread (最大值与最小值的差),last_row(最后一条记录)等,更多与物联网场景相关的函数将添加进来。

 具体的查询语法请看 [TAOS SQL 的数据查询](/taos-sql/select) 章节。

@@ -73,7 +74,7 @@ taos> SELECT count(*), max(current) FROM meters where groupId = 2 and ts > now -
 Query OK, 1 row(s) in set (0.002136s)
 ```

-TDengine 仅容许对属于同一个超级表的表之间进行聚合查询,不同超级表之间的聚合查询不支持。在 [TAOS SQL 的数据查询](/taos-sql/select) 一章,查询类操作都会注明是否支持超级表。
+在 [TAOS SQL 的数据查询](/taos-sql/select) 一章,查询类操作都会注明是否支持超级表。

 ## 降采样查询、插值

From
e9f79254fbad1c09105001efa331a1fb822c9071 Mon Sep 17 00:00:00 2001
From: slzhou
Date: Mon, 8 Aug 2022 15:23:09 +0800
Subject: [PATCH 28/36] fix: merge spread into output when input spread has result

---
 include/common/ttypes.h                 |  2 --
 include/libs/function/function.h        |  2 +-
 source/libs/function/src/builtinsimpl.c | 11 ++++++++---
 3 files changed, 9 insertions(+), 6 deletions(-)

diff --git a/include/common/ttypes.h b/include/common/ttypes.h
index 4b3e11f947..ceb3eae033 100644
--- a/include/common/ttypes.h
+++ b/include/common/ttypes.h
@@ -354,8 +354,6 @@ void    operateVal(void *dst, void *s1, void *s2, int32_t optr, int32_t type);
 void   *getDataMin(int32_t type);
 void   *getDataMax(int32_t type);

-#define SET_DOUBLE_NULL(v) (*(uint64_t *)(v) = TSDB_DATA_DOUBLE_NULL)
-#define SET_BIGINT_NULL(v) (*(uint64_t *)(v) = TSDB_DATA_BIGINT_NULL)

 #ifdef __cplusplus
 }
diff --git a/include/libs/function/function.h b/include/libs/function/function.h
index 8fa63bbd45..72732ee198 100644
--- a/include/libs/function/function.h
+++ b/include/libs/function/function.h
@@ -67,7 +67,7 @@ typedef struct SResultRowEntryInfo {
   bool     initialized:1;     // output buffer has been initialized
   bool     complete:1;        // query has completed
   uint8_t  isNullRes:6;       // the result is null
-  uint16_t numOfRes;          // num of output result in current buffer
+  uint16_t numOfRes;          // num of output results in the current buffer; counts non-NULL results only
 } SResultRowEntryInfo;

 // determine the real data need to calculated the result
diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c
index e3e98a6895..9aa7c11fce 100644
--- a/source/libs/function/src/builtinsimpl.c
+++ b/source/libs/function/src/builtinsimpl.c
@@ -3845,14 +3845,17 @@ int32_t spreadFunctionMerge(SqlFunctionCtx* pCtx) {
   SSpreadInfo* pInfo = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx));

   int32_t start = pInput->startRowIndex;
-
   for (int32_t i = start; i < start + pInput->numOfRows; ++i) {
     char*        data = colDataGetData(pCol, i);
     SSpreadInfo* pInputInfo = (SSpreadInfo*)varDataVal(data);
-    spreadTransferInfo(pInputInfo, pInfo);
+    if (pInputInfo->hasResult) {
+      spreadTransferInfo(pInputInfo, pInfo);
+    }
   }

-  SET_VAL(GET_RES_INFO(pCtx), 1, 1);
+  if (pInfo->hasResult) {
+    GET_RES_INFO(pCtx)->numOfRes = 1;
+  }
   return TSDB_CODE_SUCCESS;
 }

@@ -3861,6 +3864,8 @@ int32_t spreadFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
   SSpreadInfo* pInfo = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx));
   if (pInfo->hasResult == true) {
     SET_DOUBLE_VAL(&pInfo->result, pInfo->max - pInfo->min);
+  } else {
+    GET_RES_INFO(pCtx)->isNullRes = 1;
   }
   return functionFinalize(pCtx, pBlock);
 }

From ef607e5c3f0dc17c6f730e3cc0e0e53a47fdddfe Mon Sep 17 00:00:00 2001
From: Xiaoyu Wang
Date: Mon, 8 Aug 2022 15:49:31 +0800
Subject: [PATCH 29/36] fix: sql command 'show cluster' error

---
 source/client/src/clientMain.c           | 9 +++++++--
 source/libs/parser/src/parAstParser.c    | 7 +++++++
 source/libs/parser/test/mockCatalog.cpp  | 6 ++++++
 source/libs/parser/test/parShowToUse.cpp | 9 +++++++++
 4 files changed, 29 insertions(+), 2 deletions(-)

diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c
index 6785390952..272d71715a 100644
--- a/source/client/src/clientMain.c
+++ b/source/client/src/clientMain.c
@@ -657,12 +657,17 @@ typedef struct SqlParseWrapper {
   SQuery       *pQuery;
 } SqlParseWrapper;

+static void destoryTablesReq(void *p) {
+  STablesReq *pRes = (STablesReq *)p;
+  taosArrayDestroy(pRes->pTables);
+}
+
 static void destorySqlParseWrapper(SqlParseWrapper *pWrapper) {
taosArrayDestroy(pWrapper->catalogReq.pDbVgroup); taosArrayDestroy(pWrapper->catalogReq.pDbCfg); taosArrayDestroy(pWrapper->catalogReq.pDbInfo); - taosArrayDestroy(pWrapper->catalogReq.pTableMeta); - taosArrayDestroy(pWrapper->catalogReq.pTableHash); + taosArrayDestroyEx(pWrapper->catalogReq.pTableMeta, destoryTablesReq); + taosArrayDestroyEx(pWrapper->catalogReq.pTableHash, destoryTablesReq); taosArrayDestroy(pWrapper->catalogReq.pUdf); taosArrayDestroy(pWrapper->catalogReq.pIndex); taosArrayDestroy(pWrapper->catalogReq.pUser); diff --git a/source/libs/parser/src/parAstParser.c b/source/libs/parser/src/parAstParser.c index f5f44da9db..08fcdcb0cb 100644 --- a/source/libs/parser/src/parAstParser.c +++ b/source/libs/parser/src/parAstParser.c @@ -339,6 +339,11 @@ static int32_t collectMetaKeyFromShowBnodes(SCollectMetaKeyCxt* pCxt, SShowStmt* pCxt->pMetaCache); } +static int32_t collectMetaKeyFromShowCluster(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_CLUSTER, + pCxt->pMetaCache); +} + static int32_t collectMetaKeyFromShowDatabases(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_DATABASES, pCxt->pMetaCache); @@ -547,6 +552,8 @@ static int32_t collectMetaKeyFromQuery(SCollectMetaKeyCxt* pCxt, SNode* pStmt) { return collectMetaKeyFromShowSnodes(pCxt, (SShowStmt*)pStmt); case QUERY_NODE_SHOW_BNODES_STMT: return collectMetaKeyFromShowBnodes(pCxt, (SShowStmt*)pStmt); + case QUERY_NODE_SHOW_CLUSTER_STMT: + return collectMetaKeyFromShowCluster(pCxt, (SShowStmt*)pStmt); case QUERY_NODE_SHOW_DATABASES_STMT: return collectMetaKeyFromShowDatabases(pCxt, (SShowStmt*)pStmt); case QUERY_NODE_SHOW_FUNCTIONS_STMT: diff --git a/source/libs/parser/test/mockCatalog.cpp b/source/libs/parser/test/mockCatalog.cpp index 6834f9ccca..ad491af105 100644 --- a/source/libs/parser/test/mockCatalog.cpp +++ b/source/libs/parser/test/mockCatalog.cpp @@ -119,6 +119,12 @@ void generateInformationSchema(MockCatalogService* mcs) { .addColumn("dnode_id", TSDB_DATA_TYPE_INT); builder.done(); } + { + ITableBuilder& builder = + mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_CLUSTER, TSDB_SYSTEM_TABLE, 1) + .addColumn("id", TSDB_DATA_TYPE_BIGINT); + builder.done(); + } } void generatePerformanceSchema(MockCatalogService* mcs) { diff --git a/source/libs/parser/test/parShowToUse.cpp b/source/libs/parser/test/parShowToUse.cpp index 7af6d8350c..6590378565 100644 --- a/source/libs/parser/test/parShowToUse.cpp +++ b/source/libs/parser/test/parShowToUse.cpp @@ -25,6 +25,15 @@ class ParserShowToUseTest : public ParserDdlTest {}; // todo SHOW apps // todo SHOW connections +TEST_F(ParserShowToUseTest, showCluster) { + useDb("root", "test"); + + setCheckDdlFunc( + [&](const SQuery* pQuery, ParserStage stage) { ASSERT_EQ(nodeType(pQuery->pRoot), QUERY_NODE_SELECT_STMT); }); + + run("SHOW CLUSTER"); +} + TEST_F(ParserShowToUseTest, showConsumers) { useDb("root", "test"); From 80e152619569d274ef1e442823a56d7fa7117cd2 Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Mon, 8 Aug 2022 16:08:07 +0800 Subject: [PATCH 30/36] fix: fix clone table meta issue --- source/libs/qcom/src/queryUtil.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/qcom/src/queryUtil.c b/source/libs/qcom/src/queryUtil.c index 4cad6a078b..41333e7756 100644 --- a/source/libs/qcom/src/queryUtil.c +++ b/source/libs/qcom/src/queryUtil.c @@ 
-415,7 +415,7 @@ int32_t cloneTableMeta(STableMeta* pSrc, STableMeta** pDst) {
     return TSDB_CODE_SUCCESS;
   }

-  int32_t metaSize = (pSrc->tableInfo.numOfColumns + pSrc->tableInfo.numOfTags) * sizeof(SSchema);
+  int32_t metaSize = sizeof(STableMeta) + (pSrc->tableInfo.numOfColumns + pSrc->tableInfo.numOfTags) * sizeof(SSchema);
   *pDst = taosMemoryMalloc(metaSize);
   if (NULL == *pDst) {
     return TSDB_CODE_TSC_OUT_OF_MEMORY;

From aa81107a581452d3d346a4f415bc78a95bb524cc Mon Sep 17 00:00:00 2001
From: slzhou
Date: Mon, 8 Aug 2022 16:08:58 +0800
Subject: [PATCH 31/36] fix: fix table meta size calculation error

---
 source/libs/qcom/src/queryUtil.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/source/libs/qcom/src/queryUtil.c b/source/libs/qcom/src/queryUtil.c
index 4cad6a078b..41333e7756 100644
--- a/source/libs/qcom/src/queryUtil.c
+++ b/source/libs/qcom/src/queryUtil.c
@@ -415,7 +415,7 @@ int32_t cloneTableMeta(STableMeta* pSrc, STableMeta** pDst) {
     return TSDB_CODE_SUCCESS;
   }

-  int32_t metaSize = (pSrc->tableInfo.numOfColumns + pSrc->tableInfo.numOfTags) * sizeof(SSchema);
+  int32_t metaSize = sizeof(STableMeta) + (pSrc->tableInfo.numOfColumns + pSrc->tableInfo.numOfTags) * sizeof(SSchema);
   *pDst = taosMemoryMalloc(metaSize);
   if (NULL == *pDst) {
     return TSDB_CODE_TSC_OUT_OF_MEMORY;

From 875eabdbf5262e158e5d313fe4e24198285459bd Mon Sep 17 00:00:00 2001
From: Cary Xu
Date: Mon, 8 Aug 2022 16:18:05 +0800
Subject: [PATCH 32/36] fix: send rsma fetch msg to designated vg

---
 include/common/tmsg.h                         |  3 --
 include/util/taoserror.h                      |  2 +
 source/dnode/vnode/src/sma/smaRollup.c        | 40 ++++++++++++++---
 source/util/src/terror.c                      |  2 +
 .../system-test/1-insert/create_retentions.py |  2 +-
 5 files changed, 39 insertions(+), 10 deletions(-)

diff --git a/include/common/tmsg.h b/include/common/tmsg.h
index 716f51933e..b32129bfd7 100644
--- a/include/common/tmsg.h
+++ b/include/common/tmsg.h
@@ -2658,7 +2658,6 @@ typedef struct {
 } SVgEpSet;

 typedef struct {
-  int64_t refId;
   int64_t suid;
   int8_t  level;
 } SRSmaFetchMsg;
@@ -2666,7 +2665,6 @@ typedef struct {
 static FORCE_INLINE int32_t tEncodeSRSmaFetchMsg(SEncoder* pCoder, const SRSmaFetchMsg* pReq) {
   if (tStartEncode(pCoder) < 0) return -1;

-  if (tEncodeI64(pCoder, pReq->refId) < 0) return -1;
   if (tEncodeI64(pCoder, pReq->suid) < 0) return -1;
   if (tEncodeI8(pCoder, pReq->level) < 0) return -1;

@@ -2677,7 +2675,6 @@ static FORCE_INLINE int32_t tDecodeSRSmaFetchMsg(SDecoder* pCoder, SRSmaFetchMsg* pReq) {
   if (tStartDecode(pCoder) < 0) return -1;

-  if (tDecodeI64(pCoder, &pReq->refId) < 0) return -1;
   if (tDecodeI64(pCoder, &pReq->suid) < 0) return -1;
   if (tDecodeI8(pCoder, &pReq->level) < 0) return -1;

diff --git a/include/util/taoserror.h b/include/util/taoserror.h
index 27fb057b44..eab6e5561f 100644
--- a/include/util/taoserror.h
+++ b/include/util/taoserror.h
@@ -610,6 +610,8 @@ int32_t* taosGetErrno();
 #define TSDB_CODE_RSMA_QTASKINFO_CREATE    TAOS_DEF_ERROR_CODE(0, 0x3152)
 #define TSDB_CODE_RSMA_FILE_CORRUPTED      TAOS_DEF_ERROR_CODE(0, 0x3153)
 #define TSDB_CODE_RSMA_REMOVE_EXISTS       TAOS_DEF_ERROR_CODE(0, 0x3154)
+#define TSDB_CODE_RSMA_FETCH_MSG_MESSED_UP TAOS_DEF_ERROR_CODE(0, 0x3155)
+#define TSDB_CODE_RSMA_EMPTY_INFO          TAOS_DEF_ERROR_CODE(0, 0x3156)

 //index
 #define TSDB_CODE_INDEX_REBUILDING        TAOS_DEF_ERROR_CODE(0, 0x3200)
diff --git a/source/dnode/vnode/src/sma/smaRollup.c b/source/dnode/vnode/src/sma/smaRollup.c
index 6b882251f4..662558529d 100644
---
a/source/dnode/vnode/src/sma/smaRollup.c
+++ b/source/dnode/vnode/src/sma/smaRollup.c
@@ -1389,7 +1389,7 @@ _end:
  * @return int32_t
  */
 int32_t tdRSmaFetchSend(SSma *pSma, SRSmaInfo *pInfo, int8_t level) {
-  SRSmaFetchMsg fetchMsg = {.refId = pInfo->refId, .suid = pInfo->suid, .level = level};
+  SRSmaFetchMsg fetchMsg = {.suid = pInfo->suid, .level = level};
   int32_t       ret = 0;
   int32_t       contLen = 0;
   SEncoder      encoder = {0};
@@ -1400,13 +1400,18 @@ int32_t tdRSmaFetchSend(SSma *pSma, SRSmaInfo *pInfo, int8_t level) {
     goto _err;
   }

-  void *pBuf = rpcMallocCont(contLen);
-  tEncoderInit(&encoder, pBuf, contLen);
+  void *pBuf = rpcMallocCont(contLen + sizeof(SMsgHead));
+  tEncoderInit(&encoder, POINTER_SHIFT(pBuf, sizeof(SMsgHead)), contLen);
   if (tEncodeSRSmaFetchMsg(&encoder, &fetchMsg) < 0) {
     terrno = TSDB_CODE_OUT_OF_MEMORY;
     tEncoderClear(&encoder);
+    goto _err;
   }
   tEncoderClear(&encoder);
+
+  ((SMsgHead *)pBuf)->vgId = SMA_VID(pSma);
+  ((SMsgHead *)pBuf)->contLen = contLen + sizeof(SMsgHead);
+
   SRpcMsg rpcMsg = {
       .code = 0,
       .msgType = TDMT_VND_FETCH_RSMA,
@@ -1415,24 +1420,42 @@ int32_t tdRSmaFetchSend(SSma *pSma, SRSmaInfo *pInfo, int8_t level) {
   };

   if ((terrno = tmsgPutToQueue(&pSma->pVnode->msgCb, FETCH_QUEUE, &rpcMsg)) != 0) {
-    smaError("vgId:%d, failed to put rsma fetch msg into fetch-queue for suid:%d level:%" PRIi8 " since %s",
+    smaError("vgId:%d, failed to put rsma fetch msg into fetch-queue for suid:%" PRIi64 " level:%" PRIi8 " since %s",
              SMA_VID(pSma), pInfo->suid, level, terrstr());
     goto _err;
   }

+  smaDebug("vgId:%d, success to put rsma fetch msg into fetch-queue for suid:%" PRIi64 " level:%" PRIi8, SMA_VID(pSma),
+           pInfo->suid, level);
+
   return TSDB_CODE_SUCCESS;
 _err:
   return TSDB_CODE_FAILED;
 }

+/**
+ * @brief fetch rsma data of level 2/3 and submit
+ *
+ * @param pSma
+ * @param pMsg
+ * @return int32_t
+ */
 int32_t smaProcessFetch(SSma *pSma, void *pMsg) {
   SRpcMsg       *pRpcMsg = (SRpcMsg *)pMsg;
   SRSmaFetchMsg  req = {0};
   SDecoder       decoder = {0};
+  void          *pBuf = NULL;
   SRSmaInfo     *pInfo = NULL;
   SRSmaInfoItem *pItem = NULL;

-  tDecoderInit(&decoder, pRpcMsg->pCont, pRpcMsg->contLen);
+  if (!pRpcMsg || pRpcMsg->contLen < sizeof(SMsgHead)) {
+    terrno = TSDB_CODE_RSMA_FETCH_MSG_MESSED_UP;
+    return -1;
+  }
+
+  pBuf = POINTER_SHIFT(pRpcMsg->pCont, sizeof(SMsgHead));
+
+  tDecoderInit(&decoder, pBuf, pRpcMsg->contLen - sizeof(SMsgHead));
   if (tDecodeSRSmaFetchMsg(&decoder, &req) < 0) {
     terrno = TSDB_CODE_INVALID_MSG;
     goto _err;
@@ -1440,7 +1463,11 @@ int32_t smaProcessFetch(SSma *pSma, void *pMsg) {

   pInfo = tdAcquireRSmaInfoBySuid(pSma, req.suid);
   if (!pInfo) {
-    smaDebug("vgId:%d, failed to process rsma fetch msg since Empty rsma info", SMA_VID(pSma));
+    if (terrno == TSDB_CODE_SUCCESS) {
+      terrno = TSDB_CODE_RSMA_EMPTY_INFO;
+    }
+    smaWarn("vgId:%d, failed to process rsma fetch msg for suid:%" PRIi64 " level:%" PRIi8 " since %s", SMA_VID(pSma),
+            req.suid, req.level, terrstr());
     goto _err;
   }

@@ -1459,6 +1486,8 @@ int32_t smaProcessFetch(SSma *pSma, void *pMsg) {

   tdReleaseRSmaInfo(pSma, pInfo);
   tDecoderClear(&decoder);
+  smaDebug("vgId:%d, success to process rsma fetch msg for suid:%" PRIi64 " level:%" PRIi8, SMA_VID(pSma), req.suid,
+           req.level);
   return TSDB_CODE_SUCCESS;
 _err:
   tdReleaseRSmaInfo(pSma, pInfo);
diff --git a/source/util/src/terror.c b/source/util/src/terror.c
index 4780d85a30..3c31c893d1 100644
--- a/source/util/src/terror.c
+++ b/source/util/src/terror.c
@@ -614,6 +614,8 @@ TAOS_DEFINE_ERROR(TSDB_CODE_RSMA_INVALID_STAT,       "Invalid rsma state"
 TAOS_DEFINE_ERROR(TSDB_CODE_RSMA_QTASKINFO_CREATE,   "Rsma qtaskinfo creation 
error") TAOS_DEFINE_ERROR(TSDB_CODE_RSMA_FILE_CORRUPTED, "Rsma file corrupted") TAOS_DEFINE_ERROR(TSDB_CODE_RSMA_REMOVE_EXISTS, "Rsma remove exists") +TAOS_DEFINE_ERROR(TSDB_CODE_RSMA_FETCH_MSG_MSSED_UP, "Rsma fetch msg is messed up") +TAOS_DEFINE_ERROR(TSDB_CODE_RSMA_EMPTY_INFO, "Rsma info is empty") //index TAOS_DEFINE_ERROR(TSDB_CODE_INDEX_REBUILDING, "Index is rebuilding") diff --git a/tests/system-test/1-insert/create_retentions.py b/tests/system-test/1-insert/create_retentions.py index a2c2254820..f8b2ca71b8 100644 --- a/tests/system-test/1-insert/create_retentions.py +++ b/tests/system-test/1-insert/create_retentions.py @@ -187,7 +187,7 @@ class TDTestCase: tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.{stb} tags ( {i+1} )') def __insert_data(self, rows, ctb_num=20, dbname=DBNAME, rsma=False, rsma_type="sum"): - tdLog.printNoPrefix("==========step: start inser data into tables now.....") + tdLog.printNoPrefix("==========step: start insert data into tables now.....") # from ...pytest.util.common import DataSet data = DataSet() data.get_order_set(rows) From 8e3005bc6adcbb5017963f1b57c440b7b7e3628e Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Mon, 8 Aug 2022 16:23:13 +0800 Subject: [PATCH 33/36] test: adjust the sleep time for rsma fetch --- tests/system-test/1-insert/create_retentions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/system-test/1-insert/create_retentions.py b/tests/system-test/1-insert/create_retentions.py index f8b2ca71b8..24c0dfc046 100644 --- a/tests/system-test/1-insert/create_retentions.py +++ b/tests/system-test/1-insert/create_retentions.py @@ -292,7 +292,7 @@ class TDTestCase: tdSql.execute(f"use {DB4}") self.__create_tb(rsma=True, dbname=DB4, ctb_num=db4_ctb_num, rsma_type="last") self.__insert_data(rows=self.rows, rsma=True, dbname=DB4, ctb_num=db4_ctb_num, rsma_type="last") - time.sleep(8) + time.sleep(7) tdSql.query(f"select count(*) from {DB4}.stb1 where ts > now()-5m") tdSql.checkRows(1) tdSql.checkData(0, 0, self.rows * db4_ctb_num) From 200b0cc93eae8f234f1a6b25dd41d39a43efb537 Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Mon, 8 Aug 2022 17:12:14 +0800 Subject: [PATCH 34/36] fix: sql command 'show create stable' error --- source/libs/catalog/src/ctgAsync.c | 2 +- source/libs/nodes/src/nodesUtilFuncs.c | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/source/libs/catalog/src/ctgAsync.c b/source/libs/catalog/src/ctgAsync.c index 63d99cc58b..45e9a822cd 100644 --- a/source/libs/catalog/src/ctgAsync.c +++ b/source/libs/catalog/src/ctgAsync.c @@ -1082,7 +1082,7 @@ _return: ctgReleaseVgInfoToCache(pCtg, dbCache); } - if (pTask->res) { + if (pTask->res || code) { ctgHandleTaskEnd(pTask, code); } diff --git a/source/libs/nodes/src/nodesUtilFuncs.c b/source/libs/nodes/src/nodesUtilFuncs.c index 6f71b58aef..cbb0e8e59b 100644 --- a/source/libs/nodes/src/nodesUtilFuncs.c +++ b/source/libs/nodes/src/nodesUtilFuncs.c @@ -392,6 +392,9 @@ static void destroyDataSinkNode(SDataSinkNode* pNode) { nodesDestroyNode((SNode* static void destroyExprNode(SExprNode* pExpr) { taosArrayDestroy(pExpr->pAssociation); } static void destroyTableCfg(STableCfg* pCfg) { + if (NULL == pCfg) { + return; + } taosArrayDestroy(pCfg->pFuncs); taosMemoryFree(pCfg->pComment); taosMemoryFree(pCfg->pSchemas); From 2bc8db11b40271bae30e7155585fdbd23d6fb5e2 Mon Sep 17 00:00:00 2001 From: Yang Zhao Date: Mon, 8 Aug 2022 17:17:20 +0800 Subject: [PATCH 35/36] test: add python test to 3.0 ci and run it (#15829) * test: add python test to 3.0 
ci and run it * ci: put taos.h in /usr/include directory * fix: python test case * fix: exit 0 with test success Co-authored-by: tangfangzhi --- docs/examples/go/query/sync/main.go | 2 +- docs/examples/python/native_insert_example.py | 16 +++---- tests/docs-examples-test/go.sh | 2 +- tests/docs-examples-test/python.sh | 47 +++++++++++++++++++ tests/parallel_test/collect_cases.sh | 2 +- tests/parallel_test/run_case.sh | 2 + 6 files changed, 60 insertions(+), 11 deletions(-) create mode 100644 tests/docs-examples-test/python.sh diff --git a/docs/examples/go/query/sync/main.go b/docs/examples/go/query/sync/main.go index e37164f47f..8156eea46a 100644 --- a/docs/examples/go/query/sync/main.go +++ b/docs/examples/go/query/sync/main.go @@ -31,6 +31,6 @@ func main() { log.Fatalln("scan error:\n", err) return } - log.Fatalln(r.ts, r.current) + log.Println(r.ts, r.current) } } diff --git a/docs/examples/python/native_insert_example.py b/docs/examples/python/native_insert_example.py index 3b6b73cb22..94fd00a6e9 100644 --- a/docs/examples/python/native_insert_example.py +++ b/docs/examples/python/native_insert_example.py @@ -1,13 +1,13 @@ import taos -lines = ["d1001,2018-10-03 14:38:05.000,10.30000,219,0.31000,California.SanFrancisco,2", - "d1004,2018-10-03 14:38:05.000,10.80000,223,0.29000,California.LosAngeles,3", - "d1003,2018-10-03 14:38:05.500,11.80000,221,0.28000,California.LosAngeles,2", - "d1004,2018-10-03 14:38:06.500,11.50000,221,0.35000,California.LosAngeles,3", - "d1002,2018-10-03 14:38:16.650,10.30000,218,0.25000,California.SanFrancisco,3", - "d1001,2018-10-03 14:38:15.000,12.60000,218,0.33000,California.SanFrancisco,2", - "d1001,2018-10-03 14:38:16.800,12.30000,221,0.31000,California.SanFrancisco,2", - "d1003,2018-10-03 14:38:16.600,13.40000,223,0.29000,California.LosAngeles,2"] +lines = ["d1001,2018-10-03 14:38:05.000,10.30000,219,0.31000,'California.SanFrancisco',2", + "d1004,2018-10-03 14:38:05.000,10.80000,223,0.29000,'California.LosAngeles',3", + "d1003,2018-10-03 14:38:05.500,11.80000,221,0.28000,'California.LosAngeles',2", + "d1004,2018-10-03 14:38:06.500,11.50000,221,0.35000,'California.LosAngeles',3", + "d1002,2018-10-03 14:38:16.650,10.30000,218,0.25000,'California.SanFrancisco',3", + "d1001,2018-10-03 14:38:15.000,12.60000,218,0.33000,'California.SanFrancisco',2", + "d1001,2018-10-03 14:38:16.800,12.30000,221,0.31000,'California.SanFrancisco',2", + "d1003,2018-10-03 14:38:16.600,13.40000,223,0.29000,'California.LosAngeles',2"] def get_connection() -> taos.TaosConnection: diff --git a/tests/docs-examples-test/go.sh b/tests/docs-examples-test/go.sh index 185661e8a7..8248b4fe0d 100644 --- a/tests/docs-examples-test/go.sh +++ b/tests/docs-examples-test/go.sh @@ -4,7 +4,7 @@ set -e taosd >>/dev/null 2>&1 & taosadapter >>/dev/null 2>&1 & - +sleep 10 cd ../../docs/examples/go go mod tidy diff --git a/tests/docs-examples-test/python.sh b/tests/docs-examples-test/python.sh new file mode 100644 index 0000000000..140d05395b --- /dev/null +++ b/tests/docs-examples-test/python.sh @@ -0,0 +1,47 @@ +#!/bin/bash + +set -e + +taosd >>/dev/null 2>&1 & +taosadapter >>/dev/null 2>&1 & + +sleep 10 + +cd ../../docs/examples/python + +# 1 +taos -s "create database if not exists log" +python3 connect_example.py + +# 2 +taos -s "drop database if exists power" +python3 native_insert_example.py + +# 3 +taos -s "drop database power" +python3 bind_param_example.py + +# 4 +taos -s "drop database power" +python3 multi_bind_example.py + +# 5 +python3 query_example.py + +# 6 +python3 async_query_example.py + 
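+# sections 7-9 exercise the three schemaless write protocols (InfluxDB line,
+# OpenTSDB telnet, OpenTSDB JSON); each drops the test database first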
+# 7 +taos -s "drop database if exists test" +python3 line_protocol_example.py + +# 8 +taos -s "drop database test" +python3 telnet_line_protocol_example.py + +# 9 +taos -s "drop database test" +python3 json_protocol_example.py + +# 10 +# python3 subscribe_demo.py diff --git a/tests/parallel_test/collect_cases.sh b/tests/parallel_test/collect_cases.sh index 3294beebc1..802c014124 100755 --- a/tests/parallel_test/collect_cases.sh +++ b/tests/parallel_test/collect_cases.sh @@ -41,7 +41,7 @@ fi cat ../script/jenkins/basic.txt |grep -v "^#"|grep -v "^$"|sed "s/^/,,script,/" >>$case_file grep "^python" ../system-test/fulltest.sh |sed "s/^/,,system-test,/" >>$case_file grep "^python" ../develop-test/fulltest.sh |sed "s/^/,,develop-test,/" >>$case_file - +find ../docs-examples-test/ -name "*.sh" -printf '%f\n' | xargs -I {} echo ",,docs-examples-test,bash {}" >> $case_file # tar source code for run.sh to use # if [ $ent -eq 0 ]; then # cd ../../../ diff --git a/tests/parallel_test/run_case.sh b/tests/parallel_test/run_case.sh index eda66a884a..58dcb87345 100755 --- a/tests/parallel_test/run_case.sh +++ b/tests/parallel_test/run_case.sh @@ -50,12 +50,14 @@ if [ $ent -eq 0 ]; then export LD_LIBRARY_PATH=/home/TDengine/debug/build/lib ln -s /home/TDengine/debug/build/lib/libtaos.so /usr/lib/libtaos.so 2>/dev/null ln -s /home/TDengine/debug/build/lib/libtaos.so /usr/lib/libtaos.so.1 2>/dev/null + ln -s /home/TDengine/include/client/taos.h /usr/include/taos.h 2>/dev/null CONTAINER_TESTDIR=/home/TDengine else export PATH=$PATH:/home/TDinternal/debug/build/bin export LD_LIBRARY_PATH=/home/TDinternal/debug/build/lib ln -s /home/TDinternal/debug/build/lib/libtaos.so /usr/lib/libtaos.so 2>/dev/null ln -s /home/TDinternal/debug/build/lib/libtaos.so /usr/lib/libtaos.so.1 2>/dev/null + ln -s /home/TDinternal/community/include/client/taos.h /usr/include/taos.h 2>/dev/null CONTAINER_TESTDIR=/home/TDinternal/community fi mkdir -p /var/lib/taos/subscribe From 839d729d7aa5403235cec151f0b5332476819b26 Mon Sep 17 00:00:00 2001 From: plum-lihui Date: Mon, 8 Aug 2022 18:03:02 +0800 Subject: [PATCH 36/36] docs:modify faq --- docs/zh/27-train-faq/01-faq.md | 441 +++++++++++++++------------------ 1 file changed, 200 insertions(+), 241 deletions(-) diff --git a/docs/zh/27-train-faq/01-faq.md b/docs/zh/27-train-faq/01-faq.md index 5b57e345c8..39d1cd7069 100644 --- a/docs/zh/27-train-faq/01-faq.md +++ b/docs/zh/27-train-faq/01-faq.md @@ -1,241 +1,200 @@ ---- -title: 常见问题及反馈 ---- - -## 问题反馈 - -如果 FAQ 中的信息不能够帮到您,需要 TDengine 技术团队的技术支持与协助,请将以下两个目录中内容打包: - -1. /var/log/taos (如果没有修改过默认路径) -2. /etc/taos - -附上必要的问题描述,包括使用的 TDengine 版本信息、平台环境信息、发生该问题的执行操作、出现问题的表征及大概的时间,在 [GitHub](https://github.com/taosdata/TDengine) 提交 issue。 - -为了保证有足够的 debug 信息,如果问题能够重复,请修改/etc/taos/taos.cfg 文件,最后面添加一行“debugFlag 135"(不带引号本身),然后重启 taosd, 重复问题,然后再递交。也可以通过如下 SQL 语句,临时设置 taosd 的日志级别。 - -``` - alter dnode debugFlag 135; -``` - -但系统正常运行时,请一定将 debugFlag 设置为 131,否则会产生大量的日志信息,降低系统效率。 - -## 常见问题列表 - -### 1. TDengine2.0 之前的版本升级到 2.0 及以上的版本应该注意什么?☆☆☆ - -2.0 版在之前版本的基础上,进行了完全的重构,配置文件和数据文件是不兼容的。在升级之前务必进行如下操作: - -1. 删除配置文件,执行 `sudo rm -rf /etc/taos/taos.cfg` -2. 删除日志文件,执行 `sudo rm -rf /var/log/taos/` -3. 确保数据已经不再需要的前提下,删除数据文件,执行 `sudo rm -rf /var/lib/taos/` -4. 安装最新稳定版本的 TDengine -5. 如果需要迁移数据或者数据文件损坏,请联系涛思数据官方技术支持团队,进行协助解决 - -### 2. Windows 平台下 JDBCDriver 找不到动态链接库,怎么办? - -请看为此问题撰写的 [技术博客](https://www.taosdata.com/blog/2019/12/03/950.html)。 - -### 3. 创建数据表时提示 more dnodes are needed - -请看为此问题撰写的 [技术博客](https://www.taosdata.com/blog/2019/12/03/965.html)。 - -### 4. 
如何让 TDengine crash 时生成 core 文件? - -请看为此问题撰写的 [技术博客](https://www.taosdata.com/blog/2019/12/06/974.html)。 - -### 5. 遇到错误“Unable to establish connection” 怎么办? - -客户端遇到连接故障,请按照下面的步骤进行检查: - -1. 检查网络环境 - - - 云服务器:检查云服务器的安全组是否打开 TCP/UDP 端口 6030-6042 的访问权限 - - 本地虚拟机:检查网络能否 ping 通,尽量避免使用`localhost` 作为 hostname - - 公司服务器:如果为 NAT 网络环境,请务必检查服务器能否将消息返回值客户端 - -2. 确保客户端与服务端版本号是完全一致的,开源社区版和企业版也不能混用 - -3. 在服务器,执行 `systemctl status taosd` 检查*taosd*运行状态。如果没有运行,启动*taosd* - -4. 确认客户端连接时指定了正确的服务器 FQDN (Fully Qualified Domain Name —— 可在服务器上执行 Linux 命令 hostname -f 获得),FQDN 配置参考:[一篇文章说清楚 TDengine 的 FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html)。 - -5. ping 服务器 FQDN,如果没有反应,请检查你的网络,DNS 设置,或客户端所在计算机的系统 hosts 文件。如果部署的是 TDengine 集群,客户端需要能 ping 通所有集群节点的 FQDN。 - -6. 检查防火墙设置(Ubuntu 使用 ufw status,CentOS 使用 firewall-cmd --list-port),确保集群中所有主机在端口 6030-6042 上的 TCP/UDP 协议能够互通。 - -7. 对于 Linux 上的 JDBC(ODBC, Python, Go 等接口类似)连接, 确保*libtaos.so*在目录*/usr/local/taos/driver*里, 并且*/usr/local/taos/driver*在系统库函数搜索路径*LD_LIBRARY_PATH*里 - -8. 对于 Windows 上的 JDBC, ODBC, Python, Go 等连接,确保*C:\TDengine\driver\taos.dll*在你的系统库函数搜索目录里 (建议*taos.dll*放在目录 _C:\Windows\System32_) - -9. 如果仍不能排除连接故障 - - - Linux 系统请使用命令行工具 nc 来分别判断指定端口的 TCP 和 UDP 连接是否通畅 - 检查 UDP 端口连接是否工作:`nc -vuz {hostIP} {port} ` - 检查服务器侧 TCP 端口连接是否工作:`nc -l {port}` - 检查客户端侧 TCP 端口连接是否工作:`nc {hostIP} {port}` - - - Windows 系统请使用 PowerShell 命令 Test-NetConnection -ComputerName {fqdn} -Port {port} 检测服务段端口是否访问 - -10. 也可以使用 taos 程序内嵌的网络连通检测功能,来验证服务器和客户端之间指定的端口连接是否通畅(包括 TCP 和 UDP):[TDengine 内嵌网络检测工具使用指南](https://www.taosdata.com/blog/2020/09/08/1816.html)。 - -### 6. 遇到错误 “Unexpected generic error in RPC”或者“Unable to resolve FQDN” 怎么办? - -产生这个错误,是由于客户端或数据节点无法解析 FQDN(Fully Qualified Domain Name)导致。对于 TAOS Shell 或客户端应用,请做如下检查: - -1. 请检查连接的服务器的 FQDN 是否正确,FQDN 配置参考:[一篇文章说清楚 TDengine 的 FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html) -2. 如果网络配置有 DNS server,请检查是否正常工作 -3. 如果网络没有配置 DNS server,请检查客户端所在机器的 hosts 文件,查看该 FQDN 是否配置,并是否有正确的 IP 地址 -4. 如果网络配置 OK,从客户端所在机器,你需要能 Ping 该连接的 FQDN,否则客户端是无法连接服务器的 -5. 如果服务器曾经使用过 TDengine,且更改过 hostname,建议检查 data 目录的 dnodeEps.json 是否符合当前配置的 EP,路径默认为/var/lib/taos/dnode。正常情况下,建议更换新的数据目录或者备份后删除以前的数据目录,这样可以避免该问题。 -6. 检查/etc/hosts 和/etc/hostname 是否是预配置的 FQDN - -### 7. 虽然语法正确,为什么我还是得到 "Invalid SQL" 错误? - -如果你确认语法正确,2.0 之前版本,请检查 SQL 语句长度是否超过 64K。如果超过,也会返回这个错误。 - -### 8. 是否支持 validation queries? - -TDengine 还没有一组专用的 validation queries。然而建议你使用系统监测的数据库”log"来做。 - - - -### 9. 我可以删除或更新一条记录吗? - -TDengine 目前尚不支持删除功能,未来根据用户需求可能会支持。 - -从 2.0.8.0 开始,TDengine 支持更新已经写入数据的功能。使用更新功能需要在创建数据库时使用 UPDATE 1 参数,之后可以使用 INSERT INTO 命令更新已经写入的相同时间戳数据。UPDATE 参数不支持 ALTER DATABASE 命令修改。没有使用 UPDATE 1 参数创建的数据库,写入相同时间戳的数据不会修改之前的数据,也不会报错。 - -另需注意,在 UPDATE 设置为 0 时,后发送的相同时间戳的数据会被直接丢弃,但并不会报错,而且仍然会被计入 affected rows (所以不能利用 INSERT 指令的返回信息进行时间戳查重)。这样设计的主要原因是,TDengine 把写入的数据看做一个数据流,无论时间戳是否出现冲突,TDengine 都认为产生数据的原始设备真实地产生了这样的数据。UPDATE 参数只是控制这样的流数据在进行持久化时要怎样处理——UPDATE 为 0 时,表示先写入的数据覆盖后写入的数据;而 UPDATE 为 1 时,表示后写入的数据覆盖先写入的数据。这种覆盖关系如何选择,取决于对数据的后续使用和统计中,希望以先还是后生成的数据为准。 - -此外,从 2.1.7.0 版本开始,支持将 UPDATE 参数设为 2,表示“支持部分列更新”。也即,当 UPDATE 设为 1 时,如果更新一个数据行,其中某些列没有提供取值,那么这些列会被设为 NULL;而当 UPDATE 设为 2 时,如果更新一个数据行,其中某些列没有提供取值,那么这些列会保持原有数据行中的对应值。 - -### 10. 我怎么创建超过 1024 列的表? - -使用 2.0 及其以上版本,默认支持 1024 列;2.0 之前的版本,TDengine 最大允许创建 250 列的表。但是如果确实超过限值,建议按照数据特性,逻辑地将这个宽表分解成几个小表。(从 2.1.7.0 版本开始,表的最大列数增加到了 4096 列。) - -### 11. 最有效的写入数据的方法是什么? - -批量插入。每条写入语句可以一张表同时插入多条记录,也可以同时插入多张表的多条记录。 - -### 12. Windows 系统下插入的 nchar 类数据中的汉字被解析成了乱码如何解决? 
- -Windows 下插入 nchar 类的数据中如果有中文,请先确认系统的地区设置成了中国(在 Control Panel 里可以设置),这时 cmd 中的`taos`客户端应该已经可以正常工作了;如果是在 IDE 里开发 Java 应用,比如 Eclipse, IntelliJ,请确认 IDE 里的文件编码为 GBK(这是 Java 默认的编码类型),然后在生成 Connection 时,初始化客户端的配置,具体语句如下: - -```JAVA -Class.forName("com.taosdata.jdbc.TSDBDriver"); -Properties properties = new Properties(); -properties.setProperty(TSDBDriver.LOCALE_KEY, "UTF-8"); -Connection = DriverManager.getConnection(url, properties); -``` - -### 13. Windows 系统下客户端无法正常显示中文字符? - -Windows 系统中一般是采用 GBK/GB18030 存储中文字符,而 TDengine 的默认字符集为 UTF-8 ,在 Windows 系统中使用 TDengine 客户端时,客户端驱动会将字符统一转换为 UTF-8 编码后发送到服务端存储,因此在应用开发过程中,调用接口时正确配置当前的中文字符集即可。 - -【 v2.2.1.5以后版本 】在 Windows 10 环境下运行 TDengine 客户端命令行工具 taos 时,若无法正常输入、显示中文,可以对客户端 taos.cfg 做如下配置: - -``` -locale C -charset UTF-8 -``` - -### 14. JDBC 报错: the executed SQL is not a DML or a DDL? - -请更新至最新的 JDBC 驱动,参考 [Java 连接器](/reference/connector/java) - -### 15. taos connect failed, reason: invalid timestamp - -常见原因是服务器和客户端时间没有校准,可以通过和时间服务器同步的方式(Linux 下使用 ntpdate 命令,Windows 在系统时间设置中选择自动同步)校准。 - -### 16. 表名显示不全 - -由于 taos shell 在终端中显示宽度有限,有可能比较长的表名显示不全,如果按照显示的不全的表名进行相关操作会发生 Table does not exist 错误。解决方法可以是通过修改 taos.cfg 文件中的设置项 maxBinaryDisplayWidth, 或者直接输入命令 set max_binary_display_width 100。或者在命令结尾使用 \G 参数来调整结果的显示方式。 - -### 17. 如何进行数据迁移? - -TDengine 是根据 hostname 唯一标志一台机器的,在数据文件从机器 A 移动机器 B 时,注意如下两件事: - - - 2.0.0.0 至 2.0.6.x 的版本,重新配置机器 B 的 hostname 为机器 A 的 hostname。 - - 2.0.7.0 及以后的版本,到/var/lib/taos/dnode 下,修复 dnodeEps.json 的 dnodeId 对应的 FQDN,重启。确保机器内所有机器的此文件是完全相同的。 - - 1.x 和 2.x 版本的存储结构不兼容,需要使用迁移工具或者自己开发应用导出导入数据。 - -### 18. 如何在命令行程序 taos 中临时调整日志级别 - -为了调试方便,从 2.0.16 版本开始,命令行程序 taos 新增了与日志记录相关的两条指令: - -```sql -ALTER LOCAL flag_name flag_value; -``` - -其含义是,在当前的命令行程序下,修改一个特定模块的日志记录级别(只对当前命令行程序有效,如果 taos 命令行程序重启,则需要重新设置): - - - flag_name 的取值可以是:debugFlag,cDebugFlag,tmrDebugFlag,uDebugFlag,rpcDebugFlag - - flag_value 的取值可以是:131(输出错误和警告日志),135( 输出错误、警告和调试日志),143( 输出错误、警告、调试和跟踪日志) - -```sql -ALTER LOCAL RESETLOG; -``` - -其含义是,清空本机所有由客户端生成的日志文件。 - - - -### 19. go 语言编写组件编译失败怎样解决? - -TDengine 2.3.0.0 及之后的版本包含一个使用 go 语言开发的 taosAdapter 独立组件,需要单独运行,取代之前 taosd 内置的 httpd ,提供包含原 httpd 功能以及支持多种其他软件(Prometheus、Telegraf、collectd、StatsD 等)的数据接入功能。 -使用最新 develop 分支代码编译需要先 `git submodule update --init --recursive` 下载 taosAdapter 仓库代码后再编译。 - -目前编译方式默认自动编译 taosAdapter。go 语言版本要求 1.14 以上,如果发生 go 编译错误,往往是国内访问 go mod 问题,可以通过设置 go 环境变量来解决: - -```sh -go env -w GO111MODULE=on -go env -w GOPROXY=https://goproxy.cn,direct -``` - -如果希望继续使用之前的内置 httpd,可以关闭 taosAdapter 编译,使用 -`cmake .. -DBUILD_HTTP=true` 使用原来内置的 httpd。 - -### 20. 如何查询数据占用的存储空间大小? - -默认情况下,TDengine 的数据文件存储在 /var/lib/taos ,日志文件存储在 /var/log/taos 。 - -若想查看所有数据文件占用的具体大小,可以执行 Shell 指令:`du -sh /var/lib/taos/vnode --exclude='wal'` 来查看。此处排除了 WAL 目录,因为在持续写入的情况下,这里大小几乎是固定的,并且每当正常关闭 TDengine 让数据落盘后,WAL 目录都会清空。 - -若想查看单个数据库占用的大小,可在命令行程序 taos 内指定要查看的数据库后执行 `show vgroups;` ,通过得到的 VGroup id 去 /var/lib/taos/vnode 下查看包含的文件夹大小。 - -若仅仅想查看指定(超级)表的数据块分布及大小,可查看[_block_dist 函数](https://docs.taosdata.com/taos-sql/select/#_block_dist-%E5%87%BD%E6%95%B0) - -### 21. 客户端连接串如何保证高可用? - -请看为此问题撰写的 [技术博客](https://www.taosdata.com/blog/2021/04/16/2287.html) - -### 22. 时间戳的时区信息是怎样处理的? - -TDengine 中时间戳的时区总是由客户端进行处理,而与服务端无关。具体来说,客户端会对 SQL 语句中的时间戳进行时区转换,转为 UTC 时区(即 Unix 时间戳——Unix Timestamp)再交由服务端进行写入和查询;在读取数据时,服务端也是采用 UTC 时区提供原始数据,客户端收到后再根据本地设置,把时间戳转换为本地系统所要求的时区进行显示。 - -客户端在处理时间戳字符串时,会采取如下逻辑: - -1. 在未做特殊设置的情况下,客户端默认使用所在操作系统的时区设置。 -2. 如果在 taos.cfg 中设置了 timezone 参数,则客户端会以这个配置文件中的设置为准。 -3. 
如果在 C/C++/Java/Python 等各种编程语言的 Connector Driver 中,在建立数据库连接时显式指定了 timezone,那么会以这个指定的时区设置为准。例如 Java Connector 的 JDBC URL 中就有 timezone 参数。 -4. 在书写 SQL 语句时,也可以直接使用 Unix 时间戳(例如 `1554984068000`)或带有时区的时间戳字符串,也即以 RFC 3339 格式(例如 `2013-04-12T15:52:01.123+08:00`)或 ISO-8601 格式(例如 `2013-04-12T15:52:01.123+0800`)来书写时间戳,此时这些时间戳的取值将不再受其他时区设置的影响。 - -### 23. TDengine 2.0 都会用到哪些网络端口? - -使用到的网络端口请看文档:[serverport](/reference/config/#serverport) - -需要注意,文档上列举的端口号都是以默认端口 6030 为前提进行说明,如果修改了配置文件中的设置,那么列举的端口都会随之出现变化,管理员可以参考上述的信息调整防火墙设置。 - -### 24. 为什么 RESTful 接口无响应、Grafana 无法添加 TDengine 为数据源、TDengineGUI 选了 6041 端口还是无法连接成功?? - -taosAdapter 从 TDengine 2.4.0.0 版本开始成为 TDengine 服务端软件的组成部分,是 TDengine 集群和应用程序之间的桥梁和适配器。在此之前 RESTful 接口等功能是由 taosd 内置的 HTTP 服务提供的,而如今要实现上述功能需要执行:```systemctl start taosadapter``` 命令来启动 taosAdapter 服务。 - -需要说明的是,taosAdapter 的日志路径 path 需要单独配置,默认路径是 /var/log/taos ;日志等级 logLevel 有 8 个等级,默认等级是 info ,配置成 panic 可关闭日志输出。请注意操作系统 / 目录的空间大小,可通过命令行参数、环境变量或配置文件来修改配置,默认配置文件是 /etc/taos/taosadapter.toml 。 - -有关 taosAdapter 组件的详细介绍请看文档:[taosAdapter](https://docs.taosdata.com/reference/taosadapter/) - -### 25. 发生了 OOM 怎么办? - -OOM 是操作系统的保护机制,当操作系统内存(包括 SWAP )不足时,会杀掉某些进程,从而保证操作系统的稳定运行。通常内存不足主要是如下两个原因导致,一是剩余内存小于 vm.min_free_kbytes ;二是程序请求的内存大于剩余内存。还有一种情况是内存充足但程序占用了特殊的内存地址,也会触发 OOM 。 - -TDengine 会预先为每个 VNode 分配好内存,每个 Database 的 VNode 个数受 maxVgroupsPerDb 影响,每个 VNode 占用的内存大小受 Blocks 和 Cache 影响。要防止 OOM,需要在项目建设之初合理规划内存,并合理设置 SWAP ,除此之外查询过量的数据也有可能导致内存暴涨,这取决于具体的查询语句。TDengine 企业版对内存管理做了优化,采用了新的内存分配器,对稳定性有更高要求的用户可以考虑选择企业版。 +--- +title: 常见问题及反馈 +--- + +## 问题反馈 + +如果 FAQ 中的信息不能够帮到您,需要 TDengine 技术团队的技术支持与协助,请将以下两个目录中内容打包: + +1. /var/log/taos (如果没有修改过默认路径) +2. /etc/taos(如果没有指定其他配置文件路径) + +附上必要的问题描述,包括使用的 TDengine 版本信息、平台环境信息、发生该问题的执行操作、出现问题的表征及大概的时间,在 [GitHub](https://github.com/taosdata/TDengine) 提交 issue。 + +为了保证有足够的 debug 信息,如果问题能够重复,请修改/etc/taos/taos.cfg 文件,最后面添加一行“debugFlag 135"(不带引号本身),然后重启 taosd, 重复问题,然后再递交。也可以通过如下 SQL 语句,临时设置 taosd 的日志级别。 + +``` + alter dnode 'debugFlag' '135'; +``` + +其中 dnode_id 请从 show dnodes; 命令输出中获取。 + +但系统正常运行时,请一定将 debugFlag 设置为 131,否则会产生大量的日志信息,降低系统效率。 + +## 常见问题列表 + +### 1. TDengine3.0 之前的版本升级到 3.0 及以上的版本应该注意什么? + +3.0 版在之前版本的基础上,进行了完全的重构,配置文件和数据文件是不兼容的。在升级之前务必进行如下操作: + +1. 删除配置文件,执行 `sudo rm -rf /etc/taos/taos.cfg` +2. 删除日志文件,执行 `sudo rm -rf /var/log/taos/` +3. 确保数据已经不再需要的前提下,删除数据文件,执行 `sudo rm -rf /var/lib/taos/` +4. 安装最新3.0稳定版本的 TDengine +5. 如果需要迁移数据或者数据文件损坏,请联系涛思数据官方技术支持团队,进行协助解决 + +### 2. Windows 平台下 JDBCDriver 找不到动态链接库,怎么办? + +请看为此问题撰写的 [技术博客](https://www.taosdata.com/blog/2019/12/03/950.html)。 + +### 3. 如何让 TDengine crash 时生成 core 文件? + +请看为此问题撰写的 [技术博客](https://www.taosdata.com/blog/2019/12/06/974.html)。 + +### 4. 遇到错误“Unable to establish connection” 怎么办? + +客户端遇到连接故障,请按照下面的步骤进行检查: + +1. 检查网络环境 + + - 云服务器:检查云服务器的安全组是否打开 TCP/UDP 端口 6030/6041 的访问权限 + - 本地虚拟机:检查网络能否 ping 通,尽量避免使用`localhost` 作为 hostname + - 公司服务器:如果为 NAT 网络环境,请务必检查服务器能否将消息返回值客户端 + +2. 确保客户端与服务端版本号是完全一致的,开源社区版和企业版也不能混用 + +3. 在服务器,执行 `systemctl status taosd` 检查*taosd*运行状态。如果没有运行,启动*taosd* + +4. 确认客户端连接时指定了正确的服务器 FQDN (Fully Qualified Domain Name —— 可在服务器上执行 Linux 命令 hostname -f 获得),FQDN 配置参考:[一篇文章说清楚 TDengine 的 FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html)。 + +5. ping 服务器 FQDN,如果没有反应,请检查你的网络,DNS 设置,或客户端所在计算机的系统 hosts 文件。如果部署的是 TDengine 集群,客户端需要能 ping 通所有集群节点的 FQDN。 + +6. 检查防火墙设置(Ubuntu 使用 ufw status,CentOS 使用 firewall-cmd --list-port),确保集群中所有主机在端口 6030/6041 上的 TCP/UDP 协议能够互通。 + +7. 
对于 Linux 上的 JDBC(ODBC, Python, Go 等接口类似)连接, 确保*libtaos.so*在目录*/usr/local/taos/driver*里, 并且*/usr/local/taos/driver*在系统库函数搜索路径*LD_LIBRARY_PATH*里
+
+8. 对于 Windows 上的 JDBC, ODBC, Python, Go 等连接,确保*C:\TDengine\driver\taos.dll*在你的系统库函数搜索目录里 (建议*taos.dll*放在目录 _C:\Windows\System32_)
+
+9. 如果仍不能排除连接故障
+
+   - Linux 系统请使用命令行工具 nc 来分别判断指定端口的 TCP 和 UDP 连接是否通畅
+     检查 UDP 端口连接是否工作:`nc -vuz {hostIP} {port}`
+     检查服务器侧 TCP 端口连接是否工作:`nc -l {port}`
+     检查客户端侧 TCP 端口连接是否工作:`nc {hostIP} {port}`
+
+   - Windows 系统请使用 PowerShell 命令 Test-NetConnection -ComputerName {fqdn} -Port {port} 检测服务端端口是否可访问
+
+10. 也可以使用 taos 程序内嵌的网络连通检测功能,来验证服务器和客户端之间指定的端口连接是否通畅:[诊断及其他](https://docs.taosdata.com/3.0-preview/operation/diagnose/)。
+
+### 5. 遇到错误“Unable to resolve FQDN” 怎么办?
+
+产生这个错误,是由于客户端或数据节点无法解析 FQDN(Fully Qualified Domain Name)导致。对于 TAOS Shell 或客户端应用,请做如下检查:
+
+1. 请检查连接的服务器的 FQDN 是否正确,FQDN 配置参考:[一篇文章说清楚 TDengine 的 FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html)
+2. 如果网络配置有 DNS server,请检查是否正常工作
+3. 如果网络没有配置 DNS server,请检查客户端所在机器的 hosts 文件,查看该 FQDN 是否配置,并是否有正确的 IP 地址
+4. 如果网络配置 OK,从客户端所在机器,你需要能 Ping 该连接的 FQDN,否则客户端是无法连接服务器的
+5. 如果服务器曾经使用过 TDengine,且更改过 hostname,建议检查 data 目录的 dnode.json 是否符合当前配置的 EP,路径默认为 /var/lib/taos/dnode。正常情况下,建议更换新的数据目录或者备份后删除以前的数据目录,这样可以避免该问题。
+6. 检查 /etc/hosts 和 /etc/hostname 是否是预配置的 FQDN
+
+### 6. 最有效的写入数据的方法是什么?
+
+批量插入。每条写入语句可以一张表同时插入多条记录,也可以同时插入多张表的多条记录。
+
+### 7. Windows 系统下插入的 nchar 类数据中的汉字被解析成了乱码如何解决?
+
+Windows 下插入 nchar 类的数据中如果有中文,请先确认系统的地区设置成了中国(在 Control Panel 里可以设置),这时 cmd 中的`taos`客户端应该已经可以正常工作了;如果是在 IDE 里开发 Java 应用,比如 Eclipse, IntelliJ,请确认 IDE 里的文件编码为 GBK(这是 Java 默认的编码类型),然后在生成 Connection 时,初始化客户端的配置,具体语句如下:
+
+```JAVA
+Class.forName("com.taosdata.jdbc.TSDBDriver");
+Properties properties = new Properties();
+properties.setProperty(TSDBDriver.LOCALE_KEY, "UTF-8");
+Connection = DriverManager.getConnection(url, properties);
+```
+
+### 8. Windows 系统下客户端无法正常显示中文字符?
+
+Windows 系统中一般是采用 GBK/GB18030 存储中文字符,而 TDengine 的默认字符集为 UTF-8 ,在 Windows 系统中使用 TDengine 客户端时,客户端驱动会将字符统一转换为 UTF-8 编码后发送到服务端存储,因此在应用开发过程中,调用接口时正确配置当前的中文字符集即可。
+
+在 Windows 10 环境下运行 TDengine 客户端命令行工具 taos 时,若无法正常输入、显示中文,可以对客户端 taos.cfg 做如下配置:
+
+```
+locale C
+charset UTF-8
+```
+
+### 9. 表名显示不全
+
+由于 taos shell 在终端中显示宽度有限,有可能比较长的表名显示不全,如果按照显示的不全的表名进行相关操作会发生 Table does not exist 错误。解决方法可以是通过修改 taos.cfg 文件中的设置项 maxBinaryDisplayWidth,或者直接输入命令 set max_binary_display_width 100,也可以在命令结尾使用 \G 参数来调整结果的显示方式。
+
+### 10. 如何进行数据迁移?
+
+TDengine 是根据 hostname 唯一标识一台机器的,对于 3.0 版本,将数据文件从机器 A 移动机器 B 时,需要重新配置机器 B 的 hostname 为机器 A 的 hostname。
+
+注:3.x 和之前的 1.x、2.x 版本的存储结构不兼容,需要使用迁移工具或者自己开发应用导出导入数据。
+
+### 11. 如何在命令行程序 taos 中临时调整日志级别
+
+为了调试方便,命令行程序 taos 新增了与日志记录相关的指令:
+
+```sql
+ALTER LOCAL local_option
+
+local_option: {
+    'resetLog'
+  | 'rpcDebugFlag' value
+  | 'tmrDebugFlag' value
+  | 'cDebugFlag' value
+  | 'uDebugFlag' value
+  | 'debugFlag' value
+}
+```
+
+其含义是,在当前的命令行程序下,清空本机所有客户端生成的日志文件(resetLog),或修改一个特定模块的日志记录级别(只对当前命令行程序有效,如果 taos 命令行程序重启,则需要重新设置):
+
+  - value 的取值可以是:131(输出错误和警告日志),135(输出错误、警告和调试日志),143(输出错误、警告、调试和跟踪日志)。
+
+### 12. go 语言编写组件编译失败怎样解决?
+
+TDengine 3.0 版本包含一个使用 go 语言开发的 taosAdapter 独立组件,需要单独运行,提供 RESTful 接入功能以及支持多种其他软件(Prometheus、Telegraf、collectd、StatsD 等)的数据接入功能。
+使用最新 develop 分支代码编译需要先 `git submodule update --init --recursive` 下载 taosAdapter 仓库代码后再编译。
+
+go 语言版本要求 1.14 以上,如果发生 go 编译错误,往往是国内访问 go mod 问题,可以通过设置 go 环境变量来解决:
+
+```sh
+go env -w GO111MODULE=on
+go env -w GOPROXY=https://goproxy.cn,direct
+```
+
+### 13. 如何查询数据占用的存储空间大小?
+
+默认情况下,TDengine 的数据文件存储在 /var/lib/taos ,日志文件存储在 /var/log/taos 。
+
+若想查看所有数据文件占用的具体大小,可以执行 Shell 指令:`du -sh /var/lib/taos/vnode --exclude='wal'` 来查看。此处排除了 WAL 目录,因为在持续写入的情况下,这里大小几乎是固定的,并且每当正常关闭 TDengine 让数据落盘后,WAL 目录都会清空。
+
+若想查看单个数据库占用的大小,可在命令行程序 taos 内指定要查看的数据库后执行 `show vgroups;` ,通过得到的 VGroup id 去 /var/lib/taos/vnode 下查看包含的文件夹大小。
+
+### 14. 客户端连接串如何保证高可用?
+
+请看为此问题撰写的 [技术博客](https://www.taosdata.com/blog/2021/04/16/2287.html)
+
+### 15. 时间戳的时区信息是怎样处理的?
+
+TDengine 中时间戳的时区总是由客户端进行处理,而与服务端无关。具体来说,客户端会对 SQL 语句中的时间戳进行时区转换,转为 UTC 时区(即 Unix 时间戳——Unix Timestamp)再交由服务端进行写入和查询;在读取数据时,服务端也是采用 UTC 时区提供原始数据,客户端收到后再根据本地设置,把时间戳转换为本地系统所要求的时区进行显示。
+
+客户端在处理时间戳字符串时,会采取如下逻辑:
+
+1. 在未做特殊设置的情况下,客户端默认使用所在操作系统的时区设置。
+2. 如果在 taos.cfg 中设置了 timezone 参数,则客户端会以这个配置文件中的设置为准。
+3. 如果在 C/C++/Java/Python 等各种编程语言的 Connector Driver 中,在建立数据库连接时显式指定了 timezone,那么会以这个指定的时区设置为准。例如 Java Connector 的 JDBC URL 中就有 timezone 参数。
+4. 在书写 SQL 语句时,也可以直接使用 Unix 时间戳(例如 `1554984068000`)或带有时区的时间戳字符串,也即以 RFC 3339 格式(例如 `2013-04-12T15:52:01.123+08:00`)或 ISO-8601 格式(例如 `2013-04-12T15:52:01.123+0800`)来书写时间戳,此时这些时间戳的取值将不再受其他时区设置的影响。
+
+### 16. TDengine 3.0 都会用到哪些网络端口?
+
+使用到的网络端口请看文档:[serverport](/reference/config/#serverport)
+
+需要注意,文档上列举的端口号都是以默认端口 6030 为前提进行说明,如果修改了配置文件中的设置,那么列举的端口都会随之出现变化,管理员可以参考上述的信息调整防火墙设置。
+
+### 17. 为什么 RESTful 接口无响应、Grafana 无法添加 TDengine 为数据源、TDengineGUI 选了 6041 端口还是无法连接成功?
+
+taosAdapter 从 TDengine 2.4.0.0 版本开始成为 TDengine 服务端软件的组成部分,是 TDengine 集群和应用程序之间的桥梁和适配器。在此之前 RESTful 接口等功能是由 taosd 内置的 HTTP 服务提供的,而如今要实现上述功能需要执行:```systemctl start taosadapter``` 命令来启动 taosAdapter 服务。
+
+需要说明的是,taosAdapter 的日志路径 path 需要单独配置,默认路径是 /var/log/taos ;日志等级 logLevel 有 8 个等级,默认等级是 info ,配置成 panic 可关闭日志输出。请注意操作系统 / 目录的空间大小,可通过命令行参数、环境变量或配置文件来修改配置,默认配置文件是 /etc/taos/taosadapter.toml 。
+
+有关 taosAdapter 组件的详细介绍请看文档:[taosAdapter](https://docs.taosdata.com/reference/taosadapter/)
+
+### 18. 发生了 OOM 怎么办?
+
+OOM 是操作系统的保护机制,当操作系统内存(包括 SWAP )不足时,会杀掉某些进程,从而保证操作系统的稳定运行。通常内存不足主要是如下两个原因导致,一是剩余内存小于 vm.min_free_kbytes ;二是程序请求的内存大于剩余内存。还有一种情况是内存充足但程序占用了特殊的内存地址,也会触发 OOM 。
+
+TDengine 会预先为每个 VNode 分配好内存,每个 Database 的 VNode 个数受建库时的 vgroups 参数影响,每个 VNode 占用的内存大小受 buffer 参数影响。要防止 OOM,需要在项目建设之初合理规划内存,并合理设置 SWAP ,除此之外查询过量的数据也有可能导致内存暴涨,这取决于具体的查询语句。TDengine 企业版对内存管理做了优化,采用了新的内存分配器,对稳定性有更高要求的用户可以考虑选择企业版。
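+
+如果希望在建库阶段就对上述内存占用做出预估,可以在 CREATE DATABASE 时显式指定 vgroups 与 buffer。下面是一个最小示意(库名 db_mem_demo 以及取值 8、256 均为假设值,实际取值需结合硬件资源、副本数与写入规模评估;单副本时,写入缓存总量约为 vgroups × buffer MB):
+
+```sql
+CREATE DATABASE db_mem_demo VGROUPS 8 BUFFER 256;
+```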