From 9c4c8493ad66330c64fce694ec12ac352e8303e5 Mon Sep 17 00:00:00 2001 From: SunShine Chan Date: Sat, 17 Jul 2021 12:11:32 +0800 Subject: [PATCH 01/81] Update docs.md MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit REMOVE "vnode之间的同步复制仅仅企业版支持" --- documentation20/cn/03.architecture/docs.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/documentation20/cn/03.architecture/docs.md b/documentation20/cn/03.architecture/docs.md index d22855198a..b481bea9f8 100644 --- a/documentation20/cn/03.architecture/docs.md +++ b/documentation20/cn/03.architecture/docs.md @@ -323,8 +323,6 @@ Vnode会保持一个数据版本号(Version),对内存数据进行持久化存 采用同步复制,系统的性能会有所下降,而且latency会增加。因为元数据要强一致,mnode之间的数据同步缺省就是采用的同步复制。 -注:vnode之间的同步复制仅仅企业版支持 - ## 缓存与持久化 ### 缓存 From 5cdbcdafc98aab69b595ee8c100f42fad0428479 Mon Sep 17 00:00:00 2001 From: SunShine Chan Date: Sat, 17 Jul 2021 12:13:08 +0800 Subject: [PATCH 02/81] Update docs.md REMOVE "Note: synchronous replication between vnodes is only supported in Enterprise Edition" --- documentation20/en/03.architecture/docs.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/documentation20/en/03.architecture/docs.md b/documentation20/en/03.architecture/docs.md index 2e5fc7bd18..92813da2c4 100644 --- a/documentation20/en/03.architecture/docs.md +++ b/documentation20/en/03.architecture/docs.md @@ -322,8 +322,6 @@ For scenarios with higher data consistency requirements, asynchronous data repli With synchronous replication, performance of system will decrease and latency will increase. Because metadata needs strong consistent, the default for data synchronization between mnodes is synchronous replication. -Note: synchronous replication between vnodes is only supported in Enterprise Edition - ## Caching and Persistence ### Caching From 756a60f91fb46b6c9dc8bd7c9332fb7ffedee376 Mon Sep 17 00:00:00 2001 From: happyguoxy Date: Wed, 21 Jul 2021 16:11:41 +0800 Subject: [PATCH 03/81] [TD-5213]:test 4096 columns without taosdemo --- .../insert4096columns_not_use_taosdemo.py | 703 ++++++++++++++++++ 1 file changed, 703 insertions(+) create mode 100644 tests/pytest/tools/taosdemoAllTest/TD-5213/insert4096columns_not_use_taosdemo.py diff --git a/tests/pytest/tools/taosdemoAllTest/TD-5213/insert4096columns_not_use_taosdemo.py b/tests/pytest/tools/taosdemoAllTest/TD-5213/insert4096columns_not_use_taosdemo.py new file mode 100644 index 0000000000..dfa829866d --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/TD-5213/insert4096columns_not_use_taosdemo.py @@ -0,0 +1,703 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import random +import string +import os +import time +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql +from util.dnodes import tdDnodes + +class TDTestCase: + updatecfgDict={'maxSQLLength':1048576} + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + self.ts = 1538548685000 + self.num = 100 + + def get_random_string(self, length): + letters = string.ascii_lowercase + result_str = ''.join(random.choice(letters) for i in range(length)) + return result_str + + def run(self): + tdSql.prepare() + # test case for https://jira.taosdata.com:18080/browse/TD-5213 + + print("==============step1, regular table, 1 ts + 4094 cols + 1 binary==============") + startTime = time.time() + sql = "create table regular_table_1(ts timestamp, " + for i in range(4094): + sql += "col%d int, " % (i + 1) + sql += "col4095 binary(22))" + tdLog.info(len(sql)) + tdSql.execute(sql) + + for i in range(self.num): + sql = "insert into regular_table_1 values(%d, " + for j in range(4094): + str = "'%s', " % random.randint(0,1000) + sql += str + sql += "'%s')" % self.get_random_string(22) + tdSql.execute(sql % (self.ts + i)) + time.sleep(1) + tdSql.query("select count(*) from regular_table_1") + tdSql.checkData(0, 0, self.num) + tdSql.query("select * from regular_table_1") + tdSql.checkRows(self.num) + tdSql.checkCols(4096) + + endTime = time.time() + print("total time %ds" % (endTime - startTime)) + + #insert in order + tdLog.info('test insert in order') + for i in range(self.num): + sql = "insert into regular_table_1 (ts,col1,col2,col3,col4,col5,col6,col7,col8,col9,col10,col4095) values(%d, " + for j in range(10): + str = "'%s', " % random.randint(0,1000) + sql += str + sql += "'%s')" % self.get_random_string(22) + tdSql.execute(sql % (self.ts + i + 1000)) + time.sleep(1) + tdSql.query("select count(*) from regular_table_1") + tdSql.checkData(0, 0, 2*self.num) + tdSql.query("select * from regular_table_1") + tdSql.checkRows(2*self.num) + tdSql.checkCols(4096) + + #insert out of order + tdLog.info('test insert out of order') + for i in range(self.num): + sql = "insert into regular_table_1 (ts,col123,col2213,col331,col41,col523,col236,col71,col813,col912,col1320,col4095) values(%d, " + for j in range(10): + str = "'%s', " % random.randint(0,1000) + sql += str + sql += "'%s')" % self.get_random_string(22) + tdSql.execute(sql % (self.ts + i + 2000)) + time.sleep(1) + tdSql.query("select count(*) from regular_table_1") + tdSql.checkData(0, 0, 3*self.num) + tdSql.query("select * from regular_table_1") + tdSql.checkRows(3*self.num) + tdSql.checkCols(4096) + + + print("==============step2,regular table error col or value==============") + tdLog.info('test regular table exceeds row num') + # column > 4096 + sql = "create table regular_table_2(ts timestamp, " + for i in range(4095): + sql += "col%d int, " % (i + 1) + sql += "col4096 binary(22))" + tdLog.info(len(sql)) + tdSql.error(sql) + + # column > 4096 + sql = "insert into regular_table_1 values(%d, " + for j in range(4095): + str = "'%s', " % random.randint(0,1000) + sql += str + sql += "'%s')" % self.get_random_string(22) + tdSql.error(sql) + + # insert column < 4096 + sql = "insert 
into regular_table_1 values(%d, " + for j in range(4092): + str = "'%s', " % random.randint(0,1000) + sql += str + sql += "'%s')" % self.get_random_string(22) + tdSql.error(sql) + + # alter column > 4096 + sql = "alter table regular_table_1 add column max int; " + tdSql.error(sql) + + print("==============step3,regular table , mix data type==============") + startTime = time.time() + sql = "create table regular_table_3(ts timestamp, " + for i in range(2000): + sql += "col%d int, " % (i + 1) + for i in range(2000,4094): + sql += "col%d bigint, " % (i + 1) + sql += "col4095 binary(22))" + tdLog.info(len(sql)) + tdSql.execute(sql) + + for i in range(self.num): + sql = "insert into regular_table_3 values(%d, " + for j in range(4094): + str = "'%s', " % random.randint(0,1000) + sql += str + sql += "'%s')" % self.get_random_string(22) + tdSql.execute(sql % (self.ts + i)) + time.sleep(1) + tdSql.query("select count(*) from regular_table_3") + tdSql.checkData(0, 0, self.num) + tdSql.query("select * from regular_table_3") + tdSql.checkRows(self.num) + tdSql.checkCols(4096) + + endTime = time.time() + print("total time %ds" % (endTime - startTime)) + + sql = "create table regular_table_4(ts timestamp, " + for i in range(500): + sql += "int_%d int, " % (i + 1) + for i in range(500,1000): + sql += "smallint_%d smallint, " % (i + 1) + for i in range(1000,1500): + sql += "tinyint_%d tinyint, " % (i + 1) + for i in range(1500,2000): + sql += "double_%d double, " % (i + 1) + for i in range(2000,2500): + sql += "float_%d float, " % (i + 1) + for i in range(2500,3000): + sql += "bool_%d bool, " % (i + 1) + for i in range(3000,3500): + sql += "bigint_%d bigint, " % (i + 1) + for i in range(3500,3800): + sql += "nchar_%d nchar(4), " % (i + 1) + for i in range(3800,4090): + sql += "binary_%d binary(10), " % (i + 1) + for i in range(4090,4094): + sql += "timestamp_%d timestamp, " % (i + 1) + sql += "col4095 binary(22))" + tdLog.info(len(sql)) + tdSql.execute(sql) + + for i in range(self.num): + sql = "insert into regular_table_4 values(%d, " + for j in range(500): + str = "'%s', " % random.randint(-2147483647,2147483647) + sql += str + for j in range(500,1000): + str = "'%s', " % random.randint(-32767,32767 ) + sql += str + for j in range(1000,1500): + str = "'%s', " % random.randint(-127,127) + sql += str + for j in range(1500,2000): + str = "'%s', " % random.randint(-922337203685477580700,922337203685477580700) + sql += str + for j in range(2000,2500): + str = "'%s', " % random.randint(-92233720368547758070,92233720368547758070) + sql += str + for j in range(2500,3000): + str = "'%s', " % random.choice(['true','false']) + sql += str + for j in range(3000,3500): + str = "'%s', " % random.randint(-9223372036854775807,9223372036854775807) + sql += str + for j in range(3500,3800): + str = "'%s', " % self.get_random_string(4) + sql += str + for j in range(3800,4090): + str = "'%s', " % self.get_random_string(10) + sql += str + for j in range(4090,4094): + str = "%s, " % (self.ts + j) + sql += str + sql += "'%s')" % self.get_random_string(22) + tdSql.execute(sql % (self.ts + i)) + time.sleep(1) + tdSql.query("select count(*) from regular_table_4") + tdSql.checkData(0, 0, self.num) + tdSql.query("select * from regular_table_4") + tdSql.checkRows(self.num) + tdSql.checkCols(4096) + tdLog.info("end ,now new one") + + #insert null value + tdLog.info('test insert null value') + for i in range(self.num): + sql = "insert into regular_table_4 values(%d, " + for j in range(2500): + str = "'%s', " % random.choice(['NULL' 
,'NULL' ,'NULL' ,1 , 10 ,100 ,-100 ,-10, 88 ,66 ,'NULL' ,'NULL' ,'NULL' ]) + sql += str + for j in range(2500,3000): + str = "'%s', " % random.choice(['true' ,'false']) + sql += str + for j in range(3000,3500): + str = "'%s', " % random.randint(-9223372036854775807,9223372036854775807) + sql += str + for j in range(3500,3800): + str = "'%s', " % self.get_random_string(4) + sql += str + for j in range(3800,4090): + str = "'%s', " % self.get_random_string(10) + sql += str + for j in range(4090,4094): + str = "%s, " % (self.ts + j) + sql += str + sql += "'%s')" % self.get_random_string(22) + tdSql.execute(sql % (self.ts + i + 10000)) + time.sleep(1) + tdSql.query("select count(*) from regular_table_4") + tdSql.checkData(0, 0, 2*self.num) + tdSql.query("select * from regular_table_4") + tdSql.checkRows(2*self.num) + tdSql.checkCols(4096) + + #insert in order + tdLog.info('test insert in order') + for i in range(self.num): + sql = "insert into regular_table_4 (ts,int_2,int_22,int_169,smallint_537,smallint_607,tinyint_1030,tinyint_1491,double_1629,double_1808,float_2075,col4095) values(%d, " + for j in range(10): + str = "'%s', " % random.randint(0,100) + sql += str + sql += "'%s')" % self.get_random_string(22) + tdSql.execute(sql % (self.ts + i + 1000)) + time.sleep(1) + tdSql.query("select count(*) from regular_table_4") + tdSql.checkData(0, 0, 3*self.num) + tdSql.query("select * from regular_table_4") + tdSql.checkRows(3*self.num) + tdSql.checkCols(4096) + + #insert out of order + tdLog.info('test insert out of order') + for i in range(self.num): + sql = "insert into regular_table_4 (ts,int_169,float_2075,int_369,tinyint_1491,tinyint_1030,float_2360,smallint_537,double_1808,double_1608,double_1629,col4095) values(%d, " + for j in range(10): + str = "'%s', " % random.randint(0,100) + sql += str + sql += "'%s')" % self.get_random_string(22) + tdSql.execute(sql % (self.ts + i + 2000)) + time.sleep(1) + tdSql.query("select count(*) from regular_table_4") + tdSql.checkData(0, 0, 4*self.num) + tdSql.query("select * from regular_table_4") + tdSql.checkRows(4*self.num) + tdSql.checkCols(4096) + + #define TSDB_MAX_BYTES_PER_ROW 49151[old:1024 && 16384] + #ts:8\int:4\smallint:2\bigint:8\bool:1\float:4\tinyint:1\nchar:4*()+2[offset]\binary:1*()+2[offset] + tdLog.info('test regular_table max bytes per row 49151') + sql = "create table regular_table_5(ts timestamp, " + for i in range(500): + sql += "int_%d int, " % (i + 1) + for i in range(500,1000): + sql += "smallint_%d smallint, " % (i + 1) + for i in range(1000,1500): + sql += "tinyint_%d tinyint, " % (i + 1) + for i in range(1500,2000): + sql += "double_%d double, " % (i + 1) + for i in range(2000,2500): + sql += "float_%d float, " % (i + 1) + for i in range(2500,3000): + sql += "bool_%d bool, " % (i + 1) + for i in range(3000,3500): + sql += "bigint_%d bigint, " % (i + 1) + for i in range(3500,3800): + sql += "nchar_%d nchar(20), " % (i + 1) + for i in range(3800,4090): + sql += "binary_%d binary(34), " % (i + 1) + for i in range(4090,4094): + sql += "timestamp_%d timestamp, " % (i + 1) + sql += "col4095 binary(69))" + tdSql.execute(sql) + tdSql.query("select * from regular_table_5") + tdSql.checkCols(4096) + # TD-5324 + sql = "alter table regular_table_5 modify column col4095 binary(70); " + tdSql.error(sql) + + # drop and add + sql = "alter table regular_table_5 drop column col4095; " + tdSql.execute(sql) + sql = "select * from regular_table_5; " + tdSql.query(sql) + tdSql.checkCols(4095) + sql = "alter table regular_table_5 add column col4095 
binary(70); " + tdSql.error(sql) + sql = "alter table regular_table_5 add column col4095 binary(69); " + tdSql.execute(sql) + sql = "select * from regular_table_5; " + tdSql.query(sql) + tdSql.checkCols(4096) + + #out TSDB_MAX_BYTES_PER_ROW 49151 + tdLog.info('test regular_table max bytes per row out 49151') + sql = "create table regular_table_6(ts timestamp, " + for i in range(500): + sql += "int_%d int, " % (i + 1) + for i in range(500,1000): + sql += "smallint_%d smallint, " % (i + 1) + for i in range(1000,1500): + sql += "tinyint_%d tinyint, " % (i + 1) + for i in range(1500,2000): + sql += "double_%d double, " % (i + 1) + for i in range(2000,2500): + sql += "float_%d float, " % (i + 1) + for i in range(2500,3000): + sql += "bool_%d bool, " % (i + 1) + for i in range(3000,3500): + sql += "bigint_%d bigint, " % (i + 1) + for i in range(3500,3800): + sql += "nchar_%d nchar(20), " % (i + 1) + for i in range(3800,4090): + sql += "binary_%d binary(34), " % (i + 1) + for i in range(4090,4094): + sql += "timestamp_%d timestamp, " % (i + 1) + sql += "col4095 binary(70))" + tdLog.info(len(sql)) + tdSql.error(sql) + + + print("==============step4, super table , 1 ts + 4090 cols + 4 tags ==============") + startTime = time.time() + sql = "create stable stable_1(ts timestamp, " + for i in range(4090): + sql += "col%d int, " % (i + 1) + sql += "col4091 binary(22))" + sql += " tags (loc nchar(10),tag_1 int,tag_2 int,tag_3 int) " + tdLog.info(len(sql)) + tdSql.execute(sql) + sql = '''create table table_0 using stable_1 + tags('table_0' , '1' , '2' , '3' );''' + tdSql.execute(sql) + + for i in range(self.num): + sql = "insert into table_0 values(%d, " + for j in range(4090): + str = "'%s', " % random.randint(0,1000) + sql += str + sql += "'%s')" % self.get_random_string(22) + tdSql.execute(sql % (self.ts + i)) + time.sleep(1) + tdSql.query("select count(*) from table_0") + tdSql.checkData(0, 0, self.num) + tdSql.query("select * from table_0") + tdSql.checkRows(self.num) + tdSql.checkCols(4092) + + sql = '''create table table_1 using stable_1 + tags('table_1' , '1' , '2' , '3' );''' + tdSql.execute(sql) + + for i in range(self.num): + sql = "insert into table_1 values(%d, " + for j in range(2080): + sql += "'%d', " % random.randint(0,1000) + for j in range(2080,4080): + sql += "'%s', " % 'NULL' + for j in range(4080,4090): + sql += "'%s', " % random.randint(0,10000) + sql += "'%s')" % self.get_random_string(22) + tdSql.execute(sql % (self.ts + i)) + time.sleep(1) + tdSql.query("select count(*) from table_1") + tdSql.checkData(0, 0, self.num) + tdSql.query("select * from table_1") + tdSql.checkRows(self.num) + tdSql.checkCols(4092) + + endTime = time.time() + print("total time %ds" % (endTime - startTime)) + + #insert in order + tdLog.info('test insert in order') + for i in range(self.num): + sql = "insert into table_1 (ts,col1,col2,col3,col4,col5,col6,col7,col8,col9,col10,col4091) values(%d, " + for j in range(10): + str = "'%s', " % random.randint(0,1000) + sql += str + sql += "'%s')" % self.get_random_string(22) + tdSql.execute(sql % (self.ts + i + 1000)) + time.sleep(1) + tdSql.query("select count(*) from table_1") + tdSql.checkData(0, 0, 2*self.num) + tdSql.query("select * from table_1") + tdSql.checkRows(2*self.num) + tdSql.checkCols(4092) + + #insert out of order + tdLog.info('test insert out of order') + for i in range(self.num): + sql = "insert into table_1 (ts,col123,col2213,col331,col41,col523,col236,col71,col813,col912,col1320,col4091) values(%d, " + for j in range(10): + str = "'%s', " % 
random.randint(0,1000) + sql += str + sql += "'%s')" % self.get_random_string(22) + tdSql.execute(sql % (self.ts + i + 2000)) + time.sleep(1) + tdSql.query("select count(*) from table_1") + tdSql.checkData(0, 0, 3*self.num) + tdSql.query("select * from table_1") + tdSql.checkRows(3*self.num) + tdSql.checkCols(4092) + + print("==============step5,stable table , mix data type==============") + sql = "create stable stable_3(ts timestamp, " + for i in range(500): + sql += "int_%d int, " % (i + 1) + for i in range(500,1000): + sql += "smallint_%d smallint, " % (i + 1) + for i in range(1000,1500): + sql += "tinyint_%d tinyint, " % (i + 1) + for i in range(1500,2000): + sql += "double_%d double, " % (i + 1) + for i in range(2000,2500): + sql += "float_%d float, " % (i + 1) + for i in range(2500,3000): + sql += "bool_%d bool, " % (i + 1) + for i in range(3000,3500): + sql += "bigint_%d bigint, " % (i + 1) + for i in range(3500,3800): + sql += "nchar_%d nchar(4), " % (i + 1) + for i in range(3800,4090): + sql += "binary_%d binary(10), " % (i + 1) + sql += "col4091 binary(22))" + sql += " tags (loc nchar(10),tag_1 int,tag_2 int,tag_3 int) " + tdLog.info(len(sql)) + tdSql.execute(sql) + sql = '''create table table_30 using stable_3 + tags('table_30' , '1' , '2' , '3' );''' + tdSql.execute(sql) + + for i in range(self.num): + sql = "insert into table_30 values(%d, " + for j in range(500): + str = "'%s', " % random.randint(-2147483647,2147483647) + sql += str + for j in range(500,1000): + str = "'%s', " % random.randint(-32767,32767 ) + sql += str + for j in range(1000,1500): + str = "'%s', " % random.randint(-127,127) + sql += str + for j in range(1500,2000): + str = "'%s', " % random.randint(-922337203685477580700,922337203685477580700) + sql += str + for j in range(2000,2500): + str = "'%s', " % random.randint(-92233720368547758070,92233720368547758070) + sql += str + for j in range(2500,3000): + str = "'%s', " % random.choice(['true','false']) + sql += str + for j in range(3000,3500): + str = "'%s', " % random.randint(-9223372036854775807,9223372036854775807) + sql += str + for j in range(3500,3800): + str = "'%s', " % self.get_random_string(4) + sql += str + for j in range(3800,4090): + str = "'%s', " % self.get_random_string(10) + sql += str + sql += "'%s')" % self.get_random_string(22) + tdSql.execute(sql % (self.ts + i)) + time.sleep(1) + tdSql.query("select count(*) from table_30") + tdSql.checkData(0, 0, self.num) + tdSql.query("select * from table_30") + tdSql.checkRows(self.num) + tdSql.checkCols(4092) + + #insert null value + tdLog.info('test insert null value') + sql = '''create table table_31 using stable_3 + tags('table_31' , '1' , '2' , '3' );''' + tdSql.execute(sql) + + for i in range(self.num): + sql = "insert into table_31 values(%d, " + for j in range(2500): + str = "'%s', " % random.choice(['NULL' ,'NULL' ,'NULL' ,1 , 10 ,100 ,-100 ,-10, 88 ,66 ,'NULL' ,'NULL' ,'NULL' ]) + sql += str + for j in range(2500,3000): + str = "'%s', " % random.choice(['true' ,'false']) + sql += str + for j in range(3000,3500): + str = "'%s', " % random.randint(-9223372036854775807,9223372036854775807) + sql += str + for j in range(3500,3800): + str = "'%s', " % self.get_random_string(4) + sql += str + for j in range(3800,4090): + str = "'%s', " % self.get_random_string(10) + sql += str + sql += "'%s')" % self.get_random_string(22) + tdSql.execute(sql % (self.ts + i)) + time.sleep(1) + tdSql.query("select count(*) from table_31") + tdSql.checkData(0, 0, self.num) + tdSql.query("select * from table_31") + 
tdSql.checkRows(self.num) + tdSql.checkCols(4092) + + #insert in order + tdLog.info('test insert in order') + for i in range(self.num): + sql = "insert into table_31 (ts,int_2,int_22,int_169,smallint_537,smallint_607,tinyint_1030,tinyint_1491,double_1629,double_1808,float_2075,col4091) values(%d, " + for j in range(10): + str = "'%s', " % random.randint(0,100) + sql += str + sql += "'%s')" % self.get_random_string(22) + tdSql.execute(sql % (self.ts + i + 1000)) + time.sleep(1) + tdSql.query("select count(*) from table_31") + tdSql.checkData(0, 0, 2*self.num) + tdSql.query("select * from table_31") + tdSql.checkRows(2*self.num) + tdSql.checkCols(4092) + + #insert out of order + tdLog.info('test insert out of order') + for i in range(self.num): + sql = "insert into table_31 (ts,int_169,float_2075,int_369,tinyint_1491,tinyint_1030,float_2360,smallint_537,double_1808,double_1608,double_1629,col4091) values(%d, " + for j in range(10): + str = "'%s', " % random.randint(0,100) + sql += str + sql += "'%s')" % self.get_random_string(22) + tdSql.execute(sql % (self.ts + i + 2000)) + time.sleep(1) + tdSql.query("select count(*) from table_31") + tdSql.checkData(0, 0, 3*self.num) + tdSql.query("select * from table_31") + tdSql.checkRows(3*self.num) + tdSql.checkCols(4092) + + #define TSDB_MAX_BYTES_PER_ROW 49151 TSDB_MAX_TAGS_LEN 16384 + #ts:8\int:4\smallint:2\bigint:8\bool:1\float:4\tinyint:1\nchar:4*()+2[offset]\binary:1*()+2[offset] + tdLog.info('test super table max bytes per row 49151') + sql = "create table stable_4(ts timestamp, " + for i in range(500): + sql += "int_%d int, " % (i + 1) + for i in range(500,1000): + sql += "smallint_%d smallint, " % (i + 1) + for i in range(1000,1500): + sql += "tinyint_%d tinyint, " % (i + 1) + for i in range(1500,2000): + sql += "double_%d double, " % (i + 1) + for i in range(2000,2500): + sql += "float_%d float, " % (i + 1) + for i in range(2500,3000): + sql += "bool_%d bool, " % (i + 1) + for i in range(3000,3500): + sql += "bigint_%d bigint, " % (i + 1) + for i in range(3500,3800): + sql += "nchar_%d nchar(20), " % (i + 1) + for i in range(3800,4090): + sql += "binary_%d binary(34), " % (i + 1) + sql += "col4091 binary(101))" + sql += " tags (loc nchar(10),tag_1 int,tag_2 int,tag_3 int) " + tdSql.execute(sql) + sql = '''create table table_40 using stable_4 + tags('table_40' , '1' , '2' , '3' );''' + tdSql.execute(sql) + tdSql.query("select * from table_40") + tdSql.checkCols(4092) + tdSql.query("describe table_40") + tdSql.checkRows(4096) + + tdLog.info('test super table drop and add column or tag') + sql = "alter stable stable_4 drop column col4091; " + tdSql.execute(sql) + sql = "select * from stable_4; " + tdSql.query(sql) + tdSql.checkCols(4095) + sql = "alter table stable_4 add column col4091 binary(102); " + tdSql.error(sql) + sql = "alter table stable_4 add column col4091 binary(101); " + tdSql.execute(sql) + sql = "select * from stable_4; " + tdSql.query(sql) + tdSql.checkCols(4096) + + sql = "alter stable stable_4 drop tag tag_1; " + tdSql.execute(sql) + sql = "select * from stable_4; " + tdSql.query(sql) + tdSql.checkCols(4095) + sql = "alter table stable_4 add tag tag_1 int; " + tdSql.execute(sql) + sql = "select * from stable_4; " + tdSql.query(sql) + tdSql.checkCols(4096) + sql = "alter table stable_4 add tag loc1 nchar(10); " + tdSql.error(sql) + + tdLog.info('test super table max bytes per row 49151') + sql = "create table stable_5(ts timestamp, " + for i in range(500): + sql += "int_%d int, " % (i + 1) + for i in range(500,1000): + sql += 
"smallint_%d smallint, " % (i + 1) + for i in range(1000,1500): + sql += "tinyint_%d tinyint, " % (i + 1) + for i in range(1500,2000): + sql += "double_%d double, " % (i + 1) + for i in range(2000,2500): + sql += "float_%d float, " % (i + 1) + for i in range(2500,3000): + sql += "bool_%d bool, " % (i + 1) + for i in range(3000,3500): + sql += "bigint_%d bigint, " % (i + 1) + for i in range(3500,3800): + sql += "nchar_%d nchar(20), " % (i + 1) + for i in range(3800,4090): + sql += "binary_%d binary(34), " % (i + 1) + sql += "col4091 binary(102))" + sql += " tags (loc nchar(10),tag_1 int,tag_2 int,tag_3 int) " + tdSql.error(sql) + + print("==============step6, super table error col ==============") + tdLog.info('test exceeds row num') + # column + tag > 4096 + sql = "create stable stable_2(ts timestamp, " + for i in range(4091): + sql += "col%d int, " % (i + 1) + sql += "col4092 binary(22))" + sql += " tags (loc nchar(10),tag_1 int,tag_2 int,tag_3 int) " + tdLog.info(len(sql)) + tdSql.error(sql) + + # column + tag > 4096 + sql = "create stable stable_2(ts timestamp, " + for i in range(4090): + sql += "col%d int, " % (i + 1) + sql += "col4091 binary(22))" + sql += " tags (loc nchar(10),tag_1 int,tag_2 int,tag_3 int,tag_4 int) " + tdLog.info(len(sql)) + tdSql.error(sql) + + # alter column + tag > 4096 + sql = "alter table stable_1 add column max int; " + tdSql.error(sql) + # TD-5322 + sql = "alter table stable_1 add tag max int; " + tdSql.error(sql) + # TD-5324 + sql = "alter table stable_4 modify column col4091 binary(102); " + tdSql.error(sql) + sql = "alter table stable_4 modify tag loc nchar(20); " + tdSql.query("select * from table_40") + tdSql.checkCols(4092) + tdSql.query("describe table_40") + tdSql.checkRows(4096) + + os.system("rm -rf tools/taosdemoAllTest/TD-5213/insert4096columns_not_use_taosdemo.py.sql") + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) From 1ab5ce7fa9c33b5375a3775597cc0399162e52a4 Mon Sep 17 00:00:00 2001 From: happyguoxy Date: Wed, 21 Jul 2021 16:12:04 +0800 Subject: [PATCH 04/81] [TD-5213]:test 4096 columns with taosdemo --- .../TD-5213/insertSigcolumnsNum4096.py | 181 +++++++++++------- 1 file changed, 111 insertions(+), 70 deletions(-) diff --git a/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py b/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py index 78bd0c7e60..511c274827 100755 --- a/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py +++ b/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py @@ -23,7 +23,7 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor(), logSql) - + def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -39,7 +39,7 @@ class TDTestCase: buildPath = root[:len(root)-len("/build/bin")] break return buildPath - + def run(self): buildPath = self.getBuildPath() if (buildPath == ""): @@ -48,86 +48,127 @@ class TDTestCase: tdLog.info("taosd found in %s" % buildPath) binPath = buildPath+ "/build/bin/" - # insert: create one or mutiple tables per sql and insert multiple rows per sql + #-N:regular table -d:database name -t:table num -n:rows num per table -l:col num -y:force + #regular old && new + os.system("%staosdemo -N -d regular_old -t 1000 -n 10 -l 1023 -y" % binPath) + tdSql.execute("use regular_old") + tdSql.query("show tables;") + 
tdSql.checkRows(1000) + tdSql.query("select * from d0;") + tdSql.checkCols(1024) + tdSql.query("describe d0;") + tdSql.checkRows(1024) + + os.system("%staosdemo -N -d regular_new -t 1000 -n 10 -l 4095 -y" % binPath) + tdSql.execute("use regular_new") + tdSql.query("show tables;") + tdSql.checkRows(1000) + tdSql.query("select * from d0;") + tdSql.checkCols(4096) + tdSql.query("describe d0;") + tdSql.checkRows(4096) + + #super table -d:database name -t:table num -n:rows num per table -l:col num -y:force + os.system("%staosdemo -d super_old -t 1000 -n 10 -l 1021 -y" % binPath) + tdSql.execute("use super_old") + tdSql.query("show tables;") + tdSql.checkRows(1000) + tdSql.query("select * from meters;") + tdSql.checkCols(1024) + tdSql.query("select * from d0;") + tdSql.checkCols(1022) + tdSql.query("describe meters;") + tdSql.checkRows(1024) + tdSql.query("describe d0;") + tdSql.checkRows(1024) + + os.system("%staosdemo -d super_new -t 1000 -n 10 -l 4093 -y" % binPath) + tdSql.execute("use super_new") + tdSql.query("show tables;") + tdSql.checkRows(1000) + tdSql.query("select * from meters;") + tdSql.checkCols(4096) + tdSql.query("select * from d0;") + tdSql.checkCols(4094) + tdSql.query("describe meters;") + tdSql.checkRows(4096) + tdSql.query("describe d0;") + tdSql.checkRows(4096) + tdSql.execute("create table stb_new1_1 using meters tags(1,2)") + tdSql.query("select * from stb_new1_1") + tdSql.checkCols(4094) + tdSql.query("describe stb_new1_1;") + tdSql.checkRows(4096) + + tdLog.info("stop dnode to commit data to disk") + tdDnodes.stop(1) + tdDnodes.start(1) + + # insert: create one or mutiple tables per sql and insert multiple rows per sql # test case for https://jira.taosdata.com:18080/browse/TD-5213 os.system("%staosdemo -f tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.json -y " % binPath) - tdSql.execute("use db") + tdSql.execute("use json") tdSql.query("select count (tbname) from stb_old") tdSql.checkData(0, 0, 10) - # tdSql.query("select * from stb_old") - # tdSql.checkRows(10) - # tdSql.checkCols(1024) + tdSql.query("select * from stb_old") + tdSql.checkRows(1000) + tdSql.checkCols(1024) + + tdSql.query("select count (tbname) from stb_new") + tdSql.checkData(0, 0, 10) - # tdSql.query("select count (tbname) from stb_new") - # tdSql.checkData(0, 0, 10) + tdSql.query("select * from stb_new") + tdSql.checkRows(1000) + tdSql.checkCols(4096) + tdSql.query("describe stb_new;") + tdSql.checkRows(4096) + tdSql.query("select * from stb_new_1") + tdSql.checkRows(100) + tdSql.checkCols(4091) + tdSql.query("describe stb_new_1;") + tdSql.checkRows(4096) + tdSql.execute("create table stb_new1_1 using stb_new tags(1,2,3,4,5)") + tdSql.query("select * from stb_new1_1") + tdSql.checkCols(4091) + tdSql.query("describe stb_new1_1;") + tdSql.checkRows(4096) - # tdSql.query("select * from stb_new") - # tdSql.checkRows(10) - # tdSql.checkCols(4096) + tdSql.query("select count (tbname) from stb_mix") + tdSql.checkData(0, 0, 10) - # tdLog.info("stop dnode to commit data to disk") - # tdDnodes.stop(1) - # tdDnodes.start(1) + tdSql.query("select * from stb_mix") + tdSql.checkRows(1000) + tdSql.checkCols(4096) + tdSql.query("describe stb_mix;") + tdSql.checkRows(4096) + tdSql.query("select * from stb_mix_1") + tdSql.checkRows(100) + tdSql.checkCols(4092) + tdSql.query("describe stb_mix_1;") + tdSql.checkRows(4096) - #regular table - sql = "create table tb(ts timestamp, " - for i in range(1022): - sql += "c%d binary(14), " % (i + 1) - sql += "c1023 binary(22))" - tdSql.execute(sql) - - for i in range(4): 
- sql = "insert into tb values(%d, " - for j in range(1022): - str = "'%s', " % self.get_random_string(14) - sql += str - sql += "'%s')" % self.get_random_string(22) - tdSql.execute(sql % (self.ts + i)) - - time.sleep(10) - tdSql.query("select count(*) from tb") - tdSql.checkData(0, 0, 4) - - tdDnodes.stop(1) - tdDnodes.start(1) - - time.sleep(1) - tdSql.query("select count(*) from tb") - tdSql.checkData(0, 0, 4) - - - sql = "create table tb1(ts timestamp, " - for i in range(4094): - sql += "c%d binary(14), " % (i + 1) - sql += "c4095 binary(22))" - tdSql.execute(sql) - - for i in range(4): - sql = "insert into tb1 values(%d, " - for j in range(4094): - str = "'%s', " % self.get_random_string(14) - sql += str - sql += "'%s')" % self.get_random_string(22) - tdSql.execute(sql % (self.ts + i)) - - time.sleep(10) - tdSql.query("select count(*) from tb1") - tdSql.checkData(0, 0, 4) - - tdDnodes.stop(1) - tdDnodes.start(1) - - time.sleep(1) - tdSql.query("select count(*) from tb1") - tdSql.checkData(0, 0, 4) - - - - #os.system("rm -rf tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py.sql") + tdSql.query("select count (tbname) from stb_excel") + tdSql.checkData(0, 0, 10) + tdSql.query("select * from stb_excel") + tdSql.checkRows(1000) + tdSql.checkCols(4096) + tdSql.query("describe stb_excel;") + tdSql.checkRows(4096) + tdSql.query("select * from stb_excel_1") + tdSql.checkRows(100) + tdSql.checkCols(4092) + tdSql.query("describe stb_excel_1;") + tdSql.checkRows(4096) + + + os.system("rm -rf tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py.sql") + + + def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) From d966ee23d607c297fd535a5882f5489d99cee9d8 Mon Sep 17 00:00:00 2001 From: happyguoxy Date: Wed, 21 Jul 2021 16:12:18 +0800 Subject: [PATCH 05/81] [TD-5213]:test 4096 columns with taosdemo --- .../TD-5213/insertSigcolumnsNum4096.json | 43 +++++++++++++++---- 1 file changed, 34 insertions(+), 9 deletions(-) diff --git a/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.json b/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.json index e2b15b83a7..03958f2e6f 100755 --- a/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.json +++ b/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.json @@ -15,7 +15,7 @@ "max_sql_len": 102400000, "databases": [{ "dbinfo": { - "name": "db", + "name": "json", "drop": "yes", "replica": 1, "days": 10, @@ -39,7 +39,7 @@ "childtable_prefix": "stb_old_", "auto_create_table": "no", "batch_create_tbl_num": 5, - "data_source": "sample", + "data_source": "rand", "insert_mode": "taosc", "insert_rows": 100, "childtable_limit": 0, @@ -55,7 +55,7 @@ "sample_format": "csv", "sample_file": "./tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.csv", "tags_file": "", - "columns": [{"type": "INT","count":4000}, {"type": "BINARY", "len": 16, "count":1}], + "columns": [{"type": "INT","count":1000}, {"type": "BINARY", "len": 16, "count":20}], "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}] },{ "name": "stb_new", @@ -80,13 +80,13 @@ "sample_format": "csv", "sample_file": "./tools/taosdemoAllTest/sample.csv", "tags_file": "", - "columns": [{"type": "DOUBLE","count":1020}], - "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}] + "columns": [{"type": "INT","count":4000}, {"type": "BINARY", "len": 16, "count":90}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":3}] },{ - "name": 
"stb_int", + "name": "stb_mix", "child_table_exists":"no", "childtable_count": 10, - "childtable_prefix": "stb_int_", + "childtable_prefix": "stb_mix_", "auto_create_table": "no", "batch_create_tbl_num": 5, "data_source": "rand", @@ -105,8 +105,33 @@ "sample_format": "csv", "sample_file": "./tools/taosdemoAllTest/sample.csv", "tags_file": "", - "columns": [{"type": "int","count":1020}], - "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}] + "columns": [{"type": "INT","count":500},{"type": "SMALLINT","count":500},{"type": "TINYINT","count":500},{"type": "DOUBLE","count":500},{"type": "FLOAT","count":500},{"type": "BOOL","count":500},{"type": "BIGINT","count":500},{"type": "NCHAR","len": 20,"count":300},{"type": "BINARY","len": 34,"count":290},{"type": "BINARY","len": 101,"count":1}], + "tags": [{"type": "INT", "count":3}, {"type": "NCHAR", "len": 10, "count":1}] + },{ + "name": "stb_excel", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb_excel_", + "auto_create_table": "no", + "batch_create_tbl_num": 5, + "data_source": "sample", + "insert_mode": "taosc", + "insert_rows": 100, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.csv", + "tags_file": "", + "columns": [{"type": "INT","count":500},{"type": "SMALLINT","count":500},{"type": "SMALLINT","count":500},{"type": "DOUBLE","count":500},{"type": "FLOAT","count":500},{"type": "BOOL","count":500},{"type": "BIGINT","count":500},{"type": "NCHAR","len": 19,"count":300},{"type": "BINARY","len": 34,"count":290},{"type": "BINARY","len": 101,"count":1}], + "tags": [{"type": "INT", "count":3}, {"type": "NCHAR", "len": 10, "count":1}] }] }] } From f12d1f27694f97b24142dae7be0d9d5e0729bac4 Mon Sep 17 00:00:00 2001 From: happyguoxy Date: Wed, 21 Jul 2021 16:12:32 +0800 Subject: [PATCH 06/81] [TD-5213]:test 4096 columns with taosdemo --- .../TD-5213/insertSigcolumnsNum4096.csv | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.csv b/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.csv index 078c3b93df..5b30be5b4c 100755 --- a/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.csv +++ b/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.csv @@ -1,10 +1,3 @@ 
-0,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, -1,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, -2,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 
-3,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, -4,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, -5,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 
-6,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, -7,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, -8,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 
-9,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, \ No newline at end of file +1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, 363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373, 374, 375, 376, 377, 378, 379, 380, 381, 382, 383, 384, 385, 386, 387, 388, 389, 390, 391, 392, 393, 394, 395, 396, 397, 398, 399, 400, 401, 402, 403, 404, 405, 406, 407, 408, 409, 410, 411, 412, 413, 414, 415, 416, 417, 418, 419, 420, 421, 422, 423, 424, 425, 426, 427, 428, 429, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 460, 461, 462, 463, 464, 465, 466, 467, 468, 469, 470, 471, 472, 473, 474, 475, 476, 477, 478, 479, 480, 481, 482, 483, 484, 485, 486, 487, 488, 489, 490, 491, 492, 493, 494, 495, 496, 497, 498, 499, 500, 501, 502, 503, 504, 505, 506, 507, 508, 509, 510, 511, 512, 513, 514, 515, 516, 517, 518, 519, 520, 521, 
522, 523, 524, 525, 526, ... (consecutive integers continue) ..., 4089, 4090, 4091
+1,2,3,NULL,NULL,NULL, ... (NULL repeated for all remaining columns) ...,NULL,NULL,NULL
+1,2,3,NULL,NULL,NULL, ... (NULL repeated for most remaining columns) ...,NULL,4,NULL,NULL,NULL,NULL,NULL,NULL,5,NULL,NULL,6,NULL,NULL,NULL,7,NULL,NULL,NULL,8,NULL,NULL,NULL,9,NULL,NULL,10
\ No newline at end of file

From c978fb9329c89e67dd607428005f6a9951e5b208 Mon Sep 17 00:00:00 2001
From: happyguoxy
Date: Wed, 21 Jul 2021 16:12:54 +0800
Subject: [PATCH 07/81] [TD-5213]:test 4096 columns

---
 tests/pytest/fulltest.sh | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh
index 63a5431d75..e452ed3de4 100755
--- a/tests/pytest/fulltest.sh
+++ b/tests/pytest/fulltest.sh
@@ -355,6 +355,8 @@ python3 ./test.py -f tag_lite/alter_tag.py
 # python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJson.py
 python3 test.py -f tools/taosdemoAllTest/taosdemoTestQueryWithJson.py
 python3 test.py -f tools/taosdemoAllTest/TD-4985/query-limit-offset.py
+python3 test.py -f tools/taosdemoAllTest/TD-5213/insert4096columns_not_use_taosdemo.py
+python3 test.py -f tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py
 python3 ./test.py -f tag_lite/drop_auto_create.py
 python3 test.py -f insert/insert_before_use_db.py
 python3 test.py -f alter/alter_keep.py

From a54d3a2b962285116befe9d3a4094da023b8175e Mon Sep 17 00:00:00 2001
From: yihaoDeng
Date: Thu, 22 Jul 2021 04:33:58 +0800
Subject: [PATCH 08/81] TD-5438

---
 src/client/src/tscSubquery.c |  2 +-
 src/client/src/tscUtil.c     |  2 +-
 src/query/inc/qExecutor.h    |  3 ++-
 src/query/inc/qTableMeta.h   |  3 ++-
 src/query/src/qExecutor.c    | 24 +++++++++++++++++++-----
 src/query/src/qPlan.c        | 17 ++++++++++++-----
 6 files changed, 37 insertions(+), 14 deletions(-)

diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c
index b72bd78b1b..771e92a7e4 100644
--- a/src/client/src/tscSubquery.c
+++ b/src/client/src/tscSubquery.c
@@ -2892,7 +2892,7 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR
   tscDebug("0x%"PRIx64" sub:0x%"PRIx64" retrieve numOfRows:%d totalNumOfRows:%" PRIu64 " from ep:%s, orderOfSub:%d",
       pParentSql->self, pSql->self, pRes->numOfRows, pState->numOfRetrievedRows, pSql->epSet.fqdn[pSql->epSet.inUse], idx);
 
-  if (num > tsMaxNumOfOrderedResults && tscIsProjectionQueryOnSTable(pQueryInfo, 0) && !(tscGetQueryInfo(&pParentSql->cmd)->distinctTag)) {
+  if (num > tsMaxNumOfOrderedResults && /*tscIsProjectionQueryOnSTable(pQueryInfo, 0) &&*/ !(tscGetQueryInfo(&pParentSql->cmd)->distinct)) {
     tscError("0x%"PRIx64" sub:0x%"PRIx64" num of OrderedRes is too many, max allowed:%" PRId32 " , current:%" PRId64,
              pParentSql->self, pSql->self, tsMaxNumOfOrderedResults, num);
     tscAbortFurtherRetryRetrieval(trsupport, tres, TSDB_CODE_TSC_SORTED_RES_TOO_MANY);
diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c
index a78e5fa5f2..e8750b7758 100644
--- a/src/client/src/tscUtil.c
+++ b/src/client/src/tscUtil.c
@@ -4597,7 +4597,7 @@ int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAt
   pQueryAttr->queryBlockDist    = isBlockDistQuery(pQueryInfo);
   pQueryAttr->pointInterpQuery  = tscIsPointInterpQuery(pQueryInfo);
   pQueryAttr->timeWindowInterpo = timeWindowInterpoRequired(pQueryInfo);
-  pQueryAttr->distinctTag       = pQueryInfo->distinctTag;
+  pQueryAttr->distinct          = pQueryInfo->distinct;
   pQueryAttr->sw                = pQueryInfo->sessionWindow;
   pQueryAttr->stateWindow       = pQueryInfo->stateWindow;
 
diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h
index 4581ba258d..ce70a9ba4a 100644
--- a/src/query/inc/qExecutor.h
+++ b/src/query/inc/qExecutor.h
@@ -216,7 +216,7 @@ typedef struct SQueryAttr {
   bool             simpleAgg;
   bool             pointInterpQuery; // point interpolation query
   bool             needReverseScan;  // need reverse scan
-  bool             distinctTag;      // distinct tag query
+  bool             distinct;         // distinct query or not
   bool             stateWindow;      // window State on sub/normal table
   bool             createFilterOperator; // if filter operator is needed
   int32_t          interBufSize;     // intermediate buffer sizse
@@ -514,6 +514,7 @@ typedef struct SDistinctOperatorInfo {
   bool             recordNullVal;  //has already record the null value, no need to try again
   int64_t          threshold;
   int64_t          outputCapacity;
+  int32_t          colIndex;
 } SDistinctOperatorInfo;
 
 struct SGlobalMerger;
diff --git a/src/query/inc/qTableMeta.h b/src/query/inc/qTableMeta.h
index 56eea6429f..520dade668 100644
--- a/src/query/inc/qTableMeta.h
+++ b/src/query/inc/qTableMeta.h
@@ -121,7 +121,8 @@ typedef struct SQueryInfo {
   int64_t          vgroupLimit;     // table limit in case of super table projection query + global order + limit
   int32_t          udColumnId;      // current user-defined constant output field column id, monotonically decreases from TSDB_UD_COLUMN_INDEX
-  bool             distinctTag;     // distinct tag or not
+  bool             distinct;        // distinct tag or not
+  bool             onlyHasTagCond;
   int32_t          round;           // 0/1/....
   int32_t          bufLen;
   char*            buf;
diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c
index 288a206491..1ed3f7759f 100644
--- a/src/query/src/qExecutor.c
+++ b/src/query/src/qExecutor.c
@@ -6479,7 +6479,7 @@ static SSDataBlock* doTagScan(void* param, bool* newgroup) {
     pOperator->status = OP_EXEC_DONE;
     qDebug("QInfo:0x%"PRIx64" create count(tbname) query, res:%d rows:1", GET_QID(pRuntimeEnv), count);
   } else {  // return only the tags|table name etc.
-    SExprInfo* pExprInfo = pOperator->pExpr;  // todo use the column list instead of exprinfo
+    SExprInfo* pExprInfo = &pOperator->pExpr[0];  // todo use the column list instead of exprinfo
     count = 0;
 
     while(pInfo->curPos < pInfo->totalTables && count < maxNumOfTables) {
@@ -6565,13 +6565,25 @@ static SSDataBlock* hashDistinct(void* param, bool* newgroup) {
     publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_AFTER_OPERATOR_EXEC);
 
     if (pBlock == NULL) {
+      setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
+      pOperator->status = OP_EXEC_DONE;
+      break;
+    }
+    if (pInfo->colIndex == -1) {
+      for (int i = 0; i < taosArrayGetSize(pBlock->pDataBlock); i++) {
+        SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, i);
+        if (pColDataInfo->info.colId == pOperator->pExpr[0].base.resColId) {
+          pInfo->colIndex = i;
+          break;
+        }
+      }
+    }
+    if (pInfo->colIndex == -1) {
       setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
       pOperator->status = OP_EXEC_DONE;
       return NULL;
     }
-
-    assert(pBlock->info.numOfCols == 1);
-    SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, 0);
+    SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, pInfo->colIndex);
 
     int16_t bytes = pColInfoData->info.bytes;
     int16_t type = pColInfoData->info.type;
@@ -6614,7 +6626,8 @@ static SSDataBlock* hashDistinct(void* param, bool* newgroup) {
 
 SOperatorInfo* createDistinctOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) {
   SDistinctOperatorInfo* pInfo = calloc(1, sizeof(SDistinctOperatorInfo));
-
+  pInfo->colIndex  = -1;
+  pInfo->threshold = 10000000; // distinct result threshold
   pInfo->outputCapacity = 4096;
   pInfo->pSet = taosHashInit(64, taosGetDefaultHashFunction(pExpr->base.colType), false, HASH_NO_LOCK);
   pInfo->pRes = createOutputBuf(pExpr, numOfOutput, (int32_t) pInfo->outputCapacity);
@@ -6628,6 +6641,7 @@ SOperatorInfo* createDistinctOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperat
   pOperator->info         = pInfo;
   pOperator->pRuntimeEnv  = pRuntimeEnv;
   pOperator->exec         = hashDistinct;
+  pOperator->pExpr        = pExpr;
  pOperator->cleanup      = destroyDistinctOperatorInfo;
   appendUpstream(pOperator, upstream);
diff --git a/src/query/src/qPlan.c b/src/query/src/qPlan.c
index e724b0418c..a94d015e06 100644
--- a/src/query/src/qPlan.c
+++ b/src/query/src/qPlan.c
@@ -104,7 +104,7 @@ static SQueryNode* doAddTableColumnNode(SQueryInfo* pQueryInfo, STableMetaInfo*
   int32_t num = (int32_t) taosArrayGetSize(pExprs);
   SQueryNode* pNode = createQueryNode(QNODE_TAGSCAN, "TableTagScan", NULL, 0, pExprs->pData, num, info, NULL);
 
-  if (pQueryInfo->distinctTag) {
+  if (pQueryInfo->distinct) {
     pNode = createQueryNode(QNODE_DISTINCT, "Distinct", &pNode, 1, pExprs->pData, num, info, NULL);
   }
 
@@ -551,9 +551,11 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) {
   int32_t op = 0;
 
   if (onlyQueryTags(pQueryAttr)) {  // do nothing for tags query
-    op = OP_TagScan;
-    taosArrayPush(plan, &op);
-    if (pQueryAttr->distinctTag) {
+    if (onlyQueryTags(pQueryAttr)) {
+      op = OP_TagScan;
+      taosArrayPush(plan, &op);
+    }
+    if (pQueryAttr->distinct) {
       op = OP_Distinct;
       taosArrayPush(plan, &op);
     }
@@ -630,8 +632,13 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) {
     } else {
      op = OP_Project;
       taosArrayPush(plan, &op);
+      if (pQueryAttr->distinct) {
+        op = OP_Distinct;
+        taosArrayPush(plan, &op);
+      }
     }
   }
+
   if (pQueryAttr->limit.limit > 0 || pQueryAttr->limit.offset > 0) {
     op = OP_Limit;
     taosArrayPush(plan, &op);
@@ -651,7 +658,7 @@ SArray* createGlobalMergePlan(SQueryAttr* pQueryAttr) {
   int32_t op = OP_MultiwayMergeSort;
   taosArrayPush(plan, &op);
 
-  if (pQueryAttr->distinctTag) {
+  if (pQueryAttr->distinct) {
     op = OP_Distinct;
     taosArrayPush(plan, &op);
   }

From cacf85993e940f1c1b122c176ec8951e433ff9aa Mon Sep 17 00:00:00 2001
From: yihaoDeng
Date: Thu, 22 Jul 2021 05:03:45 +0800
Subject: [PATCH 09/81] [TD-5438] enhance distinct

---
 src/client/src/tscSQLParser.c | 33 ++++++++++++++++++++++++---------
 1 file changed, 24 insertions(+), 9 deletions(-)

diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c
index adea1cc453..513a116cf7 100644
--- a/src/client/src/tscSQLParser.c
+++ b/src/client/src/tscSQLParser.c
@@ -1947,12 +1947,13 @@ bool isValidDistinctSql(SQueryInfo* pQueryInfo) {
   if (pQueryInfo == NULL) {
     return false;
   }
-  if ((pQueryInfo->type & TSDB_QUERY_TYPE_STABLE_QUERY) != TSDB_QUERY_TYPE_STABLE_QUERY) {
+  if ((pQueryInfo->type & TSDB_QUERY_TYPE_STABLE_QUERY) != TSDB_QUERY_TYPE_STABLE_QUERY
+      && (pQueryInfo->type & TSDB_QUERY_TYPE_TABLE_QUERY) != TSDB_QUERY_TYPE_TABLE_QUERY) {
     return false;
   }
-  if (tscQueryTags(pQueryInfo) && tscNumOfExprs(pQueryInfo) == 1){
-    return true;
-  }
+  if (tscNumOfExprs(pQueryInfo) == 1){
+    return true;
+  }
   return false;
 }
 
@@ -2041,7 +2042,7 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS
   const char* msg1 = "too many items in selection clause";
   const char* msg2 = "functions or others can not be mixed up";
   const char* msg3 = "not support query expression";
-  const char* msg4 = "only support distinct one tag";
+  const char* msg4 = "only support distinct one column or tag";
   const char* msg5 = "invalid function name";
 
   // too many result columns not support order by in query
@@ -2101,13 +2102,13 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS
   }
 
   if (hasDistinct == true) {
-    if (!isValidDistinctSql(pQueryInfo)) {
-      return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
-    }
-
-    pQueryInfo->distinctTag = true;
+    if (!isValidDistinctSql(pQueryInfo) ) {
+      return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
+    }
+    pQueryInfo->distinct = true;
   }
 
+
   // there is only one user-defined column in the final result field, add the timestamp column.
size_t numOfSrcCols = taosArrayGetSize(pQueryInfo->colList); if ((numOfSrcCols <= 0 || !hasNoneUserDefineExpr(pQueryInfo)) && !tscQueryTags(pQueryInfo) && !tscQueryBlockInfo(pQueryInfo)) { @@ -3971,8 +3972,10 @@ static int32_t getTablenameCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* static int32_t getColumnQueryCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr, int32_t relOptr) { if (pExpr == NULL) { + pQueryInfo->onlyHasTagCond &= true; return TSDB_CODE_SUCCESS; } + pQueryInfo->onlyHasTagCond &= false; if (!tSqlExprIsParentOfLeaf(pExpr)) { // internal node int32_t ret = getColumnQueryCondInfo(pCmd, pQueryInfo, pExpr->pLeft, pExpr->tokenId); @@ -4099,6 +4102,7 @@ static int32_t checkAndSetJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tS static int32_t getJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr) { if (pExpr == NULL) { + pQueryInfo->onlyHasTagCond &= true; return TSDB_CODE_SUCCESS; } @@ -4778,8 +4782,11 @@ static int32_t getTimeRangeFromExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlE int32_t code = 0; if (pExpr == NULL) { + pQueryInfo->onlyHasTagCond &= true; return TSDB_CODE_SUCCESS; } + pQueryInfo->onlyHasTagCond &= false; + if (!tSqlExprIsParentOfLeaf(pExpr)) { if (pExpr->tokenId == TK_OR) { @@ -4828,11 +4835,13 @@ static int32_t validateJoinExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCondExpr if (!QUERY_IS_JOIN_QUERY(pQueryInfo->type)) { if (pQueryInfo->numOfTables == 1) { + pQueryInfo->onlyHasTagCond &= true; return TSDB_CODE_SUCCESS; } else { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3); } } + pQueryInfo->onlyHasTagCond &= false; STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { // for stable join, tag columns @@ -5145,12 +5154,13 @@ int32_t validateWhereNode(SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SSqlObj* pSq if (pExpr == NULL) { return TSDB_CODE_SUCCESS; } - + const char* msg1 = "invalid expression"; const char* msg2 = "invalid filter expression"; int32_t ret = TSDB_CODE_SUCCESS; + pQueryInfo->onlyHasTagCond = true; // tags query condition may be larger than 512bytes, therefore, we need to prepare enough large space SStringBuilder sb; memset(&sb, 0, sizeof(sb)); SCondExpr condExpr = {0}; @@ -5178,6 +5188,7 @@ int32_t validateWhereNode(SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SSqlObj* pSq if ((ret = getTimeRangeFromExpr(&pSql->cmd, pQueryInfo, condExpr.pTimewindow)) != TSDB_CODE_SUCCESS) { return ret; } + // 3. 
get the tag query condition if ((ret = getTagQueryCondExpr(&pSql->cmd, pQueryInfo, &condExpr, pExpr)) != TSDB_CODE_SUCCESS) { @@ -5487,7 +5498,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); - if (pQueryInfo->distinctTag == true) { + if (pQueryInfo->distinct == true) { pQueryInfo->order.order = TSDB_ORDER_ASC; pQueryInfo->order.orderColId = 0; return TSDB_CODE_SUCCESS; @@ -8583,6 +8594,10 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf return TSDB_CODE_TSC_INVALID_OPERATION; } + if (isSTable && tscQueryTags(pQueryInfo) && pQueryInfo->distinct && !pQueryInfo->onlyHasTagCond) { + return TSDB_CODE_TSC_INVALID_OPERATION; + } + // parse the window_state if (validateStateWindowNode(pCmd, pQueryInfo, pSqlNode, isSTable) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_OPERATION; From ff9748b473a4a6067705e0b7c36b38f0b6e2e148 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Thu, 22 Jul 2021 13:29:39 +0800 Subject: [PATCH 10/81] [TD-5474] fix runtime bug --- src/query/src/qExecutor.c | 55 +++++++++++++++++++++++++++++++++---- src/tsdb/src/tsdbMain.c | 2 +- src/tsdb/src/tsdbRead.c | 14 ++++++---- src/util/src/tcompression.c | 4 +-- 4 files changed, 61 insertions(+), 14 deletions(-) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 982c45c441..25a4448002 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -97,12 +97,47 @@ static UNUSED_FUNC void* u_realloc(void* p, size_t __size) { #define GET_NUM_OF_TABLEGROUP(q) taosArrayGetSize((q)->tableqinfoGroupInfo.pGroupList) #define QUERY_IS_INTERVAL_QUERY(_q) ((_q)->interval.interval > 0) +#define TSKEY_MAX_ADD(a,b) \ +do { \ + if (a < 0) { a = a + b; break;} \ + if (sizeof(a) == sizeof(int32_t)) { \ + if((b) > 0 && ((b) >= INT32_MAX - (a))){\ + a = INT32_MAX; \ + } else { \ + a = a + b; \ + } \ + } else { \ + if((b) > 0 && ((b) >= INT64_MAX - (a))){\ + a = INT64_MAX; \ + } else { \ + a = a + b; \ + } \ + } \ +} while(0) + +#define TSKEY_MIN_SUB(a,b) \ +do { \ + if (a >= 0) { a = a + b; break;} \ + if (sizeof(a) == sizeof(int32_t)){ \ + if((b) < 0 && ((b) <= INT32_MIN - (a))){\ + a = INT32_MIN; \ + } else { \ + a = a + b; \ + } \ + } else { \ + if((b) < 0 && ((b) <= INT64_MIN-(a))) {\ + a = INT64_MIN; \ + } else { \ + a = a + b; \ + } \ + } \ +} while (0) + uint64_t queryHandleId = 0; int32_t getMaximumIdleDurationSec() { return tsShellActivityTimer * 2; } - int64_t genQueryId(void) { int64_t uid = 0; int64_t did = tsDnodeId; @@ -3124,7 +3159,9 @@ void setTagValue(SOperatorInfo* pOperatorInfo, void *pTable, SQLFunctionCtx* pCt || pLocalExprInfo->base.resType == TSDB_DATA_TYPE_TIMESTAMP) { memcpy(pRuntimeEnv->tagVal + offset, &pCtx[idx].tag.i64, pLocalExprInfo->base.resBytes); } else { - memcpy(pRuntimeEnv->tagVal + offset, pCtx[idx].tag.pz, pCtx[idx].tag.nLen); + if (pCtx[idx].tag.pz != NULL) { + memcpy(pRuntimeEnv->tagVal + offset, pCtx[idx].tag.pz, pCtx[idx].tag.nLen); + } } offset += pLocalExprInfo->base.resBytes; @@ -3934,8 +3971,8 @@ static void toSSDataBlock(SGroupResInfo *pGroupResInfo, SQueryRuntimeEnv* pRunti // refactor : extract method SColumnInfoData* pInfoData = taosArrayGet(pBlock->pDataBlock, 0); - - if (pInfoData->info.type == TSDB_DATA_TYPE_TIMESTAMP) { + //add condition (pBlock->info.rows >= 1) just to runtime happy + if (pInfoData->info.type == TSDB_DATA_TYPE_TIMESTAMP && pBlock->info.rows >= 1) { STimeWindow* w = &pBlock->info.window; w->skey = 
*(int64_t*)pInfoData->pData; w->ekey = *(int64_t*)(((char*)pInfoData->pData) + TSDB_KEYSIZE * (pBlock->info.rows - 1)); @@ -5273,7 +5310,15 @@ static SSDataBlock* doSTableAggregate(void* param, bool* newgroup) { // the pDataBlock are always the same one, no need to call this again setInputDataBlock(pOperator, pInfo->pCtx, pBlock, order); - TSKEY key = QUERY_IS_ASC_QUERY(pQueryAttr)? pBlock->info.window.ekey + 1:pBlock->info.window.skey-1; + TSKEY key = 0; + if (QUERY_IS_ASC_QUERY(pQueryAttr)) { + key = pBlock->info.window.ekey; + TSKEY_MAX_ADD(key, 1); + } else { + key = pBlock->info.window.skey; + TSKEY_MIN_SUB(key, -1); + } + setExecutionContext(pRuntimeEnv, pInfo, pOperator->numOfOutput, pRuntimeEnv->current->groupIndex, key); doAggregateImpl(pOperator, pQueryAttr->window.skey, pInfo->pCtx, pBlock); } diff --git a/src/tsdb/src/tsdbMain.c b/src/tsdb/src/tsdbMain.c index 8266a7c20f..f995295745 100644 --- a/src/tsdb/src/tsdbMain.c +++ b/src/tsdb/src/tsdbMain.c @@ -991,4 +991,4 @@ int tsdbCacheLastData(STsdbRepo *pRepo, STsdbCfg* oldCfg) { } return 0; -} \ No newline at end of file +} diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index 0957a777e3..7a595da39a 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -640,7 +640,7 @@ static STableGroupInfo* trimTableGroup(STimeWindow* window, STableGroupInfo* pGr size_t numOfGroup = taosArrayGetSize(pGroupList->pGroupList); STableGroupInfo* pNew = calloc(1, sizeof(STableGroupInfo)); - pNew->pGroupList = taosArrayInit(numOfGroup, sizeof(SArray)); + pNew->pGroupList = taosArrayInit(numOfGroup, POINTER_BYTES); for(int32_t i = 0; i < numOfGroup; ++i) { SArray* oneGroup = taosArrayGetP(pGroupList->pGroupList, i); @@ -3383,11 +3383,13 @@ static int32_t tableGroupComparFn(const void *p1, const void *p2, const void *pa type = TSDB_DATA_TYPE_BINARY; bytes = tGetTbnameColumnSchema()->bytes; } else { - STColumn* pCol = schemaColAt(pTableGroupSupp->pTagSchema, colIndex); - bytes = pCol->bytes; - type = pCol->type; - f1 = tdGetKVRowValOfCol(pTable1->tagVal, pCol->colId); - f2 = tdGetKVRowValOfCol(pTable2->tagVal, pCol->colId); + if (pTableGroupSupp->pTagSchema && colIndex < pTableGroupSupp->pTagSchema->numOfCols) { + STColumn* pCol = schemaColAt(pTableGroupSupp->pTagSchema, colIndex); + bytes = pCol->bytes; + type = pCol->type; + f1 = tdGetKVRowValOfCol(pTable1->tagVal, pCol->colId); + f2 = tdGetKVRowValOfCol(pTable2->tagVal, pCol->colId); + } } // this tags value may be NULL diff --git a/src/util/src/tcompression.c b/src/util/src/tcompression.c index a11f6a2f3d..92e85d1a31 100644 --- a/src/util/src/tcompression.c +++ b/src/util/src/tcompression.c @@ -159,7 +159,7 @@ int tsCompressINTImp(const char *const input, const int nelements, char *const o break; } // Get difference. - if (!safeInt64Add(curr_value, -prev_value)) goto _copy_and_exit; + if (!safeInt64Add(curr_value, -prev_value_tmp)) goto _copy_and_exit; int64_t diff = curr_value - prev_value_tmp; // Zigzag encode the value. 
@@ -993,4 +993,4 @@ int tsDecompressDoubleLossyImp(const char * input, int compressedSize, const int // decompressed with sz return tdszDecompress(SZ_DOUBLE, input + 1, compressedSize - 1, nelements, output); } -#endif \ No newline at end of file +#endif From 6a9d94fd97eec570e15c15ce5e2b01848ee60b48 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Thu, 22 Jul 2021 14:38:08 +0800 Subject: [PATCH 11/81] [TD-5474] fix runtime bug --- src/plugins/http/src/httpGcJson.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/plugins/http/src/httpGcJson.c b/src/plugins/http/src/httpGcJson.c index f33a994465..2d361d3794 100644 --- a/src/plugins/http/src/httpGcJson.c +++ b/src/plugins/http/src/httpGcJson.c @@ -199,7 +199,7 @@ bool gcBuildQueryJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result, for (int32_t i = dataFields; i >= 0; i--) { httpJsonItemToken(jsonBuf); - if (row[i] == NULL) { + if (row == NULL || i >= num_fields || row[i] == NULL) { httpJsonOriginString(jsonBuf, "null", 4); continue; } From 1754d66494913d98906410693546d56b9fb5aa14 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Thu, 22 Jul 2021 17:04:11 +0800 Subject: [PATCH 12/81] [TD-5458]:add duplicate key check for tag&field --- src/client/src/tscParseLineProtocol.c | 44 ++++++++++++++++++++++++--- tests/examples/c/schemaless.c | 40 ++++++++++++++++++++++++ 2 files changed, 79 insertions(+), 5 deletions(-) diff --git a/src/client/src/tscParseLineProtocol.c b/src/client/src/tscParseLineProtocol.c index cccc81274d..4db191f886 100644 --- a/src/client/src/tscParseLineProtocol.c +++ b/src/client/src/tscParseLineProtocol.c @@ -1489,7 +1489,31 @@ static int32_t parseSmlTimeStamp(TAOS_SML_KV **pTS, const char **index) { return ret; } -static int32_t parseSmlKey(TAOS_SML_KV *pKV, const char **index) { +static bool checkDuplicateKey(char *key, SHashObj *pHash) { + char *val = NULL; + char *cur = key; + char keyLower[TSDB_COL_NAME_LEN]; + size_t keyLen = 0; + while(*cur != '\0') { + keyLower[keyLen] = tolower(*cur); + keyLen++; + cur++; + } + keyLower[keyLen] = '\0'; + + val = taosHashGet(pHash, keyLower, keyLen); + if (val) { + tscError("Duplicate key:%s", keyLower); + return true; + } + + uint8_t dummy_val = 0; + taosHashPut(pHash, keyLower, strlen(key), &dummy_val, sizeof(uint8_t)); + + return false; +} + +static int32_t parseSmlKey(TAOS_SML_KV *pKV, const char **index, SHashObj *pHash) { const char *cur = *index; char key[TSDB_COL_NAME_LEN]; uint16_t len = 0; @@ -1518,6 +1542,10 @@ static int32_t parseSmlKey(TAOS_SML_KV *pKV, const char **index) { } key[len] = '\0'; + if (checkDuplicateKey(key, pHash)) { + return TSDB_CODE_TSC_LINE_SYNTAX_ERROR; + } + pKV->key = calloc(len + 1, 1); memcpy(pKV->key, key, len + 1); //tscDebug("Key:%s|len:%d", pKV->key, len); @@ -1608,7 +1636,8 @@ static int32_t parseSmlMeasurement(TAOS_SML_DATA_POINT *pSml, const char **index } static int32_t parseSmlKvPairs(TAOS_SML_KV **pKVs, int *num_kvs, - const char **index, bool isField, TAOS_SML_DATA_POINT* smlData) { + const char **index, bool isField, + TAOS_SML_DATA_POINT* smlData, SHashObj *pHash) { const char *cur = *index; int32_t ret = TSDB_CODE_SUCCESS; TAOS_SML_KV *pkv; @@ -1628,7 +1657,7 @@ static int32_t parseSmlKvPairs(TAOS_SML_KV **pKVs, int *num_kvs, } while (*cur != '\0') { - ret = parseSmlKey(pkv, &cur); + ret = parseSmlKey(pkv, &cur, pHash); if (ret) { tscError("Unable to parse key field"); goto error; @@ -1712,31 +1741,36 @@ int32_t tscParseLine(const char* sql, TAOS_SML_DATA_POINT* smlData) { int32_t ret = TSDB_CODE_SUCCESS; 
uint8_t has_tags = 0; TAOS_SML_KV *timestamp = NULL; + SHashObj *keyHashTable = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, false); ret = parseSmlMeasurement(smlData, &index, &has_tags); if (ret) { tscError("Unable to parse measurement"); + taosHashCleanup(keyHashTable); return ret; } tscDebug("Parse measurement finished, has_tags:%d", has_tags); //Parse Tags if (has_tags) { - ret = parseSmlKvPairs(&smlData->tags, &smlData->tagNum, &index, false, smlData); + ret = parseSmlKvPairs(&smlData->tags, &smlData->tagNum, &index, false, smlData, keyHashTable); if (ret) { tscError("Unable to parse tag"); + taosHashCleanup(keyHashTable); return ret; } } tscDebug("Parse tags finished, num of tags:%d", smlData->tagNum); //Parse fields - ret = parseSmlKvPairs(&smlData->fields, &smlData->fieldNum, &index, true, smlData); + ret = parseSmlKvPairs(&smlData->fields, &smlData->fieldNum, &index, true, smlData, keyHashTable); if (ret) { tscError("Unable to parse field"); + taosHashCleanup(keyHashTable); return ret; } tscDebug("Parse fields finished, num of fields:%d", smlData->fieldNum); + taosHashCleanup(keyHashTable); //Parse timestamp ret = parseSmlTimeStamp(×tamp, &index); diff --git a/tests/examples/c/schemaless.c b/tests/examples/c/schemaless.c index d6450914df..1be44bf8e8 100644 --- a/tests/examples/c/schemaless.c +++ b/tests/examples/c/schemaless.c @@ -157,5 +157,45 @@ int main(int argc, char* argv[]) { return -1; } + //Duplicate key check; + char* lines_003_1[] = { + "std,id=\"std_3_1\",t1=4i64,Id=\"std\",t2=true c1=true 1626006834s" + }; + + code = taos_insert_lines(taos, lines_003_1 , sizeof(lines_003_1)/sizeof(char*)); + if (0 == code) { + printf("taos_insert_lines() lines_003_1 return code:%d (%s)\n", code, (char*)tstrerror(code)); + return -1; + } + + char* lines_003_2[] = { + "std,id=\"std_3_2\",tag1=4i64,Tag2=true,tAg3=2,TaG2=\"dup!\" c1=true 1626006834s" + }; + + code = taos_insert_lines(taos, lines_003_2 , sizeof(lines_003_2)/sizeof(char*)); + if (0 == code) { + printf("taos_insert_lines() lines_003_2 return code:%d (%s)\n", code, (char*)tstrerror(code)); + return -1; + } + + char* lines_003_3[] = { + "std,id=\"std_3_3\",tag1=4i64 field1=true,Field2=2,FIElD1=\"dup!\",fIeLd4=true 1626006834s" + }; + + code = taos_insert_lines(taos, lines_003_3 , sizeof(lines_003_3)/sizeof(char*)); + if (0 == code) { + printf("taos_insert_lines() lines_003_3 return code:%d (%s)\n", code, (char*)tstrerror(code)); + return -1; + } + + char* lines_003_4[] = { + "std,id=\"std_3_4\",tag1=4i64,dupkey=4i16,tag2=T field1=true,dUpkEy=1e3f32,field2=\"1234\" 1626006834s" + }; + + code = taos_insert_lines(taos, lines_003_4 , sizeof(lines_003_4)/sizeof(char*)); + if (0 == code) { + printf("taos_insert_lines() lines_003_4 return code:%d (%s)\n", code, (char*)tstrerror(code)); + return -1; + } return 0; } From e2635df55df1a795102c2a48ec69a55e1fdb2d58 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Thu, 22 Jul 2021 19:25:11 +0800 Subject: [PATCH 13/81] [TD-5438] fix bug --- src/client/src/tscSQLParser.c | 3 +-- tests/pytest/query/queryError.py | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 513a116cf7..c668619eb6 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -5160,7 +5160,6 @@ int32_t validateWhereNode(SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SSqlObj* pSq int32_t ret = TSDB_CODE_SUCCESS; - pQueryInfo->onlyHasTagCond = true; // tags query condition may be larger than 
512bytes, therefore, we need to prepare enough large space SStringBuilder sb; memset(&sb, 0, sizeof(sb)); SCondExpr condExpr = {0}; @@ -8571,7 +8570,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf if (validateGroupbyNode(pQueryInfo, pSqlNode->pGroupby, pCmd) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_OPERATION; } - + pQueryInfo->onlyHasTagCond = true; // set where info if (pSqlNode->pWhere != NULL) { if (validateWhereNode(pQueryInfo, &pSqlNode->pWhere, pSql) != TSDB_CODE_SUCCESS) { diff --git a/tests/pytest/query/queryError.py b/tests/pytest/query/queryError.py index 539ce5141f..ac78c0518f 100644 --- a/tests/pytest/query/queryError.py +++ b/tests/pytest/query/queryError.py @@ -51,7 +51,7 @@ class TDTestCase: tdSql.error("select last_row as latest from st") # query distinct on normal colnum - tdSql.error("select distinct tagtype from st") + #tdSql.error("select distinct tagtype from st") # query .. order by non-time field tdSql.error("select * from st order by name") From 1bdd6838724d0fa00f4948bd3c702c59b97fe44a Mon Sep 17 00:00:00 2001 From: wenzhouwww Date: Thu, 22 Jul 2021 19:59:36 +0800 Subject: [PATCH 14/81] [TD-5369] taosdemo test case for nano support , the sql files is must be here! --- tests/pytest/fulltest.sh | 8 + .../tools/taosdemoAllTest/nano_samples.csv | 100 +++++++++++ .../tools/taosdemoAllTest/nano_sampletags.csv | 100 +++++++++++ .../taosdemoTestNanoCreateDB.sql | 7 + .../taosdemoTestNanoDatabase.json | 88 ++++++++++ .../taosdemoTestNanoDatabaseInsertForSub.json | 84 +++++++++ .../taosdemoTestNanoDatabaseNow.json | 62 +++++++ .../taosdemoTestNanoDatabasecsv.json | 84 +++++++++ .../taosdemoTestSupportNanoInsert.py | 162 ++++++++++++++++++ .../taosdemoTestSupportNanoQuery.json | 92 ++++++++++ .../taosdemoTestSupportNanoQuery.py | 157 +++++++++++++++++ .../taosdemoTestSupportNanoQuerycsv.json | 110 ++++++++++++ .../taosdemoTestSupportNanoSubscribe.json | 32 ++++ .../taosdemoTestSupportNanosubscribe.py | 123 +++++++++++++ 14 files changed, 1209 insertions(+) create mode 100644 tests/pytest/tools/taosdemoAllTest/nano_samples.csv create mode 100644 tests/pytest/tools/taosdemoAllTest/nano_sampletags.csv create mode 100644 tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoCreateDB.sql create mode 100644 tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabase.json create mode 100644 tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseInsertForSub.json create mode 100644 tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseNow.json create mode 100644 tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabasecsv.json create mode 100644 tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py create mode 100644 tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.json create mode 100644 tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.py create mode 100644 tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuerycsv.json create mode 100644 tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoSubscribe.json create mode 100644 tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index 80d45fbc31..99a15e2e71 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -361,4 +361,12 @@ python3 test.py -f alter/alter_keep.py python3 test.py -f alter/alter_cacheLastRow.py python3 ./test.py -f query/querySession.py python3 test.py -f alter/alter_create_exception.py + +# nano support 
+python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py +python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.py +python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py + + #======================p4-end=============== +python3 test.py -f tools/taosdemoAllTest/pytest.py \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/nano_samples.csv b/tests/pytest/tools/taosdemoAllTest/nano_samples.csv new file mode 100644 index 0000000000..5fc779b41b --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/nano_samples.csv @@ -0,0 +1,100 @@ +8.855,"binary_str0" ,1626870128248246976 +8.75,"binary_str1" ,1626870128249060032 +5.44,"binary_str2" ,1626870128249067968 +8.45,"binary_str3" ,1626870128249072064 +4.07,"binary_str4" ,1626870128249075904 +6.97,"binary_str5" ,1626870128249078976 +6.86,"binary_str6" ,1626870128249082048 +1.585,"binary_str7" ,1626870128249085120 +1.4,"binary_str8" ,1626870128249087936 +5.135,"binary_str9" ,1626870128249092032 +3.15,"binary_str10" ,1626870128249095104 +1.765,"binary_str11" ,1626870128249097920 +7.71,"binary_str12" ,1626870128249100992 +3.91,"binary_str13" ,1626870128249104064 +5.615,"binary_str14" ,1626870128249106880 +9.495,"binary_str15" ,1626870128249109952 +3.825,"binary_str16" ,1626870128249113024 +1.94,"binary_str17" ,1626870128249117120 +5.385,"binary_str18" ,1626870128249119936 +7.075,"binary_str19" ,1626870128249123008 +5.715,"binary_str20" ,1626870128249126080 +1.83,"binary_str21" ,1626870128249128896 +6.365,"binary_str22" ,1626870128249131968 +6.55,"binary_str23" ,1626870128249135040 +6.315,"binary_str24" ,1626870128249138112 +3.82,"binary_str25" ,1626870128249140928 +2.455,"binary_str26" ,1626870128249145024 +7.795,"binary_str27" ,1626870128249148096 +2.47,"binary_str28" ,1626870128249150912 +1.37,"binary_str29" ,1626870128249155008 +5.39,"binary_str30" ,1626870128249158080 +5.13,"binary_str31" ,1626870128249160896 +4.09,"binary_str32" ,1626870128249163968 +5.855,"binary_str33" ,1626870128249167040 +0.17,"binary_str34" ,1626870128249170112 +1.955,"binary_str35" ,1626870128249173952 +0.585,"binary_str36" ,1626870128249178048 +0.33,"binary_str37" ,1626870128249181120 +7.925,"binary_str38" ,1626870128249183936 +9.685,"binary_str39" ,1626870128249187008 +2.6,"binary_str40" ,1626870128249191104 +5.705,"binary_str41" ,1626870128249193920 +3.965,"binary_str42" ,1626870128249196992 +4.43,"binary_str43" ,1626870128249200064 +8.73,"binary_str44" ,1626870128249202880 +3.105,"binary_str45" ,1626870128249205952 +9.39,"binary_str46" ,1626870128249209024 +2.825,"binary_str47" ,1626870128249212096 +9.675,"binary_str48" ,1626870128249214912 +9.99,"binary_str49" ,1626870128249217984 +4.51,"binary_str50" ,1626870128249221056 +4.94,"binary_str51" ,1626870128249223872 +7.72,"binary_str52" ,1626870128249226944 +4.135,"binary_str53" ,1626870128249231040 +2.325,"binary_str54" ,1626870128249234112 +4.585,"binary_str55" ,1626870128249236928 +8.76,"binary_str56" ,1626870128249240000 +4.715,"binary_str57" ,1626870128249243072 +0.56,"binary_str58" ,1626870128249245888 +5.35,"binary_str59" ,1626870128249249984 +5.075,"binary_str60" ,1626870128249253056 +6.665,"binary_str61" ,1626870128249256128 +7.13,"binary_str62" ,1626870128249258944 +2.775,"binary_str63" ,1626870128249262016 +5.775,"binary_str64" ,1626870128249265088 +1.62,"binary_str65" ,1626870128249267904 +1.625,"binary_str66" ,1626870128249270976 +8.15,"binary_str67" ,1626870128249274048 +0.75,"binary_str68" ,1626870128249277120 
+3.265,"binary_str69" ,1626870128249280960 +8.585,"binary_str70" ,1626870128249284032 +1.88,"binary_str71" ,1626870128249287104 +8.44,"binary_str72" ,1626870128249289920 +5.12,"binary_str73" ,1626870128249295040 +2.58,"binary_str74" ,1626870128249298112 +9.42,"binary_str75" ,1626870128249300928 +1.765,"binary_str76" ,1626870128249304000 +2.66,"binary_str77" ,1626870128249308096 +1.405,"binary_str78" ,1626870128249310912 +5.595,"binary_str79" ,1626870128249315008 +2.28,"binary_str80" ,1626870128249318080 +9.24,"binary_str81" ,1626870128249320896 +9.03,"binary_str82" ,1626870128249323968 +6.055,"binary_str83" ,1626870128249327040 +1.74,"binary_str84" ,1626870128249330112 +5.77,"binary_str85" ,1626870128249332928 +1.97,"binary_str86" ,1626870128249336000 +0.3,"binary_str87" ,1626870128249339072 +7.145,"binary_str88" ,1626870128249342912 +0.88,"binary_str89" ,1626870128249345984 +8.025,"binary_str90" ,1626870128249349056 +4.81,"binary_str91" ,1626870128249351872 +0.725,"binary_str92" ,1626870128249355968 +3.85,"binary_str93" ,1626870128249359040 +9.455,"binary_str94" ,1626870128249362112 +2.265,"binary_str95" ,1626870128249364928 +3.985,"binary_str96" ,1626870128249368000 +9.375,"binary_str97" ,1626870128249371072 +0.2,"binary_str98" ,1626870128249373888 +6.95,"binary_str99" ,1626870128249377984 diff --git a/tests/pytest/tools/taosdemoAllTest/nano_sampletags.csv b/tests/pytest/tools/taosdemoAllTest/nano_sampletags.csv new file mode 100644 index 0000000000..18fb855d6d --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/nano_sampletags.csv @@ -0,0 +1,100 @@ +"string0",7,8.615 +"string1",4,9.895 +"string2",3,2.92 +"string3",3,5.62 +"string4",7,1.615 +"string5",6,1.45 +"string6",5,7.48 +"string7",7,3.01 +"string8",5,4.76 +"string9",10,7.09 +"string10",2,8.38 +"string11",7,8.65 +"string12",5,5.025 +"string13",10,5.765 +"string14",2,4.57 +"string15",2,1.03 +"string16",7,6.98 +"string17",10,0.23 +"string18",7,5.815 +"string19",1,2.37 +"string20",10,8.865 +"string21",3,1.235 +"string22",2,8.62 +"string23",9,1.045 +"string24",8,4.34 +"string25",1,5.455 +"string26",2,4.475 +"string27",1,6.95 +"string28",2,3.39 +"string29",3,6.79 +"string30",7,9.735 +"string31",1,9.79 +"string32",10,9.955 +"string33",1,5.095 +"string34",3,3.86 +"string35",9,5.105 +"string36",10,4.22 +"string37",1,2.78 +"string38",9,6.345 +"string39",1,0.975 +"string40",5,6.16 +"string41",4,7.735 +"string42",5,6.6 +"string43",8,2.845 +"string44",1,0.655 +"string45",3,2.995 +"string46",9,3.6 +"string47",8,3.47 +"string48",3,7.98 +"string49",6,2.225 +"string50",9,5.44 +"string51",4,6.335 +"string52",3,2.955 +"string53",1,0.565 +"string54",6,5.575 +"string55",6,9.905 +"string56",9,6.025 +"string57",8,0.94 +"string58",10,0.15 +"string59",8,1.555 +"string60",4,2.28 +"string61",2,8.29 +"string62",9,6.22 +"string63",6,3.35 +"string64",10,6.7 +"string65",3,9.345 +"string66",7,9.815 +"string67",1,5.365 +"string68",10,3.81 +"string69",1,6.405 +"string70",8,2.715 +"string71",3,8.58 +"string72",8,6.34 +"string73",2,7.49 +"string74",4,8.64 +"string75",3,8.995 +"string76",7,3.465 +"string77",1,7.64 +"string78",6,3.65 +"string79",6,1.4 +"string80",6,5.875 +"string81",2,1.22 +"string82",5,7.87 +"string83",9,8.41 +"string84",9,8.9 +"string85",9,3.89 +"string86",2,5.0 +"string87",2,4.495 +"string88",4,2.835 +"string89",3,5.895 +"string90",7,8.41 +"string91",5,5.125 +"string92",7,9.165 +"string93",5,8.315 +"string94",10,7.485 +"string95",7,4.635 +"string96",2,6.015 +"string97",8,0.595 +"string98",3,8.79 +"string99",4,1.72 diff --git 
a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoCreateDB.sql b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoCreateDB.sql new file mode 100644 index 0000000000..e79e09592c --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoCreateDB.sql @@ -0,0 +1,7 @@ +drop database if exists nsdbsql; +create database nsdbsql precision "ns" keep 36 days 6 update 1; +use nsdbsql; +CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupdId int); +CREATE TABLE d1001 USING meters TAGS ("Beijing.Chaoyang", 2); +INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 10.2, 219, 0.32); +INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 85, 32, 0.76); diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabase.json b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabase.json new file mode 100644 index 0000000000..9010415fe6 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabase.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 10, + "thread_count_create_tbl": 10, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "nsdb", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ns", + "keep": 36, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 1000, + "childtable_prefix": "tb0_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 10000, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10000000, + "start_timestamp": "2021-07-01 00:00:00.000", + "sample_format": "", + "sample_file": "", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2}, + {"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1}, + {"type": "BOOL"},{"type": "NCHAR","len":16}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 1000, + "childtable_prefix": "tb1_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 10000, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 10, + "disorder_range": 1000, + "timestamp_step": 10000000, + "start_timestamp": "2021-07-01 00:00:00.000", + "sample_format": "", + "sample_file": "", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2}, + {"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", 
"count":1},{"type": "TINYINT", "count":1}, + {"type": "BOOL"},{"type": "NCHAR","len":16}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseInsertForSub.json b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseInsertForSub.json new file mode 100644 index 0000000000..0726f3905d --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseInsertForSub.json @@ -0,0 +1,84 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 10, + "thread_count_create_tbl": 10, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "subnsdb", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ns", + "keep": 36, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "tb0_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "samples", + "insert_mode": "taosc", + "insert_rows": 10, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10000000, + "start_timestamp": "2021-07-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./tools/taosdemoAllTest/nano_samples.csv", + "tags_file": "./tools/taosdemoAllTest/nano_sampletags.csv", + "columns": [{"type": "DOUBLE"}, {"type": "BINARY", "len": 64, "count":1}, {"type": "TIMESTAMP", "count":1}], + "tags": [{"type": "BINARY", "len": 16, "count":1},{"type": "INT"},{"type": "DOUBLE"}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "tb1_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "samples", + "insert_mode": "taosc", + "insert_rows": 10, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 10, + "disorder_range": 1000, + "timestamp_step": 10000000, + "start_timestamp": "2021-07-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./tools/taosdemoAllTest/nano_samples.csv", + "tags_file": "./tools/taosdemoAllTest/nano_sampletags.csv", + "columns": [{"type": "DOUBLE"}, {"type": "BINARY", "len": 64, "count":1}, {"type": "TIMESTAMP", "count":1}], + "tags": [{"type": "BINARY", "len": 16, "count":1},{"type": "INT"},{"type": "DOUBLE"}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseNow.json b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseNow.json new file mode 100644 index 0000000000..d2542a0eba --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseNow.json @@ -0,0 +1,62 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 10, + "thread_count_create_tbl": 10, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100, + 
"num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "nsdb2", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ns", + "keep": 36, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 1000, + "childtable_prefix": "tb0_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 100, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "now", + "sample_format": "", + "sample_file": "", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2}, + {"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1}, + {"type": "BOOL"},{"type": "NCHAR","len":16}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabasecsv.json b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabasecsv.json new file mode 100644 index 0000000000..867619ed8c --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabasecsv.json @@ -0,0 +1,84 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 10, + "thread_count_create_tbl": 10, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "nsdbcsv", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ns", + "keep": 36, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "tb0_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "samples", + "insert_mode": "taosc", + "insert_rows": 100, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10000000, + "start_timestamp": "2021-07-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./tools/taosdemoAllTest/nano_samples.csv", + "tags_file": "./tools/taosdemoAllTest/nano_sampletags.csv", + "columns": [{"type": "DOUBLE"}, {"type": "BINARY", "len": 64, "count":1}, {"type": "TIMESTAMP", "count":1}], + "tags": [{"type": "BINARY", "len": 16, "count":1},{"type": "INT"},{"type": "DOUBLE"}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "tb1_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "samples", + "insert_mode": "taosc", + "insert_rows": 100, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "insert_interval":0, + "max_sql_len": 1024000, + 
"disorder_ratio": 10, + "disorder_range": 1000, + "timestamp_step": 10000000, + "start_timestamp": "2021-07-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./tools/taosdemoAllTest/nano_samples.csv", + "tags_file": "./tools/taosdemoAllTest/nano_sampletags.csv", + "columns": [{"type": "DOUBLE"}, {"type": "BINARY", "len": 64, "count":1}, {"type": "TIMESTAMP", "count":1}], + "tags": [{"type": "BINARY", "len": 16, "count":1},{"type": "INT"},{"type": "DOUBLE"}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py new file mode 100644 index 0000000000..010308d037 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py @@ -0,0 +1,162 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + def run(self): + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + binPath = buildPath+ "/build/bin/" + + + # insert: create one or mutiple tables per sql and insert multiple rows per sql + # insert data from a special timestamp + # check stable stb0 + + os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoTestNanoDatabase.json -y " % binPath) + tdSql.execute("use nsdb") + tdSql.query("show stables") + tdSql.checkData(0, 4, 1000) + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0, 1000) + tdSql.query("select count(*) from tb0_0") + tdSql.checkData(0, 0, 10000) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 10000000) + tdSql.query("describe stb0") + tdSql.getData(8, 1) + tdSql.checkDataType(8, 1,"TIMESTAMP") + tdSql.query("select last(ts) from stb0") + tdSql.getData(0, 0) + + # check stable stb1 which is insert with disord + + tdSql.query("select count (tbname) from stb1") + tdSql.checkData(0, 0, 1000) + tdSql.query("select count(*) from tb1_0") + tdSql.checkData(0, 0, 10000) + tdSql.query("select count(*) from stb1") + tdSql.checkData(0, 0, 10000000) + # check c8 is an nano timestamp + tdSql.query("describe stb1") + tdSql.checkDataType(8, 1,"TIMESTAMP") + # check insert timestamp_step is nano_second + tdSql.query("select last(ts) from stb1") + tdSql.checkData(0, 0,"2021-07-01 00:01:39.990000000") + + # insert data from now time + + # check stable stb0 + 
os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoTestNanoDatabaseNow.json -y " % binPath) + tdSql.execute("use nsdb2") + tdSql.query("show stables") + tdSql.checkData(0, 4, 1000) + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0, 1000) + tdSql.query("select count(*) from tb0_0") + tdSql.checkData(0, 0, 100) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 100000) + # check c8 is an nano timestamp + tdSql.query("describe stb0") + tdSql.checkDataType(8,1,"TIMESTAMP") + + # insert by csv files and timetamp is long int , strings in ts and cols + + os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoTestNanoDatabasecsv.json -y " % binPath) + tdSql.execute("use nsdbcsv") + tdSql.query("show stables") + tdSql.checkData(0, 4, 100) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 10000) + tdSql.query("describe stb0") + tdSql.checkDataType(3, 1, "TIMESTAMP") + tdSql.query("select count(*) from stb0 where ts > \"2021-07-01 00:00:00.490000000\"") + tdSql.checkData(0, 0, 5000) + tdSql.query("select count(*) from stb0 where ts >now -22d-1h-3s") + tdSql.checkData(0, 0, 10000) + tdSql.query("select count(*) from stb0 where ts >now -22d-1h-3s") + tdSql.checkData(0, 0, 10000) + tdSql.query("select count(*) from stb0 where ts < 1626918583000000000") + tdSql.checkData(0, 0, 10000) + + os.system("rm -rf ./insert_res.txt") + os.system("rm -rf tools/taosdemoAllTest/taosdemoTestSupportNano*.py.sql") + + + + + + + + + + + # taosdemo test insert with command and parameter , detals show taosdemo --help + + os.system("%staosdemo -u root -P taosdata -p 6030 -h vm84 -a 1 -m pre -n 10 -T 20 -t 60 -o res.txt -y " % binPath) + tdSql.query("select count(*) from test.meters") + tdSql.checkData(0, 0, 600) + + os.system("%staosdemo -u root -P taosdata -p 6030 -h vm84 -a 1 -m pre -n 10 -T 20 -t 60 -o res.txt -y " % binPath) + tdSql.query("select count(*) from test.meters") + tdSql.checkData(0, 0, 600) + + # check taosdemo -s + + os.system("%staosdemo -s tools/taosdemoAllTest/taosdemoTestNanoCreateDB.sql -y " % binPath) + tdSql.query("select count(*) from nsdbsql.meters") + tdSql.checkData(0, 0, 2) + + + + os.system("rm -rf ./res.txt") + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.json b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.json new file mode 100644 index 0000000000..fff1017588 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.json @@ -0,0 +1,92 @@ +{ + "filetype": "query", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "confirm_parameter_prompt": "no", + "databases": "nsdb", + "query_times": 10, + "query_mode": "taosc", + "specified_table_query": { + "query_interval": 1, + "concurrent": 2, + "sqls": [ + { + "sql": "select count(*) from stb0 where ts>\"2021-07-01 00:01:00.000000000 \" ;", + "result": "./query_res0.txt" + }, + { + "sql": "select count(*) from stb0 where ts>\"2021-07-01 00:01:00.000000000\" and ts <=\"2021-07-01 00:01:10.000000000\" ;", + "result": "./query_res1.txt" + }, + { + "sql": "select count(*) from stb0 where ts>now-20d ;", + "result": "./query_res2.txt" + }, + { + "sql": "select max(c10) from stb0;", + "result": "./query_res3.txt" + }, + { + "sql": "select min(c1) from stb0;", + "result": "./query_res4.txt" 
+ }, + { + "sql": "select avg(c1) from stb0;", + "result": "./query_res5.txt" + }, + { + "sql":"select count(*) from stb0 group by tbname;", + "result":"./query_res6.txt" + } + + ] + }, + "super_table_query": { + "stblname": "stb0", + "query_interval": 0, + "threads": 4, + "sqls": [ + { + "sql": "select count(*) from xxxx where ts>\"2021-07-01 00:01:00.000000000 \" ;", + "result": "./query_res_tb0.txt" + }, + { + "sql":"select count(*) from xxxx where ts>\"2021-07-01 00:01:00.000000000\" and ts <=\"2021-07-01 00:01:10.000000000\" ;", + "result": "./query_res_tb1.txt" + }, + { + "sql":"select first(*) from xxxx ;", + "result": "./query_res_tb2.txt" + }, + { + "sql":"select last(*) from xxxx;", + "result": "./query_res_tb3.txt" + + }, + { + "sql":"select last_row(*) from xxxx ;", + "result": "./query_res_tb4.txt" + + }, + { + "sql":"select max(c10) from xxxx ;", + "result": "./query_res_tb5.txt" + + }, + { + "sql":"select min(c1) from xxxx ;", + "result": "./query_res_tb6.txt" + + }, + { + "sql":"select avg(c10) from xxxx ;", + "result": "./query_res_tb7.txt" + + } + + ] + } + } \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.py new file mode 100644 index 0000000000..21a7037ce6 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.py @@ -0,0 +1,157 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + def run(self): + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + binPath = buildPath+ "/build/bin/" + + # query: query test for nanoSecond with where and max min groupby order + os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoTestNanoDatabase.json -y " % binPath) + + tdSql.execute("use nsdb") + + # use where to filter + + tdSql.query("select count(*) from stb0 where ts>\"2021-07-01 00:01:00.000000000 \" ;") + tdSql.checkData(0, 0, 3999000) + tdSql.query("select count(*) from stb0 where ts>\"2021-07-01 00:01:00.000000000\" and ts <=\"2021-07-01 00:01:10.000000000\" ;") + tdSql.checkData(0, 0, 1000000) + + tdSql.query("select count(*) from tb0_0 where ts>\"2021-07-01 00:01:00.000000000 \" ;") + tdSql.checkData(0, 0, 3999) + tdSql.query("select count(*) from tb0_0 where 
ts>\"2021-07-01 00:01:00.000000000\" and ts <=\"2021-07-01 00:01:10.000000000\" ;") + tdSql.checkData(0, 0, 1000) + + + # select max min avg from special col + tdSql.query("select max(c10) from stb0;") + print("select max(c10) from stb0 : " , tdSql.getData(0, 0)) + + tdSql.query("select max(c10) from tb0_0;") + print("select max(c10) from tb0_0 : " , tdSql.getData(0, 0)) + + + tdSql.query("select min(c1) from stb0;") + print( "select min(c1) from stb0 : " , tdSql.getData(0, 0)) + + tdSql.query("select min(c1) from tb0_0;") + print( "select min(c1) from tb0_0 : " , tdSql.getData(0, 0)) + + tdSql.query("select avg(c1) from stb0;") + print( "select avg(c1) from stb0 : " , tdSql.getData(0, 0)) + + tdSql.query("select avg(c1) from tb0_0;") + print( "select avg(c1) from tb0_0 : " , tdSql.getData(0, 0)) + + tdSql.query("select count(*) from stb0 group by tbname;") + tdSql.checkData(0, 0, 10000) + tdSql.checkData(100, 0, 10000) + + # query : query above sqls by taosdemo and continuously + + os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.json -y " % binPath) + + + os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoTestNanoDatabasecsv.json -y " % binPath) + tdSql.execute("use nsdbcsv") + tdSql.query("show stables") + tdSql.checkData(0, 4, 100) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 10000) + tdSql.query("describe stb0") + tdSql.checkDataType(3, 1, "TIMESTAMP") + tdSql.query("select count(*) from stb0 where ts >\"2021-07-01 00:00:00.490000000\"") + tdSql.checkData(0, 0, 5000) + tdSql.query("select count(*) from stb0 where ts >now -22d-1h-3s") + tdSql.checkData(0, 0, 10000) + tdSql.query("select count(*) from stb0 where ts < 1626918583000000000") + tdSql.checkData(0, 0, 10000) + tdSql.execute('select count(*) from stb0 where c2 > 162687012800000000') + tdSql.execute('select count(*) from stb0 where c2 < 162687012800000000') + tdSql.execute('select count(*) from stb0 where c2 = 162687012800000000') + tdSql.execute('select count(*) from stb0 where c2 != 162687012800000000') + tdSql.execute('select count(*) from stb0 where c2 <> 162687012800000000') + tdSql.execute('select count(*) from stb0 where c2 > "2021-07-21 20:22:08.248246976"') + tdSql.execute('select count(*) from stb0 where c2 < "2021-07-21 20:22:08.248246976"') + tdSql.execute('select count(*) from stb0 where c2 = "2021-07-21 20:22:08.248246976"') + tdSql.execute('select count(*) from stb0 where c2 != "2021-07-21 20:22:08.248246976"') + tdSql.execute('select count(*) from stb0 where c2 <> "2021-07-21 20:22:08.248246976"') + tdSql.execute('select count(*) from stb0 where ts between "2021-07-01 00:00:00.000000000" and "2021-07-01 00:00:00.990000000"') + tdSql.execute('select count(*) from stb0 where ts between 1625068800000000000 and 1625068801000000000') + tdSql.query('select avg(c0) from stb0 interval(5000000000b)') + tdSql.checkRows(1) + + tdSql.query('select avg(c0) from stb0 interval(100000000b)') + tdSql.checkRows(10) + + tdSql.error('select avg(c0) from stb0 interval(1b)') + tdSql.error('select avg(c0) from stb0 interval(999b)') + + tdSql.query('select avg(c0) from stb0 interval(1000b)') + tdSql.checkRows(100) + + tdSql.query('select avg(c0) from stb0 interval(1u)') + tdSql.checkRows(100) + + tdSql.query('select avg(c0) from stb0 interval(100000000b) sliding (100000000b)') + tdSql.checkRows(10) + + # query : query above sqls by taosdemo and continuously + os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoTestSupportNanoQuerycsv.json -y " % binPath) + + os.system("rm -rf 
./query_res*.txt*") + os.system("rm -rf tools/taosdemoAllTest/*.py.sql") + + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuerycsv.json b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuerycsv.json new file mode 100644 index 0000000000..aa95837a33 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuerycsv.json @@ -0,0 +1,110 @@ +{ + "filetype": "query", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "confirm_parameter_prompt": "no", + "databases": "nsdbcsv", + "query_times": 10, + "query_mode": "taosc", + "specified_table_query": { + "query_interval": 1, + "concurrent": 2, + "sqls": [ + { + "sql": "select count(*) from stb0 where ts> \"2021-07-01 00:00:00.490000000\" ;", + "result": "./query_res0.txt" + }, + { + "sql": "select count(*) from stb0 where ts > now -22d-1h-3s ;", + "result": "./query_res1.txt" + }, + { + "sql": "select count(*) from stb0 where ts < 1626918583000000000 ;", + "result": "./query_res2.txt" + }, + { + "sql": "select count(*) from stb0 where c2 <> 162687012800000000';", + "result": "./query_res3.txt" + }, + { + "sql": "select count(*) from stb0 where c2 != \"2021-07-21 20:22:08.248246976\";", + "result": "./query_res4.txt" + }, + { + "sql": "select count(*) from stb0 where ts between \"2021-07-01 00:00:00.000000000\" and \"2021-07-01 00:00:00.990000000\";", + "result": "./query_res5.txt" + }, + { + "sql":"select count(*) from stb0 group by tbname;", + "result":"./query_res6.txt" + }, + { + "sql":"select count(*) from stb0 where ts between 1625068800000000000 and 1625068801000000000;", + "result":"./query_res7.txt" + }, + { + "sql":"select avg(c0) from stb0 interval(5000000000b);", + "result":"./query_res8.txt" + }, + { + "sql":"select avg(c0) from stb0 interval(100000000b) sliding (100000000b);", + "result":"./query_res9.txt" + } + + ] + }, + "super_table_query": { + "stblname": "stb0", + "query_interval": 0, + "threads": 4, + "sqls": [ + { + "sql": "select count(*) from xxxx where ts > \"2021-07-01 00:00:00.490000000\" ;", + "result": "./query_res_tb0.txt" + }, + { + "sql":"select count(*) from xxxx where ts between \"2021-07-01 00:00:00.000000000\" and \"2021-07-01 00:00:00.990000000\" ;", + "result": "./query_res_tb1.txt" + }, + { + "sql":"select first(*) from xxxx ;", + "result": "./query_res_tb2.txt" + }, + { + "sql":"select last(*) from xxxx;", + "result": "./query_res_tb3.txt" + + }, + { + "sql":"select last_row(*) from xxxx ;", + "result": "./query_res_tb4.txt" + + }, + { + "sql":"select max(c0) from xxxx ;", + "result": "./query_res_tb5.txt" + + }, + { + "sql":"select min(c0) from xxxx ;", + "result": "./query_res_tb6.txt" + + }, + { + "sql":"select avg(c0) from xxxx ;", + "result": "./query_res_tb7.txt" + + }, + { + "sql":"select avg(c0) from xxxx interval(100000000b) sliding (100000000b) ;", + "result": "./query_res_tb8.txt" + + } + + + ] + } + } \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoSubscribe.json b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoSubscribe.json new file mode 100644 index 0000000000..26d405b65b --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoSubscribe.json @@ -0,0 +1,32 @@ +{ + "filetype":"subscribe", + "cfgdir": "/etc/taos", + 
"host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "databases": "subnsdb", + "confirm_parameter_prompt": "no", + "specified_table_query": + { + "concurrent":2, + "mode":"sync", + "interval":10000, + "restart":"yes", + "keepProgress":"yes", + "sqls": [ + { + "sql": "select * from stb0 where ts>= \"2021-07-01 00:00:00.000000000\" ;", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb0 where ts > now -20d-1h-3s ;", + "result": "./subscribe_res1.txt" + }, + { + "sql": "select * from stb0 where ts < 1626918583000000000 ;", + "result": "./subscribe_res2.txt" + }] + + } +} diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py new file mode 100644 index 0000000000..f6324577c1 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py @@ -0,0 +1,123 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * +import time +from datetime import datetime +import subprocess + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + # get the number of subscriptions + def subTimes(self,filename): + self.filename = filename + command = 'cat %s |wc -l'% filename + times = int(subprocess.getstatusoutput(command)[1]) + return times + + # assert results + def assertCheck(self,filename,subResult,expectResult): + self.filename = filename + self.subResult = subResult + self.expectResult = expectResult + args0 = (filename, subResult, expectResult) + assert subResult == expectResult , "Queryfile:%s ,result is %s != expect: %s" % args0 + + def run(self): + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + binPath = buildPath+ "/build/bin/" + + # clear env + os.system("ps -ef |grep 'taosdemoAllTest/taosdemoTestSupportNanoSubscribe.json' |grep -v 'grep' |awk '{print $2}'|xargs kill -9") + os.system("rm -rf ./subscribe_res*") + os.system("rm -rf ./all_subscribe_res*") + + + # insert data + os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoTestNanoDatabaseInsertForSub.json" % binPath) + os.system("nohup %staosdemo -f tools/taosdemoAllTest/taosdemoTestSupportNanoSubscribe.json &" % binPath) + query_pid = int(subprocess.getstatusoutput('ps aux|grep "taosdemoAllTest/taosdemoTestSupportNanoSubscribe.json" |grep -v "grep"|awk \'{print 
$2}\'')[1]) + + + # merge result files + sleep(20) + os.system("cat subscribe_res0.txt* > all_subscribe_res0.txt") + os.system("cat subscribe_res1.txt* > all_subscribe_res1.txt") + os.system("cat subscribe_res2.txt* > all_subscribe_res2.txt") + + + # correct subscribeTimes testcase + subTimes0 = self.subTimes("all_subscribe_res0.txt") + self.assertCheck("all_subscribe_res0.txt",subTimes0 ,200) + + subTimes1 = self.subTimes("all_subscribe_res1.txt") + self.assertCheck("all_subscribe_res1.txt",subTimes1 ,0) + + subTimes2 = self.subTimes("all_subscribe_res2.txt") + self.assertCheck("all_subscribe_res2.txt",subTimes2 ,200) + + + # insert extral data + tdSql.execute("use subnsdb") + tdSql.execute("insert into tb0_0 values(now,100.1000,'subtest1',now-1s)") + sleep(1) + + os.system("cat subscribe_res0.txt* > all_subscribe_res0.txt") + subTimes0 = self.subTimes("all_subscribe_res0.txt") + self.assertCheck("all_subscribe_res0.txt",subTimes0 ,202) + + + + # correct data testcase + os.system("kill -9 %d" % query_pid) + sleep(3) + os.system("rm -rf ./subscribe_res*") + os.system("rm -rf ./all_subscribe*") + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) From c133eb1cfea4803c8b851eb4540de82f2bf5daf8 Mon Sep 17 00:00:00 2001 From: wenzhouwww Date: Fri, 23 Jul 2021 10:07:02 +0800 Subject: [PATCH 15/81] [TD-5369] modify an error about insert --- .../tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py index 010308d037..76b5871de6 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py @@ -113,10 +113,6 @@ class TDTestCase: tdSql.checkDataType(3, 1, "TIMESTAMP") tdSql.query("select count(*) from stb0 where ts > \"2021-07-01 00:00:00.490000000\"") tdSql.checkData(0, 0, 5000) - tdSql.query("select count(*) from stb0 where ts >now -22d-1h-3s") - tdSql.checkData(0, 0, 10000) - tdSql.query("select count(*) from stb0 where ts >now -22d-1h-3s") - tdSql.checkData(0, 0, 10000) tdSql.query("select count(*) from stb0 where ts < 1626918583000000000") tdSql.checkData(0, 0, 10000) From 07bf3966ec55d70721cf886849f74e56d1321ee8 Mon Sep 17 00:00:00 2001 From: happyguoxy Date: Fri, 23 Jul 2021 11:10:45 +0800 Subject: [PATCH 16/81] [TD-5213]:test 4096 columns with taosdemo --- .../TD-5213/insertSigcolumnsNum4096.json | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.json b/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.json index 03958f2e6f..25af3a1041 100755 --- a/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.json +++ b/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.json @@ -35,13 +35,13 @@ "super_tables": [{ "name": "stb_old", "child_table_exists":"no", - "childtable_count": 10, + "childtable_count": 1, "childtable_prefix": "stb_old_", "auto_create_table": "no", "batch_create_tbl_num": 5, "data_source": "rand", "insert_mode": "taosc", - "insert_rows": 100, + "insert_rows": 10, "childtable_limit": 0, "childtable_offset":0, "multi_thread_write_one_tbl": "no", @@ -60,13 +60,13 @@ },{ "name": "stb_new", "child_table_exists":"no", - "childtable_count": 
10, + "childtable_count": 1, "childtable_prefix": "stb_new_", "auto_create_table": "no", "batch_create_tbl_num": 5, "data_source": "rand", "insert_mode": "taosc", - "insert_rows": 100, + "insert_rows": 10, "childtable_limit": 0, "childtable_offset":0, "multi_thread_write_one_tbl": "no", @@ -85,13 +85,13 @@ },{ "name": "stb_mix", "child_table_exists":"no", - "childtable_count": 10, + "childtable_count": 1, "childtable_prefix": "stb_mix_", "auto_create_table": "no", "batch_create_tbl_num": 5, "data_source": "rand", "insert_mode": "taosc", - "insert_rows": 100, + "insert_rows": 10, "childtable_limit": 0, "childtable_offset":0, "multi_thread_write_one_tbl": "no", @@ -110,13 +110,13 @@ },{ "name": "stb_excel", "child_table_exists":"no", - "childtable_count": 10, + "childtable_count": 1, "childtable_prefix": "stb_excel_", "auto_create_table": "no", "batch_create_tbl_num": 5, "data_source": "sample", "insert_mode": "taosc", - "insert_rows": 100, + "insert_rows": 10, "childtable_limit": 0, "childtable_offset":0, "multi_thread_write_one_tbl": "no", From 0b7a625744e41a478c34430f86abedb771b4c15d Mon Sep 17 00:00:00 2001 From: happyguoxy Date: Fri, 23 Jul 2021 11:10:59 +0800 Subject: [PATCH 17/81] [TD-5213]:test 4096 columns with taosdemo --- .../TD-5213/insertSigcolumnsNum4096.py | 60 +++++++++---------- 1 file changed, 29 insertions(+), 31 deletions(-) diff --git a/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py b/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py index 511c274827..eb844b6fe2 100755 --- a/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py +++ b/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py @@ -13,6 +13,7 @@ import sys import os +import time from util.log import * from util.cases import * from util.sql import * @@ -50,29 +51,30 @@ class TDTestCase: #-N:regular table -d:database name -t:table num -n:rows num per table -l:col num -y:force #regular old && new - os.system("%staosdemo -N -d regular_old -t 1000 -n 10 -l 1023 -y" % binPath) + startTime = time.time() + os.system("%staosdemo -N -d regular_old -t 1 -n 10 -l 1023 -y" % binPath) tdSql.execute("use regular_old") tdSql.query("show tables;") - tdSql.checkRows(1000) + tdSql.checkRows(1) tdSql.query("select * from d0;") tdSql.checkCols(1024) tdSql.query("describe d0;") tdSql.checkRows(1024) - os.system("%staosdemo -N -d regular_new -t 1000 -n 10 -l 4095 -y" % binPath) + os.system("%staosdemo -N -d regular_new -t 1 -n 10 -l 4095 -y" % binPath) tdSql.execute("use regular_new") tdSql.query("show tables;") - tdSql.checkRows(1000) + tdSql.checkRows(1) tdSql.query("select * from d0;") tdSql.checkCols(4096) tdSql.query("describe d0;") tdSql.checkRows(4096) #super table -d:database name -t:table num -n:rows num per table -l:col num -y:force - os.system("%staosdemo -d super_old -t 1000 -n 10 -l 1021 -y" % binPath) + os.system("%staosdemo -d super_old -t 1 -n 10 -l 1021 -y" % binPath) tdSql.execute("use super_old") tdSql.query("show tables;") - tdSql.checkRows(1000) + tdSql.checkRows(1) tdSql.query("select * from meters;") tdSql.checkCols(1024) tdSql.query("select * from d0;") @@ -82,10 +84,10 @@ class TDTestCase: tdSql.query("describe d0;") tdSql.checkRows(1024) - os.system("%staosdemo -d super_new -t 1000 -n 10 -l 4093 -y" % binPath) + os.system("%staosdemo -d super_new -t 1 -n 10 -l 4093 -y" % binPath) tdSql.execute("use super_new") tdSql.query("show tables;") - tdSql.checkRows(1000) + tdSql.checkRows(1) tdSql.query("select * from meters;") 
tdSql.checkCols(4096) tdSql.query("select * from d0;") @@ -100,33 +102,29 @@ class TDTestCase: tdSql.query("describe stb_new1_1;") tdSql.checkRows(4096) - tdLog.info("stop dnode to commit data to disk") - tdDnodes.stop(1) - tdDnodes.start(1) - # insert: create one or mutiple tables per sql and insert multiple rows per sql # test case for https://jira.taosdata.com:18080/browse/TD-5213 os.system("%staosdemo -f tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.json -y " % binPath) tdSql.execute("use json") tdSql.query("select count (tbname) from stb_old") - tdSql.checkData(0, 0, 10) + tdSql.checkData(0, 0, 1) tdSql.query("select * from stb_old") - tdSql.checkRows(1000) + tdSql.checkRows(10) tdSql.checkCols(1024) tdSql.query("select count (tbname) from stb_new") - tdSql.checkData(0, 0, 10) + tdSql.checkData(0, 0, 1) tdSql.query("select * from stb_new") - tdSql.checkRows(1000) + tdSql.checkRows(10) tdSql.checkCols(4096) tdSql.query("describe stb_new;") tdSql.checkRows(4096) - tdSql.query("select * from stb_new_1") - tdSql.checkRows(100) + tdSql.query("select * from stb_new_0") + tdSql.checkRows(10) tdSql.checkCols(4091) - tdSql.query("describe stb_new_1;") + tdSql.query("describe stb_new_0;") tdSql.checkRows(4096) tdSql.execute("create table stb_new1_1 using stb_new tags(1,2,3,4,5)") tdSql.query("select * from stb_new1_1") @@ -135,34 +133,34 @@ class TDTestCase: tdSql.checkRows(4096) tdSql.query("select count (tbname) from stb_mix") - tdSql.checkData(0, 0, 10) + tdSql.checkData(0, 0, 1) tdSql.query("select * from stb_mix") - tdSql.checkRows(1000) + tdSql.checkRows(10) tdSql.checkCols(4096) tdSql.query("describe stb_mix;") tdSql.checkRows(4096) - tdSql.query("select * from stb_mix_1") - tdSql.checkRows(100) + tdSql.query("select * from stb_mix_0") + tdSql.checkRows(10) tdSql.checkCols(4092) - tdSql.query("describe stb_mix_1;") + tdSql.query("describe stb_mix_0;") tdSql.checkRows(4096) tdSql.query("select count (tbname) from stb_excel") - tdSql.checkData(0, 0, 10) + tdSql.checkData(0, 0, 1) tdSql.query("select * from stb_excel") - tdSql.checkRows(1000) + tdSql.checkRows(10) tdSql.checkCols(4096) tdSql.query("describe stb_excel;") tdSql.checkRows(4096) - tdSql.query("select * from stb_excel_1") - tdSql.checkRows(100) + tdSql.query("select * from stb_excel_0") + tdSql.checkRows(10) tdSql.checkCols(4092) - tdSql.query("describe stb_excel_1;") + tdSql.query("describe stb_excel_0;") tdSql.checkRows(4096) - - + endTime = time.time() + print("total time %ds" % (endTime - startTime)) os.system("rm -rf tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py.sql") From ea0cc35af4d2db6f92f0a0b74e511deb01d2633c Mon Sep 17 00:00:00 2001 From: happyguoxy Date: Fri, 23 Jul 2021 14:49:15 +0800 Subject: [PATCH 18/81] [TD-5074]test operator --- tests/pytest/fulltest.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index e452ed3de4..eb068b6585 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -241,6 +241,7 @@ python3 ./test.py -f query/queryStateWindow.py python3 ./test.py -f query/nestedQuery/queryWithOrderLimit.py python3 ./test.py -f query/nestquery_last_row.py python3 ./test.py -f query/queryCnameDisplay.py +python3 ./test.py -f query/operator.py python3 test.py -f query/nestedQuery/queryWithSpread.py #stream From 11d5b3231d91d78609a0141ef3f3200579f1813d Mon Sep 17 00:00:00 2001 From: happyguoxy Date: Fri, 23 Jul 2021 14:49:27 +0800 Subject: [PATCH 19/81] [TD-5074]test operator --- tests/pytest/query/operator.py | 539 
+++++++++++++++++++++++++++++++++ 1 file changed, 539 insertions(+) create mode 100644 tests/pytest/query/operator.py diff --git a/tests/pytest/query/operator.py b/tests/pytest/query/operator.py new file mode 100644 index 0000000000..b94d5fa3b3 --- /dev/null +++ b/tests/pytest/query/operator.py @@ -0,0 +1,539 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql +import random +import time + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + self.ts = 1600000000000 + self.num = 10 + + def run(self): + tdSql.prepare() + # test case for https://jira.taosdata.com:18080/browse/TD-5074 + + startTime = time.time() + + tdSql.execute('''create stable stable_1 + (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint, + q_bool bool , q_binary binary(20) , q_nchar nchar(20) , + q_float float , q_double double , q_ts timestamp) + tags(loc nchar(20) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, + t_bool bool , t_binary binary(20) , t_nchar nchar(20) , + t_float float , t_double double , t_ts timestamp);''') + tdSql.execute('''create stable stable_2 + (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint, + q_bool bool , q_binary binary(20) , q_nchar nchar(20) , + q_float float , q_double double , q_ts timestamp) + tags(loc nchar(20) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, + t_bool bool , t_binary binary(20) , t_nchar nchar(20) , + t_float float , t_double double , t_ts timestamp);''') + tdSql.execute('''create table table_0 using stable_1 + tags('table_0' , '0' , '0' , '0' , '0' , 0 , '0' , '0' , '0' , '0' ,'0')''') + tdSql.execute('''create table table_1 using stable_1 + tags('table_1' , '2147483647' , '9223372036854775807' , '32767' , '127' , 1 , + 'binary1' , 'nchar1' , '1' , '11' , \'1999-09-09 09:09:09.090\')''') + tdSql.execute('''create table table_2 using stable_1 + tags('table_2' , '-2147483647' , '-9223372036854775807' , '-32767' , '-127' , false , + 'binary2' , 'nchar2nchar2' , '-2.2' , '-22.22' , \'2099-09-09 09:09:09.090\')''') + tdSql.execute('''create table table_3 using stable_1 + tags('table_3' , '3' , '3' , '3' , '3' , true , 'binary3' , 'nchar3' , '33.33' , '3333.3333' , '0')''') + tdSql.execute('''create table table_4 using stable_1 + tags('table_4' , '4' , '4' , '4' , '4' , false , 'binary4' , 'nchar4' , '-444.444' , '-444444.444444' , '0')''') + tdSql.execute('''create table table_5 using stable_1 + tags('table_5' , '5' , '5' , '5' , '5' , true , 'binary5' , 'nchar5' , '5555.5555' , '55555555.55555555' , '0')''') + tdSql.execute('''create table table_21 using stable_2 + tags('table_5' , '5' , '5' , '5' , '5' , true , 'binary5' , 'nchar5' , '5555.5555' , '55555555.55555555' , '0')''') + #regular table + tdSql.execute('''create table regular_table_1 + (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , 
q_tinyint tinyint, + q_bool bool , q_binary binary(20) , q_nchar nchar(20) , + q_float float , q_double double , q_ts timestamp) ;''') + + for i in range(self.num): + tdSql.execute('''insert into table_0 values(%d, %d, %d, %d, %d, 0, 'binary.%s', 'nchar.%s', %f, %f, %d)''' + % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) + tdSql.execute('''insert into table_1 values(%d, %d, %d, %d, %d, 1, 'binary1.%s', 'nchar1.%s', %f, %f, %d)''' + % (self.ts + i, 2147483647-i, 9223372036854775807-i, 32767-i, 127-i, + i, i, random.random(), random.random(), 1262304000001 + i)) + tdSql.execute('''insert into table_2 values(%d, %d, %d, %d, %d, true, 'binary2.%s', 'nchar2nchar2.%s', %f, %f, %d)''' + % (self.ts + i, -2147483647+i, -9223372036854775807+i, -32767+i, -127+i, + i, i, random.uniform(-1,0), random.uniform(-1,0), 1577836800001 + i)) + tdSql.execute('''insert into table_3 values(%d, %d, %d, %d, %d, false, 'binary3.%s', 'nchar3.%s', %f, %f, %d)''' + % (self.ts + i, random.randint(-2147483647, 2147483647), + random.randint(-9223372036854775807, 9223372036854775807), random.randint(-32767, 32767), + random.randint(-127, 127), random.randint(-100, 100), random.randint(-10000, 10000), + random.uniform(-100000,100000), random.uniform(-1000000000,1000000000), self.ts + i)) + tdSql.execute('''insert into table_4 values(%d, %d, %d, %d, %d, true, 'binary4.%s', 'nchar4.%s', %f, %f, %d)''' + % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) + tdSql.execute('''insert into table_5 values(%d, %d, %d, %d, %d, false, 'binary5.%s', 'nchar5.%s', %f, %f, %d)''' + % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) + tdSql.execute('''insert into table_21 values(%d, %d, %d, %d, %d, false, 'binary5.%s', 'nchar5.%s', %f, %f, %d)''' + % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) + + tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, 0, 'binary.%s', 'nchar.%s', %f, %f, %d)''' + % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) + tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, 1, 'binary1.%s', 'nchar1.%s', %f, %f, %d)''' + % (self.ts + 100 + i, 2147483647-i, 9223372036854775807-i, 32767-i, 127-i, + i, i, random.random(), random.random(), 1262304000001 + i)) + tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, true, 'binary2.%s', 'nchar2nchar2.%s', %f, %f, %d)''' + % (self.ts + 200 + i, -2147483647+i, -9223372036854775807+i, -32767+i, -127+i, + i, i, random.uniform(-1,0), random.uniform(-1,0), 1577836800001 + i)) + tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, false, 'binary3.%s', 'nchar3.%s', %f, %f, %d)''' + % (self.ts + 300 + i, random.randint(-2147483647, 2147483647), + random.randint(-9223372036854775807, 9223372036854775807), random.randint(-32767, 32767), + random.randint(-127, 127), random.randint(-100, 100), random.randint(-10000, 10000), + random.uniform(-100000,100000), random.uniform(-1000000000,1000000000), self.ts + i)) + tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, true, 'binary4.%s', 'nchar4.%s', %f, %f, %d)''' + % (self.ts + 400 + i, i, i, i, i, i, i, i, i, self.ts + i)) + tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, false, 'binary5.%s', 'nchar5.%s', %f, %f, %d)''' + % (self.ts + 500 + i, i, i, i, i, i, i, i, i, self.ts + i)) + + tdLog.info("========== operator=1(OP_TableScan) ==========") + tdLog.info("========== operator=7(OP_Project) ==========") + sql = '''select * from stable_1''' + tdSql.query(sql) + tdSql.checkRows(6*self.num) + sql = 
'''select * from regular_table_1''' + tdSql.query(sql) + tdSql.checkRows(6*self.num) + + tdLog.info("========== operator=14(OP_MultiTableAggregate ) ==========") + sql = '''select last_row(*) from stable_1;''' + tdSql.query(sql) + tdSql.checkData(0,1,self.num-1) + + tdLog.info("========== operator=6(OP_Aggregate) ==========") + sql = '''select last_row(*) from regular_table_1;''' + tdSql.query(sql) + tdSql.checkData(0,1,self.num-1) + + tdLog.info("========== operator=9(OP_Limit) ==========") + sql = '''select * from stable_1 where loc = 'table_0' limit 5;''' + tdSql.query(sql) + tdSql.checkRows(5) + sql = '''select last_row(*) from (select * from stable_1 where loc = 'table_0');''' + tdSql.query(sql) + tdSql.checkRows(1) + + sql = '''select * from regular_table_1 ;''' + tdSql.query(sql) + tdSql.checkRows(6*self.num) + sql = '''select last_row(*) from (select * from regular_table_1);''' + tdSql.query(sql) + tdSql.checkRows(1) + tdSql.checkData(0,1,self.num-1) + + + sql = '''select last_row(*) from + ((select * from table_0) union all + (select * from table_1) union all + (select * from table_2));''' + tdSql.error(sql) + + tdLog.info("========== operator=16(OP_DummyInput) ==========") + sql = '''select last_row(*) from + ((select last_row(*) from table_0) union all + (select last_row(*) from table_1) union all + (select last_row(*) from table_2));''' + tdSql.error(sql) + + sql = '''select last_row(*) from + ((select * from table_0 limit 5 offset 5) union all + (select * from table_1 limit 5 offset 5) union all + (select * from regular_table_1 limit 5 offset 5));''' + tdSql.error(sql) + + tdLog.info("========== operator=10(OP_SLimit) ==========") + sql = '''select count(*) from stable_1 group by loc slimit 3 soffset 2 ;''' + tdSql.query(sql) + tdSql.checkRows(3) + + sql = '''select last_row(*) from + ((select * from table_0) union all + (select * from table_1) union all + (select * from table_2));''' + tdSql.error(sql) + + tdLog.info("========== operator=20(OP_Distinct) ==========") + tdLog.info("========== operator=4(OP_TagScan) ==========") + sql = '''select distinct(t_bool) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(2) + sql = '''select distinct(loc) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_int) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_bigint) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_smallint) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_tinyint) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_binary) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_nchar) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_float) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_double) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_ts) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(3) + sql = '''select distinct(tbname) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + + tdLog.info("========== operator=2(OP_DataBlocksOptScan) ==========") + sql = '''select last(q_int),first(q_int) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_bigint),first(q_bigint) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_smallint),first(q_smallint) from stable_1;''' + 
tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_tinyint),first(q_tinyint) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_bool),first(q_bool) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_binary),first(q_binary) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_nchar),first(q_nchar) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_float),first(q_float) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_double),first(q_double) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_ts),first(q_ts) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint),last(q_bool),last(q_binary),last(q_nchar), + last(q_float),last(q_double),last(q_ts),first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint), + first(q_bool),first(q_binary),first(q_nchar),first(q_float),first(q_float),first(q_double),first(q_ts) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint),last(q_bool),last(q_binary),last(q_nchar), + last(q_float),last(q_double),last(q_ts),first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint),first(q_bool), + first(q_binary),first(q_nchar),first(q_float),first(q_float),first(q_double),first(q_ts) from regular_table_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + + tdLog.info("========== operator=8(OP_Groupby) ==========") + sql = '''select stddev(q_int) from table_0 group by q_int;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select stddev(q_int),stddev(q_bigint),stddev(q_smallint),stddev(q_tinyint),stddev(q_float),stddev(q_double) from stable_1 group by q_int;''' + tdSql.query(sql) + sql = '''select stddev(q_int),stddev(q_bigint),stddev(q_smallint),stddev(q_tinyint),stddev(q_float),stddev(q_double) from table_1 group by q_bigint;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select stddev(q_int),stddev(q_bigint),stddev(q_smallint),stddev(q_tinyint),stddev(q_float),stddev(q_double) from regular_table_1 group by q_smallint;''' + tdSql.query(sql) + + tdLog.info("========== operator=11(OP_TimeWindow) ==========") + sql = '''select last(q_int) from table_0 interval(1m);''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint), + first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint) from table_1 interval(1m);''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint), + first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint) from stable_1 interval(1m);''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint), + first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint) from regular_table_1 interval(1m);''' + tdSql.query(sql) + tdSql.checkRows(1) + + tdLog.info("========== operator=12(OP_SessionWindow) ==========") + sql = '''select count(*) from table_1 session(ts,1s);''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select count(*) from regular_table_1 session(ts,1s);''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select count(*),sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), 
max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from table_1 session(ts,1s);''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select count(*),sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from regular_table_1 session(ts,1s);''' + tdSql.query(sql) + tdSql.checkRows(1) + + tdLog.info("========== operator=13(OP_Fill) ==========") + sql = '''select sum(q_int) from table_0 + where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' + tdSql.query(sql) + tdSql.checkData(0,1,'None') + sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from stable_1 where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' + tdSql.query(sql) + tdSql.checkData(0,1,'None') + sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from regular_table_1 where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' + tdSql.query(sql) + tdSql.checkData(0,1,'None') + sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from table_0 where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' + tdSql.query(sql) + 
tdSql.checkData(0,1,'None') + #TD-5190 + sql = '''select sum(q_tinyint),stddev(q_float) from stable_1 + where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' + tdSql.query(sql) + tdSql.checkData(0,1,'None') + + tdLog.info("========== operator=15(OP_MultiTableTimeInterval) ==========") + sql = '''select avg(q_int) from stable_1 where ts=0;''' + tdSql.query(sql) + tdSql.checkData(0,0,'table_0') + sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from stable_1 group by loc having sum(q_int)>=0;''' + tdSql.query(sql) + tdSql.checkData(0,0,'table_0') + sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from stable_1 group by loc having avg(q_int)>=0;''' + tdSql.query(sql) + tdSql.checkData(0,0,'table_0') + sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from stable_1 group by loc having min(q_int)>=0;''' + tdSql.query(sql) + tdSql.checkData(0,0,'table_0') + sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from stable_1 group by loc having max(q_int)>=0;''' + tdSql.query(sql) + tdSql.checkData(0,0,'table_0') + sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), 
first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from stable_1 group by loc having first(q_int)>=0;''' + tdSql.query(sql) + tdSql.checkData(0,0,'table_0') + sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from stable_1 group by loc having last(q_int)>=0;''' + tdSql.query(sql) + tdSql.checkData(0,0,'table_0') + + tdLog.info("========== operator=21(OP_Join) ==========") + sql = '''select t1.q_int,t2.q_int from + (select ts,q_int from table_1) t1 , (select ts,q_int from table_2) t2 + where t2.ts = t1.ts;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select t1.*,t2.* from + (select * from table_1) t1 , (select * from table_2) t2 + where t2.ts = t1.ts;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select t1.*,t2.* from + (select * from regular_table_1) t1 , (select * from table_0) t2 + where t2.ts = t1.ts;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select t1.*,t2.* from + (select * from stable_1) t1 , (select * from table_2) t2 + where t2.ts = t1.ts;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select t1.*,t2.* from + (select * from regular_table_1) t1 , (select * from stable_1) t2 + where t2.ts = t1.ts;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select t1.*,t2.*,t3.* from + (select * from regular_table_1) t1 , (select * from stable_1) t2, (select * from table_0) t3 + where t2.ts = t1.ts and t3.ts = t1.ts and t2.ts = t3.ts;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + + tdLog.info("========== operator=22(OP_StateWindow) ==========") + sql = '''select avg(q_int),sum(q_smallint) from table_1 state_window(q_int);''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from table_1 state_window(q_bigint);''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), 
avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from regular_table_1 state_window(q_smallint);''' + tdSql.query(sql) + tdSql.checkRows(6*self.num) + + endTime = time.time() + print("total time %ds" % (endTime - startTime)) + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file From 1da9c4c9d399b2118b600cc34a16aaae5f878150 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Fri, 23 Jul 2021 15:57:45 +0800 Subject: [PATCH 20/81] [TD-5451] add integer&float boundary check --- src/client/src/tscParseLineProtocol.c | 173 +++++++++++++++++++------- src/inc/ttype.h | 2 + 2 files changed, 133 insertions(+), 42 deletions(-) diff --git a/src/client/src/tscParseLineProtocol.c b/src/client/src/tscParseLineProtocol.c index cccc81274d..d59be5e79f 100644 --- a/src/client/src/tscParseLineProtocol.c +++ b/src/client/src/tscParseLineProtocol.c @@ -1217,6 +1217,122 @@ static bool isTimeStamp(char *pVal, uint16_t len, SMLTimeStampType *tsType) { return false; } +static bool convertStrToNumber(TAOS_SML_KV *pVal, char*str) { + errno = 0; + uint8_t type = pVal->type; + int16_t length = pVal->length; + int64_t val_s; + uint64_t val_u; + double val_d; + + printf("origin str:%s\n", str); + if (IS_FLOAT_TYPE(type)) { + val_d = strtod(str, NULL); + } else { + if (IS_SIGNED_NUMERIC_TYPE(type)) { + val_s = strtoll(str, NULL, 10); + } else { + val_u = strtoull(str, NULL, 10); + } + } + + if (errno == ERANGE) { + printf("out of range\n"); + return false; + } + + switch (type) { + case TSDB_DATA_TYPE_TINYINT: + if (!IS_VALID_TINYINT(val_s)) { + printf("tiny int out of range\n"); + return false; + } + pVal->value = calloc(length, 1); + *(int8_t *)(pVal->value) = (int8_t)val_s; + printf("tiny int:%d\n", *(int8_t *)(pVal->value)); + break; + case TSDB_DATA_TYPE_UTINYINT: + if (!IS_VALID_UTINYINT(val_u)) { + return false; + } + pVal->value = calloc(length, 1); + *(uint8_t *)(pVal->value) = (uint8_t)val_u; + printf("tiny uint:%u\n", *(uint8_t *)(pVal->value)); + break; + case TSDB_DATA_TYPE_SMALLINT: + if (!IS_VALID_SMALLINT(val_s)) { + printf("small int out of range\n"); + return false; + } + pVal->value = calloc(length, 1); + *(int16_t *)(pVal->value) = (int16_t)val_s; + printf("small int:%d\n", *(int16_t *)(pVal->value)); + break; + case TSDB_DATA_TYPE_USMALLINT: + if (!IS_VALID_USMALLINT(val_u)) { + return false; + } + pVal->value = calloc(length, 1); + *(uint16_t *)(pVal->value) = (uint16_t)val_u; + printf("small uint:%u\n", *(uint16_t *)(pVal->value)); + break; + case TSDB_DATA_TYPE_INT: + if (!IS_VALID_INT(val_s)) { + printf("int out of range\n"); + return false; + } + pVal->value = calloc(length, 1); + *(int32_t *)(pVal->value) = (int32_t)val_s; + printf("int:%d\n", *(int32_t *)(pVal->value)); + break; + case TSDB_DATA_TYPE_UINT: + if (!IS_VALID_UINT(val_u)) { + return false; + } + pVal->value = calloc(length, 1); + *(uint32_t *)(pVal->value) = (uint32_t)val_u; + printf("uint:%u\n", *(uint32_t *)(pVal->value)); + break; + case TSDB_DATA_TYPE_BIGINT: + if (!IS_VALID_BIGINT(val_s)) { + printf("big int out of range\n"); + return false; + } + pVal->value = calloc(length, 1); + *(int64_t *)(pVal->value) = (int64_t)val_s; + printf("big int:%ld\n", *(int64_t *)(pVal->value)); + break; + case TSDB_DATA_TYPE_UBIGINT: + if (!IS_VALID_UBIGINT(val_u)) { + return false; + } + pVal->value = calloc(length, 1); + *(uint64_t *)(pVal->value) = 
(uint64_t)val_u; + printf("big uint:%lu\n", *(uint64_t *)(pVal->value)); + break; + case TSDB_DATA_TYPE_FLOAT: + if (!IS_VALID_FLOAT(val_d)) { + printf("float out of range\n"); + return false; + } + pVal->value = calloc(length, 1); + *(float *)(pVal->value) = (float)val_d; + printf("float:%.5e\n", *(float *)(pVal->value)); + break; + case TSDB_DATA_TYPE_DOUBLE: + if (!IS_VALID_DOUBLE(val_d)) { + printf("double out of range\n"); + return false; + } + pVal->value = calloc(length, 1); + *(double *)(pVal->value) = (double)val_d; + printf("double:%.5e\n", *(double *)(pVal->value)); + break; + default: + return false; + } + return true; +} //len does not include '\0' from value. static bool convertSmlValueType(TAOS_SML_KV *pVal, char *value, uint16_t len) { @@ -1229,97 +1345,72 @@ static bool convertSmlValueType(TAOS_SML_KV *pVal, char *value, pVal->type = TSDB_DATA_TYPE_TINYINT; pVal->length = (int16_t)tDataTypes[pVal->type].bytes; value[len - 2] = '\0'; - if (!isValidInteger(value)) { + if (!isValidInteger(value) || !convertStrToNumber(pVal, value)) { return false; } - pVal->value = calloc(pVal->length, 1); - int8_t val = (int8_t)strtoll(value, NULL, 10); - memcpy(pVal->value, &val, pVal->length); return true; } if (isTinyUint(value, len)) { pVal->type = TSDB_DATA_TYPE_UTINYINT; pVal->length = (int16_t)tDataTypes[pVal->type].bytes; value[len - 2] = '\0'; - if (!isValidInteger(value)) { + if (!isValidInteger(value) || !convertStrToNumber(pVal, value)) { return false; } - pVal->value = calloc(pVal->length, 1); - uint8_t val = (uint8_t)strtoul(value, NULL, 10); - memcpy(pVal->value, &val, pVal->length); return true; } if (isSmallInt(value, len)) { pVal->type = TSDB_DATA_TYPE_SMALLINT; pVal->length = (int16_t)tDataTypes[pVal->type].bytes; value[len - 3] = '\0'; - if (!isValidInteger(value)) { + if (!isValidInteger(value) || !convertStrToNumber(pVal, value)) { return false; } - pVal->value = calloc(pVal->length, 1); - int16_t val = (int16_t)strtoll(value, NULL, 10); - memcpy(pVal->value, &val, pVal->length); return true; } if (isSmallUint(value, len)) { pVal->type = TSDB_DATA_TYPE_USMALLINT; pVal->length = (int16_t)tDataTypes[pVal->type].bytes; value[len - 3] = '\0'; - if (!isValidInteger(value)) { + if (!isValidInteger(value) || !convertStrToNumber(pVal, value)) { return false; } - pVal->value = calloc(pVal->length, 1); - uint16_t val = (uint16_t)strtoul(value, NULL, 10); - memcpy(pVal->value, &val, pVal->length); - //memcpy(pVal->value, &val, pVal->length); return true; } if (isInt(value, len)) { pVal->type = TSDB_DATA_TYPE_INT; pVal->length = (int16_t)tDataTypes[pVal->type].bytes; value[len - 3] = '\0'; - if (!isValidInteger(value)) { + if (!isValidInteger(value) || !convertStrToNumber(pVal, value)) { return false; } - pVal->value = calloc(pVal->length, 1); - int32_t val = (int32_t)strtoll(value, NULL, 10); - memcpy(pVal->value, &val, pVal->length); return true; } if (isUint(value, len)) { pVal->type = TSDB_DATA_TYPE_UINT; pVal->length = (int16_t)tDataTypes[pVal->type].bytes; value[len - 3] = '\0'; - if (!isValidInteger(value)) { + if (!isValidInteger(value) || !convertStrToNumber(pVal, value)) { return false; } - pVal->value = calloc(pVal->length, 1); - uint32_t val = (uint32_t)strtoul(value, NULL, 10); - memcpy(pVal->value, &val, pVal->length); return true; } if (isBigInt(value, len)) { pVal->type = TSDB_DATA_TYPE_BIGINT; pVal->length = (int16_t)tDataTypes[pVal->type].bytes; value[len - 3] = '\0'; - if (!isValidInteger(value)) { + if (!isValidInteger(value) || !convertStrToNumber(pVal, value)) 
{ return false; } - pVal->value = calloc(pVal->length, 1); - int64_t val = (int64_t)strtoll(value, NULL, 10); - memcpy(pVal->value, &val, pVal->length); return true; } if (isBigUint(value, len)) { pVal->type = TSDB_DATA_TYPE_UBIGINT; pVal->length = (int16_t)tDataTypes[pVal->type].bytes; value[len - 3] = '\0'; - if (!isValidInteger(value)) { + if (!isValidInteger(value) || !convertStrToNumber(pVal, value)) { return false; } - pVal->value = calloc(pVal->length, 1); - uint64_t val = (uint64_t)strtoul(value, NULL, 10); - memcpy(pVal->value, &val, pVal->length); return true; } //floating number @@ -1327,24 +1418,18 @@ static bool convertSmlValueType(TAOS_SML_KV *pVal, char *value, pVal->type = TSDB_DATA_TYPE_FLOAT; pVal->length = (int16_t)tDataTypes[pVal->type].bytes; value[len - 3] = '\0'; - if (!isValidFloat(value)) { + if (!isValidFloat(value) || !convertStrToNumber(pVal, value)) { return false; } - pVal->value = calloc(pVal->length, 1); - float val = (float)strtold(value, NULL); - memcpy(pVal->value, &val, pVal->length); return true; } if (isDouble(value, len)) { pVal->type = TSDB_DATA_TYPE_DOUBLE; pVal->length = (int16_t)tDataTypes[pVal->type].bytes; value[len - 3] = '\0'; - if (!isValidFloat(value)) { + if (!isValidFloat(value) || !convertStrToNumber(pVal, value)) { return false; } - pVal->value = calloc(pVal->length, 1); - double val = (double)strtold(value, NULL); - memcpy(pVal->value, &val, pVal->length); return true; } //binary @@ -1554,6 +1639,7 @@ static bool parseSmlValue(TAOS_SML_KV *pKV, const char **index, if (!convertSmlValueType(pKV, value, len)) { //free previous alocated key field free(pKV->key); + pKV->key = NULL; free(value); return TSDB_CODE_TSC_LINE_SYNTAX_ERROR; } @@ -1823,8 +1909,11 @@ int taos_insert_lines(TAOS* taos, char* lines[], int numLines) { cleanup: tscDebug("taos_insert_lines finish inserting %d lines. 
code: %d", numLines, code); + numPoints = taosArrayGetSize(lpPoints); + TAOS_SML_DATA_POINT* ppoints = TARRAY_GET_START(lpPoints); + for (int i=0; i= 0 && (_t) < UINT16_MAX) #define IS_VALID_UINT(_t) ((_t) >= 0 && (_t) < UINT32_MAX) #define IS_VALID_UBIGINT(_t) ((_t) >= 0 && (_t) < UINT64_MAX) +#define IS_VALID_FLOAT(_t) ((_t) >= -FLT_MAX && (_t) <= FLT_MAX) +#define IS_VALID_DOUBLE(_t) ((_t) >= -DBL_MAX && (_t) <= DBL_MAX) static FORCE_INLINE bool isNull(const char *val, int32_t type) { switch (type) { From da57b6bec3676bf229d243799cb1f0d3bf387f0d Mon Sep 17 00:00:00 2001 From: markswang <792637585@qq.com> Date: Fri, 23 Jul 2021 18:49:27 +0800 Subject: [PATCH 21/81] [TD-5496]: add filter length when estimate query msg size --- src/client/inc/tscUtil.h | 1 + src/client/src/tscServer.c | 4 ++-- src/client/src/tscUtil.c | 16 ++++++++++++++++ 3 files changed, 19 insertions(+), 2 deletions(-) diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h index 264070d70f..ac3ead086e 100644 --- a/src/client/inc/tscUtil.h +++ b/src/client/inc/tscUtil.h @@ -344,6 +344,7 @@ int32_t tscCreateTableMetaFromSTableMeta(STableMeta* pChild, const char* name, v STableMeta* tscTableMetaDup(STableMeta* pTableMeta); SVgroupsInfo* tscVgroupsInfoDup(SVgroupsInfo* pVgroupsInfo); +int32_t tscGetColFilterSerializeLen(SQueryInfo* pQueryInfo); int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAttr, void* addr); void* createQInfoFromQueryNode(SQueryInfo* pQueryInfo, STableGroupInfo* pTableGroupInfo, SOperatorInfo* pOperator, char* sql, void* addr, int32_t stage, uint64_t qId); diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index e975dd7b06..bd80f46334 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -632,7 +632,7 @@ static int32_t tscEstimateQueryMsgSize(SSqlObj *pSql) { SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd); int32_t srcColListSize = (int32_t)(taosArrayGetSize(pQueryInfo->colList) * sizeof(SColumnInfo)); - + int32_t srcColFilterSize = tscGetColFilterSerializeLen(pQueryInfo); size_t numOfExprs = tscNumOfExprs(pQueryInfo); int32_t exprSize = (int32_t)(sizeof(SSqlExpr) * numOfExprs * 2); @@ -653,7 +653,7 @@ static int32_t tscEstimateQueryMsgSize(SSqlObj *pSql) { tableSerialize = totalTables * sizeof(STableIdInfo); } - return MIN_QUERY_MSG_PKT_SIZE + minMsgSize() + sizeof(SQueryTableMsg) + srcColListSize + exprSize + tsBufSize + + return MIN_QUERY_MSG_PKT_SIZE + minMsgSize() + sizeof(SQueryTableMsg) + srcColListSize + srcColFilterSize + exprSize + tsBufSize + tableSerialize + sqlLen + 4096 + pQueryInfo->bufLen; } diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 5809218e6a..c746484944 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -4579,6 +4579,22 @@ static int32_t createTagColumnInfo(SQueryAttr* pQueryAttr, SQueryInfo* pQueryInf return TSDB_CODE_SUCCESS; } +int32_t tscGetColFilterSerializeLen(SQueryInfo* pQueryInfo) { + int16_t numOfCols = (int16_t)taosArrayGetSize(pQueryInfo->colList); + int32_t len = 0; + + for(int32_t i = 0; i < numOfCols; ++i) { + SColumn* pCol = taosArrayGetP(pQueryInfo->colList, i); + for (int32_t j = 0; j < pCol->info.flist.numOfFilters; ++j) { + len += sizeof(SColumnFilterInfo); + if (pCol->info.flist.filterInfo[j].filterstr) { + len += (size_t) pCol->info.flist.filterInfo[j].len + 1 * TSDB_NCHAR_SIZE; + } + } + } + return len; +} + int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAttr, void* addr) { memset(pQueryAttr, 0, 
sizeof(SQueryAttr)); From 8c62d84a3d5f1d52191bcf4a99a9b4d75b21eca9 Mon Sep 17 00:00:00 2001 From: wenzhouwww Date: Fri, 23 Jul 2021 20:05:08 +0800 Subject: [PATCH 22/81] [TD-5369] add test case about taosdemo params 'time_step' --- tests/pytest/fulltest.sh | 1 + .../taosdemoAllTest/taosdemoInsertMSDB.json | 63 ++++++++++ .../taosdemoAllTest/taosdemoInsertNanoDB.json | 63 ++++++++++ .../taosdemoAllTest/taosdemoInsertUSDB.json | 63 ++++++++++ .../taosdemoTestInsertTime_step.py | 115 ++++++++++++++++++ .../taosdemoTestNanoCreateDB.sql | 7 -- .../taosdemoTestSupportNanoInsert.py | 36 +++--- 7 files changed, 324 insertions(+), 24 deletions(-) create mode 100644 tests/pytest/tools/taosdemoAllTest/taosdemoInsertMSDB.json create mode 100644 tests/pytest/tools/taosdemoAllTest/taosdemoInsertNanoDB.json create mode 100644 tests/pytest/tools/taosdemoAllTest/taosdemoInsertUSDB.json create mode 100644 tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertTime_step.py delete mode 100644 tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoCreateDB.sql diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index c9dd6d8e79..ab7dedc959 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -368,6 +368,7 @@ python3 test.py -f alter/alter_create_exception.py python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.py python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py +python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertTime_step.py python3 ./test.py -f insert/flushwhiledrop.py #======================p4-end=============== diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoInsertMSDB.json b/tests/pytest/tools/taosdemoAllTest/taosdemoInsertMSDB.json new file mode 100644 index 0000000000..49ab6f3a43 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoInsertMSDB.json @@ -0,0 +1,63 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 10, + "thread_count_create_tbl": 10, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "testdb3", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 36, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "tb0_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 100, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1000, + "start_timestamp": "2021-07-01 00:00:00.000", + "sample_format": "", + "sample_file": "", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2}, + {"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1}, + {"type": "BOOL"},{"type": "NCHAR","len":16}], + "tags": [{"type": "TINYINT", 
"count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}] + }] + }] +} + diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoInsertNanoDB.json b/tests/pytest/tools/taosdemoAllTest/taosdemoInsertNanoDB.json new file mode 100644 index 0000000000..9a35df917d --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoInsertNanoDB.json @@ -0,0 +1,63 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 10, + "thread_count_create_tbl": 10, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "testdb1", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ns", + "keep": 36, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "tb0_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 100, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1000, + "start_timestamp": "2021-07-01 00:00:00.000", + "sample_format": "", + "sample_file": "", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2}, + {"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1}, + {"type": "BOOL"},{"type": "NCHAR","len":16}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}] + }] + }] +} + diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoInsertUSDB.json b/tests/pytest/tools/taosdemoAllTest/taosdemoInsertUSDB.json new file mode 100644 index 0000000000..631179dbae --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoInsertUSDB.json @@ -0,0 +1,63 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 10, + "thread_count_create_tbl": 10, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "testdb2", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "us", + "keep": 36, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "tb0_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 100, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1000, + "start_timestamp": "2021-07-01 00:00:00.000", + "sample_format": 
"", + "sample_file": "", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2}, + {"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1}, + {"type": "BOOL"},{"type": "NCHAR","len":16}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}] + }] + }] +} + diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertTime_step.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertTime_step.py new file mode 100644 index 0000000000..7b3b865df9 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertTime_step.py @@ -0,0 +1,115 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + def run(self): + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + binPath = buildPath+ "/build/bin/" + + # insert: create one or mutiple tables per sql and insert multiple rows per sql + + # check the params of taosdemo about time_step is nano + os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoInsertNanoDB.json -y " % binPath) + tdSql.execute("use testdb1") + tdSql.query("show stables") + tdSql.checkData(0, 4, 100) + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0, 100) + tdSql.query("select count(*) from tb0_0") + tdSql.checkData(0, 0, 100) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 10000) + tdSql.query("describe stb0") + tdSql.getData(9, 1) + tdSql.checkDataType(9, 1,"TIMESTAMP") + tdSql.query("select last(ts) from stb0") + tdSql.checkData(0, 0,"2021-07-01 00:00:00.000099000") + + # check the params of taosdemo about time_step is us + os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoInsertUSDB.json -y " % binPath) + tdSql.execute("use testdb2") + tdSql.query("show stables") + tdSql.checkData(0, 4, 100) + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0, 100) + tdSql.query("select count(*) from tb0_0") + tdSql.checkData(0, 0, 100) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 10000) + tdSql.query("describe stb0") + tdSql.getData(9, 1) + tdSql.checkDataType(9, 
1,"TIMESTAMP") + tdSql.query("select last(ts) from stb0") + tdSql.checkData(0, 0,"2021-07-01 00:00:00.099000") + + # check the params of taosdemo about time_step is ms + os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoInsertMSDB.json -y " % binPath) + tdSql.execute("use testdb3") + tdSql.query("show stables") + tdSql.checkData(0, 4, 100) + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0, 100) + tdSql.query("select count(*) from tb0_0") + tdSql.checkData(0, 0, 100) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 10000) + tdSql.query("describe stb0") + tdSql.checkDataType(9, 1,"TIMESTAMP") + tdSql.query("select last(ts) from stb0") + tdSql.checkData(0, 0,"2021-07-01 00:01:39.000") + + + os.system("rm -rf ./res.txt") + os.system("rm -rf ./*.py.sql") + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoCreateDB.sql b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoCreateDB.sql deleted file mode 100644 index e79e09592c..0000000000 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoCreateDB.sql +++ /dev/null @@ -1,7 +0,0 @@ -drop database if exists nsdbsql; -create database nsdbsql precision "ns" keep 36 days 6 update 1; -use nsdbsql; -CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupdId int); -CREATE TABLE d1001 USING meters TAGS ("Beijing.Chaoyang", 2); -INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 10.2, 219, 0.32); -INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 85, 32, 0.76); diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py index 76b5871de6..88a917da85 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py @@ -64,8 +64,7 @@ class TDTestCase: tdSql.query("select count(*) from stb0") tdSql.checkData(0, 0, 10000000) tdSql.query("describe stb0") - tdSql.getData(8, 1) - tdSql.checkDataType(8, 1,"TIMESTAMP") + tdSql.checkDataType(9, 1,"TIMESTAMP") tdSql.query("select last(ts) from stb0") tdSql.getData(0, 0) @@ -79,7 +78,7 @@ class TDTestCase: tdSql.checkData(0, 0, 10000000) # check c8 is an nano timestamp tdSql.query("describe stb1") - tdSql.checkDataType(8, 1,"TIMESTAMP") + tdSql.checkDataType(9, 1,"TIMESTAMP") # check insert timestamp_step is nano_second tdSql.query("select last(ts) from stb1") tdSql.checkData(0, 0,"2021-07-01 00:01:39.990000000") @@ -99,7 +98,7 @@ class TDTestCase: tdSql.checkData(0, 0, 100000) # check c8 is an nano timestamp tdSql.query("describe stb0") - tdSql.checkDataType(8,1,"TIMESTAMP") + tdSql.checkDataType(9,1,"TIMESTAMP") # insert by csv files and timetamp is long int , strings in ts and cols @@ -118,16 +117,6 @@ class TDTestCase: os.system("rm -rf ./insert_res.txt") os.system("rm -rf tools/taosdemoAllTest/taosdemoTestSupportNano*.py.sql") - - - - - - - - - - # taosdemo test insert with command and parameter , detals show taosdemo --help os.system("%staosdemo -u root -P taosdata -p 6030 -h vm84 -a 1 -m pre -n 10 -T 20 -t 60 -o res.txt -y " % binPath) @@ -140,13 +129,26 @@ class TDTestCase: # check taosdemo -s - os.system("%staosdemo -s tools/taosdemoAllTest/taosdemoTestNanoCreateDB.sql -y " % binPath) + sqls_ls = 
['drop database if exists nsdbsql;','create database nsdbsql precision "ns" keep 36 days 6 update 1;', + 'use nsdbsql;','CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupdId int);', + 'CREATE TABLE d1001 USING meters TAGS ("Beijing.Chaoyang", 2);', + 'INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 10.2, 219, 0.32);', + 'INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 85, 32, 0.76);'] + + with open("./taosdemoTestNanoCreateDB.sql",mode ="a" ) as sql_files: + for sql in sqls_ls: + sql_files.write(sql+"\n") + sql_files.close() + + sleep(10) + + os.system("%staosdemo -s taosdemoTestNanoCreateDB.sql -y " % binPath) tdSql.query("select count(*) from nsdbsql.meters") tdSql.checkData(0, 0, 2) - - os.system("rm -rf ./res.txt") + os.system("rm -rf ./*.py.sql") + os.system("rm -rf ./taosdemoTestNanoCreateDB.sql") def stop(self): From 2323d5a22868649cb109c0c938cbab510881eb88 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 24 Jul 2021 14:33:54 +0800 Subject: [PATCH 23/81] [td-225] update the test script. --- tests/pytest/tools/taosdemoAllTest/querrThreads0.json | 4 ++-- tests/pytest/tools/taosdemoAllTest/querrThreadsless0.json | 4 ++-- tests/script/sh/stop_dnodes.sh | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/pytest/tools/taosdemoAllTest/querrThreads0.json b/tests/pytest/tools/taosdemoAllTest/querrThreads0.json index 69557a7841..3999845dec 100644 --- a/tests/pytest/tools/taosdemoAllTest/querrThreads0.json +++ b/tests/pytest/tools/taosdemoAllTest/querrThreads0.json @@ -7,7 +7,7 @@ "password": "taosdata", "confirm_parameter_prompt": "no", "databases": "db", - "query_times":3, + "query_times": 3, "specified_table_query": { "query_interval": 0, "concurrent": 1, @@ -34,4 +34,4 @@ ] } } - \ No newline at end of file + diff --git a/tests/pytest/tools/taosdemoAllTest/querrThreadsless0.json b/tests/pytest/tools/taosdemoAllTest/querrThreadsless0.json index 9074ae8fd1..646cbcfbe2 100644 --- a/tests/pytest/tools/taosdemoAllTest/querrThreadsless0.json +++ b/tests/pytest/tools/taosdemoAllTest/querrThreadsless0.json @@ -7,7 +7,7 @@ "password": "taosdata", "confirm_parameter_prompt": "no", "databases": "db", - "query_times":3, + "query_times": 3, "specified_table_query": { "query_interval": 0, "concurrent": 1, @@ -34,4 +34,4 @@ ] } } - \ No newline at end of file + diff --git a/tests/script/sh/stop_dnodes.sh b/tests/script/sh/stop_dnodes.sh index 430f39901e..4c6d8e0351 100755 --- a/tests/script/sh/stop_dnodes.sh +++ b/tests/script/sh/stop_dnodes.sh @@ -14,7 +14,7 @@ while [ -n "$PID" ]; do echo kill -9 $PID pkill -9 taosd echo "Killing processes locking on port 6030" - if [[ "$OS_TYPE" != "Darwin" ]]; then + if [ "$OS_TYPE" != "Darwin" ]; then fuser -k -n tcp 6030 else lsof -nti:6030 | xargs kill -9 @@ -26,7 +26,7 @@ PID=`ps -ef|grep -w tarbitrator | grep -v grep | awk '{print $2}'` while [ -n "$PID" ]; do echo kill -9 $PID pkill -9 tarbitrator - if [[ "$OS_TYPE" != "Darwin" ]]; then + if [ "$OS_TYPE" != "Darwin" ]; then fuser -k -n tcp 6040 else lsof -nti:6040 | xargs kill -9 From 74f0030fc9e4062fc3327af5e50c4d87b78e9fb9 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 24 Jul 2021 14:38:05 +0800 Subject: [PATCH 24/81] [td-225] fix cache log error bug. 
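Beyond the log-message wording, the tcache.c hunks below switch the cache's totalSize bookkeeping from a plain "-=" to atomic_sub_fetch_64(), and additionally subtract the size of an entry that taosCachePut() overwrites. The counter is a shared value updated from more than one thread, so the read-modify-write has to happen as a single atomic step. The following is a minimal standalone sketch of that pattern using C11 atomics rather than TDengine's atomic_* wrappers; the struct and function names are invented for illustration and do not come from the source.

    /* Illustrative sketch only: a cache-size counter decremented from
     * several threads must use an atomic read-modify-write. */
    #include <stdatomic.h>
    #include <stdint.h>

    typedef struct {
        _Atomic int64_t totalSize;   /* bytes currently held by the cache */
    } DemoCacheObj;

    /* A plain "totalSize -= size" compiles to a load, a subtract and a
     * store, so two threads releasing nodes at the same time can lose one
     * of the updates.  An atomic fetch-and-sub performs the whole step
     * indivisibly and yields a value that is safe to print in the debug
     * log afterwards. */
    static int64_t demoReleaseNode(DemoCacheObj *pCache, int64_t nodeSize) {
        int64_t previous = atomic_fetch_sub(&pCache->totalSize, nodeSize);
        return previous - nodeSize;  /* new total, matching *_sub_fetch_* semantics */
    }

The same subtraction is also added on the taosCachePut() overwrite path below, so the running total no longer drifts upward when an existing key is replaced.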
--- src/util/src/tcache.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c index 4aa5b4378f..69b3741e13 100644 --- a/src/util/src/tcache.c +++ b/src/util/src/tcache.c @@ -132,11 +132,11 @@ static FORCE_INLINE void taosCacheReleaseNode(SCacheObj *pCacheObj, SCacheDataNo return; } - pCacheObj->totalSize -= pNode->size; + atomic_sub_fetch_64(&pCacheObj->totalSize, pNode->size); int32_t size = (int32_t)taosHashGetSize(pCacheObj->pHashTable); assert(size > 0); - uDebug("cache:%s, key:%p, %p is destroyed from cache, size:%dbytes, totalNum:%d size:%" PRId64 "bytes", + uDebug("cache:%s, key:%p, %p is destroyed from cache, size:%dbytes, total num:%d size:%" PRId64 "bytes", pCacheObj->name, pNode->key, pNode->data, pNode->size, size - 1, pCacheObj->totalSize); if (pCacheObj->freeFp) { @@ -252,6 +252,7 @@ void *taosCachePut(SCacheObj *pCacheObj, const void *key, size_t keyLen, const v pCacheObj->freeFp(p->data); } + atomic_sub_fetch_64(&pCacheObj->totalSize, p->size); tfree(p); } else { taosAddToTrashcan(pCacheObj, p); @@ -302,7 +303,7 @@ void *taosCacheAcquireByKey(SCacheObj *pCacheObj, const void *key, size_t keyLen } SCacheDataNode* ptNode = NULL; - taosHashGetClone(pCacheObj->pHashTable, key, keyLen, incRefFn, &ptNode, sizeof(void*)); + taosHashGetClone(pCacheObj->pHashTable, key, keyLen, incRefFn, &ptNode); void* pData = (ptNode != NULL)? ptNode->data:NULL; @@ -679,7 +680,7 @@ void* taosCacheTimedRefresh(void *handle) { assert(pCacheArrayList != NULL); uDebug("cache refresh thread starts"); - setThreadName("cacheTimedRefre"); + setThreadName("cacheRefresh"); const int32_t SLEEP_DURATION = 500; //500 ms int64_t count = 0; From 7409fda3a266f9090067ae8beb95163bed4b1d68 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 24 Jul 2021 14:47:37 +0800 Subject: [PATCH 25/81] [td-225] update the thread name --- src/balance/src/bnThread.c | 2 +- src/dnode/src/dnodeTelemetry.c | 2 +- src/dnode/src/dnodeVRead.c | 7 ++++--- src/dnode/src/dnodeVnodes.c | 1 - src/plugins/monitor/src/monMain.c | 2 +- src/query/src/qExecutor.c | 2 ++ src/rpc/src/rpcTcp.c | 5 ++--- src/rpc/test/rclient.c | 2 -- src/rpc/test/rsclient.c | 4 +--- src/sync/src/syncRetrieve.c | 1 - src/sync/test/syncClient.c | 2 -- src/sync/test/syncServer.c | 2 +- src/util/src/tlog.c | 7 +------ src/util/src/tsched.c | 4 +++- src/wal/src/walMgmt.c | 2 +- 15 files changed, 18 insertions(+), 27 deletions(-) diff --git a/src/balance/src/bnThread.c b/src/balance/src/bnThread.c index c5dca2da85..20da83ccba 100644 --- a/src/balance/src/bnThread.c +++ b/src/balance/src/bnThread.c @@ -23,7 +23,7 @@ static SBnThread tsBnThread; static void *bnThreadFunc(void *arg) { - setThreadName("bnThreadd"); + setThreadName("balance"); while (1) { pthread_mutex_lock(&tsBnThread.mutex); diff --git a/src/dnode/src/dnodeTelemetry.c b/src/dnode/src/dnodeTelemetry.c index 59b66879d4..22a6dc5b19 100644 --- a/src/dnode/src/dnodeTelemetry.c +++ b/src/dnode/src/dnodeTelemetry.c @@ -245,7 +245,7 @@ static void* telemetryThread(void* param) { clock_gettime(CLOCK_REALTIME, &end); end.tv_sec += 300; // wait 5 minutes before send first report - setThreadName("telemetryThrd"); + setThreadName("telemetry"); while (!tsExit) { int r = 0; diff --git a/src/dnode/src/dnodeVRead.c b/src/dnode/src/dnodeVRead.c index e8003a8fe7..c404ab1a55 100644 --- a/src/dnode/src/dnodeVRead.c +++ b/src/dnode/src/dnodeVRead.c @@ -118,10 +118,11 @@ static void *dnodeProcessReadQueue(void *wparam) { SVReadMsg * pRead; int32_t qtype; void * 
pVnode; - char name[16]; - memset(name, 0, 16); - snprintf(name, 16, "%s-dnReadQ", pPool->name); + char* threadname = strcmp(pPool->name, "vquery") == 0? "dnodeQueryQ":"dnodeFetchQ"; + + char name[16] = {0}; + snprintf(name, tListLen(name), "%s", threadname); setThreadName(name); while (1) { diff --git a/src/dnode/src/dnodeVnodes.c b/src/dnode/src/dnodeVnodes.c index 8ea8e280de..a5b0e9fe30 100644 --- a/src/dnode/src/dnodeVnodes.c +++ b/src/dnode/src/dnodeVnodes.c @@ -90,7 +90,6 @@ static void *dnodeOpenVnode(void *param) { char stepDesc[TSDB_STEP_DESC_LEN] = {0}; dDebug("thread:%d, start to open %d vnodes", pThread->threadIndex, pThread->vnodeNum); - setThreadName("dnodeOpenVnode"); for (int32_t v = 0; v < pThread->vnodeNum; ++v) { diff --git a/src/plugins/monitor/src/monMain.c b/src/plugins/monitor/src/monMain.c index 960a097f5d..6e583fe0df 100644 --- a/src/plugins/monitor/src/monMain.c +++ b/src/plugins/monitor/src/monMain.c @@ -114,7 +114,7 @@ int32_t monStartSystem() { static void *monThreadFunc(void *param) { monDebug("starting to initialize monitor module ..."); - setThreadName("monThrd"); + setThreadName("monitor"); while (1) { static int32_t accessTimes = 0; diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 982c45c441..f14b83101b 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -6554,9 +6554,11 @@ static SSDataBlock* hashDistinct(void* param, bool* newgroup) { return NULL; } + SDistinctOperatorInfo* pInfo = pOperator->info; SSDataBlock* pRes = pInfo->pRes; + pRes->info.rows = 0; SSDataBlock* pBlock = NULL; while(1) { diff --git a/src/rpc/src/rpcTcp.c b/src/rpc/src/rpcTcp.c index e9feeef9d3..0449ecac8b 100644 --- a/src/rpc/src/rpcTcp.c +++ b/src/rpc/src/rpcTcp.c @@ -529,10 +529,9 @@ static void *taosProcessTcpData(void *param) { SFdObj *pFdObj; struct epoll_event events[maxEvents]; SRecvInfo recvInfo; - char name[16]; - memset(name, 0, sizeof(name)); - snprintf(name, 16, "%s-tcpData", pThreadObj->label); + char name[16] = {0}; + snprintf(name, tListLen(name), "%s-tcp", pThreadObj->label); setThreadName(name); while (1) { diff --git a/src/rpc/test/rclient.c b/src/rpc/test/rclient.c index de30114bd1..2f4433f1bb 100644 --- a/src/rpc/test/rclient.c +++ b/src/rpc/test/rclient.c @@ -48,8 +48,6 @@ static void *sendRequest(void *param) { SInfo *pInfo = (SInfo *)param; SRpcMsg rpcMsg = {0}; - setThreadName("sendCliReq"); - tDebug("thread:%d, start to send request", pInfo->index); while ( pInfo->numOfReqs == 0 || pInfo->num < pInfo->numOfReqs) { diff --git a/src/rpc/test/rsclient.c b/src/rpc/test/rsclient.c index 3e94a56efb..65170d4abb 100644 --- a/src/rpc/test/rsclient.c +++ b/src/rpc/test/rsclient.c @@ -40,9 +40,7 @@ static int terror = 0; static void *sendRequest(void *param) { SInfo *pInfo = (SInfo *)param; SRpcMsg rpcMsg, rspMsg; - - setThreadName("sendSrvReq"); - + tDebug("thread:%d, start to send request", pInfo->index); while ( pInfo->numOfReqs == 0 || pInfo->num < pInfo->numOfReqs) { diff --git a/src/sync/src/syncRetrieve.c b/src/sync/src/syncRetrieve.c index 89fdda0686..c86ab85499 100644 --- a/src/sync/src/syncRetrieve.c +++ b/src/sync/src/syncRetrieve.c @@ -415,7 +415,6 @@ static int32_t syncRetrieveDataStepByStep(SSyncPeer *pPeer) { } void *syncRetrieveData(void *param) { - setThreadName("syncRetrievData"); int64_t rid = (int64_t)param; SSyncPeer *pPeer = syncAcquirePeer(rid); if (pPeer == NULL) { diff --git a/src/sync/test/syncClient.c b/src/sync/test/syncClient.c index 303d2376ef..23ea54ee0c 100644 --- a/src/sync/test/syncClient.c 
+++ b/src/sync/test/syncClient.c @@ -48,8 +48,6 @@ void *sendRequest(void *param) { SInfo * pInfo = (SInfo *)param; SRpcMsg rpcMsg = {0}; - setThreadName("sendCliReq"); - uDebug("thread:%d, start to send request", pInfo->index); while (pInfo->numOfReqs == 0 || pInfo->num < pInfo->numOfReqs) { diff --git a/src/sync/test/syncServer.c b/src/sync/test/syncServer.c index a3d0696648..4598e16a9d 100644 --- a/src/sync/test/syncServer.c +++ b/src/sync/test/syncServer.c @@ -178,7 +178,7 @@ void *processWriteQueue(void *param) { int type; void *item; - setThreadName("writeQ"); + setThreadName("syncWrite"); while (1) { int ret = taosReadQitem(qhandle, &type, &item); diff --git a/src/util/src/tlog.c b/src/util/src/tlog.c index 88f57e8ac2..1ce3eadf58 100644 --- a/src/util/src/tlog.c +++ b/src/util/src/tlog.c @@ -178,8 +178,6 @@ static void *taosThreadToOpenNewFile(void *param) { char keepName[LOG_FILE_NAME_LEN + 20]; sprintf(keepName, "%s.%d", tsLogObj.logName, tsLogObj.flag); - setThreadName("openNewFile"); - tsLogObj.flag ^= 1; tsLogObj.lines = 0; char name[LOG_FILE_NAME_LEN + 20]; @@ -689,12 +687,9 @@ static void taosWriteLog(SLogBuff *tLogBuff) { static void *taosAsyncOutputLog(void *param) { SLogBuff *tLogBuff = (SLogBuff *)param; - - setThreadName("asyncOutputLog"); + setThreadName("log"); while (1) { - //tsem_wait(&(tLogBuff->buffNotEmpty)); - taosMsleep(writeInterval); // Polling the buffer diff --git a/src/util/src/tsched.c b/src/util/src/tsched.c index 3d3dfd9899..b86ebb38bc 100644 --- a/src/util/src/tsched.c +++ b/src/util/src/tsched.c @@ -122,7 +122,9 @@ void *taosProcessSchedQueue(void *scheduler) { SSchedQueue *pSched = (SSchedQueue *)scheduler; int ret = 0; - setThreadName("schedQ"); + char name[16] = {0}; + snprintf(name, tListLen(name), "%s-taskQ", pSched->label); + setThreadName(name); while (1) { if ((ret = tsem_wait(&pSched->fullSem)) != 0) { diff --git a/src/wal/src/walMgmt.c b/src/wal/src/walMgmt.c index 45f65b2c2f..05324d31ee 100644 --- a/src/wal/src/walMgmt.c +++ b/src/wal/src/walMgmt.c @@ -192,7 +192,7 @@ static void walFsyncAll() { static void *walThreadFunc(void *param) { int stop = 0; - setThreadName("walThrd"); + setThreadName("wal"); while (1) { walUpdateSeq(); walFsyncAll(); From e422c53df76e1cec85945f30b6d5baa0ec261923 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 24 Jul 2021 14:49:55 +0800 Subject: [PATCH 26/81] [td-225] fix a typo. 
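The thread-name updates in the patch above all build the name into a 16-byte buffer (char name[16]) before calling setThreadName(); that size matches the 15-character-plus-NUL limit Linux enforces for thread names, so longer labels are truncated by snprintf() rather than rejected at set time. A minimal sketch of the same "<label>-<role>" pattern, assuming setThreadName() ultimately maps to pthread_setname_np() on Linux — the helper name and arguments below are illustrative, not taken from the source:

    #define _GNU_SOURCE             /* for pthread_setname_np() on glibc */
    #include <pthread.h>
    #include <stdio.h>

    /* Compose "<label>-<role>" and apply it to the calling thread.  The
     * 16-byte buffer mirrors the hunks above and the kernel's 15-char +
     * NUL limit; snprintf() truncates anything longer instead of letting
     * pthread_setname_np() fail with ERANGE. */
    static void demoSetThreadName(const char *label, const char *role) {
        char name[16] = {0};
        snprintf(name, sizeof(name), "%s-%s", label, role);
        pthread_setname_np(pthread_self(), name);
    }

Called as demoSetThreadName("vquery", "tcp") this yields "vquery-tcp", short enough to show up untruncated in tools such as top -H or /proc/<pid>/task/*/comm.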
--- src/util/tests/trefTest.c | 8 -------- src/vnode/src/vnodeWorker.c | 6 +++--- 2 files changed, 3 insertions(+), 11 deletions(-) diff --git a/src/util/tests/trefTest.c b/src/util/tests/trefTest.c index fe3dcab201..e01da070af 100644 --- a/src/util/tests/trefTest.c +++ b/src/util/tests/trefTest.c @@ -35,8 +35,6 @@ void *addRef(void *param) { SRefSpace *pSpace = (SRefSpace *)param; int id; - setThreadName("addRef"); - for (int i=0; i < pSpace->steps; ++i) { printf("a"); id = random() % pSpace->refNum; @@ -54,8 +52,6 @@ void *removeRef(void *param) { SRefSpace *pSpace = (SRefSpace *)param; int id, code; - setThreadName("removeRef"); - for (int i=0; i < pSpace->steps; ++i) { printf("d"); id = random() % pSpace->refNum; @@ -74,8 +70,6 @@ void *acquireRelease(void *param) { SRefSpace *pSpace = (SRefSpace *)param; int id; - setThreadName("acquireRelease"); - for (int i=0; i < pSpace->steps; ++i) { printf("a"); @@ -97,8 +91,6 @@ void myfree(void *p) { void *openRefSpace(void *param) { SRefSpace *pSpace = (SRefSpace *)param; - setThreadName("openRefSpace"); - printf("c"); pSpace->rsetId = taosOpenRef(50, myfree); diff --git a/src/vnode/src/vnodeWorker.c b/src/vnode/src/vnodeWorker.c index e94c99cbea..7fcc393746 100644 --- a/src/vnode/src/vnodeWorker.c +++ b/src/vnode/src/vnodeWorker.c @@ -25,7 +25,7 @@ typedef enum { VNODE_WORKER_ACTION_CLEANUP, - VNODE_WORKER_ACTION_DESTROUY + VNODE_WORKER_ACTION_DESTROY } EVMWorkerAction; typedef struct { @@ -155,7 +155,7 @@ int32_t vnodeCleanupInMWorker(SVnodeObj *pVnode) { int32_t vnodeDestroyInMWorker(SVnodeObj *pVnode) { vTrace("vgId:%d, will destroy in vmworker", pVnode->vgId); - return vnodeWriteIntoMWorker(pVnode, VNODE_WORKER_ACTION_DESTROUY, NULL); + return vnodeWriteIntoMWorker(pVnode, VNODE_WORKER_ACTION_DESTROY, NULL); } static void vnodeFreeMWorkerMsg(SVMWorkerMsg *pMsg) { @@ -179,7 +179,7 @@ static void vnodeProcessMWorkerMsg(SVMWorkerMsg *pMsg) { case VNODE_WORKER_ACTION_CLEANUP: vnodeCleanUp(pMsg->pVnode); break; - case VNODE_WORKER_ACTION_DESTROUY: + case VNODE_WORKER_ACTION_DESTROY: vnodeDestroy(pMsg->pVnode); break; default: From 1073ac5160200136446f18fc9acf185b3107e629 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 24 Jul 2021 14:55:22 +0800 Subject: [PATCH 27/81] [td-5407]: add a local buffer for super table vgroup id list to improve the query performance. 
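This change keeps each super table's vgroup-id list in a short-lived client-side buffer (tscVgroupListBuf, entries kept for roughly five seconds), so repeated queries on the same super table do not have to ask the mnode for the vgroup list every time: on a hit the cached id list is expanded back into the table-meta info, on a miss the table name is queued for the usual mnode fetch and the reply is packed and written back into the buffer. A much-simplified, self-contained sketch of that cache-aside flow follows — the single-slot cache, the fetch stub and all names are invented for illustration and stand in for the taosCache*/tFilePage machinery the real patch uses.

    #include <stdint.h>
    #include <string.h>
    #include <time.h>

    typedef struct {
        char    name[64];        /* super table name (cache key)        */
        time_t  expireAt;        /* entry is ignored after this moment  */
        int32_t numOfVgroups;
        int32_t vgroupIds[64];   /* flattened id list, like a tFilePage */
    } VgroupListEntry;

    static VgroupListEntry gSlot;          /* one slot keeps the sketch short */

    /* Stand-in for the real mnode round trip: fills ids[], returns count. */
    static int32_t fetchVgroupIdsFromMnode(const char *stable, int32_t *ids) {
        (void)stable;
        ids[0] = 2; ids[1] = 3;            /* made-up vgroup ids */
        return 2;
    }

    static int32_t getVgroupIds(const char *stable, int32_t *ids) {
        time_t now = time(NULL);
        if (strcmp(gSlot.name, stable) == 0 && now < gSlot.expireAt) {
            memcpy(ids, gSlot.vgroupIds, gSlot.numOfVgroups * sizeof(int32_t));
            return gSlot.numOfVgroups;     /* hit: no mnode request needed */
        }
        int32_t n = fetchVgroupIdsFromMnode(stable, ids);
        strncpy(gSlot.name, stable, sizeof(gSlot.name) - 1);
        memcpy(gSlot.vgroupIds, ids, n * sizeof(int32_t));
        gSlot.numOfVgroups = n;
        gSlot.expireAt     = now + 5;      /* mirrors the 5-second lifetime */
        return n;
    }

A caller would simply do getVgroupIds("st_meters", ids) before dispatching subqueries; within the five-second window the second and later calls never leave the client. The real change also empties this buffer when the client's cache-reset command runs (see the taosCacheEmpty(tscVgroupListBuf) call added two patches later), so a stale vgroup list can always be flushed explicitly.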
--- src/client/inc/tsclient.h | 6 +- src/client/src/tscLocal.c | 2 +- src/client/src/tscParseLineProtocol.c | 2 +- src/client/src/tscSQLParser.c | 83 +++++++++---- src/client/src/tscServer.c | 163 ++++++++++++++------------ src/client/src/tscStream.c | 2 +- src/client/src/tscSubquery.c | 2 +- src/client/src/tscSystem.c | 76 +++++++----- src/client/src/tscUtil.c | 10 +- src/query/src/qScript.c | 1 + src/util/inc/hash.h | 3 +- src/util/src/hash.c | 4 +- src/vnode/src/vnodeMgmt.c | 2 +- 13 files changed, 210 insertions(+), 146 deletions(-) diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index 83ec28898c..9a627d5cd6 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -151,7 +151,8 @@ typedef struct STableDataBlocks { typedef struct { STableMeta *pTableMeta; - SVgroupsInfo *pVgroupInfo; + SArray *vgroupIdList; +// SVgroupsInfo *pVgroupsInfo; } STableMetaVgroupInfo; typedef struct SInsertStatementParam { @@ -415,7 +416,8 @@ int32_t tscValidateSqlInfo(SSqlObj *pSql, struct SSqlInfo *pInfo); int32_t tsSetBlockInfo(SSubmitBlk *pBlocks, const STableMeta *pTableMeta, int32_t numOfRows); extern int32_t sentinel; extern SHashObj *tscVgroupMap; -extern SHashObj *tscTableMetaInfo; +extern SHashObj *tscTableMetaMap; +extern SCacheObj *tscVgroupListBuf; extern int tscObjRef; extern void *tscTmr; diff --git a/src/client/src/tscLocal.c b/src/client/src/tscLocal.c index d1a325be35..641f62f22b 100644 --- a/src/client/src/tscLocal.c +++ b/src/client/src/tscLocal.c @@ -920,7 +920,7 @@ int tscProcessLocalCmd(SSqlObj *pSql) { } else if (pCmd->command == TSDB_SQL_SHOW_CREATE_DATABASE) { pRes->code = tscProcessShowCreateDatabase(pSql); } else if (pCmd->command == TSDB_SQL_RESET_CACHE) { - taosHashClear(tscTableMetaInfo); + taosHashClear(tscTableMetaMap); pRes->code = TSDB_CODE_SUCCESS; } else if (pCmd->command == TSDB_SQL_SERV_VERSION) { pRes->code = tscProcessServerVer(pSql); diff --git a/src/client/src/tscParseLineProtocol.c b/src/client/src/tscParseLineProtocol.c index cccc81274d..0386850f63 100644 --- a/src/client/src/tscParseLineProtocol.c +++ b/src/client/src/tscParseLineProtocol.c @@ -444,7 +444,7 @@ int32_t loadTableMeta(TAOS* taos, char* tableName, SSmlSTableSchema* schema) { uint32_t size = tscGetTableMetaMaxSize(); STableMeta* tableMeta = calloc(1, size); - taosHashGetClone(tscTableMetaInfo, fullTableName, strlen(fullTableName), NULL, tableMeta, -1); + taosHashGetClone(tscTableMetaMap, fullTableName, strlen(fullTableName), NULL, tableMeta); tstrncpy(schema->sTableName, tableName, strlen(tableName)+1); schema->precision = tableMeta->tableInfo.precision; diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index c0627f4c31..36f252a9ba 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -80,8 +80,8 @@ static void getColumnName(tSqlExprItem* pItem, char* resultFieldName, char* rawN static int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t colIndex, tSqlExprItem* pItem, bool finalResult, SUdfInfo* pUdfInfo); -static int32_t insertResultField(SQueryInfo* pQueryInfo, int32_t outputIndex, SColumnList* pIdList, int16_t bytes, - int8_t type, char* fieldName, SExprInfo* pSqlExpr); +static int32_t insertResultField(SQueryInfo* pQueryInfo, int32_t outputIndex, SColumnList* pColList, int16_t bytes, + int8_t type, char* fieldName, SExprInfo* pSqlExpr); static uint8_t convertRelationalOperator(SStrToken *pToken); @@ -8113,6 +8113,7 @@ int32_t loadAllTableMeta(SSqlObj* pSql, struct SSqlInfo* pInfo) { 
return TSDB_CODE_TSC_OUT_OF_MEMORY; } } + pTableMeta = calloc(1, maxSize); plist = taosArrayInit(4, POINTER_BYTES); @@ -8128,9 +8129,16 @@ int32_t loadAllTableMeta(SSqlObj* pSql, struct SSqlInfo* pInfo) { size_t len = strlen(name); memset(pTableMeta, 0, maxSize); - taosHashGetClone(tscTableMetaInfo, name, len, NULL, pTableMeta, -1); + taosHashGetClone(tscTableMetaMap, name, len, NULL, pTableMeta); if (pTableMeta->id.uid > 0) { + tscDebug("0x%"PRIx64" retrieve table meta %s from local buf", pSql->self, name); + + // avoid mem leak, may should update pTableMeta + const char* pTableName = tNameGetTableName(pname); + size_t nameLen = strlen(pTableName); + + void* pVgroupIdList = NULL; if (pTableMeta->tableType == TSDB_CHILD_TABLE) { code = tscCreateTableMetaFromSTableMeta(pTableMeta, name, pSql->pBuf); @@ -8142,23 +8150,33 @@ int32_t loadAllTableMeta(SSqlObj* pSql, struct SSqlInfo* pInfo) { } } else if (pTableMeta->tableType == TSDB_SUPER_TABLE) { // the vgroup list of super table is not kept in local buffer, so here need retrieve it from the mnode each time - char* t = strdup(name); - taosArrayPush(pVgroupList, &t); + void* pv = taosCacheAcquireByKey(tscVgroupListBuf, pTableName, nameLen); + if (pv == NULL) { + char* t = strdup(name); + taosArrayPush(pVgroupList, &t); + tscDebug("0x%"PRIx64" failed to retrieve stable %s vgroup id list in cache, try fetch from mnode", pSql->self, pTableName); + } else { + tFilePage* pdata = (tFilePage*) pv; + pVgroupIdList = taosArrayInit(pdata->num, sizeof(int32_t)); + if (pVgroupIdList == NULL) { + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + + taosArrayAddBatch(pVgroupIdList, pdata->data, pdata->num); + taosCacheRelease(tscVgroupListBuf, &pv, false); + } } - //STableMeta* pMeta = tscTableMetaDup(pTableMeta); - //STableMetaVgroupInfo p = { .pTableMeta = pMeta }; - - //const char* px = tNameGetTableName(pname); - //taosHashPut(pCmd->pTableMetaMap, px, strlen(px), &p, sizeof(STableMetaVgroupInfo)); - // avoid mem leak, may should update pTableMeta - const char* px = tNameGetTableName(pname); - if (taosHashGet(pCmd->pTableMetaMap, px, strlen(px)) == NULL) { + if (taosHashGet(pCmd->pTableMetaMap, pTableName, nameLen) == NULL) { STableMeta* pMeta = tscTableMetaDup(pTableMeta); - STableMetaVgroupInfo p = { .pTableMeta = pMeta, .pVgroupInfo = NULL}; - taosHashPut(pCmd->pTableMetaMap, px, strlen(px), &p, sizeof(STableMetaVgroupInfo)); + STableMetaVgroupInfo tvi = { .pTableMeta = pMeta, .vgroupIdList = pVgroupIdList}; + taosHashPut(pCmd->pTableMetaMap, pTableName, nameLen, &tvi, sizeof(STableMetaVgroupInfo)); } - } else { // add to the retrieve table meta array list. + } else { + // Add to the retrieve table meta array list. + // If the tableMeta is missing, the cached vgroup list for the corresponding super table will be ignored. 
+ tscDebug("0x%"PRIx64" failed to retrieve table meta %s from local buf", pSql->self, name); + char* t = strdup(name); taosArrayPush(plist, &t); } @@ -8278,16 +8296,37 @@ static int32_t doLoadAllTableMeta(SSqlObj* pSql, SQueryInfo* pQueryInfo, SSqlNod pTableMetaInfo->pTableMeta = tscTableMetaDup(p->pTableMeta); assert(pTableMetaInfo->pTableMeta != NULL); - if (p->pVgroupInfo != NULL) { - pTableMetaInfo->vgroupList = tscVgroupsInfoDup(p->pVgroupInfo); - } + if (p->vgroupIdList != NULL) { + size_t s = taosArrayGetSize(p->vgroupIdList); - if (code != TSDB_CODE_SUCCESS) { - return code; + size_t vgroupsz = sizeof(SVgroupInfo) * s + sizeof(SVgroupsInfo); + pTableMetaInfo->vgroupList = calloc(1, vgroupsz); + if (pTableMetaInfo->vgroupList == NULL) { + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + + pTableMetaInfo->vgroupList->numOfVgroups = (int32_t) s; + for(int32_t j = 0; j < s; ++j) { + int32_t* id = taosArrayGet(p->vgroupIdList, j); + + // check if current buffer contains the vgroup info. If not, add it + SNewVgroupInfo existVgroupInfo = {.inUse = -1,}; + taosHashGetClone(tscVgroupMap, id, sizeof(*id), NULL, &existVgroupInfo); + + assert(existVgroupInfo.inUse >= 0); + SVgroupInfo *pVgroup = &pTableMetaInfo->vgroupList->vgroups[j]; + + pVgroup->numOfEps = existVgroupInfo.numOfEps; + pVgroup->vgId = existVgroupInfo.vgId; + for (int32_t k = 0; k < existVgroupInfo.numOfEps; ++k) { + pVgroup->epAddr[k].port = existVgroupInfo.ep[k].port; + pVgroup->epAddr[k].fqdn = strndup(existVgroupInfo.ep[k].fqdn, TSDB_FQDN_LEN); + } + } } } - return TSDB_CODE_SUCCESS; + return code; } static STableMeta* extractTempTableMetaFromSubquery(SQueryInfo* pUpstream) { diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index e975dd7b06..63bb9ee214 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -157,7 +157,7 @@ static void tscUpdateVgroupInfo(SSqlObj *pSql, SRpcEpSet *pEpSet) { assert(vgId > 0); SNewVgroupInfo vgroupInfo = {.vgId = -1}; - taosHashGetClone(tscVgroupMap, &vgId, sizeof(vgId), NULL, &vgroupInfo, sizeof(SNewVgroupInfo)); + taosHashGetClone(tscVgroupMap, &vgId, sizeof(vgId), NULL, &vgroupInfo); assert(vgroupInfo.numOfEps > 0 && vgroupInfo.vgId > 0); tscDebug("before: Endpoint in use:%d, numOfEps:%d", vgroupInfo.inUse, vgroupInfo.numOfEps); @@ -344,6 +344,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) { rpcFreeCont(rpcMsg->pCont); return; } + assert(pSql->self == handle); STscObj *pObj = pSql->pTscObj; @@ -614,7 +615,7 @@ int tscBuildSubmitMsg(SSqlObj *pSql, SSqlInfo *pInfo) { pSql->cmd.msgType = TSDB_MSG_TYPE_SUBMIT; SNewVgroupInfo vgroupInfo = {0}; - taosHashGetClone(tscVgroupMap, &pTableMeta->vgId, sizeof(pTableMeta->vgId), NULL, &vgroupInfo, sizeof(SNewVgroupInfo)); + taosHashGetClone(tscVgroupMap, &pTableMeta->vgId, sizeof(pTableMeta->vgId), NULL, &vgroupInfo); tscDumpEpSetFromVgroupInfo(&pSql->epSet, &vgroupInfo); tscDebug("0x%"PRIx64" submit msg built, numberOfEP:%d", pSql->self, pSql->epSet.numOfEps); @@ -687,7 +688,7 @@ static char *doSerializeTableInfo(SQueryTableMsg *pQueryMsg, SSqlObj *pSql, STab vgId = pTableMeta->vgId; SNewVgroupInfo vgroupInfo = {0}; - taosHashGetClone(tscVgroupMap, &pTableMeta->vgId, sizeof(pTableMeta->vgId), NULL, &vgroupInfo, sizeof(SNewVgroupInfo)); + taosHashGetClone(tscVgroupMap, &pTableMeta->vgId, sizeof(pTableMeta->vgId), NULL, &vgroupInfo); tscDumpEpSetFromVgroupInfo(&pSql->epSet, &vgroupInfo); } @@ -1582,7 +1583,7 @@ int tscBuildUpdateTagMsg(SSqlObj* pSql, SSqlInfo *pInfo) { STableMeta *pTableMeta = 
tscGetMetaInfo(pQueryInfo, 0)->pTableMeta; SNewVgroupInfo vgroupInfo = {.vgId = -1}; - taosHashGetClone(tscVgroupMap, &pTableMeta->vgId, sizeof(pTableMeta->vgId), NULL, &vgroupInfo, sizeof(SNewVgroupInfo)); + taosHashGetClone(tscVgroupMap, &pTableMeta->vgId, sizeof(pTableMeta->vgId), NULL, &vgroupInfo); assert(vgroupInfo.vgId > 0); tscDumpEpSetFromVgroupInfo(&pSql->epSet, &vgroupInfo); @@ -1809,34 +1810,6 @@ int tscBuildConnectMsg(SSqlObj *pSql, SSqlInfo *pInfo) { return TSDB_CODE_SUCCESS; } -int tscBuildTableMetaMsg(SSqlObj *pSql, SSqlInfo *pInfo) { -#if 0 - SSqlCmd *pCmd = &pSql->cmd; - SQueryInfo *pQueryInfo = tscGetQueryInfo(&pSql->cmd); - - STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); - STableInfoMsg *pInfoMsg = (STableInfoMsg *)pCmd->payload; - - int32_t code = tNameExtractFullName(&pTableMetaInfo->name, pInfoMsg->tableFname); - if (code != TSDB_CODE_SUCCESS) { - return TSDB_CODE_TSC_INVALID_OPERATION; - } - - pInfoMsg->createFlag = htons(pSql->cmd.autoCreated ? 1 : 0); - - char *pMsg = (char *)pInfoMsg + sizeof(STableInfoMsg); - - if (pCmd->autoCreated && pCmd->tagData.dataLen != 0) { - pMsg = serializeTagData(&pCmd->tagData, pMsg); - } - - pCmd->payloadLen = (int32_t)(pMsg - (char*)pInfoMsg); - pCmd->msgType = TSDB_MSG_TYPE_CM_TABLE_META; -#endif - - return TSDB_CODE_SUCCESS; -} - /** * multi table meta req pkg format: * |SMultiTableInfoMsg | tableId0 | tableId1 | tableId2 | ...... @@ -1996,20 +1969,17 @@ static int32_t tableMetaMsgConvert(STableMetaMsg* pMetaMsg) { } // update the vgroupInfo if needed -static void doUpdateVgroupInfo(STableMeta *pTableMeta, SVgroupMsg *pVgroupMsg) { - if (pTableMeta->vgId > 0) { - int32_t vgId = pTableMeta->vgId; - assert(pTableMeta->tableType != TSDB_SUPER_TABLE); +static void doUpdateVgroupInfo(int32_t vgId, SVgroupMsg *pVgroupMsg) { + assert(vgId > 0); - SNewVgroupInfo vgroupInfo = {.inUse = -1}; - taosHashGetClone(tscVgroupMap, &vgId, sizeof(vgId), NULL, &vgroupInfo, sizeof(SNewVgroupInfo)); + SNewVgroupInfo vgroupInfo = {.inUse = -1}; + taosHashGetClone(tscVgroupMap, &vgId, sizeof(vgId), NULL, &vgroupInfo); - // vgroup info exists, compare with it - if (((vgroupInfo.inUse >= 0) && !vgroupInfoIdentical(&vgroupInfo, pVgroupMsg)) || (vgroupInfo.inUse < 0)) { - vgroupInfo = createNewVgroupInfo(pVgroupMsg); - taosHashPut(tscVgroupMap, &vgId, sizeof(vgId), &vgroupInfo, sizeof(vgroupInfo)); - tscDebug("add new VgroupInfo, vgId:%d, total cached:%d", vgId, (int32_t) taosHashGetSize(tscVgroupMap)); - } + // vgroup info exists, compare with it + if (((vgroupInfo.inUse >= 0) && !vgroupInfoIdentical(&vgroupInfo, pVgroupMsg)) || (vgroupInfo.inUse < 0)) { + vgroupInfo = createNewVgroupInfo(pVgroupMsg); + taosHashPut(tscVgroupMap, &vgId, sizeof(vgId), &vgroupInfo, sizeof(vgroupInfo)); + tscDebug("add/update new VgroupInfo, vgId:%d, total cached:%d", vgId, (int32_t) taosHashGetSize(tscVgroupMap)); } } @@ -2022,18 +1992,18 @@ static void doAddTableMetaToLocalBuf(STableMeta* pTableMeta, STableMetaMsg* pMet if (updateSTable) { STableMeta* pSupTableMeta = createSuperTableMeta(pMetaMsg); uint32_t size = tscGetTableMetaSize(pSupTableMeta); - int32_t code = taosHashPut(tscTableMetaInfo, pTableMeta->sTableName, len, pSupTableMeta, size); + int32_t code = taosHashPut(tscTableMetaMap, pTableMeta->sTableName, len, pSupTableMeta, size); assert(code == TSDB_CODE_SUCCESS); tfree(pSupTableMeta); } CChildTableMeta* cMeta = tscCreateChildMeta(pTableMeta); - taosHashPut(tscTableMetaInfo, pMetaMsg->tableFname, strlen(pMetaMsg->tableFname), cMeta, 
sizeof(CChildTableMeta)); + taosHashPut(tscTableMetaMap, pMetaMsg->tableFname, strlen(pMetaMsg->tableFname), cMeta, sizeof(CChildTableMeta)); tfree(cMeta); } else { uint32_t s = tscGetTableMetaSize(pTableMeta); - taosHashPut(tscTableMetaInfo, pMetaMsg->tableFname, strlen(pMetaMsg->tableFname), pTableMeta, s); + taosHashPut(tscTableMetaMap, pMetaMsg->tableFname, strlen(pMetaMsg->tableFname), pTableMeta, s); } } @@ -2058,7 +2028,9 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) { assert(strncmp(pMetaMsg->tableFname, name, tListLen(pMetaMsg->tableFname)) == 0); doAddTableMetaToLocalBuf(pTableMeta, pMetaMsg, true); - doUpdateVgroupInfo(pTableMeta, &pMetaMsg->vgroup); + if (pTableMeta->tableType != TSDB_SUPER_TABLE) { + doUpdateVgroupInfo(pTableMeta->vgId, &pMetaMsg->vgroup); + } tscDebug("0x%"PRIx64" recv table meta, uid:%" PRIu64 ", tid:%d, name:%s, numOfCols:%d, numOfTags:%d", pSql->self, pTableMeta->id.uid, pTableMeta->id.tid, tNameGetTableName(&pTableMetaInfo->name), pTableMeta->tableInfo.numOfColumns, @@ -2068,6 +2040,37 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) { return TSDB_CODE_SUCCESS; } +static SArray* createVgroupIdListFromMsg(char* pMsg, SHashObj* pSet, char* name, int32_t* size, uint64_t id) { + SVgroupsMsg *pVgroupMsg = (SVgroupsMsg *)pMsg; + + pVgroupMsg->numOfVgroups = htonl(pVgroupMsg->numOfVgroups); + *size = (int32_t)(sizeof(SVgroupMsg) * pVgroupMsg->numOfVgroups + sizeof(SVgroupsMsg)); + + SArray* vgroupIdList = taosArrayInit(pVgroupMsg->numOfVgroups, sizeof(int32_t)); + + if (pVgroupMsg->numOfVgroups <= 0) { + tscDebug("0x%" PRIx64 " empty vgroup id list, no corresponding tables for stable:%s", id, name); + } else { + // just init, no need to lock + for (int32_t j = 0; j < pVgroupMsg->numOfVgroups; ++j) { + SVgroupMsg *vmsg = &pVgroupMsg->vgroups[j]; + vmsg->vgId = htonl(vmsg->vgId); + for (int32_t k = 0; k < vmsg->numOfEps; ++k) { + vmsg->epAddr[k].port = htons(vmsg->epAddr[k].port); + } + + taosArrayPush(vgroupIdList, &vmsg->vgId); + + if (taosHashGet(pSet, &vmsg->vgId, sizeof(vmsg->vgId)) == NULL) { + taosHashPut(pSet, &vmsg->vgId, sizeof(vmsg->vgId), "", 0); + doUpdateVgroupInfo(vmsg->vgId, vmsg); + } + } + } + + return vgroupIdList; +} + static SVgroupsInfo* createVgroupInfoFromMsg(char* pMsg, int32_t* size, uint64_t id) { SVgroupsMsg *pVgroupMsg = (SVgroupsMsg *)pMsg; pVgroupMsg->numOfVgroups = htonl(pVgroupMsg->numOfVgroups); @@ -2092,24 +2095,14 @@ static SVgroupsInfo* createVgroupInfoFromMsg(char* pMsg, int32_t* size, uint64_t vmsg->epAddr[k].port = htons(vmsg->epAddr[k].port); } - SNewVgroupInfo newVi = createNewVgroupInfo(vmsg); - pVgroup->numOfEps = newVi.numOfEps; - pVgroup->vgId = newVi.vgId; + pVgroup->numOfEps = vmsg->numOfEps; + pVgroup->vgId = vmsg->vgId; for (int32_t k = 0; k < vmsg->numOfEps; ++k) { - pVgroup->epAddr[k].port = newVi.ep[k].port; - pVgroup->epAddr[k].fqdn = strndup(newVi.ep[k].fqdn, TSDB_FQDN_LEN); + pVgroup->epAddr[k].port = vmsg->epAddr[k].port; + pVgroup->epAddr[k].fqdn = strndup(vmsg->epAddr[k].fqdn, TSDB_FQDN_LEN); } - // check if current buffer contains the vgroup info. 
- // If not, add it - SNewVgroupInfo existVgroupInfo = {.inUse = -1}; - taosHashGetClone(tscVgroupMap, &newVi.vgId, sizeof(newVi.vgId), NULL, &existVgroupInfo, sizeof(SNewVgroupInfo)); - - if (((existVgroupInfo.inUse >= 0) && !vgroupInfoIdentical(&existVgroupInfo, vmsg)) || - (existVgroupInfo.inUse < 0)) { // vgroup info exists, compare with it - taosHashPut(tscVgroupMap, &newVi.vgId, sizeof(newVi.vgId), &newVi, sizeof(newVi)); - tscDebug("0x%" PRIx64 " add new VgroupInfo, vgId:%d, total cached:%d", id, newVi.vgId, (int32_t)taosHashGetSize(tscVgroupMap)); - } + doUpdateVgroupInfo(pVgroup->vgId, vmsg); } } @@ -2187,6 +2180,8 @@ int tscProcessMultiTableMetaRsp(SSqlObj *pSql) { char* buf = NULL; char* pMsg = pMultiMeta->meta; + + // decompresss the message payload if (pMultiMeta->compressed) { buf = malloc(pMultiMeta->rawLen - sizeof(SMultiTableMeta)); int32_t len = tsDecompressString(pMultiMeta->meta, pMultiMeta->contLen - sizeof(SMultiTableMeta), 1, @@ -2245,7 +2240,7 @@ int tscProcessMultiTableMetaRsp(SSqlObj *pSql) { // for each vgroup, only update the information once. int64_t vgId = pMetaMsg->vgroup.vgId; if (pTableMeta->tableType != TSDB_SUPER_TABLE && taosHashGet(pSet, &vgId, sizeof(vgId)) == NULL) { - doUpdateVgroupInfo(pTableMeta, &pMetaMsg->vgroup); + doUpdateVgroupInfo(vgId, &pMetaMsg->vgroup); taosHashPut(pSet, &vgId, sizeof(vgId), "", 0); } @@ -2263,11 +2258,26 @@ int tscProcessMultiTableMetaRsp(SSqlObj *pSql) { assert(p != NULL); int32_t size = 0; - if (p->pVgroupInfo!= NULL) { - tscVgroupInfoClear(p->pVgroupInfo); - //tfree(p->pTableMeta); + if (p->vgroupIdList!= NULL) { + taosArrayDestroy(p->vgroupIdList); } - p->pVgroupInfo = createVgroupInfoFromMsg(pMsg, &size, pSql->self); + + char tableName[TSDB_TABLE_FNAME_LEN] = {0}; + tstrncpy(tableName, name, TSDB_TABLE_NAME_LEN); + p->vgroupIdList = createVgroupIdListFromMsg(pMsg, pSet, tableName, &size, pSql->self); + + int32_t numOfVgId = taosArrayGetSize(p->vgroupIdList); + int32_t s = sizeof(tFilePage) + numOfVgId * sizeof(int32_t); + + tFilePage* idList = calloc(1, s); + idList->num = numOfVgId; + memcpy(idList->data, TARRAY_GET_START(p->vgroupIdList), numOfVgId * sizeof(int32_t)); + + void* idListInst = taosCachePut(tscVgroupListBuf, tableName, strlen(tableName), idList, s, 5000); + taosCacheRelease(tscVgroupListBuf, (void*) &idListInst, false); + + tfree(idList); + pMsg += size; } @@ -2503,7 +2513,7 @@ int tscProcessDropDbRsp(SSqlObj *pSql) { //TODO LOCK DB WHEN MODIFY IT //pSql->pTscObj->db[0] = 0; - taosHashClear(tscTableMetaInfo); + taosHashClear(tscTableMetaMap); return 0; } @@ -2514,8 +2524,8 @@ int tscProcessDropTableRsp(SSqlObj *pSql) { char name[TSDB_TABLE_FNAME_LEN] = {0}; tNameExtractFullName(&pTableMetaInfo->name, name); - taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN)); - tscDebug("0x%"PRIx64" remove table meta after drop table:%s, numOfRemain:%d", pSql->self, name, (int32_t) taosHashGetSize(tscTableMetaInfo)); + taosHashRemove(tscTableMetaMap, name, strnlen(name, TSDB_TABLE_FNAME_LEN)); + tscDebug("0x%"PRIx64" remove table meta after drop table:%s, numOfRemain:%d", pSql->self, name, (int32_t) taosHashGetSize(tscTableMetaMap)); tfree(pTableMetaInfo->pTableMeta); return 0; @@ -2530,11 +2540,11 @@ int tscProcessAlterTableMsgRsp(SSqlObj *pSql) { tscDebug("0x%"PRIx64" remove tableMeta in hashMap after alter-table: %s", pSql->self, name); bool isSuperTable = UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo); - taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN)); + 
taosHashRemove(tscTableMetaMap, name, strnlen(name, TSDB_TABLE_FNAME_LEN)); tfree(pTableMetaInfo->pTableMeta); if (isSuperTable) { // if it is a super table, iterate the hashTable and remove all the childTableMeta - taosHashClear(tscTableMetaInfo); + taosHashClear(tscTableMetaMap); } return 0; @@ -2801,7 +2811,7 @@ int32_t tscGetTableMetaImpl(SSqlObj* pSql, STableMetaInfo *pTableMetaInfo, bool tNameExtractFullName(&pTableMetaInfo->name, name); size_t len = strlen(name); - taosHashGetClone(tscTableMetaInfo, name, len, NULL, pTableMetaInfo->pTableMeta, -1); + taosHashGetClone(tscTableMetaMap, name, len, NULL, pTableMetaInfo->pTableMeta); // TODO resize the tableMeta assert(size < 80 * TSDB_MAX_COLUMNS); @@ -2914,7 +2924,7 @@ int tscRenewTableMeta(SSqlObj *pSql, int32_t tableIndex) { // remove stored tableMeta info in hash table size_t len = strlen(name); - taosHashRemove(tscTableMetaInfo, name, len); + taosHashRemove(tscTableMetaMap, name, len); return getTableMetaFromMnode(pSql, pTableMetaInfo, false); } @@ -2966,8 +2976,6 @@ int tscGetSTableVgroupInfo(SSqlObj *pSql, SQueryInfo* pQueryInfo) { tscDebug("0x%"PRIx64" svgroupRid from %" PRId64 " to %" PRId64 , pSql->self, pSql->svgroupRid, pNew->self); pSql->svgroupRid = pNew->self; - - tscDebug("0x%"PRIx64" new sqlObj:%p to get vgroupInfo, numOfTables:%d", pSql->self, pNew, pNewQueryInfo->numOfTables); pNew->fp = tscTableMetaCallBack; @@ -3010,7 +3018,6 @@ void tscInitMsgsFp() { tscBuildMsg[TSDB_SQL_CONNECT] = tscBuildConnectMsg; tscBuildMsg[TSDB_SQL_USE_DB] = tscBuildUseDbMsg; -// tscBuildMsg[TSDB_SQL_META] = tscBuildTableMetaMsg; tscBuildMsg[TSDB_SQL_STABLEVGROUP] = tscBuildSTableVgroupMsg; tscBuildMsg[TSDB_SQL_RETRIEVE_FUNC] = tscBuildRetrieveFuncMsg; diff --git a/src/client/src/tscStream.c b/src/client/src/tscStream.c index da5bdf669f..502ef22d4b 100644 --- a/src/client/src/tscStream.c +++ b/src/client/src/tscStream.c @@ -206,7 +206,7 @@ static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOf char name[TSDB_TABLE_FNAME_LEN] = {0}; tNameExtractFullName(&pTableMetaInfo->name, name); - taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN)); + taosHashRemove(tscTableMetaMap, name, strnlen(name, TSDB_TABLE_FNAME_LEN)); tfree(pTableMetaInfo->pTableMeta); diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index b72bd78b1b..b98ffd7638 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -3125,7 +3125,7 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows) for(int32_t i = 0; i < pParentObj->cmd.insertParam.numOfTables; ++i) { char name[TSDB_TABLE_FNAME_LEN] = {0}; tNameExtractFullName(pParentObj->cmd.insertParam.pTableNameList[i], name); - taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN)); + taosHashRemove(tscTableMetaMap, name, strnlen(name, TSDB_TABLE_FNAME_LEN)); } pParentObj->res.code = TSDB_CODE_SUCCESS; diff --git a/src/client/src/tscSystem.c b/src/client/src/tscSystem.c index 7b8f24a093..c04765b065 100644 --- a/src/client/src/tscSystem.c +++ b/src/client/src/tscSystem.c @@ -19,15 +19,12 @@ #include "trpc.h" #include "tnote.h" #include "ttimer.h" -#include "tutil.h" #include "tsched.h" #include "tscLog.h" -#include "tscUtil.h" #include "tsclient.h" #include "tglobal.h" #include "tconfig.h" #include "ttimezone.h" -#include "tlocale.h" #include "qScript.h" // global, not configurable @@ -36,8 +33,10 @@ int32_t sentinel = TSC_VAR_NOT_RELEASE; -SHashObj *tscVgroupMap; // hash map to keep the global 
vgroup info -SHashObj *tscTableMetaInfo; // table meta info +SHashObj *tscVgroupMap; // hash map to keep the vgroup info from mnode +SHashObj *tscTableMetaMap; // table meta info buffer +SCacheObj *tscVgroupListBuf; // super table vgroup list information, only survives 5 seconds for each super table vgroup list + int32_t tscObjRef = -1; void *tscTmr; void *tscQhandle; @@ -45,17 +44,21 @@ int32_t tscRefId = -1; int32_t tscNumOfObj = 0; // number of sqlObj in current process. static void *tscCheckDiskUsageTmr; void *tscRpcCache; // cache to keep rpc obj -int32_t tscNumOfThreads = 1; // num of rpc threads -char tscLogFileName[12] = "taoslog"; -int tscLogFileNum = 10; -static pthread_mutex_t rpcObjMutex; // mutex to protect open the rpc obj concurrently -static pthread_once_t tscinit = PTHREAD_ONCE_INIT; +int32_t tscNumOfThreads = 1; // num of rpc threads +char tscLogFileName[12] = "taoslog"; +int tscLogFileNum = 10; + +static pthread_mutex_t rpcObjMutex; // mutex to protect open the rpc obj concurrently +static pthread_once_t tscinit = PTHREAD_ONCE_INIT; + +// pthread_once can not return result code, so result code is set to a global variable. static volatile int tscInitRes = 0; void tscCheckDiskUsage(void *UNUSED_PARAM(para), void *UNUSED_PARAM(param)) { taosGetDisk(); taosTmrReset(tscCheckDiskUsage, 20 * 1000, NULL, tscTmr, &tscCheckDiskUsageTmr); } + void tscFreeRpcObj(void *param) { assert(param); SRpcObj *pRpcObj = (SRpcObj *)(param); @@ -67,10 +70,9 @@ void tscReleaseRpc(void *param) { if (param == NULL) { return; } - pthread_mutex_lock(&rpcObjMutex); - taosCacheRelease(tscRpcCache, (void *)¶m, false); - pthread_mutex_unlock(&rpcObjMutex); -} + + taosCacheRelease(tscRpcCache, (void *)¶m, false); +} int32_t tscAcquireRpc(const char *key, const char *user, const char *secretEncrypt, void **ppRpcObj) { pthread_mutex_lock(&rpcObjMutex); @@ -80,7 +82,7 @@ int32_t tscAcquireRpc(const char *key, const char *user, const char *secretEncry *ppRpcObj = pRpcObj; pthread_mutex_unlock(&rpcObjMutex); return 0; - } + } SRpcInit rpcInit; memset(&rpcInit, 0, sizeof(rpcInit)); @@ -104,7 +106,8 @@ int32_t tscAcquireRpc(const char *key, const char *user, const char *secretEncry pthread_mutex_unlock(&rpcObjMutex); tscError("failed to init connection to TDengine"); return -1; - } + } + pRpcObj = taosCachePut(tscRpcCache, rpcObj.key, strlen(rpcObj.key), &rpcObj, sizeof(rpcObj), 1000*5); if (pRpcObj == NULL) { rpcClose(rpcObj.pDnodeConn); @@ -118,7 +121,7 @@ int32_t tscAcquireRpc(const char *key, const char *user, const char *secretEncry } void taos_init_imp(void) { - char temp[128] = {0}; + char temp[128] = {0}; errno = TSDB_CODE_SUCCESS; srand(taosGetTimestampSec()); @@ -151,36 +154,41 @@ void taos_init_imp(void) { rpcInit(); scriptEnvPoolInit(); + tscDebug("starting to initialize TAOS client ..."); tscDebug("Local End Point is:%s", tsLocalEp); } taosSetCoreDump(); tscInitMsgsFp(); - int queueSize = tsMaxConnections*2; double factor = (tscEmbedded == 0)? 
2.0:4.0; tscNumOfThreads = (int)(tsNumOfCores * tsNumOfThreadsPerCore / factor); if (tscNumOfThreads < 2) { tscNumOfThreads = 2; } + + int32_t queueSize = tsMaxConnections*2; tscQhandle = taosInitScheduler(queueSize, tscNumOfThreads, "tsc"); if (NULL == tscQhandle) { - tscError("failed to init scheduler"); + tscError("failed to init task queue"); tscInitRes = -1; return; } + tscDebug("client task queue is initialized, numOfWorkers: %d", tscNumOfThreads); + tscTmr = taosTmrInit(tsMaxConnections * 2, 200, 60000, "TSC"); if(0 == tscEmbedded){ taosTmrReset(tscCheckDiskUsage, 20 * 1000, NULL, tscTmr, &tscCheckDiskUsageTmr); } - if (tscTableMetaInfo == NULL) { - tscObjRef = taosOpenRef(40960, tscFreeRegisteredSqlObj); - tscVgroupMap = taosHashInit(256, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_ENTRY_LOCK); - tscTableMetaInfo = taosHashInit(1024, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_ENTRY_LOCK); - tscDebug("TableMeta:%p", tscTableMetaInfo); + if (tscTableMetaMap == NULL) { + tscObjRef = taosOpenRef(40960, tscFreeRegisteredSqlObj); + tscVgroupMap = taosHashInit(256, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_ENTRY_LOCK); + tscTableMetaMap = taosHashInit(1024, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_ENTRY_LOCK); + tscVgroupListBuf = taosCacheInit(TSDB_DATA_TYPE_BINARY, 5, false, NULL, "stable-vgroup-list"); + tscDebug("TableMeta:%p, vgroup:%p is initialized", tscTableMetaMap, tscVgroupMap); } int refreshTime = 5; @@ -189,14 +197,17 @@ void taos_init_imp(void) { tscRefId = taosOpenRef(200, tscCloseTscObj); - // in other language APIs, taos_cleanup is not available yet. - // So, to make sure taos_cleanup will be invoked to clean up the allocated - // resource to suppress the valgrind warning. + // In the APIs of other program language, taos_cleanup is not available yet. + // So, to make sure taos_cleanup will be invoked to clean up the allocated resource to suppress the valgrind warning. atexit(taos_cleanup); + tscDebug("client is initialized successfully"); } -int taos_init() { pthread_once(&tscinit, taos_init_imp); return tscInitRes;} +int taos_init() { + pthread_once(&tscinit, taos_init_imp); + return tscInitRes; +} // this function may be called by user or system, or by both simultaneously. 
void taos_cleanup(void) { @@ -205,11 +216,13 @@ void taos_cleanup(void) { if (atomic_val_compare_exchange_32(&sentinel, TSC_VAR_NOT_RELEASE, TSC_VAR_RELEASED) != TSC_VAR_NOT_RELEASE) { return; } + if (tscEmbedded == 0) { scriptEnvPoolCleanup(); } - taosHashCleanup(tscTableMetaInfo); - tscTableMetaInfo = NULL; + + taosHashCleanup(tscTableMetaMap); + tscTableMetaMap = NULL; taosHashCleanup(tscVgroupMap); tscVgroupMap = NULL; @@ -236,6 +249,9 @@ void taos_cleanup(void) { pthread_mutex_destroy(&rpcObjMutex); } + taosCacheCleanup(tscVgroupListBuf); + tscVgroupListBuf = NULL; + if (tscEmbedded == 0) { rpcCleanup(); taosCloseLog(); diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 0d69fe173f..4454844ea0 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -1388,7 +1388,7 @@ void tscResetSqlCmd(SSqlCmd* pCmd, bool clearCachedMeta) { if (pCmd->pTableMetaMap != NULL) { STableMetaVgroupInfo* p = taosHashIterate(pCmd->pTableMetaMap, NULL); while (p) { - tscVgroupInfoClear(p->pVgroupInfo); + taosArrayDestroy(p->vgroupIdList); tfree(p->pTableMeta); p = taosHashIterate(pCmd->pTableMetaMap, p); } @@ -1522,7 +1522,7 @@ void tscDestroyDataBlock(STableDataBlocks* pDataBlock, bool removeMeta) { char name[TSDB_TABLE_FNAME_LEN] = {0}; tNameExtractFullName(&pDataBlock->tableName, name); - taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN)); + taosHashRemove(tscTableMetaMap, name, strnlen(name, TSDB_TABLE_FNAME_LEN)); } if (!pDataBlock->cloned) { @@ -3365,7 +3365,7 @@ void clearAllTableMetaInfo(SQueryInfo* pQueryInfo, bool removeMeta) { if (removeMeta) { char name[TSDB_TABLE_FNAME_LEN] = {0}; tNameExtractFullName(&pTableMetaInfo->name, name); - taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN)); + taosHashRemove(tscTableMetaMap, name, strnlen(name, TSDB_TABLE_FNAME_LEN)); } tscFreeVgroupTableInfo(pTableMetaInfo->pVgroupTables); @@ -4360,7 +4360,7 @@ int32_t tscCreateTableMetaFromSTableMeta(STableMeta* pChild, const char* name, v assert(pChild != NULL && buf != NULL); STableMeta* p = buf; - taosHashGetClone(tscTableMetaInfo, pChild->sTableName, strnlen(pChild->sTableName, TSDB_TABLE_FNAME_LEN), NULL, p, -1); + taosHashGetClone(tscTableMetaMap, pChild->sTableName, strnlen(pChild->sTableName, TSDB_TABLE_FNAME_LEN), NULL, p); // tableMeta exists, build child table meta according to the super table meta // the uid need to be checked in addition to the general name of the super table. @@ -4374,7 +4374,7 @@ int32_t tscCreateTableMetaFromSTableMeta(STableMeta* pChild, const char* name, v memcpy(pChild->schema, p->schema, sizeof(SSchema) *total); return TSDB_CODE_SUCCESS; } else { // super table has been removed, current tableMeta is also expired. 
remove it here - taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN)); + taosHashRemove(tscTableMetaMap, name, strnlen(name, TSDB_TABLE_FNAME_LEN)); return -1; } } diff --git a/src/query/src/qScript.c b/src/query/src/qScript.c index 261164a84c..74ddf5f548 100644 --- a/src/query/src/qScript.c +++ b/src/query/src/qScript.c @@ -342,6 +342,7 @@ int32_t scriptEnvPoolInit() { env->lua_state = createLuaEnv(); tdListAppend(pool->scriptEnvs, (void *)(&env)); } + pool->mSize = size; pool->cSize = size; return 0; diff --git a/src/util/inc/hash.h b/src/util/inc/hash.h index 616b844c13..a53aa602c1 100644 --- a/src/util/inc/hash.h +++ b/src/util/inc/hash.h @@ -123,10 +123,9 @@ void *taosHashGet(SHashObj *pHashObj, const void *key, size_t keyLen); * @param keyLen * @param fp * @param d - * @param dsize * @return */ -void* taosHashGetClone(SHashObj *pHashObj, const void *key, size_t keyLen, void (*fp)(void *), void* d, size_t dsize); +void* taosHashGetClone(SHashObj *pHashObj, const void *key, size_t keyLen, void (*fp)(void *), void* d); /** * remove item with the specified key diff --git a/src/util/src/hash.c b/src/util/src/hash.c index d7bee9b67c..2e18f36a17 100644 --- a/src/util/src/hash.c +++ b/src/util/src/hash.c @@ -294,10 +294,10 @@ int32_t taosHashPut(SHashObj *pHashObj, const void *key, size_t keyLen, void *da } void *taosHashGet(SHashObj *pHashObj, const void *key, size_t keyLen) { - return taosHashGetClone(pHashObj, key, keyLen, NULL, NULL, 0); + return taosHashGetClone(pHashObj, key, keyLen, NULL, NULL); } -void* taosHashGetClone(SHashObj *pHashObj, const void *key, size_t keyLen, void (*fp)(void *), void* d, size_t dsize) { +void* taosHashGetClone(SHashObj *pHashObj, const void *key, size_t keyLen, void (*fp)(void *), void* d) { if (taosHashTableEmpty(pHashObj) || keyLen == 0 || key == NULL) { return NULL; } diff --git a/src/vnode/src/vnodeMgmt.c b/src/vnode/src/vnodeMgmt.c index 8b17d3a5f2..e14b5a385e 100644 --- a/src/vnode/src/vnodeMgmt.c +++ b/src/vnode/src/vnodeMgmt.c @@ -93,7 +93,7 @@ static void vnodeIncRef(void *ptNode) { void *vnodeAcquire(int32_t vgId) { SVnodeObj *pVnode = NULL; if (tsVnodesHash != NULL) { - taosHashGetClone(tsVnodesHash, &vgId, sizeof(int32_t), vnodeIncRef, &pVnode, sizeof(void *)); + taosHashGetClone(tsVnodesHash, &vgId, sizeof(int32_t), vnodeIncRef, &pVnode); } if (pVnode == NULL) { From 551f54fc409b11b0834fe5168dd61dd6d170b305 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 24 Jul 2021 15:36:26 +0800 Subject: [PATCH 28/81] [td-225] fix bug found by regression test. --- src/client/src/tscLocal.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/client/src/tscLocal.c b/src/client/src/tscLocal.c index 641f62f22b..ec7cb228dd 100644 --- a/src/client/src/tscLocal.c +++ b/src/client/src/tscLocal.c @@ -921,6 +921,7 @@ int tscProcessLocalCmd(SSqlObj *pSql) { pRes->code = tscProcessShowCreateDatabase(pSql); } else if (pCmd->command == TSDB_SQL_RESET_CACHE) { taosHashClear(tscTableMetaMap); + taosCacheEmpty(tscVgroupListBuf); pRes->code = TSDB_CODE_SUCCESS; } else if (pCmd->command == TSDB_SQL_SERV_VERSION) { pRes->code = tscProcessServerVer(pSql); From a54429b74c8a8541dbdf1819699fdbd86c207002 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 24 Jul 2021 22:32:44 +0800 Subject: [PATCH 29/81] [td-225]fix compiler error. 
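[annotation] The hash.h/hash.c change above drops the dsize argument from taosHashGetClone, so the caller's destination buffer is assumed to be sized for the stored value and the lookup simply copies into it, optionally running a callback (such as a ref-count bump) while the entry is still held. The sketch below shows that copy-out pattern in isolation; it does not use the real SHashObj API, and fixedStore, storeGet and incRef are invented names.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* A toy fixed-size store: every value has the same size, so a lookup can
 * copy straight into a caller-provided buffer without a size argument. */
typedef struct {
  int32_t key;
  int64_t value;
  int     used;
} FixedSlot;

static FixedSlot fixedStore[16];

/* Copy the value for 'key' into 'dst', running 'fp' on it first (for example
 * to bump a reference count) while the slot is still owned by the store. */
static int storeGet(int32_t key, void (*fp)(void *), void *dst) {
  for (int i = 0; i < 16; ++i) {
    if (fixedStore[i].used && fixedStore[i].key == key) {
      if (fp != NULL) fp(&fixedStore[i].value);
      memcpy(dst, &fixedStore[i].value, sizeof(int64_t));
      return 0;
    }
  }
  return -1;  /* not found */
}

static void incRef(void *p) { (void)p; /* e.g. an atomic ref-count bump */ }

int main(void) {
  fixedStore[0] = (FixedSlot){.key = 7, .value = 42, .used = 1};
  int64_t out = 0;
  if (storeGet(7, incRef, &out) == 0) printf("%lld\n", (long long)out);
  return 0;
}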
--- src/client/src/tscSQLParser.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 36f252a9ba..d4bbeded3c 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -8157,12 +8157,12 @@ int32_t loadAllTableMeta(SSqlObj* pSql, struct SSqlInfo* pInfo) { tscDebug("0x%"PRIx64" failed to retrieve stable %s vgroup id list in cache, try fetch from mnode", pSql->self, pTableName); } else { tFilePage* pdata = (tFilePage*) pv; - pVgroupIdList = taosArrayInit(pdata->num, sizeof(int32_t)); + pVgroupIdList = taosArrayInit((size_t) pdata->num, sizeof(int32_t)); if (pVgroupIdList == NULL) { return TSDB_CODE_TSC_OUT_OF_MEMORY; } - taosArrayAddBatch(pVgroupIdList, pdata->data, pdata->num); + taosArrayAddBatch(pVgroupIdList, pdata->data, (int32_t) pdata->num); taosCacheRelease(tscVgroupListBuf, &pv, false); } } From eb44d43bba0bc43b6bff30ce8c60b4e81312f23c Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 24 Jul 2021 22:44:20 +0800 Subject: [PATCH 30/81] [td-225]fix compiler error. --- src/client/src/tscServer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 63bb9ee214..1c0e665276 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -2240,7 +2240,7 @@ int tscProcessMultiTableMetaRsp(SSqlObj *pSql) { // for each vgroup, only update the information once. int64_t vgId = pMetaMsg->vgroup.vgId; if (pTableMeta->tableType != TSDB_SUPER_TABLE && taosHashGet(pSet, &vgId, sizeof(vgId)) == NULL) { - doUpdateVgroupInfo(vgId, &pMetaMsg->vgroup); + doUpdateVgroupInfo((int32_t) vgId, &pMetaMsg->vgroup); taosHashPut(pSet, &vgId, sizeof(vgId), "", 0); } From 6e015f2cf691bfe91c7a780cce26ad494d556718 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 24 Jul 2021 22:53:32 +0800 Subject: [PATCH 31/81] [td-225] fix compiler error. --- src/client/src/tscServer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 1c0e665276..dd40314265 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -2266,7 +2266,7 @@ int tscProcessMultiTableMetaRsp(SSqlObj *pSql) { tstrncpy(tableName, name, TSDB_TABLE_NAME_LEN); p->vgroupIdList = createVgroupIdListFromMsg(pMsg, pSet, tableName, &size, pSql->self); - int32_t numOfVgId = taosArrayGetSize(p->vgroupIdList); + int32_t numOfVgId = (int32_t) taosArrayGetSize(p->vgroupIdList); int32_t s = sizeof(tFilePage) + numOfVgId * sizeof(int32_t); tFilePage* idList = calloc(1, s); From cbbb0593857c3c1d8dc6507562df6dec4d4336d6 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sun, 25 Jul 2021 16:04:20 +0800 Subject: [PATCH 32/81] [td-225]fix bug found by regression test. 
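[annotation] Patches 29 through 31 above are all explicit narrowing casts (size_t and int64_t values handed to int32_t parameters) added to quiet conversion warnings on strict or 32-bit builds. A hedged standalone illustration of the same idiom, with made-up helper names, is below.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* A count reported as size_t or int64_t often has to be passed to an API
 * that takes int32_t; an explicit cast documents the intentional narrowing
 * and keeps -Wconversion/-Werror builds quiet. Guard with an assert when
 * the value is expected to stay small. */
static void useCount(int32_t n) { printf("count=%d\n", n); }

int main(void) {
  size_t  numOfElems = 12;    /* e.g. what an array-size getter returns */
  int64_t groupId    = 1001;  /* e.g. a vgroup id carried as 64-bit */

  assert(numOfElems <= INT32_MAX && groupId <= INT32_MAX);
  useCount((int32_t)numOfElems);  /* explicit, intentional narrowing */
  useCount((int32_t)groupId);
  return 0;
}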
--- src/client/src/tscSQLParser.c | 19 +++++++++---------- src/client/src/tscServer.c | 35 +++++++++++++++-------------------- src/common/inc/tcmdtype.h | 5 +---- src/mnode/src/mnodeTable.c | 8 ++------ 4 files changed, 27 insertions(+), 40 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index d4bbeded3c..fef9aafad0 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -7237,7 +7237,7 @@ void tscPrintSelNodeList(SSqlObj* pSql, int32_t subClauseIndex) { } tmpLen = - sprintf(tmpBuf, "%s(uid:%" PRId64 ", %d)", name, pExpr->base.uid, pExpr->base.colInfo.colId); + sprintf(tmpBuf, "%s(uid:%" PRIu64 ", %d)", name, pExpr->base.uid, pExpr->base.colInfo.colId); if (tmpLen + offset >= totalBufSize - 1) break; @@ -8135,9 +8135,6 @@ int32_t loadAllTableMeta(SSqlObj* pSql, struct SSqlInfo* pInfo) { tscDebug("0x%"PRIx64" retrieve table meta %s from local buf", pSql->self, name); // avoid mem leak, may should update pTableMeta - const char* pTableName = tNameGetTableName(pname); - size_t nameLen = strlen(pTableName); - void* pVgroupIdList = NULL; if (pTableMeta->tableType == TSDB_CHILD_TABLE) { code = tscCreateTableMetaFromSTableMeta(pTableMeta, name, pSql->pBuf); @@ -8150,11 +8147,12 @@ int32_t loadAllTableMeta(SSqlObj* pSql, struct SSqlInfo* pInfo) { } } else if (pTableMeta->tableType == TSDB_SUPER_TABLE) { // the vgroup list of super table is not kept in local buffer, so here need retrieve it from the mnode each time - void* pv = taosCacheAcquireByKey(tscVgroupListBuf, pTableName, nameLen); + tscDebug("0x%"PRIx64" try to acquire cached super table %s vgroup id list", pSql->self, name); + void* pv = taosCacheAcquireByKey(tscVgroupListBuf, name, len); if (pv == NULL) { char* t = strdup(name); taosArrayPush(pVgroupList, &t); - tscDebug("0x%"PRIx64" failed to retrieve stable %s vgroup id list in cache, try fetch from mnode", pSql->self, pTableName); + tscDebug("0x%"PRIx64" failed to retrieve stable %s vgroup id list in cache, try fetch from mnode", pSql->self, name); } else { tFilePage* pdata = (tFilePage*) pv; pVgroupIdList = taosArrayInit((size_t) pdata->num, sizeof(int32_t)); @@ -8167,10 +8165,10 @@ int32_t loadAllTableMeta(SSqlObj* pSql, struct SSqlInfo* pInfo) { } } - if (taosHashGet(pCmd->pTableMetaMap, pTableName, nameLen) == NULL) { + if (taosHashGet(pCmd->pTableMetaMap, name, len) == NULL) { STableMeta* pMeta = tscTableMetaDup(pTableMeta); STableMetaVgroupInfo tvi = { .pTableMeta = pMeta, .vgroupIdList = pVgroupIdList}; - taosHashPut(pCmd->pTableMetaMap, pTableName, nameLen, &tvi, sizeof(STableMetaVgroupInfo)); + taosHashPut(pCmd->pTableMetaMap, name, len, &tvi, sizeof(STableMetaVgroupInfo)); } } else { // Add to the retrieve table meta array list. 
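[annotation] The hunk above rebuilds a super table's vgroup id list from a flat cache entry: a count followed by that many int32 ids, copied back out with a batch append. A self-contained sketch of that unpacking step follows; FlatIdList and unpackIds are invented names and no TAOS container APIs are used.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Flat layout used for caching: a 64-bit count followed by 'num' int32 ids. */
typedef struct {
  int64_t num;
  char    data[];
} FlatIdList;

/* Unpack the flat buffer into a freshly allocated int32_t array. */
static int32_t *unpackIds(const FlatIdList *src, int32_t *outNum) {
  int32_t  n   = (int32_t)src->num;  /* explicit narrowing, as in the patch */
  int32_t *ids = malloc(sizeof(int32_t) * (size_t)n);
  if (ids == NULL) return NULL;
  memcpy(ids, src->data, sizeof(int32_t) * (size_t)n);
  *outNum = n;
  return ids;
}

int main(void) {
  int32_t     src[3] = {2, 5, 9};
  FlatIdList *flat   = malloc(sizeof(FlatIdList) + sizeof(src));
  flat->num = 3;
  memcpy(flat->data, src, sizeof(src));

  int32_t  n   = 0;
  int32_t *ids = unpackIds(flat, &n);
  for (int32_t i = 0; i < n; ++i) printf("vgId[%d]=%d\n", i, ids[i]);
  free(ids);
  free(flat);
  return 0;
}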
@@ -8290,8 +8288,9 @@ static int32_t doLoadAllTableMeta(SSqlObj* pSql, SQueryInfo* pQueryInfo, SSqlNod strncpy(pTableMetaInfo->aliasName, tNameGetTableName(&pTableMetaInfo->name), tListLen(pTableMetaInfo->aliasName)); } - const char* name = tNameGetTableName(&pTableMetaInfo->name); - STableMetaVgroupInfo* p = taosHashGet(pCmd->pTableMetaMap, name, strlen(name)); + char fname[TSDB_TABLE_FNAME_LEN] = {0}; + tNameExtractFullName(&pTableMetaInfo->name, fname); + STableMetaVgroupInfo* p = taosHashGet(pCmd->pTableMetaMap, fname, strnlen(fname, TSDB_TABLE_FNAME_LEN)); pTableMetaInfo->pTableMeta = tscTableMetaDup(p->pTableMeta); assert(pTableMetaInfo->pTableMeta != NULL); diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index dd40314265..8a11cd6b93 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -391,9 +391,9 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) { } if ((cmd == TSDB_SQL_SELECT || cmd == TSDB_SQL_UPDATE_TAGS_VAL) && - (rpcMsg->code == TSDB_CODE_TDB_INVALID_TABLE_ID || - rpcMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID || - rpcMsg->code == TSDB_CODE_RPC_NETWORK_UNAVAIL || + (rpcMsg->code == TSDB_CODE_TDB_INVALID_TABLE_ID || // change the retry procedure + /*(*/rpcMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID || + rpcMsg->code == TSDB_CODE_RPC_NETWORK_UNAVAIL || // change the retry procedure rpcMsg->code == TSDB_CODE_APP_NOT_READY)) { pSql->retry++; @@ -404,7 +404,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) { tscError("0x%"PRIx64" max retry %d reached, give up", pSql->self, pSql->maxRetry); } else { // wait for a little bit moment and then retry - // todo do not sleep in rpc callback thread, add this process into queueu to process + // todo do not sleep in rpc callback thread, add this process into queue to process if (rpcMsg->code == TSDB_CODE_APP_NOT_READY || rpcMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID) { int32_t duration = getWaitingTimeInterval(pSql->retry); taosMsleep(duration); @@ -2214,15 +2214,10 @@ int tscProcessMultiTableMetaRsp(SSqlObj *pSql) { return TSDB_CODE_TSC_INVALID_VALUE; } - SName sn = {0}; - tNameFromString(&sn, pMetaMsg->tableFname, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE); - if (pMultiMeta->metaClone == 1 || pTableMeta->tableType == TSDB_SUPER_TABLE) { STableMetaVgroupInfo p = {.pTableMeta = pTableMeta,}; - - const char* tableName = tNameGetTableName(&sn); - size_t keyLen = strlen(tableName); - taosHashPut(pParentCmd->pTableMetaMap, tableName, keyLen, &p, sizeof(STableMetaVgroupInfo)); + size_t keyLen = strnlen(pMetaMsg->tableFname, TSDB_TABLE_FNAME_LEN); + taosHashPut(pParentCmd->pTableMetaMap, pMetaMsg->tableFname, keyLen, &p, sizeof(STableMetaVgroupInfo)); } else { freeMeta = true; } @@ -2251,10 +2246,13 @@ int tscProcessMultiTableMetaRsp(SSqlObj *pSql) { } for(int32_t i = 0; i < pMultiMeta->numOfVgroup; ++i) { - char* name = pMsg; - pMsg += TSDB_TABLE_NAME_LEN; + char fname[TSDB_TABLE_FNAME_LEN] = {0}; + tstrncpy(fname, pMsg, TSDB_TABLE_FNAME_LEN); + size_t len = strnlen(fname, TSDB_TABLE_FNAME_LEN); - STableMetaVgroupInfo* p = taosHashGet(pParentCmd->pTableMetaMap, name, strnlen(name, TSDB_TABLE_NAME_LEN)); + pMsg += TSDB_TABLE_FNAME_LEN; + + STableMetaVgroupInfo* p = taosHashGet(pParentCmd->pTableMetaMap, fname, len); assert(p != NULL); int32_t size = 0; @@ -2262,9 +2260,7 @@ int tscProcessMultiTableMetaRsp(SSqlObj *pSql) { taosArrayDestroy(p->vgroupIdList); } - char tableName[TSDB_TABLE_FNAME_LEN] = {0}; - tstrncpy(tableName, name, TSDB_TABLE_NAME_LEN); - 
p->vgroupIdList = createVgroupIdListFromMsg(pMsg, pSet, tableName, &size, pSql->self); + p->vgroupIdList = createVgroupIdListFromMsg(pMsg, pSet, fname, &size, pSql->self); int32_t numOfVgId = (int32_t) taosArrayGetSize(p->vgroupIdList); int32_t s = sizeof(tFilePage) + numOfVgId * sizeof(int32_t); @@ -2273,11 +2269,10 @@ int tscProcessMultiTableMetaRsp(SSqlObj *pSql) { idList->num = numOfVgId; memcpy(idList->data, TARRAY_GET_START(p->vgroupIdList), numOfVgId * sizeof(int32_t)); - void* idListInst = taosCachePut(tscVgroupListBuf, tableName, strlen(tableName), idList, s, 5000); + void* idListInst = taosCachePut(tscVgroupListBuf, fname, len, idList, s, 5000); taosCacheRelease(tscVgroupListBuf, (void*) &idListInst, false); tfree(idList); - pMsg += size; } @@ -2918,7 +2913,7 @@ int tscRenewTableMeta(SSqlObj *pSql, int32_t tableIndex) { STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; if (pTableMeta) { - tscDebug("0x%"PRIx64" update table meta:%s, old meta numOfTags:%d, numOfCols:%d, uid:%" PRId64, pSql->self, name, + tscDebug("0x%"PRIx64" update table meta:%s, old meta numOfTags:%d, numOfCols:%d, uid:%" PRIu64, pSql->self, name, tscGetNumOfTags(pTableMeta), tscGetNumOfColumns(pTableMeta), pTableMeta->id.uid); } diff --git a/src/common/inc/tcmdtype.h b/src/common/inc/tcmdtype.h index bd2c2e46f8..918763ebb4 100644 --- a/src/common/inc/tcmdtype.h +++ b/src/common/inc/tcmdtype.h @@ -88,10 +88,7 @@ enum { TSDB_DEFINE_SQL_TYPE( TSDB_SQL_SHOW_CREATE_STABLE, "show-create-stable") TSDB_DEFINE_SQL_TYPE( TSDB_SQL_SHOW_CREATE_DATABASE, "show-create-database") - /* - * build empty result instead of accessing dnode to fetch result - * reset the client cache - */ + // build empty result instead of accessing dnode to fetch result reset the client cache TSDB_DEFINE_SQL_TYPE( TSDB_SQL_RETRIEVE_EMPTY_RESULT, "retrieve-empty-result" ) TSDB_DEFINE_SQL_TYPE( TSDB_SQL_RESET_CACHE, "reset-cache" ) diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c index 6e5cf14b96..0bc114ffdf 100644 --- a/src/mnode/src/mnodeTable.c +++ b/src/mnode/src/mnodeTable.c @@ -1812,12 +1812,8 @@ static int32_t getVgroupInfoLength(SSTableVgroupMsg* pInfo, int32_t numOfTable) } static char* serializeVgroupInfo(SSTableObj *pTable, char* name, char* msg, SMnodeMsg* pMsgBody, void* handle) { - SName sn = {0}; - tNameFromString(&sn, name, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE); - const char* tableName = tNameGetTableName(&sn); - - strncpy(msg, tableName, TSDB_TABLE_NAME_LEN); - msg += TSDB_TABLE_NAME_LEN; + strncpy(msg, name, TSDB_TABLE_FNAME_LEN); + msg += TSDB_TABLE_FNAME_LEN; if (pTable->vgHash == NULL) { mDebug("msg:%p, app:%p stable:%s, no vgroup exist while get stable vgroup info", pMsgBody, handle, name); From 9e0f7d933b1f339bf7f1a6dde8a7281c0eb638f6 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Mon, 26 Jul 2021 10:04:37 +0800 Subject: [PATCH 33/81] [TD-5451]:fixed buffer overflow caused by sprintf. 
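[annotation] Patch 32 above switches the client-side table-meta map and the vgroup-list cache from short table names to fully qualified names, with key lengths bounded by strnlen. A small illustration of that keying convention follows; buildFullName and FNAME_LEN are stand-ins, not the real tNameExtractFullName or TSDB_TABLE_FNAME_LEN.

#include <stdio.h>
#include <string.h>

#define FNAME_LEN 64  /* stand-in for the fixed full-name buffer length */

/* Compose "account.db.table" into a fixed, zero-filled buffer so the same
 * bytes can serve both as a hash-map key and as a cache key. */
static void buildFullName(char dst[FNAME_LEN], const char *acct,
                          const char *db, const char *tbl) {
  memset(dst, 0, FNAME_LEN);
  snprintf(dst, FNAME_LEN, "%s.%s.%s", acct, db, tbl);
}

int main(void) {
  char fname[FNAME_LEN];
  buildFullName(fname, "root", "power", "meters");

  /* strnlen bounds the key length even if the buffer were unterminated. */
  size_t keyLen = strnlen(fname, FNAME_LEN);
  printf("key=%s len=%zu\n", fname, keyLen);
  /* a map lookup would then use exactly these keyLen bytes */
  return 0;
}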
Removed debug prints --- src/client/src/tscParseLineProtocol.c | 25 ++++--------------------- src/util/src/tstrbuild.c | 10 +++++----- 2 files changed, 9 insertions(+), 26 deletions(-) diff --git a/src/client/src/tscParseLineProtocol.c b/src/client/src/tscParseLineProtocol.c index d59be5e79f..03dd51caa8 100644 --- a/src/client/src/tscParseLineProtocol.c +++ b/src/client/src/tscParseLineProtocol.c @@ -1225,7 +1225,6 @@ static bool convertStrToNumber(TAOS_SML_KV *pVal, char*str) { uint64_t val_u; double val_d; - printf("origin str:%s\n", str); if (IS_FLOAT_TYPE(type)) { val_d = strtod(str, NULL); } else { @@ -1237,19 +1236,17 @@ static bool convertStrToNumber(TAOS_SML_KV *pVal, char*str) { } if (errno == ERANGE) { - printf("out of range\n"); + tscError("Converted number out of range"); return false; } switch (type) { case TSDB_DATA_TYPE_TINYINT: if (!IS_VALID_TINYINT(val_s)) { - printf("tiny int out of range\n"); return false; } pVal->value = calloc(length, 1); *(int8_t *)(pVal->value) = (int8_t)val_s; - printf("tiny int:%d\n", *(int8_t *)(pVal->value)); break; case TSDB_DATA_TYPE_UTINYINT: if (!IS_VALID_UTINYINT(val_u)) { @@ -1257,16 +1254,13 @@ static bool convertStrToNumber(TAOS_SML_KV *pVal, char*str) { } pVal->value = calloc(length, 1); *(uint8_t *)(pVal->value) = (uint8_t)val_u; - printf("tiny uint:%u\n", *(uint8_t *)(pVal->value)); break; case TSDB_DATA_TYPE_SMALLINT: if (!IS_VALID_SMALLINT(val_s)) { - printf("small int out of range\n"); return false; } pVal->value = calloc(length, 1); *(int16_t *)(pVal->value) = (int16_t)val_s; - printf("small int:%d\n", *(int16_t *)(pVal->value)); break; case TSDB_DATA_TYPE_USMALLINT: if (!IS_VALID_USMALLINT(val_u)) { @@ -1274,16 +1268,13 @@ static bool convertStrToNumber(TAOS_SML_KV *pVal, char*str) { } pVal->value = calloc(length, 1); *(uint16_t *)(pVal->value) = (uint16_t)val_u; - printf("small uint:%u\n", *(uint16_t *)(pVal->value)); break; case TSDB_DATA_TYPE_INT: if (!IS_VALID_INT(val_s)) { - printf("int out of range\n"); return false; } pVal->value = calloc(length, 1); *(int32_t *)(pVal->value) = (int32_t)val_s; - printf("int:%d\n", *(int32_t *)(pVal->value)); break; case TSDB_DATA_TYPE_UINT: if (!IS_VALID_UINT(val_u)) { @@ -1291,16 +1282,13 @@ static bool convertStrToNumber(TAOS_SML_KV *pVal, char*str) { } pVal->value = calloc(length, 1); *(uint32_t *)(pVal->value) = (uint32_t)val_u; - printf("uint:%u\n", *(uint32_t *)(pVal->value)); break; case TSDB_DATA_TYPE_BIGINT: if (!IS_VALID_BIGINT(val_s)) { - printf("big int out of range\n"); return false; } pVal->value = calloc(length, 1); *(int64_t *)(pVal->value) = (int64_t)val_s; - printf("big int:%ld\n", *(int64_t *)(pVal->value)); break; case TSDB_DATA_TYPE_UBIGINT: if (!IS_VALID_UBIGINT(val_u)) { @@ -1308,25 +1296,20 @@ static bool convertStrToNumber(TAOS_SML_KV *pVal, char*str) { } pVal->value = calloc(length, 1); *(uint64_t *)(pVal->value) = (uint64_t)val_u; - printf("big uint:%lu\n", *(uint64_t *)(pVal->value)); break; case TSDB_DATA_TYPE_FLOAT: if (!IS_VALID_FLOAT(val_d)) { - printf("float out of range\n"); return false; } pVal->value = calloc(length, 1); *(float *)(pVal->value) = (float)val_d; - printf("float:%.5e\n", *(float *)(pVal->value)); break; case TSDB_DATA_TYPE_DOUBLE: if (!IS_VALID_DOUBLE(val_d)) { - printf("double out of range\n"); return false; } pVal->value = calloc(length, 1); *(double *)(pVal->value) = (double)val_d; - printf("double:%.5e\n", *(double *)(pVal->value)); break; default: return false; @@ -1463,9 +1446,9 @@ static bool convertSmlValueType(TAOS_SML_KV *pVal, 
char *value, if (isValidInteger(value) || isValidFloat(value)) { pVal->type = TSDB_DATA_TYPE_FLOAT; pVal->length = (int16_t)tDataTypes[pVal->type].bytes; - pVal->value = calloc(pVal->length, 1); - float val = (float)strtold(value, NULL); - memcpy(pVal->value, &val, pVal->length); + if (!convertStrToNumber(pVal, value)) { + return false; + } return true; } return false; diff --git a/src/util/src/tstrbuild.c b/src/util/src/tstrbuild.c index 61a6d67952..eec21d1835 100644 --- a/src/util/src/tstrbuild.c +++ b/src/util/src/tstrbuild.c @@ -69,12 +69,12 @@ void taosStringBuilderAppendNull(SStringBuilder* sb) { taosStringBuilderAppendSt void taosStringBuilderAppendInteger(SStringBuilder* sb, int64_t v) { char buf[64]; - size_t len = sprintf(buf, "%" PRId64, v); - taosStringBuilderAppendStringLen(sb, buf, len); + size_t len = snprintf(buf, sizeof(buf), "%" PRId64, v); + taosStringBuilderAppendStringLen(sb, buf, MIN(len, sizeof(buf))); } void taosStringBuilderAppendDouble(SStringBuilder* sb, double v) { - char buf[64]; - size_t len = sprintf(buf, "%.9lf", v); - taosStringBuilderAppendStringLen(sb, buf, len); + char buf[512]; + size_t len = snprintf(buf, sizeof(buf), "%.9lf", v); + taosStringBuilderAppendStringLen(sb, buf, MIN(len, sizeof(buf))); } From 4eeea0d8d6971556b81c3a8cb0526ed27ff1b27c Mon Sep 17 00:00:00 2001 From: tomchon Date: Mon, 26 Jul 2021 10:14:47 +0800 Subject: [PATCH 34/81] [TD-5523]: arm32 4byte long error. --- src/query/src/qExecutor.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 982c45c441..d479e90415 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -149,12 +149,12 @@ static void getNextTimeWindow(SQueryAttr* pQueryAttr, STimeWindow* tw) { int mon = (int)(tm.tm_year * 12 + tm.tm_mon + interval * factor); tm.tm_year = mon / 12; tm.tm_mon = mon % 12; - tw->skey = convertTimePrecision(mktime(&tm) * 1000L, TSDB_TIME_PRECISION_MILLI, pQueryAttr->precision); + tw->skey = convertTimePrecision((int64_t)mktime(&tm) * 1000L, TSDB_TIME_PRECISION_MILLI, pQueryAttr->precision); mon = (int)(mon + interval); tm.tm_year = mon / 12; tm.tm_mon = mon % 12; - tw->ekey = convertTimePrecision(mktime(&tm) * 1000L, TSDB_TIME_PRECISION_MILLI, pQueryAttr->precision); + tw->ekey = convertTimePrecision((int64_t)mktime(&tm) * 1000L, TSDB_TIME_PRECISION_MILLI, pQueryAttr->precision); tw->ekey -= 1; } From e393a5614b17e6e179720787bbaf9886be46cfeb Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Mon, 26 Jul 2021 10:16:50 +0800 Subject: [PATCH 35/81] [TD-5520]: lowcase letters in database names and line names can be uppercase --- src/client/src/tscParseLineProtocol.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/src/client/src/tscParseLineProtocol.c b/src/client/src/tscParseLineProtocol.c index d5883af7f6..8f9cdbf002 100644 --- a/src/client/src/tscParseLineProtocol.c +++ b/src/client/src/tscParseLineProtocol.c @@ -238,7 +238,10 @@ static int32_t buildDataPointSchemas(TAOS_SML_DATA_POINT* points, int numPoint, static int32_t generateSchemaAction(SSchema* pointColField, SHashObj* dbAttrHash, SArray* dbAttrArray, bool isTag, char sTableName[], SSchemaAction* action, bool* actionNeeded) { - size_t* pDbIndex = taosHashGet(dbAttrHash, pointColField->name, strlen(pointColField->name)); + char fieldNameLowerCase[TSDB_COL_NAME_LEN] = {0}; + strtolower(fieldNameLowerCase, pointColField->name); + + size_t* pDbIndex = taosHashGet(dbAttrHash, fieldNameLowerCase, 
strlen(fieldNameLowerCase)); if (pDbIndex) { SSchema* dbAttr = taosArrayGet(dbAttrArray, *pDbIndex); assert(strcasecmp(dbAttr->name, pointColField->name) == 0); @@ -399,8 +402,11 @@ int32_t loadTableMeta(TAOS* taos, char* tableName, SSmlSTableSchema* schema) { tscDebug("load table schema. super table name: %s", tableName); + char tableNameLowerCase[TSDB_TABLE_NAME_LEN]; + strtolower(tableNameLowerCase, tableName); + char sql[256]; - snprintf(sql, 256, "describe %s", tableName); + snprintf(sql, 256, "describe %s", tableNameLowerCase); TAOS_RES* res = taos_query(taos, sql); code = taos_errno(res); if (code != 0) { @@ -415,8 +421,8 @@ int32_t loadTableMeta(TAOS* taos, char* tableName, SSmlSTableSchema* schema) { pSql->signature = pSql; pSql->fp = NULL; - SStrToken tableToken = {.z=tableName, .n=(uint32_t)strlen(tableName), .type=TK_ID}; - tGetToken(tableName, &tableToken.type); + SStrToken tableToken = {.z=tableNameLowerCase, .n=(uint32_t)strlen(tableNameLowerCase), .type=TK_ID}; + tGetToken(tableNameLowerCase, &tableToken.type); // Check if the table name available or not if (tscValidateName(&tableToken) != TSDB_CODE_SUCCESS) { code = TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH; From 75c217e68218aa0048a56020c58a07f6d3360e30 Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Mon, 26 Jul 2021 10:37:35 +0800 Subject: [PATCH 36/81] [TD-5517]: return error when apply schema action failed --- src/client/src/tscParseLineProtocol.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/src/client/src/tscParseLineProtocol.c b/src/client/src/tscParseLineProtocol.c index 8f9cdbf002..7645a30eee 100644 --- a/src/client/src/tscParseLineProtocol.c +++ b/src/client/src/tscParseLineProtocol.c @@ -518,7 +518,11 @@ static int32_t modifyDBSchemas(TAOS* taos, SArray* stableSchemas) { bool actionNeeded = false; generateSchemaAction(pointTag, dbTagHash, dbSchema.tags, true, pointSchema->sTableName, &schemaAction, &actionNeeded); if (actionNeeded) { - applySchemaAction(taos, &schemaAction); + code = applySchemaAction(taos, &schemaAction); + if (code != 0) { + destroySmlSTableSchema(&dbSchema); + return code; + } } } @@ -532,7 +536,11 @@ static int32_t modifyDBSchemas(TAOS* taos, SArray* stableSchemas) { bool actionNeeded = false; generateSchemaAction(pointCol, dbFieldHash, dbSchema.fields,false, pointSchema->sTableName, &schemaAction, &actionNeeded); if (actionNeeded) { - applySchemaAction(taos, &schemaAction); + code = applySchemaAction(taos, &schemaAction); + if (code != 0) { + destroySmlSTableSchema(&dbSchema); + return code; + } } } From 9607c6681ed544353fb4028df18dc1a74cf8cdd3 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Mon, 26 Jul 2021 10:04:37 +0800 Subject: [PATCH 37/81] [TD-5451]:fixed buffer overflow caused by sprintf. Removed debug prints --- src/client/src/tscParseLineProtocol.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/client/src/tscParseLineProtocol.c b/src/client/src/tscParseLineProtocol.c index 03dd51caa8..567f05a44d 100644 --- a/src/client/src/tscParseLineProtocol.c +++ b/src/client/src/tscParseLineProtocol.c @@ -1892,11 +1892,8 @@ int taos_insert_lines(TAOS* taos, char* lines[], int numLines) { cleanup: tscDebug("taos_insert_lines finish inserting %d lines. 
code: %d", numLines, code); - numPoints = taosArrayGetSize(lpPoints); - TAOS_SML_DATA_POINT* ppoints = TARRAY_GET_START(lpPoints); - for (int i=0; i Date: Mon, 26 Jul 2021 10:46:41 +0800 Subject: [PATCH 38/81] [TD-5496]: add filter length when estimate query msg size --- src/client/src/tscUtil.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index c746484944..22189dd0be 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -4588,7 +4588,7 @@ int32_t tscGetColFilterSerializeLen(SQueryInfo* pQueryInfo) { for (int32_t j = 0; j < pCol->info.flist.numOfFilters; ++j) { len += sizeof(SColumnFilterInfo); if (pCol->info.flist.filterInfo[j].filterstr) { - len += (size_t) pCol->info.flist.filterInfo[j].len + 1 * TSDB_NCHAR_SIZE; + len += pCol->info.flist.filterInfo[j].len + 1 * TSDB_NCHAR_SIZE; } } } From 11dd18cff2304101d56143ff6b17453ea92e56e0 Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Mon, 26 Jul 2021 10:16:50 +0800 Subject: [PATCH 39/81] [TD-5520]: lowcase letters in database names and line names can be uppercase --- src/client/src/tscParseLineProtocol.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/src/client/src/tscParseLineProtocol.c b/src/client/src/tscParseLineProtocol.c index d5883af7f6..8f9cdbf002 100644 --- a/src/client/src/tscParseLineProtocol.c +++ b/src/client/src/tscParseLineProtocol.c @@ -238,7 +238,10 @@ static int32_t buildDataPointSchemas(TAOS_SML_DATA_POINT* points, int numPoint, static int32_t generateSchemaAction(SSchema* pointColField, SHashObj* dbAttrHash, SArray* dbAttrArray, bool isTag, char sTableName[], SSchemaAction* action, bool* actionNeeded) { - size_t* pDbIndex = taosHashGet(dbAttrHash, pointColField->name, strlen(pointColField->name)); + char fieldNameLowerCase[TSDB_COL_NAME_LEN] = {0}; + strtolower(fieldNameLowerCase, pointColField->name); + + size_t* pDbIndex = taosHashGet(dbAttrHash, fieldNameLowerCase, strlen(fieldNameLowerCase)); if (pDbIndex) { SSchema* dbAttr = taosArrayGet(dbAttrArray, *pDbIndex); assert(strcasecmp(dbAttr->name, pointColField->name) == 0); @@ -399,8 +402,11 @@ int32_t loadTableMeta(TAOS* taos, char* tableName, SSmlSTableSchema* schema) { tscDebug("load table schema. 
super table name: %s", tableName); + char tableNameLowerCase[TSDB_TABLE_NAME_LEN]; + strtolower(tableNameLowerCase, tableName); + char sql[256]; - snprintf(sql, 256, "describe %s", tableName); + snprintf(sql, 256, "describe %s", tableNameLowerCase); TAOS_RES* res = taos_query(taos, sql); code = taos_errno(res); if (code != 0) { @@ -415,8 +421,8 @@ int32_t loadTableMeta(TAOS* taos, char* tableName, SSmlSTableSchema* schema) { pSql->signature = pSql; pSql->fp = NULL; - SStrToken tableToken = {.z=tableName, .n=(uint32_t)strlen(tableName), .type=TK_ID}; - tGetToken(tableName, &tableToken.type); + SStrToken tableToken = {.z=tableNameLowerCase, .n=(uint32_t)strlen(tableNameLowerCase), .type=TK_ID}; + tGetToken(tableNameLowerCase, &tableToken.type); // Check if the table name available or not if (tscValidateName(&tableToken) != TSDB_CODE_SUCCESS) { code = TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH; From 09d4fe1b5eb24ec4b31503b45df086fdefe78029 Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Mon, 26 Jul 2021 10:37:35 +0800 Subject: [PATCH 40/81] [TD-5517]: return error when apply schema action failed --- src/client/src/tscParseLineProtocol.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/src/client/src/tscParseLineProtocol.c b/src/client/src/tscParseLineProtocol.c index 8f9cdbf002..7645a30eee 100644 --- a/src/client/src/tscParseLineProtocol.c +++ b/src/client/src/tscParseLineProtocol.c @@ -518,7 +518,11 @@ static int32_t modifyDBSchemas(TAOS* taos, SArray* stableSchemas) { bool actionNeeded = false; generateSchemaAction(pointTag, dbTagHash, dbSchema.tags, true, pointSchema->sTableName, &schemaAction, &actionNeeded); if (actionNeeded) { - applySchemaAction(taos, &schemaAction); + code = applySchemaAction(taos, &schemaAction); + if (code != 0) { + destroySmlSTableSchema(&dbSchema); + return code; + } } } @@ -532,7 +536,11 @@ static int32_t modifyDBSchemas(TAOS* taos, SArray* stableSchemas) { bool actionNeeded = false; generateSchemaAction(pointCol, dbFieldHash, dbSchema.fields,false, pointSchema->sTableName, &schemaAction, &actionNeeded); if (actionNeeded) { - applySchemaAction(taos, &schemaAction); + code = applySchemaAction(taos, &schemaAction); + if (code != 0) { + destroySmlSTableSchema(&dbSchema); + return code; + } } } From 2f04fe063fdcad499dfca2f319df7fe938906d91 Mon Sep 17 00:00:00 2001 From: markswang <792637585@qq.com> Date: Mon, 26 Jul 2021 12:55:47 +0800 Subject: [PATCH 41/81] [TD-5496]: add filter length when estimate query msg size --- src/client/src/tscUtil.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 22189dd0be..0204282fcc 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -4588,7 +4588,7 @@ int32_t tscGetColFilterSerializeLen(SQueryInfo* pQueryInfo) { for (int32_t j = 0; j < pCol->info.flist.numOfFilters; ++j) { len += sizeof(SColumnFilterInfo); if (pCol->info.flist.filterInfo[j].filterstr) { - len += pCol->info.flist.filterInfo[j].len + 1 * TSDB_NCHAR_SIZE; + len += (int32_t)pCol->info.flist.filterInfo[j].len + 1 * TSDB_NCHAR_SIZE; } } } From 1ec384f3c168ac17bf4c544b7bea37e0db113289 Mon Sep 17 00:00:00 2001 From: wpan Date: Mon, 26 Jul 2021 13:06:05 +0800 Subject: [PATCH 42/81] fix test code issue --- tests/examples/c/apitest.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/tests/examples/c/apitest.c b/tests/examples/c/apitest.c index fbe6a381df..50a62e550f 100644 --- a/tests/examples/c/apitest.c +++ 
b/tests/examples/c/apitest.c @@ -359,7 +359,7 @@ void verify_prepare(TAOS* taos) { v.v8 = (int64_t)(i * 8); v.f4 = (float)(i * 40); v.f8 = (double)(i * 80); - for (int j = 0; j < sizeof(v.bin) - 1; ++j) { + for (int j = 0; j < sizeof(v.bin); ++j) { v.bin[j] = (char)(i + '0'); } @@ -556,7 +556,7 @@ void verify_prepare2(TAOS* taos) { v.v8[i] = (int64_t)(i * 8); v.f4[i] = (float)(i * 40); v.f8[i] = (double)(i * 80); - for (int j = 0; j < sizeof(v.bin[0]) - 1; ++j) { + for (int j = 0; j < sizeof(v.bin[0]); ++j) { v.bin[i][j] = (char)(i + '0'); } strcpy(v.blob[i], "一二三四五六七八九十"); @@ -808,7 +808,7 @@ void verify_prepare3(TAOS* taos) { v.v8[i] = (int64_t)(i * 8); v.f4[i] = (float)(i * 40); v.f8[i] = (double)(i * 80); - for (int j = 0; j < sizeof(v.bin[0]) - 1; ++j) { + for (int j = 0; j < sizeof(v.bin[0]); ++j) { v.bin[i][j] = (char)(i + '0'); } strcpy(v.blob[i], "一二三四五六七八九十"); @@ -1030,7 +1030,7 @@ int main(int argc, char *argv[]) { printf("server info: %s\n", info); info = taos_get_client_info(taos); printf("client info: %s\n", info); - + printf("************ verify shemaless *************\n"); verify_schema_less(taos); @@ -1049,14 +1049,12 @@ int main(int argc, char *argv[]) { printf("************ verify prepare2 *************\n"); verify_prepare2(taos); - printf("************ verify prepare3 *************\n"); verify_prepare3(taos); - + printf("************ verify stream *************\n"); verify_stream(taos); printf("done\n"); - taos_close(taos); taos_cleanup(); } From 8c423dd4ffbb2962ff5219a7531e918aa01bb46d Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Mon, 26 Jul 2021 13:09:43 +0800 Subject: [PATCH 43/81] [TD-5521]: [schemaless]id can only contain digits/alphabet/underscore --- src/client/src/tscParseLineProtocol.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/src/client/src/tscParseLineProtocol.c b/src/client/src/tscParseLineProtocol.c index d5883af7f6..08d27fd86f 100644 --- a/src/client/src/tscParseLineProtocol.c +++ b/src/client/src/tscParseLineProtocol.c @@ -1743,6 +1743,18 @@ static int32_t parseSmlMeasurement(TAOS_SML_DATA_POINT *pSml, const char **index return TSDB_CODE_SUCCESS; } +//Table name can only contain digits(0-9),alphebet(a-z),underscore(_) +static int32_t isValidChildTableName(const char *pTbName, int16_t len) { + const char *cur = pTbName; + for (int i = 0; i < len; ++i) { + if(!isdigit(cur[i]) && !isalpha(cur[i]) && cur[i] != '_') { + return TSDB_CODE_TSC_LINE_SYNTAX_ERROR; + } + } + return TSDB_CODE_SUCCESS; +} + + static int32_t parseSmlKvPairs(TAOS_SML_KV **pKVs, int *num_kvs, const char **index, bool isField, TAOS_SML_DATA_POINT* smlData) { const char *cur = *index; @@ -1776,6 +1788,10 @@ static int32_t parseSmlKvPairs(TAOS_SML_KV **pKVs, int *num_kvs, } if (!isField && (strcasecmp(pkv->key, "ID") == 0) && pkv->type == TSDB_DATA_TYPE_BINARY) { + ret = isValidChildTableName(pkv->value, pkv->length); + if (!ret) { + goto error; + } smlData->childTableName = malloc( pkv->length + 1); memcpy(smlData->childTableName, pkv->value, pkv->length); smlData->childTableName[pkv->length] = '\0'; From 2b99aa162ff52fcf2baec8056369b3bf5f0214cf Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 26 Jul 2021 13:39:39 +0800 Subject: [PATCH 44/81] [td-225]fix bug found by regression test. 
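[annotation] The ID check added in patch 43 above boils down to a character whitelist over the child table name: digits, ASCII letters and underscore only. A hedged standalone version is sketched below; isIdentifierName is an invented name and it returns a bool, whereas the patch's own helper returns a TSDB error code.

#include <ctype.h>
#include <stdbool.h>
#include <stdio.h>

/* Accept only digits, ASCII letters and underscore, mirroring the
 * restriction placed on the schemaless "ID" tag value. */
static bool isIdentifierName(const char *name, int len) {
  for (int i = 0; i < len; ++i) {
    unsigned char c = (unsigned char)name[i];
    if (!isdigit(c) && !isalpha(c) && c != '_') {
      return false;
    }
  }
  return len > 0;
}

int main(void) {
  printf("%d\n", isIdentifierName("meter_01", 8));  /* 1: accepted */
  printf("%d\n", isIdentifierName("meter-01", 8));  /* 0: '-' rejected */
  return 0;
}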
--- src/client/src/tscServer.c | 33 ++++++++++++++++++++++++++++++--- src/inc/taosmsg.h | 2 +- tests/examples/c/apitest.c | 2 +- 3 files changed, 32 insertions(+), 5 deletions(-) diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 8a11cd6b93..8276ec4359 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -2337,14 +2337,18 @@ int tscProcessSTableVgroupRsp(SSqlObj *pSql) { SSqlCmd* pCmd = &parent->cmd; SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd); + char fName[TSDB_TABLE_FNAME_LEN] = {0}; for(int32_t i = 0; i < pStableVgroup->numOfTables; ++i) { char* name = pMsg; - pMsg += TSDB_TABLE_NAME_LEN; + pMsg += TSDB_TABLE_FNAME_LEN; STableMetaInfo *pInfo = NULL; for(int32_t j = 0; j < pQueryInfo->numOfTables; ++j) { STableMetaInfo *pInfo1 = tscGetTableMetaInfoFromCmd(pCmd, j); - if (strcmp(name, tNameGetTableName(&pInfo1->name)) != 0) { + memset(fName, 0, tListLen(fName)); + + tNameExtractFullName(&pInfo1->name, fName); + if (strcmp(name, fName) != 0) { continue; } @@ -2504,11 +2508,14 @@ int tscProcessUseDbRsp(SSqlObj *pSql) { return ret; } +//todo only invalid the buffered data that belongs to dropped databases int tscProcessDropDbRsp(SSqlObj *pSql) { //TODO LOCK DB WHEN MODIFY IT //pSql->pTscObj->db[0] = 0; taosHashClear(tscTableMetaMap); + taosHashClear(tscVgroupMap); + taosCacheEmpty(tscVgroupListBuf); return 0; } @@ -2892,6 +2899,10 @@ int32_t tscGetUdfFromNode(SSqlObj *pSql, SQueryInfo* pQueryInfo) { return code; } +static void freeElem(void* p) { + tfree(*(char**)p); +} + /** * retrieve table meta from mnode, and then update the local table meta hashmap. * @param pSql sql object @@ -2921,7 +2932,23 @@ int tscRenewTableMeta(SSqlObj *pSql, int32_t tableIndex) { size_t len = strlen(name); taosHashRemove(tscTableMetaMap, name, len); - return getTableMetaFromMnode(pSql, pTableMetaInfo, false); + if (pTableMeta->tableType == TSDB_SUPER_TABLE) { + void* pv = taosCacheAcquireByKey(tscVgroupListBuf, name, len); + if (pv != NULL) { + taosCacheRelease(tscVgroupListBuf, &pv, true); + } + } + + SArray* pNameList = taosArrayInit(1, POINTER_BYTES); + SArray* vgroupList = taosArrayInit(1, POINTER_BYTES); + + char* n = strdup(name); + taosArrayPush(pNameList, &n); + code = getMultiTableMetaFromMnode(pSql, pNameList, vgroupList, NULL, tscTableMetaCallBack, true); + taosArrayDestroyEx(pNameList, freeElem); + taosArrayDestroyEx(vgroupList, freeElem); + + return code; } static bool allVgroupInfoRetrieved(SQueryInfo* pQueryInfo) { diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h index 9ee241efc1..2370b909ef 100644 --- a/src/inc/taosmsg.h +++ b/src/inc/taosmsg.h @@ -809,7 +809,7 @@ typedef struct SMultiTableMeta { int32_t contLen; uint8_t compressed; // denote if compressed or not uint32_t rawLen; // size before compress - uint8_t metaClone; // make meta clone after retrieve meta from mnode + uint8_t metaClone; // make meta clone after retrieve meta from mnode char meta[]; } SMultiTableMeta; diff --git a/tests/examples/c/apitest.c b/tests/examples/c/apitest.c index ac522d6151..2b0d36a40f 100644 --- a/tests/examples/c/apitest.c +++ b/tests/examples/c/apitest.c @@ -1009,7 +1009,7 @@ int main(int argc, char *argv[]) { info = taos_get_client_info(taos); printf("client info: %s\n", info); - printf("************ verify shemaless *************\n"); + printf("************ verify schema-less *************\n"); verify_schema_less(taos); From 19b965816060c2626a85dceb02acb60024b317ba Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Mon, 26 Jul 2021 15:51:59 +0800 Subject: 
[PATCH 45/81] add traceable id to log --- src/client/src/tscParseLineProtocol.c | 243 +++++++++++++++----------- 1 file changed, 143 insertions(+), 100 deletions(-) diff --git a/src/client/src/tscParseLineProtocol.c b/src/client/src/tscParseLineProtocol.c index 7645a30eee..3ae3f5c371 100644 --- a/src/client/src/tscParseLineProtocol.c +++ b/src/client/src/tscParseLineProtocol.c @@ -34,7 +34,7 @@ typedef struct { char* value; //=================================== - size_t fieldSchemaIdx; + uint32_t fieldSchemaIdx; } TAOS_SML_KV; typedef struct { @@ -42,14 +42,14 @@ typedef struct { char* childTableName; TAOS_SML_KV* tags; - int tagNum; + int32_t tagNum; // first kv must be timestamp TAOS_SML_KV* fields; - int fieldNum; + int32_t fieldNum; //================================ - size_t schemaIdx; + uint32_t schemaIdx; } TAOS_SML_DATA_POINT; typedef enum { @@ -60,6 +60,10 @@ typedef enum { SML_TIME_STAMP_NANO_SECONDS } SMLTimeStampType; +typedef struct { + uint64_t id; + +} SSmlLinesInfo; //================================================================================================= int compareSmlColKv(const void* p1, const void* p2) { @@ -102,7 +106,7 @@ typedef struct { }; } SSchemaAction; -static int32_t getFieldBytesFromSmlKv(TAOS_SML_KV* kv, int32_t* bytes) { +static int32_t getFieldBytesFromSmlKv(TAOS_SML_KV* kv, int32_t* bytes, uint64_t id) { if (!IS_VAR_DATA_TYPE(kv->type)) { *bytes = tDataTypes[kv->type].bytes; } else { @@ -112,7 +116,7 @@ static int32_t getFieldBytesFromSmlKv(TAOS_SML_KV* kv, int32_t* bytes) { bool succ = taosMbsToUcs4(kv->value, kv->length, ucs, kv->length * TSDB_NCHAR_SIZE, &bytesNeeded); if (!succ) { free(ucs); - tscError("convert nchar string to UCS4_LE failed:%s", kv->value); + tscError("SML:0x%"PRIx64" convert nchar string to UCS4_LE failed:%s", id, kv->value); return TSDB_CODE_TSC_INVALID_VALUE; } free(ucs); @@ -124,7 +128,7 @@ static int32_t getFieldBytesFromSmlKv(TAOS_SML_KV* kv, int32_t* bytes) { return 0; } -static int32_t buildSmlKvSchema(TAOS_SML_KV* smlKv, SHashObj* hash, SArray* array) { +static int32_t buildSmlKvSchema(TAOS_SML_KV* smlKv, SHashObj* hash, SArray* array, SSmlLinesInfo* info) { SSchema* pField = NULL; size_t* pFieldIdx = taosHashGet(hash, smlKv->key, strlen(smlKv->key)); size_t fieldIdx = -1; @@ -134,12 +138,12 @@ static int32_t buildSmlKvSchema(TAOS_SML_KV* smlKv, SHashObj* hash, SArray* arra pField = taosArrayGet(array, fieldIdx); if (pField->type != smlKv->type) { - tscError("type mismatch. key %s, type %d. type before %d", smlKv->key, smlKv->type, pField->type); + tscError("SML:0x%"PRIx64" type mismatch. key %s, type %d. 
type before %d", info->id, smlKv->key, smlKv->type, pField->type); return TSDB_CODE_TSC_INVALID_VALUE; } int32_t bytes = 0; - code = getFieldBytesFromSmlKv(smlKv, &bytes); + code = getFieldBytesFromSmlKv(smlKv, &bytes, info->id); if (code != 0) { return code; } @@ -153,7 +157,7 @@ static int32_t buildSmlKvSchema(TAOS_SML_KV* smlKv, SHashObj* hash, SArray* arra field.type = smlKv->type; int32_t bytes = 0; - code = getFieldBytesFromSmlKv(smlKv, &bytes); + code = getFieldBytesFromSmlKv(smlKv, &bytes, info->id); if (code != 0) { return code; } @@ -169,7 +173,7 @@ static int32_t buildSmlKvSchema(TAOS_SML_KV* smlKv, SHashObj* hash, SArray* arra return 0; } -static int32_t buildDataPointSchemas(TAOS_SML_DATA_POINT* points, int numPoint, SArray* stableSchemas) { +static int32_t buildDataPointSchemas(TAOS_SML_DATA_POINT* points, int numPoint, SArray* stableSchemas, SSmlLinesInfo* info) { int32_t code = 0; SHashObj* sname2shema = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, false); @@ -199,18 +203,18 @@ static int32_t buildDataPointSchemas(TAOS_SML_DATA_POINT* points, int numPoint, for (int j = 0; j < point->tagNum; ++j) { TAOS_SML_KV* tagKv = point->tags + j; - code = buildSmlKvSchema(tagKv, pStableSchema->tagHash, pStableSchema->tags); + code = buildSmlKvSchema(tagKv, pStableSchema->tagHash, pStableSchema->tags, info); if (code != 0) { - tscError("build data point schema failed. point no.: %d, tag key: %s", i, tagKv->key); + tscError("SML:0x%"PRIx64" build data point schema failed. point no.: %d, tag key: %s", info->id, i, tagKv->key); return code; } } for (int j = 0; j < point->fieldNum; ++j) { TAOS_SML_KV* fieldKv = point->fields + j; - code = buildSmlKvSchema(fieldKv, pStableSchema->fieldHash, pStableSchema->fields); + code = buildSmlKvSchema(fieldKv, pStableSchema->fieldHash, pStableSchema->fields, info); if (code != 0) { - tscError("build data point schema failed. point no.: %d, tag key: %s", i, fieldKv->key); + tscError("SML:0x%"PRIx64" build data point schema failed. point no.: %d, tag key: %s", info->id, i, fieldKv->key); return code; } } @@ -226,7 +230,7 @@ static int32_t buildDataPointSchemas(TAOS_SML_DATA_POINT* points, int numPoint, } taosHashCleanup(sname2shema); - tscDebug("build point schema succeed. num of super table: %zu", numStables); + tscDebug("SML:0x%"PRIx64" build point schema succeed. num of super table: %zu", info->id, numStables); for (int32_t i = 0; i < numStables; ++i) { SSmlSTableSchema* schema = taosArrayGet(stableSchemas, i); tscDebug("\ttable name: %s, tags number: %zu, fields number: %zu", schema->sTableName, @@ -237,7 +241,7 @@ static int32_t buildDataPointSchemas(TAOS_SML_DATA_POINT* points, int numPoint, } static int32_t generateSchemaAction(SSchema* pointColField, SHashObj* dbAttrHash, SArray* dbAttrArray, bool isTag, char sTableName[], - SSchemaAction* action, bool* actionNeeded) { + SSchemaAction* action, bool* actionNeeded, SSmlLinesInfo* info) { char fieldNameLowerCase[TSDB_COL_NAME_LEN] = {0}; strtolower(fieldNameLowerCase, pointColField->name); @@ -246,7 +250,7 @@ static int32_t generateSchemaAction(SSchema* pointColField, SHashObj* dbAttrHash SSchema* dbAttr = taosArrayGet(dbAttrArray, *pDbIndex); assert(strcasecmp(dbAttr->name, pointColField->name) == 0); if (pointColField->type != dbAttr->type) { - tscError("point type and db type mismatch. key: %s. point type: %d, db type: %d", pointColField->name, + tscError("SML:0x%"PRIx64" point type and db type mismatch. key: %s. 
point type: %d, db type: %d", info->id, pointColField->name, pointColField->type, dbAttr->type); return TSDB_CODE_TSC_INVALID_VALUE; } @@ -273,7 +277,10 @@ static int32_t generateSchemaAction(SSchema* pointColField, SHashObj* dbAttrHash action->alterSTable.field = pointColField; *actionNeeded = true; } - tscDebug("generate schema action. action needed: %d, action: %d", *actionNeeded, action->action); + if (*actionNeeded) { + tscDebug("SML:0x%" PRIx64 " generate schema action. column name: %s, action: %d", info->id, fieldNameLowerCase, + action->action); + } return 0; } @@ -299,13 +306,13 @@ static int32_t buildColumnDescription(SSchema* field, } -static int32_t applySchemaAction(TAOS* taos, SSchemaAction* action) { +static int32_t applySchemaAction(TAOS* taos, SSchemaAction* action, SSmlLinesInfo* info) { int32_t code = 0; int32_t outBytes = 0; char *result = (char *)calloc(1, tsMaxSQLStringLen+1); int32_t capacity = tsMaxSQLStringLen + 1; - tscDebug("apply schema action: %d", action->action); + tscDebug("SML:0x%"PRIx64" apply schema action. action: %d", info->id, action->action); switch (action->action) { case SCHEMA_ACTION_ADD_COLUMN: { int n = sprintf(result, "alter stable %s add column ", action->alterSTable.sTableName); @@ -378,7 +385,7 @@ static int32_t applySchemaAction(TAOS* taos, SSchemaAction* action) { free(result); if (code != 0) { - tscError("apply schema action failure. %s", tstrerror(code)); + tscError("SML:0x%"PRIx64 "apply schema action failure. %s", info->id, tstrerror(code)); } return code; } @@ -391,7 +398,7 @@ static int32_t destroySmlSTableSchema(SSmlSTableSchema* schema) { return 0; } -int32_t loadTableMeta(TAOS* taos, char* tableName, SSmlSTableSchema* schema) { +int32_t loadTableMeta(TAOS* taos, char* tableName, SSmlSTableSchema* schema, SSmlLinesInfo* info) { int32_t code = 0; STscObj *pObj = (STscObj *)taos; @@ -400,7 +407,7 @@ int32_t loadTableMeta(TAOS* taos, char* tableName, SSmlSTableSchema* schema) { return TSDB_CODE_TSC_DISCONNECTED; } - tscDebug("load table schema. super table name: %s", tableName); + tscDebug("SML:0x%"PRIx64" load table schema. super table name: %s", info->id, tableName); char tableNameLowerCase[TSDB_TABLE_NAME_LEN]; strtolower(tableNameLowerCase, tableName); @@ -410,7 +417,7 @@ int32_t loadTableMeta(TAOS* taos, char* tableName, SSmlSTableSchema* schema) { TAOS_RES* res = taos_query(taos, sql); code = taos_errno(res); if (code != 0) { - tscError("describe table failure. %s", taos_errstr(res)); + tscError("SML:0x%"PRIx64" describe table failure. %s", info->id, taos_errstr(res)); taos_free_result(res); return code; } @@ -474,13 +481,13 @@ int32_t loadTableMeta(TAOS* taos, char* tableName, SSmlSTableSchema* schema) { size_t tagIndex = taosArrayGetSize(schema->tags) - 1; taosHashPut(schema->tagHash, field.name, strlen(field.name), &tagIndex, sizeof(tagIndex)); } - tscDebug("load table meta succeed. %s, columns number: %d, tag number: %d, precision: %d", - tableName, tableMeta->tableInfo.numOfColumns, tableMeta->tableInfo.numOfTags, schema->precision); + tscDebug("SML:0x%"PRIx64 "load table meta succeed. 
%s, columns number: %d, tag number: %d, precision: %d", + info->id, tableName, tableMeta->tableInfo.numOfColumns, tableMeta->tableInfo.numOfTags, schema->precision); free(tableMeta); tableMeta = NULL; return code; } -static int32_t modifyDBSchemas(TAOS* taos, SArray* stableSchemas) { +static int32_t modifyDBSchemas(TAOS* taos, SArray* stableSchemas, SSmlLinesInfo* info) { int32_t code = 0; size_t numStable = taosArrayGetSize(stableSchemas); for (int i = 0; i < numStable; ++i) { @@ -488,7 +495,7 @@ static int32_t modifyDBSchemas(TAOS* taos, SArray* stableSchemas) { SSmlSTableSchema dbSchema; memset(&dbSchema, 0, sizeof(SSmlSTableSchema)); - code = loadTableMeta(taos, pointSchema->sTableName, &dbSchema); + code = loadTableMeta(taos, pointSchema->sTableName, &dbSchema, info); if (code == TSDB_CODE_MND_INVALID_TABLE_NAME) { SSchemaAction schemaAction = {0}; schemaAction.action = SCHEMA_ACTION_CREATE_STABLE; @@ -496,10 +503,10 @@ static int32_t modifyDBSchemas(TAOS* taos, SArray* stableSchemas) { memcpy(schemaAction.createSTable.sTableName, pointSchema->sTableName, TSDB_TABLE_NAME_LEN); schemaAction.createSTable.tags = pointSchema->tags; schemaAction.createSTable.fields = pointSchema->fields; - applySchemaAction(taos, &schemaAction); - code = loadTableMeta(taos, pointSchema->sTableName, &dbSchema); + applySchemaAction(taos, &schemaAction, info); + code = loadTableMeta(taos, pointSchema->sTableName, &dbSchema, info); if (code != 0) { - tscError("reconcile point schema failed. can not create %s", pointSchema->sTableName); + tscError("SML:0x%"PRIx64" reconcile point schema failed. can not create %s", info->id, pointSchema->sTableName); return code; } else { pointSchema->precision = dbSchema.precision; @@ -516,9 +523,10 @@ static int32_t modifyDBSchemas(TAOS* taos, SArray* stableSchemas) { SSchema* pointTag = taosArrayGet(pointSchema->tags, j); SSchemaAction schemaAction = {0}; bool actionNeeded = false; - generateSchemaAction(pointTag, dbTagHash, dbSchema.tags, true, pointSchema->sTableName, &schemaAction, &actionNeeded); + generateSchemaAction(pointTag, dbTagHash, dbSchema.tags, true, pointSchema->sTableName, + &schemaAction, &actionNeeded, info); if (actionNeeded) { - code = applySchemaAction(taos, &schemaAction); + code = applySchemaAction(taos, &schemaAction, info); if (code != 0) { destroySmlSTableSchema(&dbSchema); return code; @@ -534,9 +542,10 @@ static int32_t modifyDBSchemas(TAOS* taos, SArray* stableSchemas) { SSchema* pointCol = taosArrayGet(pointSchema->fields, j); SSchemaAction schemaAction = {0}; bool actionNeeded = false; - generateSchemaAction(pointCol, dbFieldHash, dbSchema.fields,false, pointSchema->sTableName, &schemaAction, &actionNeeded); + generateSchemaAction(pointCol, dbFieldHash, dbSchema.fields,false, pointSchema->sTableName, + &schemaAction, &actionNeeded, info); if (actionNeeded) { - code = applySchemaAction(taos, &schemaAction); + code = applySchemaAction(taos, &schemaAction, info); if (code != 0) { destroySmlSTableSchema(&dbSchema); return code; @@ -548,23 +557,28 @@ static int32_t modifyDBSchemas(TAOS* taos, SArray* stableSchemas) { destroySmlSTableSchema(&dbSchema); } else { - tscError("load table meta error: %s", tstrerror(code)); + tscError("SML:0x%"PRIx64" load table meta error: %s", info->id, tstrerror(code)); return code; } } return 0; } -static int32_t getSmlMd5ChildTableName(TAOS_SML_DATA_POINT* point, char* tableName, int* tableNameLen) { - tscDebug("taos_sml_insert get child table name through md5"); +static int32_t 
getSmlMd5ChildTableName(TAOS_SML_DATA_POINT* point, char* tableName, int* tableNameLen, + SSmlLinesInfo* info) { + tscDebug("SML:0x%"PRIx64" taos_sml_insert get child table name through md5", info->id); qsort(point->tags, point->tagNum, sizeof(TAOS_SML_KV), compareSmlColKv); SStringBuilder sb; memset(&sb, 0, sizeof(sb)); - taosStringBuilderAppendString(&sb, point->stableName); + char sTableName[TSDB_TABLE_NAME_LEN] = {0}; + strtolower(sTableName, point->stableName); + taosStringBuilderAppendString(&sb, sTableName); for (int j = 0; j < point->tagNum; ++j) { taosStringBuilderAppendChar(&sb, ','); TAOS_SML_KV* tagKv = point->tags + j; - taosStringBuilderAppendString(&sb, tagKv->key); + char tagName[TSDB_COL_NAME_LEN] = {0}; + strtolower(tagName, tagKv->key); + taosStringBuilderAppendString(&sb, tagName); taosStringBuilderAppendChar(&sb, '='); taosStringBuilderAppend(&sb, tagKv->value, tagKv->length); } @@ -580,12 +594,12 @@ static int32_t getSmlMd5ChildTableName(TAOS_SML_DATA_POINT* point, char* tableNa context.digest[7], context.digest[8], context.digest[9], context.digest[10], context.digest[11], context.digest[12], context.digest[13], context.digest[14], context.digest[15]); taosStringBuilderDestroy(&sb); - tscDebug("child table name: %s", tableName); + tscDebug("SML:0x%"PRIx64" child table name: %s", info->id, tableName); return 0; } -static int32_t changeChildTableTagValue(TAOS* taos, const char* cTableName, const char* tagName, TAOS_BIND* bind) { +static int32_t changeChildTableTagValue(TAOS* taos, const char* cTableName, const char* tagName, TAOS_BIND* bind, SSmlLinesInfo* info) { char sql[512]; sprintf(sql, "alter table %s set tag %s=?", cTableName, tagName); @@ -594,31 +608,32 @@ static int32_t changeChildTableTagValue(TAOS* taos, const char* cTableName, cons code = taos_stmt_prepare(stmt, sql, (unsigned long)strlen(sql)); if (code != 0) { - tscError("%s", taos_stmt_errstr(stmt)); + tscError("SML:0x%"PRIx64" %s", info->id, taos_stmt_errstr(stmt)); return code; } code = taos_stmt_bind_param(stmt, bind); if (code != 0) { - tscError("%s", taos_stmt_errstr(stmt)); + tscError("SML:0x%"PRIx64" %s", info->id, taos_stmt_errstr(stmt)); return code; } code = taos_stmt_execute(stmt); if (code != 0) { - tscError("%s", taos_stmt_errstr(stmt)); + tscError("SML:0x%"PRIx64" %s", info->id, taos_stmt_errstr(stmt)); return code; } code = taos_stmt_close(stmt); if (code != 0) { - tscError("%s", taos_stmt_errstr(stmt)); + tscError("SML:0x%"PRIx64" %s", info->id, taos_stmt_errstr(stmt)); return code; } return code; } -static int32_t creatChildTableIfNotExists(TAOS* taos, const char* cTableName, const char* sTableName, SArray* tagsSchema, SArray* tagsBind) { +static int32_t creatChildTableIfNotExists(TAOS* taos, const char* cTableName, const char* sTableName, + SArray* tagsSchema, SArray* tagsBind, SSmlLinesInfo* info) { size_t numTags = taosArrayGetSize(tagsSchema); char* sql = malloc(tsMaxSQLStringLen+1); int freeBytes = tsMaxSQLStringLen + 1; @@ -639,7 +654,7 @@ static int32_t creatChildTableIfNotExists(TAOS* taos, const char* cTableName, co snprintf(sql + strlen(sql) - 1, freeBytes-strlen(sql)+1, ")"); sql[strlen(sql)] = '\0'; - tscDebug("create table : %s", sql); + tscDebug("SML:0x%"PRIx64" create table : %s", info->id, sql); TAOS_STMT* stmt = taos_stmt_init(taos); int32_t code; @@ -647,31 +662,31 @@ static int32_t creatChildTableIfNotExists(TAOS* taos, const char* cTableName, co free(sql); if (code != 0) { - tscError("%s", taos_stmt_errstr(stmt)); + tscError("SML:0x%"PRIx64" %s", info->id, 
taos_stmt_errstr(stmt)); return code; } code = taos_stmt_bind_param(stmt, TARRAY_GET_START(tagsBind)); if (code != 0) { - tscError("%s", taos_stmt_errstr(stmt)); + tscError("SML:0x%"PRIx64" %s", info->id, taos_stmt_errstr(stmt)); return code; } code = taos_stmt_execute(stmt); if (code != 0) { - tscError("%s", taos_stmt_errstr(stmt)); + tscError("SML:0x%"PRIx64" %s", info->id, taos_stmt_errstr(stmt)); return code; } code = taos_stmt_close(stmt); if (code != 0) { - tscError("%s", taos_stmt_errstr(stmt)); + tscError("SML:0x%"PRIx64" %s", info->id, taos_stmt_errstr(stmt)); return code; } return code; } -static int32_t insertChildTableBatch(TAOS* taos, char* cTableName, SArray* colsSchema, SArray* rowsBind) { +static int32_t insertChildTableBatch(TAOS* taos, char* cTableName, SArray* colsSchema, SArray* rowsBind, SSmlLinesInfo* info) { size_t numCols = taosArrayGetSize(colsSchema); char* sql = malloc(tsMaxSQLStringLen+1); int32_t freeBytes = tsMaxSQLStringLen + 1 ; @@ -689,7 +704,7 @@ static int32_t insertChildTableBatch(TAOS* taos, char* cTableName, SArray* cols snprintf(sql + strlen(sql)-1, freeBytes-strlen(sql)+1, ")"); sql[strlen(sql)] = '\0'; - tscDebug("insert rows %zu into child table %s. ", taosArrayGetSize(rowsBind), cTableName); + tscDebug("SML:0x%"PRIx64" insert rows into child table %s. num of rows: %zu", info->id, cTableName, taosArrayGetSize(rowsBind)); int32_t code = 0; int32_t try = 0; @@ -700,14 +715,14 @@ static int32_t insertChildTableBatch(TAOS* taos, char* cTableName, SArray* cols free(sql); if (code != 0) { - tscError("%s", taos_stmt_errstr(stmt)); + tscError("SML:0x%"PRIx64" %s", info->id, taos_stmt_errstr(stmt)); return code; } do { code = taos_stmt_set_tbname(stmt, cTableName); if (code != 0) { - tscError("%s", taos_stmt_errstr(stmt)); + tscError("SML:0x%"PRIx64" %s", info->id, taos_stmt_errstr(stmt)); return code; } @@ -716,24 +731,24 @@ static int32_t insertChildTableBatch(TAOS* taos, char* cTableName, SArray* cols TAOS_BIND* colsBinds = taosArrayGetP(rowsBind, i); code = taos_stmt_bind_param(stmt, colsBinds); if (code != 0) { - tscError("%s", taos_stmt_errstr(stmt)); + tscError("SML:0x%"PRIx64" %s", info->id, taos_stmt_errstr(stmt)); return code; } code = taos_stmt_add_batch(stmt); if (code != 0) { - tscError("%s", taos_stmt_errstr(stmt)); + tscError("SML:0x%"PRIx64" %s", info->id, taos_stmt_errstr(stmt)); return code; } } code = taos_stmt_execute(stmt); if (code != 0) { - tscError("%s", taos_stmt_errstr(stmt)); + tscError("SML:0x%"PRIx64" %s", info->id, taos_stmt_errstr(stmt)); } } while (code == TSDB_CODE_TDB_TABLE_RECONFIGURE && try++ < TSDB_MAX_REPLICA); if (code != 0) { - tscError("%s", taos_stmt_errstr(stmt)); + tscError("SML:0x%"PRIx64" %s", info->id, taos_stmt_errstr(stmt)); taos_stmt_close(stmt); } else { taos_stmt_close(stmt); @@ -743,13 +758,13 @@ static int32_t insertChildTableBatch(TAOS* taos, char* cTableName, SArray* cols } static int32_t arrangePointsByChildTableName(TAOS_SML_DATA_POINT* points, int numPoints, - SHashObj* cname2points, SArray* stableSchemas) { + SHashObj* cname2points, SArray* stableSchemas, SSmlLinesInfo* info) { for (int32_t i = 0; i < numPoints; ++i) { TAOS_SML_DATA_POINT * point = points + i; if (!point->childTableName) { char childTableName[TSDB_TABLE_NAME_LEN]; int32_t tableNameLen = TSDB_TABLE_NAME_LEN; - getSmlMd5ChildTableName(point, childTableName, &tableNameLen); + getSmlMd5ChildTableName(point, childTableName, &tableNameLen, info); point->childTableName = calloc(1, tableNameLen+1); strncpy(point->childTableName, 
childTableName, tableNameLen); point->childTableName[tableNameLen] = '\0'; @@ -790,7 +805,7 @@ static int32_t arrangePointsByChildTableName(TAOS_SML_DATA_POINT* points, int nu } static int32_t applyChildTableTags(TAOS* taos, char* cTableName, char* sTableName, - SSmlSTableSchema* sTableSchema, SArray* cTablePoints) { + SSmlSTableSchema* sTableSchema, SArray* cTablePoints, SSmlLinesInfo* info) { size_t numTags = taosArrayGetSize(sTableSchema->tags); size_t rows = taosArrayGetSize(cTablePoints); @@ -846,7 +861,7 @@ static int32_t applyChildTableTags(TAOS* taos, char* cTableName, char* sTableNam int32_t code = taos_errno(result); if (code != 0) { - tscError("get child table %s tags failed. error string %s", cTableName, taos_errstr(result)); + tscError("SML:0x%"PRIx64" get child table %s tags failed. error string %s", info->id, cTableName, taos_errstr(result)); goto cleanup; } @@ -863,8 +878,8 @@ static int32_t applyChildTableTags(TAOS* taos, char* cTableName, char* sTableNam TAOS_SML_KV* tagKV = tagKVs[notNullTagsIndices[i-1]]; if (tagKV->type != dbType) { - tscError("child table %s tag %s type mismatch. point type : %d, db type : %d", - cTableName, tagKV->key, tagKV->type, dbType); + tscError("SML:0x%"PRIx64" child table %s tag %s type mismatch. point type : %d, db type : %d", + info->id, cTableName, tagKV->key, tagKV->type, dbType); return TSDB_CODE_TSC_INVALID_VALUE; } @@ -872,16 +887,16 @@ static int32_t applyChildTableTags(TAOS* taos, char* cTableName, char* sTableNam if (val == NULL || length != tagKV->length || memcmp(tagKV->value, val, length) != 0) { TAOS_BIND* bind = taosArrayGet(tagBinds, tagKV->fieldSchemaIdx); - code = changeChildTableTagValue(taos, cTableName, tagKV->key, bind); + code = changeChildTableTagValue(taos, cTableName, tagKV->key, bind, info); if (code != 0) { - tscError("change child table tag failed. table name %s, tag %s", cTableName, tagKV->key); + tscError("SML:0x%"PRIx64" change child table tag failed. table name %s, tag %s", info->id, cTableName, tagKV->key); goto cleanup; } } } - tscDebug("successfully applied point tags. child table: %s", cTableName); + tscDebug("SML:0x%"PRIx64" successfully applied point tags. 
child table: %s", info->id, cTableName); } else { - code = creatChildTableIfNotExists(taos, cTableName, sTableName, sTableSchema->tags, tagBinds); + code = creatChildTableIfNotExists(taos, cTableName, sTableName, sTableSchema->tags, tagBinds, info); if (code != 0) { goto cleanup; } @@ -897,7 +912,8 @@ cleanup: return code; } -static int32_t applyChildTableFields(TAOS* taos, SSmlSTableSchema* sTableSchema, char* cTableName, SArray* cTablePoints) { +static int32_t applyChildTableFields(TAOS* taos, SSmlSTableSchema* sTableSchema, char* cTableName, + SArray* cTablePoints, SSmlLinesInfo* info) { int32_t code = TSDB_CODE_SUCCESS; size_t numCols = taosArrayGetSize(sTableSchema->fields); @@ -909,8 +925,8 @@ static int32_t applyChildTableFields(TAOS* taos, SSmlSTableSchema* sTableSchema, TAOS_BIND* colBinds = calloc(numCols, sizeof(TAOS_BIND)); if (colBinds == NULL) { - tscError("taos_sml_insert insert points, failed to allocated memory for TAOS_BIND, " - "num of rows: %zu, num of cols: %zu", rows, numCols); + tscError("SML:0x%"PRIx64" taos_sml_insert insert points, failed to allocated memory for TAOS_BIND, " + "num of rows: %zu, num of cols: %zu", info->id, rows, numCols); return TSDB_CODE_TSC_OUT_OF_MEMORY; } @@ -931,9 +947,9 @@ static int32_t applyChildTableFields(TAOS* taos, SSmlSTableSchema* sTableSchema, taosArrayPush(rowsBind, &colBinds); } - code = insertChildTableBatch(taos, cTableName, sTableSchema->fields, rowsBind); + code = insertChildTableBatch(taos, cTableName, sTableSchema->fields, rowsBind, info); if (code != 0) { - tscError("insert into child table %s failed. error %s", cTableName, tstrerror(code)); + tscError("SML:0x%"PRIx64" insert into child table %s failed. error %s", info->id, cTableName, tstrerror(code)); } for (int i = 0; i < rows; ++i) { @@ -948,30 +964,35 @@ static int32_t applyChildTableFields(TAOS* taos, SSmlSTableSchema* sTableSchema, return code; } -static int32_t applyDataPoints(TAOS* taos, TAOS_SML_DATA_POINT* points, int32_t numPoints, SArray* stableSchemas) { +static int32_t applyDataPoints(TAOS* taos, TAOS_SML_DATA_POINT* points, int32_t numPoints, SArray* stableSchemas, SSmlLinesInfo* info) { int32_t code = TSDB_CODE_SUCCESS; SHashObj* cname2points = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, false); - arrangePointsByChildTableName(points, numPoints, cname2points, stableSchemas); + arrangePointsByChildTableName(points, numPoints, cname2points, stableSchemas, info); SArray** pCTablePoints = taosHashIterate(cname2points, NULL); while (pCTablePoints) { SArray* cTablePoints = *pCTablePoints; + TAOS_SML_DATA_POINT* point = taosArrayGetP(cTablePoints, 0); SSmlSTableSchema* sTableSchema = taosArrayGet(stableSchemas, point->schemaIdx); - code = applyChildTableTags(taos, point->childTableName, point->stableName, sTableSchema, cTablePoints); + + tscDebug("SML:0x%"PRIx64" apply child table tags. child table: %s", info->id, point->childTableName); + code = applyChildTableTags(taos, point->childTableName, point->stableName, sTableSchema, cTablePoints, info); if (code != 0) { tscError("apply child table tags failed. child table %s, error %s", point->childTableName, tstrerror(code)); goto cleanup; } - code = applyChildTableFields(taos, sTableSchema, point->childTableName, cTablePoints); + + tscDebug("SML:0x%"PRIx64" apply child table points. child table: %s", info->id, point->childTableName); + code = applyChildTableFields(taos, sTableSchema, point->childTableName, cTablePoints, info); if (code != 0) { tscError("Apply child table fields failed. 
child table %s, error %s", point->childTableName, tstrerror(code)); goto cleanup; } - tscDebug("successfully applied data points of child table %s", point->childTableName); + tscDebug("SML:0x%"PRIx64" successfully applied data points of child table %s", info->id, point->childTableName); pCTablePoints = taosHashIterate(cname2points, pCTablePoints); } @@ -987,27 +1008,30 @@ cleanup: return code; } -int taos_sml_insert(TAOS* taos, TAOS_SML_DATA_POINT* points, int numPoint) { - tscDebug("taos_sml_insert. number of points: %d", numPoint); +int taos_sml_insert(TAOS* taos, TAOS_SML_DATA_POINT* points, int numPoint, SSmlLinesInfo* info) { + tscDebug("SML:0x%"PRIx64" taos_sml_insert. number of points: %d", info->id, numPoint); int32_t code = TSDB_CODE_SUCCESS; + tscDebug("SML:0x%"PRIx64" build data point schemas", info->id); SArray* stableSchemas = taosArrayInit(32, sizeof(SSmlSTableSchema)); // SArray - code = buildDataPointSchemas(points, numPoint, stableSchemas); + code = buildDataPointSchemas(points, numPoint, stableSchemas, info); if (code != 0) { - tscError("error building data point schemas : %s", tstrerror(code)); + tscError("SML:0x%"PRIx64" error building data point schemas : %s", info->id, tstrerror(code)); goto clean_up; } - code = modifyDBSchemas(taos, stableSchemas); + tscDebug("SML:0x%"PRIx64" modify db schemas", info->id); + code = modifyDBSchemas(taos, stableSchemas, info); if (code != 0) { - tscError("error change db schema : %s", tstrerror(code)); + tscError("SML:0x%"PRIx64" error change db schema : %s", info->id, tstrerror(code)); goto clean_up; } - code = applyDataPoints(taos, points, numPoint, stableSchemas); + tscDebug("SML:0x%"PRIx64" apply data points", info->id); + code = applyDataPoints(taos, points, numPoint, stableSchemas, info); if (code != 0) { - tscError("error apply data points : %s", tstrerror(code)); + tscError("SML:0x%"PRIx64" error apply data points : %s", info->id, tstrerror(code)); } clean_up: @@ -1902,6 +1926,18 @@ int32_t tscParseLine(const char* sql, TAOS_SML_DATA_POINT* smlData) { //========================================================================= +static uint64_t linesSmlHandleId = 0; + +uint64_t genLinesSmlId() { + uint64_t id; + + do { + id = atomic_add_fetch_64(&linesSmlHandleId, 1); + } while (id == 0); + + return id; +} + void destroySmlDataPoint(TAOS_SML_DATA_POINT* point) { for (int i=0; itagNum; ++i) { free((point->tags+i)->key); @@ -1917,16 +1953,16 @@ void destroySmlDataPoint(TAOS_SML_DATA_POINT* point) { free(point->childTableName); } -int32_t tscParseLines(char* lines[], int numLines, SArray* points, SArray* failedLines) { +int32_t tscParseLines(char* lines[], int numLines, SArray* points, SArray* failedLines, SSmlLinesInfo* info) { for (int32_t i = 0; i < numLines; ++i) { TAOS_SML_DATA_POINT point = {0}; int32_t code = tscParseLine(lines[i], &point); if (code != TSDB_CODE_SUCCESS) { - tscError("data point line parse failed. line %d : %s", i, lines[i]); + tscError("SML:0x%"PRIx64" data point line parse failed. line %d : %s", info->id, i, lines[i]); destroySmlDataPoint(&point); return TSDB_CODE_TSC_LINE_SYNTAX_ERROR; } else { - tscDebug("data point line parse success. line %d", i); + tscDebug("SML:0x%"PRIx64" data point line parse success. 
line %d", info->id, i); } taosArrayPush(points, &point); @@ -1937,15 +1973,19 @@ int32_t tscParseLines(char* lines[], int numLines, SArray* points, SArray* faile int taos_insert_lines(TAOS* taos, char* lines[], int numLines) { int32_t code = 0; + SSmlLinesInfo* info = calloc(1, sizeof(SSmlLinesInfo)); + info->id = genLinesSmlId(); + if (numLines <= 0 || numLines > 65536) { - tscError("taos_insert_lines numLines should be between 1 and 65536. numLines: %d", numLines); + tscError("SML:0x%"PRIx64" taos_insert_lines numLines should be between 1 and 65536. numLines: %d", info->id, numLines); code = TSDB_CODE_TSC_APP_ERROR; return code; } for (int i = 0; i < numLines; ++i) { if (lines[i] == NULL) { - tscError("taos_insert_lines line %d is NULL", i); + tscError("SML:0x%"PRIx64" taos_insert_lines line %d is NULL", info->id, i); + free(info); code = TSDB_CODE_TSC_APP_ERROR; return code; } @@ -1953,12 +1993,13 @@ int taos_insert_lines(TAOS* taos, char* lines[], int numLines) { SArray* lpPoints = taosArrayInit(numLines, sizeof(TAOS_SML_DATA_POINT)); if (lpPoints == NULL) { - tscError("taos_insert_lines failed to allocate memory"); + tscError("SML:0x%"PRIx64" taos_insert_lines failed to allocate memory", info->id); + free(info); return TSDB_CODE_TSC_OUT_OF_MEMORY; } - tscDebug("taos_insert_lines begin inserting %d lines, first line: %s", numLines, lines[0]); - code = tscParseLines(lines, numLines, lpPoints, NULL); + tscDebug("SML:0x%"PRIx64" taos_insert_lines begin inserting %d lines, first line: %s", info->id, numLines, lines[0]); + code = tscParseLines(lines, numLines, lpPoints, NULL, info); size_t numPoints = taosArrayGetSize(lpPoints); if (code != 0) { @@ -1966,13 +2007,13 @@ int taos_insert_lines(TAOS* taos, char* lines[], int numLines) { } TAOS_SML_DATA_POINT* points = TARRAY_GET_START(lpPoints); - code = taos_sml_insert(taos, points, (int)numPoints); + code = taos_sml_insert(taos, points, (int)numPoints, info); if (code != 0) { - tscError("taos_sml_insert error: %s", tstrerror((code))); + tscError("SML:0x%"PRIx64" taos_sml_insert error: %s", info->id, tstrerror((code))); } cleanup: - tscDebug("taos_insert_lines finish inserting %d lines. code: %d", numLines, code); + tscDebug("SML:0x%"PRIx64" taos_insert_lines finish inserting %d lines. 
code: %d", info->id, numLines, code); points = TARRAY_GET_START(lpPoints); numPoints = taosArrayGetSize(lpPoints); for (int i=0; i Date: Mon, 26 Jul 2021 16:04:13 +0800 Subject: [PATCH 46/81] fix windows compilation error --- src/client/src/tscParseLineProtocol.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client/src/tscParseLineProtocol.c b/src/client/src/tscParseLineProtocol.c index 3ae3f5c371..b18a0527d8 100644 --- a/src/client/src/tscParseLineProtocol.c +++ b/src/client/src/tscParseLineProtocol.c @@ -168,7 +168,7 @@ static int32_t buildSmlKvSchema(TAOS_SML_KV* smlKv, SHashObj* hash, SArray* arra taosHashPut(hash, field.name, tagKeyLen, &fieldIdx, sizeof(fieldIdx)); } - smlKv->fieldSchemaIdx = fieldIdx; + smlKv->fieldSchemaIdx = (uint32_t)fieldIdx; return 0; } @@ -219,7 +219,7 @@ static int32_t buildDataPointSchemas(TAOS_SML_DATA_POINT* points, int numPoint, } } - point->schemaIdx = stableIdx; + point->schemaIdx = (uint32_t)stableIdx; } size_t numStables = taosArrayGetSize(stableSchemas); From 9bb0b4bd9344ff7d7e7fefc37d7ca859abec9c3e Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Mon, 26 Jul 2021 13:09:43 +0800 Subject: [PATCH 47/81] [TD-5521]: [schemaless]id can only contain digits/alphabet/underscore --- src/client/src/tscParseLineProtocol.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client/src/tscParseLineProtocol.c b/src/client/src/tscParseLineProtocol.c index 08d27fd86f..1dcbf12290 100644 --- a/src/client/src/tscParseLineProtocol.c +++ b/src/client/src/tscParseLineProtocol.c @@ -1747,7 +1747,7 @@ static int32_t parseSmlMeasurement(TAOS_SML_DATA_POINT *pSml, const char **index static int32_t isValidChildTableName(const char *pTbName, int16_t len) { const char *cur = pTbName; for (int i = 0; i < len; ++i) { - if(!isdigit(cur[i]) && !isalpha(cur[i]) && cur[i] != '_') { + if(!isdigit(cur[i]) && !isalpha(cur[i]) && (cur[i] != '_')) { return TSDB_CODE_TSC_LINE_SYNTAX_ERROR; } } @@ -1789,7 +1789,7 @@ static int32_t parseSmlKvPairs(TAOS_SML_KV **pKVs, int *num_kvs, if (!isField && (strcasecmp(pkv->key, "ID") == 0) && pkv->type == TSDB_DATA_TYPE_BINARY) { ret = isValidChildTableName(pkv->value, pkv->length); - if (!ret) { + if (ret) { goto error; } smlData->childTableName = malloc( pkv->length + 1); From 5dfc8f0118e64223f77b18efe2d72c6e045915c7 Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Mon, 26 Jul 2021 17:24:22 +0800 Subject: [PATCH 48/81] [TD-4669] : fix redundant space in timestamp. --- documentation20/en/01.evaluation/docs.md | 2 +- documentation20/en/03.architecture/docs.md | 2 +- documentation20/en/05.insert/docs.md | 2 +- documentation20/en/06.queries/docs.md | 2 +- documentation20/en/08.connector/docs.md | 6 +++--- documentation20/en/11.administrator/docs.md | 2 +- 6 files changed, 8 insertions(+), 8 deletions(-) diff --git a/documentation20/en/01.evaluation/docs.md b/documentation20/en/01.evaluation/docs.md index 50948efbd8..250f465d7b 100644 --- a/documentation20/en/01.evaluation/docs.md +++ b/documentation20/en/01.evaluation/docs.md @@ -11,7 +11,7 @@ One of the modules of TDengine is the time-series database. However, in addition - **Full-stack time-series data processing engine**: Integrate database, message queue, cache, stream computing, and other functions, and the applications do not need to integrate with software such as Kafka/Redis/HBase/Spark/HDFS, thus greatly reducing the complexity cost of application development and maintenance. 
- **Powerful analysis functions**: Data from ten years ago or one second ago, can all be queried based on a specified time range. Data can be aggregated on a timeline or multiple devices. Ad-hoc queries can be made at any time through Shell, Python, R, and MATLAB. - **Seamless connection with third-party tools**: Integration with Telegraf, Grafana, EMQ, HiveMQ, Prometheus, MATLAB, R, etc. without even one single line of code. OPC, Hadoop, Spark, etc. will be supported in the future, and more BI tools will be seamlessly connected to. -- **Zero operation cost & zero learning cost**: Installing clusters is simple and quick, with real-time backup built-in, and no need to split libraries or tables. Similar to standard SQL, TDengine can support RESTful, Python/Java/C/C + +/C#/Go/Node.js, and similar to MySQL with zero learning cost. +- **Zero operation cost & zero learning cost**: Installing clusters is simple and quick, with real-time backup built-in, and no need to split libraries or tables. Similar to standard SQL, TDengine can support RESTful, Python/Java/C/C++/C#/Go/Node.js, and similar to MySQL with zero learning cost. With TDengine, the total cost of ownership of typical IoT, Internet of Vehicles, and Industrial Internet Big Data platforms can be greatly reduced. However, it should be pointed out that due to making full use of the characteristics of IoT time-series data, TDengine cannot be used to process general data from web crawlers, microblogs, WeChat, e-commerce, ERP, CRM, and other sources. diff --git a/documentation20/en/03.architecture/docs.md b/documentation20/en/03.architecture/docs.md index 2e5fc7bd18..64696d79ad 100644 --- a/documentation20/en/03.architecture/docs.md +++ b/documentation20/en/03.architecture/docs.md @@ -172,7 +172,7 @@ A complete TDengine system runs on one or more physical nodes. Logically, it inc **Virtual node group (VGroup)**: Vnodes on different data nodes can form a virtual node group to ensure the high reliability of the system. The virtual node group is managed in a master/slave structure. Write operations can only be performed on the master vnode, and the system synchronizes data to the slave vnode via replication, thus ensuring that one single replica of data is copied on multiple physical nodes. The number of virtual nodes in a vgroup equals the number of data replicas. If the number of replicas of a DB is N, the system must have at least N data nodes. The number of replicas can be specified by the parameter replica when creating DB, and the default is 1. Using the multi-replica feature of TDengine, the same high data reliability can be done without the need for expensive storage devices such as disk arrays. Virtual node group is created and managed by management node, and the management node assigns a system unique ID, aka VGroup ID. If two virtual nodes has the same vnode group ID, means that they belong to the same group and the data is backed up to each other. The number of virtual nodes in a virtual node group can be dynamically changed, allowing only one, that is, no data replication. VGroup ID is never changed. Even if a virtual node group is deleted, its ID will not be reused. -**TAOSC**: TAOSC is the driver provided by TDengine to applications, which is responsible for dealing with the interface interaction between application and cluster, and provides the native interface of C/C + + language, which is embedded in JDBC, C #, Python, Go, Node.js language connection libraries. 
Applications interact with the whole cluster through taosc instead of directly connecting to data nodes in the cluster. This module is responsible for obtaining and caching metadata; forwarding requests for insertion, query, etc. to the correct data node; when returning the results to the application, taosc also need to be responsible for the final level of aggregation, sorting, filtering and other operations. For JDBC, C/C + +/C #/Python/Go/Node.js interfaces, this module runs on the physical node where the application is located. At the same time, in order to support the fully distributed RESTful interface, taosc has a running instance on each dnode of TDengine cluster. +**TAOSC**: TAOSC is the driver provided by TDengine to applications, which is responsible for dealing with the interface interaction between application and cluster, and provides the native interface of C/C++ language, which is embedded in JDBC, C #, Python, Go, Node.js language connection libraries. Applications interact with the whole cluster through taosc instead of directly connecting to data nodes in the cluster. This module is responsible for obtaining and caching metadata; forwarding requests for insertion, query, etc. to the correct data node; when returning the results to the application, taosc also need to be responsible for the final level of aggregation, sorting, filtering and other operations. For JDBC, C/C++/C #/Python/Go/Node.js interfaces, this module runs on the physical node where the application is located. At the same time, in order to support the fully distributed RESTful interface, taosc has a running instance on each dnode of TDengine cluster. ### Node Communication diff --git a/documentation20/en/05.insert/docs.md b/documentation20/en/05.insert/docs.md index 23576099cf..88746ea608 100644 --- a/documentation20/en/05.insert/docs.md +++ b/documentation20/en/05.insert/docs.md @@ -4,7 +4,7 @@ TDengine supports multiple interfaces to write data, including SQL, Prometheus, ## SQL Writing -Applications insert data by executing SQL insert statements through C/C + +, JDBC, GO, or Python Connector, and users can manually enter SQL insert statements to insert data through TAOS Shell. For example, the following insert writes a record to table d1001: +Applications insert data by executing SQL insert statements through C/C++, JDBC, GO, or Python Connector, and users can manually enter SQL insert statements to insert data through TAOS Shell. For example, the following insert writes a record to table d1001: ```mysql INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31); diff --git a/documentation20/en/06.queries/docs.md b/documentation20/en/06.queries/docs.md index f5d2b43e2b..c4f1359820 100644 --- a/documentation20/en/06.queries/docs.md +++ b/documentation20/en/06.queries/docs.md @@ -2,7 +2,7 @@ ## Main Query Features -TDengine uses SQL as the query language. Applications can send SQL statements through C/C + +, Java, Go, Python connectors, and users can manually execute SQL Ad-Hoc Query through the Command Line Interface (CLI) tool TAOS Shell provided by TDengine. TDengine supports the following query functions: +TDengine uses SQL as the query language. Applications can send SQL statements through C/C++, Java, Go, Python connectors, and users can manually execute SQL Ad-Hoc Query through the Command Line Interface (CLI) tool TAOS Shell provided by TDengine. 
TDengine supports the following query functions: - Single-column and multi-column data query - Multiple filters for tags and numeric values: >, <, =, < >, like, etc diff --git a/documentation20/en/08.connector/docs.md b/documentation20/en/08.connector/docs.md index 9b3940a2e8..9cbd395206 100644 --- a/documentation20/en/08.connector/docs.md +++ b/documentation20/en/08.connector/docs.md @@ -22,7 +22,7 @@ Note: ● stands for that has been verified by official tests; ○ stands for th Note: - To access the TDengine database through connectors (except RESTful) in the system without TDengine server software, it is necessary to install the corresponding version of the client installation package to make the application driver (the file name is [libtaos.so](http://libtaos.so/) in Linux system and taos.dll in Windows system) installed in the system, otherwise, the error that the corresponding library file cannot be found will occur. -- All APIs that execute SQL statements, such as `tao_query`, `taos_query_a`, `taos_subscribe` in C/C + + Connector, and APIs corresponding to them in other languages, can only execute one SQL statement at a time. If the actual parameters contain multiple statements, their behavior is undefined. +- All APIs that execute SQL statements, such as `tao_query`, `taos_query_a`, `taos_subscribe` in C/C++ Connector, and APIs corresponding to them in other languages, can only execute one SQL statement at a time. If the actual parameters contain multiple statements, their behavior is undefined. - Users upgrading to TDengine 2.0. 8.0 must update the JDBC connection. TDengine must upgrade taos-jdbcdriver to 2.0.12 and above. - No matter which programming language connector is selected, TDengine version 2.0 and above recommends that each thread of database application establish an independent connection or establish a connection pool based on threads to avoid mutual interference between threads of "USE statement" state variables in the connection (but query and write operations of the connection are thread-safe). @@ -152,7 +152,7 @@ Under cmd, enter the c:\ tdengine directory and directly execute taos.exe, and y | **OS Type** | Linux | Win64 | Win32 | Linux | Linux | | **Supported or Not** | Yes | **Yes** | **Yes** | **Yes** | **In development** | -The C/C + + API is similar to MySQL's C API. When application use it, it needs to include the TDengine header file taos.h (after installed, it is located in/usr/local/taos/include): +The C/C++ API is similar to MySQL's C API. When application use it, it needs to include the TDengine header file taos.h (after installed, it is located in/usr/local/taos/include): ```C #include @@ -923,7 +923,7 @@ Manually install the following tools: If the steps above cannot be performed successfully, you can refer to Microsoft's Node.js User Manual [Microsoft's Node.js Guidelines for Windows](https://github.com/Microsoft/nodejs-guidelines/blob/master/windows-environment.md#compiling-native-addon-modules). -If you use ARM64 Node.js on Windows 10 ARM, you also need to add "Visual C + + compilers and libraries for ARM64" and "Visual C + + ATL for ARM64". +If you use ARM64 Node.js on Windows 10 ARM, you also need to add "Visual C++ compilers and libraries for ARM64" and "Visual C++ ATL for ARM64". 
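Before the connector samples below, a minimal sketch of the C/C++ API flow described earlier in this connector document (include taos.h, connect, query, fetch). The host, user, password and database here are placeholders and error handling is kept to a minimum; this is an illustrative sketch, not code from the patch.

```C
#include <stdio.h>
#include <string.h>
#include <taos.h>

int main() {
  taos_init();
  // Placeholder connection parameters for this sketch.
  TAOS *conn = taos_connect("localhost", "root", "taosdata", NULL, 0);
  if (conn == NULL) {
    printf("failed to connect to TDengine\n");
    return 1;
  }

  TAOS_RES *res = taos_query(conn, "show databases;");
  if (taos_errno(res) != 0) {
    printf("query failed: %s\n", taos_errstr(res));
  } else {
    TAOS_ROW    row;
    int         numFields = taos_num_fields(res);
    TAOS_FIELD *fields    = taos_fetch_fields(res);
    char        line[1024];
    while ((row = taos_fetch_row(res)) != NULL) {
      memset(line, 0, sizeof(line));
      taos_print_row(line, row, fields, numFields);  // format one row as text
      printf("%s\n", line);
    }
  }

  taos_free_result(res);
  taos_close(conn);
  taos_cleanup();
  return 0;
}
```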
#### Sample diff --git a/documentation20/en/11.administrator/docs.md b/documentation20/en/11.administrator/docs.md index 509b7fa62f..90bfdbe9c6 100644 --- a/documentation20/en/11.administrator/docs.md +++ b/documentation20/en/11.administrator/docs.md @@ -240,7 +240,7 @@ Client configuration parameters: ```sql SELECT count(*) FROM table_name WHERE TS<1554984068000; ``` - In order to avoid the uncertainty caused by using string time format, Unix timestamp can also be used directly. In addition, timestamp strings with time zones can also be used in SQL statements, such as: timestamp strings in RFC3339 format, 2013-04-12T15: 52: 01.123 +08:00, or ISO-8601 format timestamp strings 2013-04-12T15: 52: 01.123 +0800. The conversion of the above two strings into Unix timestamps is not affected by the time zone in which the system is located. + In order to avoid the uncertainty caused by using string time format, Unix timestamp can also be used directly. In addition, timestamp strings with time zones can also be used in SQL statements, such as: timestamp strings in RFC3339 format, 2013-04-12T15:52:01.123+08:00, or ISO-8601 format timestamp strings 2013-04-12T15:52:01.123+0800. The conversion of the above two strings into Unix timestamps is not affected by the time zone in which the system is located. When starting taos, you can also specify an end point for an instance of taosd from the command line, otherwise read from taos.cfg. From c63d6f4c94344f7ec4564b137791edd1829ac0fb Mon Sep 17 00:00:00 2001 From: wenzhouwww Date: Mon, 26 Jul 2021 18:00:02 +0800 Subject: [PATCH 49/81] [TD-5369] fulltest.sh --- tests/pytest/fulltest.sh | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index ab7dedc959..923934ac69 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -161,7 +161,11 @@ python3 test.py -f tools/taosdemoTestTblAlt.py python3 test.py -f tools/taosdemoTestSampleData.py python3 test.py -f tools/taosdemoTestInterlace.py python3 test.py -f tools/taosdemoTestQuery.py - +# nano support +python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py +python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.py +python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py +python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertTime_step.py # update @@ -363,15 +367,15 @@ python3 test.py -f alter/alter_keep.py python3 test.py -f alter/alter_cacheLastRow.py python3 ./test.py -f query/querySession.py python3 test.py -f alter/alter_create_exception.py - -# nano support -python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py -python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.py -python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py -python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertTime_step.py python3 ./test.py -f insert/flushwhiledrop.py #======================p4-end=============== python3 test.py -f tools/taosdemoAllTest/pytest.py + + + + + + From 753135de699f2cec270fbbe261c5a43df96729a2 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Mon, 26 Jul 2021 22:24:05 +0800 Subject: [PATCH 50/81] [TD-5533]:add 'ctrl+u' and 'ctrl+k' shortcut for taosshell --- src/kit/shell/inc/shellCommand.h | 2 ++ src/kit/shell/src/shellCommand.c | 22 ++++++++++++++++++++++ src/kit/shell/src/shellDarwin.c | 6 ++++++ src/kit/shell/src/shellLinux.c | 6 ++++++ 4 files changed, 36 insertions(+) diff --git 
a/src/kit/shell/inc/shellCommand.h b/src/kit/shell/inc/shellCommand.h index a08c1f48d1..6e4d3e382e 100644 --- a/src/kit/shell/inc/shellCommand.h +++ b/src/kit/shell/inc/shellCommand.h @@ -35,6 +35,8 @@ struct Command { }; extern void backspaceChar(Command *cmd); +extern void clearLineBefore(Command *cmd); +extern void clearLineAfter(Command *cmd); extern void deleteChar(Command *cmd); extern void moveCursorLeft(Command *cmd); extern void moveCursorRight(Command *cmd); diff --git a/src/kit/shell/src/shellCommand.c b/src/kit/shell/src/shellCommand.c index e1a3dfe102..67e0c94989 100644 --- a/src/kit/shell/src/shellCommand.c +++ b/src/kit/shell/src/shellCommand.c @@ -102,6 +102,28 @@ void backspaceChar(Command *cmd) { } } +void clearLineBefore(Command *cmd) { + assert(cmd->cursorOffset <= cmd->commandSize && cmd->endOffset >= cmd->screenOffset); + + clearScreen(cmd->endOffset + prompt_size, cmd->screenOffset + prompt_size); + memmove(cmd->command, cmd->command + cmd->cursorOffset, + cmd->commandSize - cmd->cursorOffset); + cmd->commandSize -= cmd->cursorOffset; + cmd->cursorOffset = 0; + cmd->screenOffset = 0; + cmd->endOffset = cmd->commandSize; + showOnScreen(cmd); +} + +void clearLineAfter(Command *cmd) { + assert(cmd->cursorOffset <= cmd->commandSize && cmd->endOffset >= cmd->screenOffset); + + clearScreen(cmd->endOffset + prompt_size, cmd->screenOffset + prompt_size); + cmd->commandSize -= cmd->endOffset - cmd->cursorOffset; + cmd->endOffset = cmd->cursorOffset; + showOnScreen(cmd); +} + void deleteChar(Command *cmd) { assert(cmd->cursorOffset <= cmd->commandSize && cmd->endOffset >= cmd->screenOffset); diff --git a/src/kit/shell/src/shellDarwin.c b/src/kit/shell/src/shellDarwin.c index 86c0fea573..4dcd8b3d50 100644 --- a/src/kit/shell/src/shellDarwin.c +++ b/src/kit/shell/src/shellDarwin.c @@ -238,10 +238,16 @@ int32_t shellReadCommand(TAOS *con, char *command) { updateBuffer(&cmd); } break; + case 11: // Ctrl + K; + clearLineAfter(&cmd); + break; case 12: // Ctrl + L; system("clear"); showOnScreen(&cmd); break; + case 21: // Ctrl + U + clearLineBefore(&cmd); + break; } } else if (c == '\033') { c = getchar(); diff --git a/src/kit/shell/src/shellLinux.c b/src/kit/shell/src/shellLinux.c index 2a32a8d82e..dc74f6fcaa 100644 --- a/src/kit/shell/src/shellLinux.c +++ b/src/kit/shell/src/shellLinux.c @@ -238,10 +238,16 @@ int32_t shellReadCommand(TAOS *con, char *command) { updateBuffer(&cmd); } break; + case 11: // Ctrl + K; + clearLineAfter(&cmd); + break; case 12: // Ctrl + L; system("clear"); showOnScreen(&cmd); break; + case 21: // Ctrl + U; + clearLineBefore(&cmd); + break; } } else if (c == '\033') { c = (char)getchar(); From 9a3e6db6a76646748432ebf71917b184b8fdf504 Mon Sep 17 00:00:00 2001 From: happyguoxy Date: Tue, 27 Jul 2021 08:55:45 +0800 Subject: [PATCH 51/81] [TD-5213]:test operator --- tests/pytest/query/operator.py | 536 +++++++++++++++++++++++++++++++++ 1 file changed, 536 insertions(+) create mode 100644 tests/pytest/query/operator.py diff --git a/tests/pytest/query/operator.py b/tests/pytest/query/operator.py new file mode 100644 index 0000000000..774a1e5f42 --- /dev/null +++ b/tests/pytest/query/operator.py @@ -0,0 +1,536 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql +import random +import time + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + self.ts = 1600000000000 + self.num = 10 + + def run(self): + tdSql.prepare() + # test case for https://jira.taosdata.com:18080/browse/TD-5074 + + startTime = time.time() + + tdSql.execute('''create stable stable_1 + (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint, + q_bool bool , q_binary binary(20) , q_nchar nchar(20) , + q_float float , q_double double , q_ts timestamp) + tags(loc nchar(20) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, + t_bool bool , t_binary binary(20) , t_nchar nchar(20) , + t_float float , t_double double , t_ts timestamp);''') + tdSql.execute('''create stable stable_2 + (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint, + q_bool bool , q_binary binary(20) , q_nchar nchar(20) , + q_float float , q_double double , q_ts timestamp) + tags(loc nchar(20) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, + t_bool bool , t_binary binary(20) , t_nchar nchar(20) , + t_float float , t_double double , t_ts timestamp);''') + tdSql.execute('''create table table_0 using stable_1 + tags('table_0' , '0' , '0' , '0' , '0' , 0 , '0' , '0' , '0' , '0' ,'0')''') + tdSql.execute('''create table table_1 using stable_1 + tags('table_1' , '2147483647' , '9223372036854775807' , '32767' , '127' , 1 , + 'binary1' , 'nchar1' , '1' , '11' , \'1999-09-09 09:09:09.090\')''') + tdSql.execute('''create table table_2 using stable_1 + tags('table_2' , '-2147483647' , '-9223372036854775807' , '-32767' , '-127' , false , + 'binary2' , 'nchar2nchar2' , '-2.2' , '-22.22' , \'2099-09-09 09:09:09.090\')''') + tdSql.execute('''create table table_3 using stable_1 + tags('table_3' , '3' , '3' , '3' , '3' , true , 'binary3' , 'nchar3' , '33.33' , '3333.3333' , '0')''') + tdSql.execute('''create table table_4 using stable_1 + tags('table_4' , '4' , '4' , '4' , '4' , false , 'binary4' , 'nchar4' , '-444.444' , '-444444.444444' , '0')''') + tdSql.execute('''create table table_5 using stable_1 + tags('table_5' , '5' , '5' , '5' , '5' , true , 'binary5' , 'nchar5' , '5555.5555' , '55555555.55555555' , '0')''') + tdSql.execute('''create table table_21 using stable_2 + tags('table_5' , '5' , '5' , '5' , '5' , true , 'binary5' , 'nchar5' , '5555.5555' , '55555555.55555555' , '0')''') + #regular table + tdSql.execute('''create table regular_table_1 + (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint, + q_bool bool , q_binary binary(20) , q_nchar nchar(20) , + q_float float , q_double double , q_ts timestamp) ;''') + + for i in range(self.num): + tdSql.execute('''insert into table_0 values(%d, %d, %d, %d, %d, 0, 'binary.%s', 'nchar.%s', %f, %f, %d)''' + % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) + tdSql.execute('''insert into table_1 values(%d, %d, %d, %d, %d, 1, 'binary1.%s', 'nchar1.%s', %f, %f, %d)''' + % (self.ts + i, 2147483647-i, 9223372036854775807-i, 32767-i, 127-i, + i, i, 
random.random(), random.random(), 1262304000001 + i)) + tdSql.execute('''insert into table_2 values(%d, %d, %d, %d, %d, true, 'binary2.%s', 'nchar2nchar2.%s', %f, %f, %d)''' + % (self.ts + i, -2147483647+i, -9223372036854775807+i, -32767+i, -127+i, + i, i, random.uniform(-1,0), random.uniform(-1,0), 1577836800001 + i)) + tdSql.execute('''insert into table_3 values(%d, %d, %d, %d, %d, false, 'binary3.%s', 'nchar3.%s', %f, %f, %d)''' + % (self.ts + i, random.randint(-2147483647, 2147483647), + random.randint(-9223372036854775807, 9223372036854775807), random.randint(-32767, 32767), + random.randint(-127, 127), random.randint(-100, 100), random.randint(-10000, 10000), + random.uniform(-100000,100000), random.uniform(-1000000000,1000000000), self.ts + i)) + tdSql.execute('''insert into table_4 values(%d, %d, %d, %d, %d, true, 'binary4.%s', 'nchar4.%s', %f, %f, %d)''' + % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) + tdSql.execute('''insert into table_5 values(%d, %d, %d, %d, %d, false, 'binary5.%s', 'nchar5.%s', %f, %f, %d)''' + % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) + tdSql.execute('''insert into table_21 values(%d, %d, %d, %d, %d, false, 'binary5.%s', 'nchar5.%s', %f, %f, %d)''' + % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) + + tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, 0, 'binary.%s', 'nchar.%s', %f, %f, %d)''' + % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) + tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, 1, 'binary1.%s', 'nchar1.%s', %f, %f, %d)''' + % (self.ts + 100 + i, 2147483647-i, 9223372036854775807-i, 32767-i, 127-i, + i, i, random.random(), random.random(), 1262304000001 + i)) + tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, true, 'binary2.%s', 'nchar2nchar2.%s', %f, %f, %d)''' + % (self.ts + 200 + i, -2147483647+i, -9223372036854775807+i, -32767+i, -127+i, + i, i, random.uniform(-1,0), random.uniform(-1,0), 1577836800001 + i)) + tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, false, 'binary3.%s', 'nchar3.%s', %f, %f, %d)''' + % (self.ts + 300 + i, random.randint(-2147483647, 2147483647), + random.randint(-9223372036854775807, 9223372036854775807), random.randint(-32767, 32767), + random.randint(-127, 127), random.randint(-100, 100), random.randint(-10000, 10000), + random.uniform(-100000,100000), random.uniform(-1000000000,1000000000), self.ts + i)) + tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, true, 'binary4.%s', 'nchar4.%s', %f, %f, %d)''' + % (self.ts + 400 + i, i, i, i, i, i, i, i, i, self.ts + i)) + tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, false, 'binary5.%s', 'nchar5.%s', %f, %f, %d)''' + % (self.ts + 500 + i, i, i, i, i, i, i, i, i, self.ts + i)) + + tdLog.info("========== operator=1(OP_TableScan) ==========") + tdLog.info("========== operator=7(OP_Project) ==========") + sql = '''select * from stable_1''' + tdSql.query(sql) + tdSql.checkRows(6*self.num) + sql = '''select * from regular_table_1''' + tdSql.query(sql) + tdSql.checkRows(6*self.num) + + tdLog.info("========== operator=14(OP_MultiTableAggregate ) ==========") + sql = '''select last_row(*) from stable_1;''' + tdSql.query(sql) + tdSql.checkData(0,1,self.num-1) + + tdLog.info("========== operator=6(OP_Aggregate) ==========") + sql = '''select last_row(*) from regular_table_1;''' + tdSql.query(sql) + tdSql.checkData(0,1,self.num-1) + + tdLog.info("========== operator=9(OP_Limit) ==========") + sql = '''select * from 
stable_1 where loc = 'table_0' limit 5;''' + tdSql.query(sql) + tdSql.checkRows(5) + sql = '''select last_row(*) from (select * from stable_1 where loc = 'table_0');''' + tdSql.query(sql) + tdSql.checkRows(1) + + sql = '''select * from regular_table_1 ;''' + tdSql.query(sql) + tdSql.checkRows(6*self.num) + sql = '''select last_row(*) from (select * from regular_table_1);''' + tdSql.query(sql) + tdSql.checkRows(1) + tdSql.checkData(0,1,self.num-1) + + + sql = '''select last_row(*) from + ((select * from table_0) union all + (select * from table_1) union all + (select * from table_2));''' + tdSql.error(sql) + + tdLog.info("========== operator=16(OP_DummyInput) ==========") + sql = '''select last_row(*) from + ((select last_row(*) from table_0) union all + (select last_row(*) from table_1) union all + (select last_row(*) from table_2));''' + tdSql.error(sql) + + sql = '''select last_row(*) from + ((select * from table_0 limit 5 offset 5) union all + (select * from table_1 limit 5 offset 5) union all + (select * from regular_table_1 limit 5 offset 5));''' + tdSql.error(sql) + + tdLog.info("========== operator=10(OP_SLimit) ==========") + sql = '''select count(*) from stable_1 group by loc slimit 3 soffset 2 ;''' + tdSql.query(sql) + tdSql.checkRows(3) + + sql = '''select last_row(*) from + ((select * from table_0) union all + (select * from table_1) union all + (select * from table_2));''' + tdSql.error(sql) + + tdLog.info("========== operator=20(OP_Distinct) ==========") + tdLog.info("========== operator=4(OP_TagScan) ==========") + sql = '''select distinct(t_bool) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(2) + sql = '''select distinct(loc) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_int) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_bigint) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_smallint) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_tinyint) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_nchar) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_float) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_double) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_ts) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(3) + sql = '''select distinct(tbname) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + + tdLog.info("========== operator=2(OP_DataBlocksOptScan) ==========") + sql = '''select last(q_int),first(q_int) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_bigint),first(q_bigint) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_smallint),first(q_smallint) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_tinyint),first(q_tinyint) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_bool),first(q_bool) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_binary),first(q_binary) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_nchar),first(q_nchar) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_float),first(q_float) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_double),first(q_double) 
from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_ts),first(q_ts) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint),last(q_bool),last(q_binary),last(q_nchar), + last(q_float),last(q_double),last(q_ts),first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint), + first(q_bool),first(q_binary),first(q_nchar),first(q_float),first(q_float),first(q_double),first(q_ts) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint),last(q_bool),last(q_binary),last(q_nchar), + last(q_float),last(q_double),last(q_ts),first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint),first(q_bool), + first(q_binary),first(q_nchar),first(q_float),first(q_float),first(q_double),first(q_ts) from regular_table_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + + tdLog.info("========== operator=8(OP_Groupby) ==========") + sql = '''select stddev(q_int) from table_0 group by q_int;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select stddev(q_int),stddev(q_bigint),stddev(q_smallint),stddev(q_tinyint),stddev(q_float),stddev(q_double) from stable_1 group by q_int;''' + tdSql.query(sql) + sql = '''select stddev(q_int),stddev(q_bigint),stddev(q_smallint),stddev(q_tinyint),stddev(q_float),stddev(q_double) from table_1 group by q_bigint;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select stddev(q_int),stddev(q_bigint),stddev(q_smallint),stddev(q_tinyint),stddev(q_float),stddev(q_double) from regular_table_1 group by q_smallint;''' + tdSql.query(sql) + + tdLog.info("========== operator=11(OP_TimeWindow) ==========") + sql = '''select last(q_int) from table_0 interval(1m);''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint), + first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint) from table_1 interval(1m);''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint), + first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint) from stable_1 interval(1m);''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint), + first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint) from regular_table_1 interval(1m);''' + tdSql.query(sql) + tdSql.checkRows(1) + + tdLog.info("========== operator=12(OP_SessionWindow) ==========") + sql = '''select count(*) from table_1 session(ts,1s);''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select count(*) from regular_table_1 session(ts,1s);''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select count(*),sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from table_1 session(ts,1s);''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select count(*),sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + 
sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from regular_table_1 session(ts,1s);''' + tdSql.query(sql) + tdSql.checkRows(1) + + tdLog.info("========== operator=13(OP_Fill) ==========") + sql = '''select sum(q_int) from table_0 + where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' + tdSql.query(sql) + tdSql.checkData(0,1,'None') + sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from stable_1 where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' + tdSql.query(sql) + tdSql.checkData(0,1,'None') + sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from regular_table_1 where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' + tdSql.query(sql) + tdSql.checkData(0,1,'None') + sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from table_0 where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' + tdSql.query(sql) + tdSql.checkData(0,1,'None') + #TD-5190 + sql = '''select sum(q_tinyint),stddev(q_float) from stable_1 + where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' + tdSql.query(sql) + tdSql.checkData(0,1,'None') + + tdLog.info("========== operator=15(OP_MultiTableTimeInterval) ==========") + sql = '''select avg(q_int) from stable_1 where ts=0;''' + tdSql.query(sql) + tdSql.checkData(0,0,'table_0') + sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), 
avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from stable_1 group by loc having sum(q_int)>=0;''' + tdSql.query(sql) + tdSql.checkData(0,0,'table_0') + sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from stable_1 group by loc having avg(q_int)>=0;''' + tdSql.query(sql) + tdSql.checkData(0,0,'table_0') + sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from stable_1 group by loc having min(q_int)>=0;''' + tdSql.query(sql) + tdSql.checkData(0,0,'table_0') + sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from stable_1 group by loc having max(q_int)>=0;''' + tdSql.query(sql) + tdSql.checkData(0,0,'table_0') + sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from stable_1 group by loc having first(q_int)>=0;''' + tdSql.query(sql) + tdSql.checkData(0,0,'table_0') + sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + 
sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from stable_1 group by loc having last(q_int)>=0;''' + tdSql.query(sql) + tdSql.checkData(0,0,'table_0') + + tdLog.info("========== operator=21(OP_Join) ==========") + sql = '''select t1.q_int,t2.q_int from + (select ts,q_int from table_1) t1 , (select ts,q_int from table_2) t2 + where t2.ts = t1.ts;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select t1.*,t2.* from + (select * from table_1) t1 , (select * from table_2) t2 + where t2.ts = t1.ts;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select t1.*,t2.* from + (select * from regular_table_1) t1 , (select * from table_0) t2 + where t2.ts = t1.ts;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select t1.*,t2.* from + (select * from stable_1) t1 , (select * from table_2) t2 + where t2.ts = t1.ts;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select t1.*,t2.* from + (select * from regular_table_1) t1 , (select * from stable_1) t2 + where t2.ts = t1.ts;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select t1.*,t2.*,t3.* from + (select * from regular_table_1) t1 , (select * from stable_1) t2, (select * from table_0) t3 + where t2.ts = t1.ts and t3.ts = t1.ts and t2.ts = t3.ts;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + + tdLog.info("========== operator=22(OP_StateWindow) ==========") + sql = '''select avg(q_int),sum(q_smallint) from table_1 state_window(q_int);''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from table_1 state_window(q_bigint);''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from regular_table_1 state_window(q_smallint);''' + tdSql.query(sql) + tdSql.checkRows(6*self.num) + + endTime = time.time() + print("total time %ds" % (endTime - startTime)) + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file From b0b18b51dc5b61faaa5a523381f5b142f892406b Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 27 Jul 2021 10:19:15 +0800 Subject: [PATCH 52/81] [TD-5543]: taosdump 
precision description and display. (#7030) Co-authored-by: Shuduo Sang --- src/kit/taosdump/taosdump.c | 60 +++++++++++++++++++++---------------- 1 file changed, 34 insertions(+), 26 deletions(-) diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c index 50a58991b8..7b21023216 100644 --- a/src/kit/taosdump/taosdump.c +++ b/src/kit/taosdump/taosdump.c @@ -60,7 +60,7 @@ typedef struct { fprintf(stderr, "VERB: "fmt, __VA_ARGS__); } while(0) #define errorPrint(fmt, ...) \ - do { fprintf(stderr, "ERROR: "fmt, __VA_ARGS__); } while(0) + do { fprintf(stderr, "\033[31m"); fprintf(stderr, "ERROR: "fmt, __VA_ARGS__); fprintf(stderr, "\033[0m"); } while(0) // -------------------------- SHOW DATABASE INTERFACE----------------------- @@ -234,9 +234,9 @@ static struct argp_option options[] = { {"start-time", 'S', "START_TIME", 0, "Start time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T18:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 4}, {"end-time", 'E', "END_TIME", 0, "End time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T18:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 5}, #if TSDB_SUPPORT_NANOSECOND == 1 - {"precision", 'C', "PRECISION", 0, "Epoch precision. Valid value is one of ms, us, and ns. Default is ms.", 6}, + {"precision", 'C', "PRECISION", 0, "Specify precision for converting human-readable time to epoch. Valid value is one of ms, us, and ns. Default is ms.", 6}, #else - {"precision", 'C', "PRECISION", 0, "Epoch precision. Valid value is one of ms and us. Default is ms.", 6}, + {"precision", 'C', "PRECISION", 0, "Use specified precision to convert human-readable time. Valid value is one of ms and us. Default is ms.", 6}, #endif {"data-batch", 'B', "DATA_BATCH", 0, "Number of data point per insert statement. Max value is 32766. Default is 1.", 3}, {"max-sql-len", 'L', "SQL_LEN", 0, "Max length of one sql. 
Default is 65480.", 3}, @@ -547,8 +547,8 @@ static void parse_precision_first( free(tmp); exit(-1); } - strncpy(g_args.precision, tmp, - min(DB_PRECISION_LEN - 1, strlen(tmp))); + tstrncpy(g_args.precision, tmp, + min(DB_PRECISION_LEN, strlen(tmp) + 1)); free(tmp); } } @@ -599,6 +599,7 @@ static void parse_timestamp( return; } } else { + tstrncpy(arguments->precision, "n/a", strlen("n/a") + 1); tmpEpoch = atoll(tmp); } @@ -794,12 +795,14 @@ static int taosGetTableRecordInfo( while ((row = taos_fetch_row(result)) != NULL) { isSet = true; pTableRecordInfo->isMetric = false; - strncpy(pTableRecordInfo->tableRecord.name, + tstrncpy(pTableRecordInfo->tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], - fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes); - strncpy(pTableRecordInfo->tableRecord.metric, + min(TSDB_TABLE_NAME_LEN, + fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes) + 1); + tstrncpy(pTableRecordInfo->tableRecord.metric, (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX], - fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes); + min(TSDB_TABLE_NAME_LEN, + fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes) + 1); break; } @@ -1080,8 +1083,8 @@ _dump_db_point: goto _exit_failure; } - strncpy(g_dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX], - fields[TSDB_SHOW_DB_NAME_INDEX].bytes); + tstrncpy(g_dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX], + min(TSDB_DB_NAME_LEN, fields[TSDB_SHOW_DB_NAME_INDEX].bytes) + 1); if (g_args.with_property) { g_dbInfos[count]->ntables = *((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]); g_dbInfos[count]->vgroups = *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]); @@ -1089,8 +1092,8 @@ _dump_db_point: g_dbInfos[count]->quorum = *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]); g_dbInfos[count]->days = *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]); - strncpy(g_dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX], - fields[TSDB_SHOW_DB_KEEP_INDEX].bytes); + tstrncpy(g_dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX], + min(32, fields[TSDB_SHOW_DB_KEEP_INDEX].bytes) + 1); //g_dbInfos[count]->daysToKeep = *((int16_t *)row[TSDB_SHOW_DB_KEEP_INDEX]); //g_dbInfos[count]->daysToKeep1; //g_dbInfos[count]->daysToKeep2; @@ -1103,8 +1106,8 @@ _dump_db_point: g_dbInfos[count]->comp = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX])); g_dbInfos[count]->cachelast = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX])); - strncpy(g_dbInfos[count]->precision, (char *)row[TSDB_SHOW_DB_PRECISION_INDEX], - fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes); + tstrncpy(g_dbInfos[count]->precision, (char *)row[TSDB_SHOW_DB_PRECISION_INDEX], + min(8, fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes) + 1); //g_dbInfos[count]->precision = *((int8_t *)row[TSDB_SHOW_DB_PRECISION_INDEX]); g_dbInfos[count]->update = *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]); } @@ -1255,17 +1258,19 @@ static int taosGetTableDes( tstrncpy(tableDes->name, table, TSDB_TABLE_NAME_LEN); while ((row = taos_fetch_row(res)) != NULL) { - strncpy(tableDes->cols[count].field, + tstrncpy(tableDes->cols[count].field, (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], - fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes); - strncpy(tableDes->cols[count].type, + min(TSDB_COL_NAME_LEN + 1, + fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes + 1)); + tstrncpy(tableDes->cols[count].type, (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - min(15, fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes)); + min(16, fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + 1); tableDes->cols[count].length = *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]); - 
strncpy(tableDes->cols[count].note, + tstrncpy(tableDes->cols[count].note, (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], - fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes); + min(COL_NOTE_LEN, + fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes + 1)); count++; } @@ -1700,8 +1705,9 @@ static int32_t taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE while ((row = taos_fetch_row(res)) != NULL) { memset(&tableRecord, 0, sizeof(STableRecord)); - strncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], - fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes); + tstrncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], + min(TSDB_TABLE_NAME_LEN, + fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes) + 1); taosWrite(fd, &tableRecord, sizeof(STableRecord)); } @@ -1775,9 +1781,11 @@ static int taosDumpDb(SDbInfo *dbInfo, FILE *fp, TAOS *taosCon) { while ((row = taos_fetch_row(res)) != NULL) { memset(&tableRecord, 0, sizeof(STableRecord)); tstrncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], - fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes); + min(TSDB_TABLE_NAME_LEN, + fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes) + 1); tstrncpy(tableRecord.metric, (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX], - min(TSDB_TABLE_NAME_LEN, fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes)); + min(TSDB_TABLE_NAME_LEN, + fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes) + 1); taosWrite(fd, &tableRecord, sizeof(STableRecord)); @@ -2165,7 +2173,7 @@ static int taosCheckParam(struct arguments *arguments) { if (g_args.arg_list_len == 0) { if ((!g_args.all_databases) && (!g_args.isDumpIn)) { - fprintf(stderr, "taosdump requires parameters\n"); + errorPrint("%s", "taosdump requires parameters for database and operation\n"); return -1; } } From f5758a29daf38ed65e912d65a509795e2d493319 Mon Sep 17 00:00:00 2001 From: lichuang Date: Tue, 27 Jul 2021 10:40:42 +0800 Subject: [PATCH 53/81] [TD-5504]fix tsdbRestoreLastColumns run time bug:memory heap overflow --- src/tsdb/src/tsdbMain.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/tsdb/src/tsdbMain.c b/src/tsdb/src/tsdbMain.c index f995295745..40af6348b7 100644 --- a/src/tsdb/src/tsdbMain.c +++ b/src/tsdb/src/tsdbMain.c @@ -735,11 +735,12 @@ static int tsdbRestoreLastColumns(STsdbRepo *pRepo, STable *pTable, SReadH* pRea continue; } // save not-null column + uint16_t bytes = IS_VAR_DATA_TYPE(pCol->type) ? 
varDataTLen(tdGetColDataOfRow(pDataCol, rowId)) : TYPE_BYTES[pCol->type]; SDataCol *pLastCol = &(pTable->lastCols[idx]); - pLastCol->pData = malloc(pCol->bytes); - pLastCol->bytes = pCol->bytes; + pLastCol->pData = malloc(bytes); + pLastCol->bytes = bytes; pLastCol->colId = pCol->colId; - memcpy(pLastCol->pData, value, pCol->bytes); + memcpy(pLastCol->pData, value, bytes); // save row ts(in column 0) pDataCol = pReadh->pDCols[0]->cols + 0; From dce3de680f6f90ae95f17de8916e9cd8620e59c5 Mon Sep 17 00:00:00 2001 From: lichuang Date: Tue, 27 Jul 2021 10:45:53 +0800 Subject: [PATCH 54/81] [TD-5504]fix tsdbRestoreLastColumns run time bug:memory heap overflow --- src/tsdb/src/tsdbMain.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/tsdb/src/tsdbMain.c b/src/tsdb/src/tsdbMain.c index 40af6348b7..f3a7c4b7ee 100644 --- a/src/tsdb/src/tsdbMain.c +++ b/src/tsdb/src/tsdbMain.c @@ -722,7 +722,8 @@ static int tsdbRestoreLastColumns(STsdbRepo *pRepo, STable *pTable, SReadH* pRea // OK,let's load row from backward to get not-null column for (int32_t rowId = pBlock->numOfRows - 1; rowId >= 0; rowId--) { SDataCol *pDataCol = pReadh->pDCols[0]->cols + i; - tdAppendColVal(memRowDataBody(row), tdGetColDataOfRow(pDataCol, rowId), pCol->type, pCol->offset); + const void* pColData = tdGetColDataOfRow(pDataCol, rowId); + tdAppendColVal(memRowDataBody(row), pColData, pCol->type, pCol->offset); //SDataCol *pDataCol = readh.pDCols[0]->cols + j; void *value = tdGetRowDataOfCol(memRowDataBody(row), (int8_t)pCol->type, TD_DATA_ROW_HEAD_SIZE + pCol->offset); if (isNull(value, pCol->type)) { @@ -735,7 +736,7 @@ static int tsdbRestoreLastColumns(STsdbRepo *pRepo, STable *pTable, SReadH* pRea continue; } // save not-null column - uint16_t bytes = IS_VAR_DATA_TYPE(pCol->type) ? varDataTLen(tdGetColDataOfRow(pDataCol, rowId)) : TYPE_BYTES[pCol->type]; + uint16_t bytes = IS_VAR_DATA_TYPE(pCol->type) ? varDataTLen(pColData) : pCol->bytes; SDataCol *pLastCol = &(pTable->lastCols[idx]); pLastCol->pData = malloc(bytes); pLastCol->bytes = bytes; From 74fa1114f314738539819e519d5e56daab909bf4 Mon Sep 17 00:00:00 2001 From: wenzhouwww Date: Tue, 27 Jul 2021 11:06:04 +0800 Subject: [PATCH 55/81] [TD-5369] change the nums of tables and insert rows! 
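With the reduced scale each super table now holds 100 child tables x 100 rows = 10,000 rows, matching the adjusted count(*) assertions in the nano-precision test cases below.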
--- .../taosdemoTestNanoDatabase.json | 8 ++--- .../taosdemoTestNanoDatabaseNow.json | 2 +- .../taosdemoTestSupportNanoInsert.py | 36 +++++++++---------- .../taosdemoTestSupportNanoQuery.py | 22 ++++++------ .../taosdemoTestSupportNanoQuerycsv.json | 2 +- .../taosdemoTestSupportNanoSubscribe.json | 2 +- .../taosdemoTestSupportNanosubscribe.py | 6 ++-- 7 files changed, 38 insertions(+), 40 deletions(-) diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabase.json b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabase.json index 9010415fe6..246f1c35f2 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabase.json +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabase.json @@ -35,13 +35,13 @@ "super_tables": [{ "name": "stb0", "child_table_exists":"no", - "childtable_count": 1000, + "childtable_count": 100, "childtable_prefix": "tb0_", "auto_create_table": "no", "batch_create_tbl_num": 20, "data_source": "rand", "insert_mode": "taosc", - "insert_rows": 10000, + "insert_rows": 100, "childtable_offset":0, "multi_thread_write_one_tbl": "no", "insert_interval":0, @@ -61,13 +61,13 @@ { "name": "stb1", "child_table_exists":"no", - "childtable_count": 1000, + "childtable_count": 100, "childtable_prefix": "tb1_", "auto_create_table": "no", "batch_create_tbl_num": 20, "data_source": "rand", "insert_mode": "taosc", - "insert_rows": 10000, + "insert_rows": 100, "childtable_offset":0, "multi_thread_write_one_tbl": "no", "insert_interval":0, diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseNow.json b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseNow.json index d2542a0eba..f36b1f9b4c 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseNow.json +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseNow.json @@ -35,7 +35,7 @@ "super_tables": [{ "name": "stb0", "child_table_exists":"no", - "childtable_count": 1000, + "childtable_count": 100, "childtable_prefix": "tb0_", "auto_create_table": "no", "batch_create_tbl_num": 20, diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py index 88a917da85..266a8fa712 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py @@ -56,46 +56,47 @@ class TDTestCase: os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoTestNanoDatabase.json -y " % binPath) tdSql.execute("use nsdb") tdSql.query("show stables") - tdSql.checkData(0, 4, 1000) + tdSql.checkData(0, 4, 100) tdSql.query("select count (tbname) from stb0") - tdSql.checkData(0, 0, 1000) + tdSql.checkData(0, 0, 100) tdSql.query("select count(*) from tb0_0") - tdSql.checkData(0, 0, 10000) + tdSql.checkData(0, 0, 100) tdSql.query("select count(*) from stb0") - tdSql.checkData(0, 0, 10000000) + tdSql.checkData(0, 0, 10000) tdSql.query("describe stb0") tdSql.checkDataType(9, 1,"TIMESTAMP") tdSql.query("select last(ts) from stb0") - tdSql.getData(0, 0) + tdSql.checkData(0, 0,"2021-07-01 00:00:00.990000000") # check stable stb1 which is insert with disord tdSql.query("select count (tbname) from stb1") - tdSql.checkData(0, 0, 1000) + tdSql.checkData(0, 0, 100) tdSql.query("select count(*) from tb1_0") - tdSql.checkData(0, 0, 10000) + tdSql.checkData(0, 0, 100) tdSql.query("select count(*) from stb1") - tdSql.checkData(0, 0, 10000000) + tdSql.checkData(0, 0, 10000) # check c8 is an nano timestamp tdSql.query("describe stb1") 
tdSql.checkDataType(9, 1,"TIMESTAMP") # check insert timestamp_step is nano_second tdSql.query("select last(ts) from stb1") - tdSql.checkData(0, 0,"2021-07-01 00:01:39.990000000") - + tdSql.checkData(0, 0,"2021-07-01 00:00:00.990000000") + # insert data from now time # check stable stb0 os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoTestNanoDatabaseNow.json -y " % binPath) + tdSql.execute("use nsdb2") tdSql.query("show stables") - tdSql.checkData(0, 4, 1000) + tdSql.checkData(0, 4, 100) tdSql.query("select count (tbname) from stb0") - tdSql.checkData(0, 0, 1000) + tdSql.checkData(0, 0, 100) tdSql.query("select count(*) from tb0_0") tdSql.checkData(0, 0, 100) tdSql.query("select count(*) from stb0") - tdSql.checkData(0, 0, 100000) + tdSql.checkData(0, 0, 10000) # check c8 is an nano timestamp tdSql.query("describe stb0") tdSql.checkDataType(9,1,"TIMESTAMP") @@ -117,16 +118,11 @@ class TDTestCase: os.system("rm -rf ./insert_res.txt") os.system("rm -rf tools/taosdemoAllTest/taosdemoTestSupportNano*.py.sql") + # taosdemo test insert with command and parameter , detals show taosdemo --help - - os.system("%staosdemo -u root -P taosdata -p 6030 -h vm84 -a 1 -m pre -n 10 -T 20 -t 60 -o res.txt -y " % binPath) + os.system("%staosdemo -u root -P taosdata -p 6030 -a 1 -m pre -n 10 -T 20 -t 60 -o res.txt -y " % binPath) tdSql.query("select count(*) from test.meters") tdSql.checkData(0, 0, 600) - - os.system("%staosdemo -u root -P taosdata -p 6030 -h vm84 -a 1 -m pre -n 10 -T 20 -t 60 -o res.txt -y " % binPath) - tdSql.query("select count(*) from test.meters") - tdSql.checkData(0, 0, 600) - # check taosdemo -s sqls_ls = ['drop database if exists nsdbsql;','create database nsdbsql precision "ns" keep 36 days 6 update 1;', diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.py index 21a7037ce6..5a37cf9c7c 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.py +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.py @@ -55,15 +55,15 @@ class TDTestCase: # use where to filter - tdSql.query("select count(*) from stb0 where ts>\"2021-07-01 00:01:00.000000000 \" ;") - tdSql.checkData(0, 0, 3999000) - tdSql.query("select count(*) from stb0 where ts>\"2021-07-01 00:01:00.000000000\" and ts <=\"2021-07-01 00:01:10.000000000\" ;") - tdSql.checkData(0, 0, 1000000) + tdSql.query("select count(*) from stb0 where ts>\"2021-07-01 00:00:00.590000000 \" ") + tdSql.checkData(0, 0, 4000) + tdSql.query("select count(*) from stb0 where ts>\"2021-07-01 00:00:00.000000000\" and ts <=\"2021-07-01 00:00:00.590000000\" ") + tdSql.checkData(0, 0, 5900) - tdSql.query("select count(*) from tb0_0 where ts>\"2021-07-01 00:01:00.000000000 \" ;") - tdSql.checkData(0, 0, 3999) - tdSql.query("select count(*) from tb0_0 where ts>\"2021-07-01 00:01:00.000000000\" and ts <=\"2021-07-01 00:01:10.000000000\" ;") - tdSql.checkData(0, 0, 1000) + tdSql.query("select count(*) from tb0_0 where ts>\"2021-07-01 00:00:00.590000000 \" ;") + tdSql.checkData(0, 0, 40) + tdSql.query("select count(*) from tb0_0 where ts>\"2021-07-01 00:00:00.000000000\" and ts <=\"2021-07-01 00:00:00.590000000\" ") + tdSql.checkData(0, 0, 59) # select max min avg from special col @@ -87,8 +87,8 @@ class TDTestCase: print( "select avg(c1) from tb0_0 : " , tdSql.getData(0, 0)) tdSql.query("select count(*) from stb0 group by tbname;") - tdSql.checkData(0, 0, 10000) - tdSql.checkData(100, 0, 10000) + tdSql.checkData(0, 0, 100) + 
tdSql.checkData(10, 0, 100) # query : query above sqls by taosdemo and continuously @@ -105,7 +105,7 @@ class TDTestCase: tdSql.checkDataType(3, 1, "TIMESTAMP") tdSql.query("select count(*) from stb0 where ts >\"2021-07-01 00:00:00.490000000\"") tdSql.checkData(0, 0, 5000) - tdSql.query("select count(*) from stb0 where ts >now -22d-1h-3s") + tdSql.query("select count(*) from stb0 where ts now -22d-1h-3s ;", + "sql": "select count(*) from stb0 where ts < now -22d-1h-3s ;", "result": "./query_res1.txt" }, { diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoSubscribe.json b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoSubscribe.json index 26d405b65b..1cc834164e 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoSubscribe.json +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoSubscribe.json @@ -20,7 +20,7 @@ "result": "./subscribe_res0.txt" }, { - "sql": "select * from stb0 where ts > now -20d-1h-3s ;", + "sql": "select * from stb0 where ts < now -2d-1h-3s ;", "result": "./subscribe_res1.txt" }, { diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py index f6324577c1..6dcea6e7e0 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py @@ -79,7 +79,7 @@ class TDTestCase: # merge result files - sleep(20) + sleep(10) os.system("cat subscribe_res0.txt* > all_subscribe_res0.txt") os.system("cat subscribe_res1.txt* > all_subscribe_res1.txt") os.system("cat subscribe_res2.txt* > all_subscribe_res2.txt") @@ -90,7 +90,7 @@ class TDTestCase: self.assertCheck("all_subscribe_res0.txt",subTimes0 ,200) subTimes1 = self.subTimes("all_subscribe_res1.txt") - self.assertCheck("all_subscribe_res1.txt",subTimes1 ,0) + self.assertCheck("all_subscribe_res1.txt",subTimes1 ,200) subTimes2 = self.subTimes("all_subscribe_res2.txt") self.assertCheck("all_subscribe_res2.txt",subTimes2 ,200) @@ -103,6 +103,7 @@ class TDTestCase: os.system("cat subscribe_res0.txt* > all_subscribe_res0.txt") subTimes0 = self.subTimes("all_subscribe_res0.txt") + print("pass") self.assertCheck("all_subscribe_res0.txt",subTimes0 ,202) @@ -112,6 +113,7 @@ class TDTestCase: sleep(3) os.system("rm -rf ./subscribe_res*") os.system("rm -rf ./all_subscribe*") + os.system("rm -rf ./*.py.sql") From 8aa0a37392c978a1f93448d839bc732e93c3d5c1 Mon Sep 17 00:00:00 2001 From: tomchon Date: Tue, 27 Jul 2021 11:40:47 +0800 Subject: [PATCH 56/81] [TD-4432]: add taodemo-testcase that using stmt interface --- .../taosdemoTestInsertWithJsonStmt.py | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJsonStmt.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJsonStmt.py index cce6c83a07..0aade43183 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJsonStmt.py +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJsonStmt.py @@ -244,17 +244,17 @@ class TDTestCase: tdSql.query("select count(*) from stb1") tdSql.checkData(0, 0, 10) - # # insert: sample json - # os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insert-sample-stmt.json -y " % binPath) - # tdSql.execute("use dbtest123") - # tdSql.query("select c2 from stb0") - # tdSql.checkData(0, 0, 2147483647) - # tdSql.query("select * from stb1 where t1=-127") - # tdSql.checkRows(20) - # tdSql.query("select * from 
stb1 where t2=127") - # tdSql.checkRows(10) - # tdSql.query("select * from stb1 where t2=126") - # tdSql.checkRows(10) + # insert: sample json + os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insert-sample-stmt.json -y " % binPath) + tdSql.execute("use dbtest123") + tdSql.query("select c2 from stb0") + tdSql.checkData(0, 0, 2147483647) + tdSql.query("select * from stb1 where t1=-127") + tdSql.checkRows(20) + tdSql.query("select * from stb1 where t2=127") + tdSql.checkRows(10) + tdSql.query("select * from stb1 where t2=126") + tdSql.checkRows(10) # insert: test interlace parament os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insert-interlace-row-stmt.json -y " % binPath) From 606119e3a5a1c8e7d8548b2bc9abcdf66071e90f Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 27 Jul 2021 13:05:48 +0800 Subject: [PATCH 57/81] add prompt msg if WAL size exceeds limit. (#7032) --- src/kit/taosdemo/taosdemo.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index cd46eb0bf8..c4dec6a231 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -648,7 +648,7 @@ static FILE * g_fpOfInsertResult = NULL; fprintf(stderr, "PERF: "fmt, __VA_ARGS__); } while(0) #define errorPrint(fmt, ...) \ - do { fprintf(stderr, "ERROR: "fmt, __VA_ARGS__); } while(0) + do { fprintf(stderr, " \033[31m"); fprintf(stderr, "ERROR: "fmt, __VA_ARGS__); fprintf(stderr, " \033[0m"); } while(0) // for strncpy buffer overflow #define min(a, b) (((a) < (b)) ? (a) : (b)) @@ -5156,6 +5156,8 @@ static int32_t execInsert(threadInfo *pThreadInfo, uint32_t k) if (0 != taos_stmt_execute(pThreadInfo->stmt)) { errorPrint("%s() LN%d, failied to execute insert statement. reason: %s\n", __func__, __LINE__, taos_stmt_errstr(pThreadInfo->stmt)); + + fprintf(stderr, "\n\033[31m === Please reduce batch number if WAL size exceeds limit. ===\033[0m\n\n"); exit(-1); } affectedRows = k; From f734285282444fda401d4f05431d23c1bd646f8a Mon Sep 17 00:00:00 2001 From: Zhiyu Yang <69311263+zyyang-taosdata@users.noreply.github.com> Date: Tue, 27 Jul 2021 13:20:35 +0800 Subject: [PATCH 58/81] [TD-5506]: fix NullPointerException in JDBC-RESTful (#7021) * change jdbc version * [TD-5506]: throw SQLException in HttpClientPoolUtil --- cmake/install.inc | 2 +- src/connector/jdbc/CMakeLists.txt | 2 +- src/connector/jdbc/deploy-pom.xml | 2 +- src/connector/jdbc/pom.xml | 2 +- .../java/com/taosdata/jdbc/TSDBConstants.java | 4 +- .../java/com/taosdata/jdbc/TSDBError.java | 4 +- .../com/taosdata/jdbc/TSDBErrorNumbers.java | 13 +- .../taosdata/jdbc/rs/RestfulResultSet.java | 2 +- .../jdbc/utils/HttpClientPoolUtil.java | 202 ++++++++---------- 9 files changed, 106 insertions(+), 127 deletions(-) diff --git a/cmake/install.inc b/cmake/install.inc index 4494c19d49..e9ad240a79 100755 --- a/cmake/install.inc +++ b/cmake/install.inc @@ -32,7 +32,7 @@ ELSEIF (TD_WINDOWS) #INSTALL(TARGETS taos RUNTIME DESTINATION driver) #INSTALL(TARGETS shell RUNTIME DESTINATION .) 
IF (TD_MVN_INSTALLED) - INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.33-dist.jar DESTINATION connector/jdbc) + INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.34-dist.jar DESTINATION connector/jdbc) ENDIF () ELSEIF (TD_DARWIN) SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh") diff --git a/src/connector/jdbc/CMakeLists.txt b/src/connector/jdbc/CMakeLists.txt index b88869e954..e432dac1ce 100644 --- a/src/connector/jdbc/CMakeLists.txt +++ b/src/connector/jdbc/CMakeLists.txt @@ -8,7 +8,7 @@ IF (TD_MVN_INSTALLED) ADD_CUSTOM_COMMAND(OUTPUT ${JDBC_CMD_NAME} POST_BUILD COMMAND mvn -Dmaven.test.skip=true install -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml - COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.33-dist.jar ${LIBRARY_OUTPUT_PATH} + COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.34-dist.jar ${LIBRARY_OUTPUT_PATH} COMMAND mvn -Dmaven.test.skip=true clean -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml COMMENT "build jdbc driver") ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${JDBC_CMD_NAME}) diff --git a/src/connector/jdbc/deploy-pom.xml b/src/connector/jdbc/deploy-pom.xml index 86f498b24a..ef57198e78 100755 --- a/src/connector/jdbc/deploy-pom.xml +++ b/src/connector/jdbc/deploy-pom.xml @@ -5,7 +5,7 @@ com.taosdata.jdbc taos-jdbcdriver - 2.0.33 + 2.0.34 jar JDBCDriver diff --git a/src/connector/jdbc/pom.xml b/src/connector/jdbc/pom.xml index 0043536daf..3d5cf8efe3 100644 --- a/src/connector/jdbc/pom.xml +++ b/src/connector/jdbc/pom.xml @@ -3,7 +3,7 @@ 4.0.0 com.taosdata.jdbc taos-jdbcdriver - 2.0.33 + 2.0.34 jar JDBCDriver https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConstants.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConstants.java index f3f04eff12..740e3c6c21 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConstants.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConstants.java @@ -130,7 +130,7 @@ public abstract class TSDBConstants { case TSDBConstants.TSDB_DATA_TYPE_NCHAR: return Types.NCHAR; } - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE_IN_TDENGINE); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE); } public static String taosType2JdbcTypeName(int taosType) throws SQLException { @@ -160,7 +160,7 @@ public abstract class TSDBConstants { case TSDBConstants.TSDB_DATA_TYPE_NCHAR: return "NCHAR"; default: - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE_IN_TDENGINE); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE); } } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBError.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBError.java index da89081428..d626698663 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBError.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBError.java @@ -31,8 +31,8 @@ public class TSDBError { TSDBErrorMap.put(TSDBErrorNumbers.ERROR_URL_NOT_SET, "url is not set"); TSDBErrorMap.put(TSDBErrorNumbers.ERROR_INVALID_SQL, "invalid sql"); TSDBErrorMap.put(TSDBErrorNumbers.ERROR_NUMERIC_VALUE_OUT_OF_RANGE, "numeric value out of range"); - TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE_IN_TDENGINE, "unknown taos type in tdengine"); - 
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN_TIMESTAMP_PERCISION, "unknown timestamp precision"); + TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE, "unknown taos type in tdengine"); + TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN_TIMESTAMP_PRECISION, "unknown timestamp precision"); TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN, "unknown error"); diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java index a796e6d86f..3c44d69be5 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java @@ -25,8 +25,10 @@ public class TSDBErrorNumbers { public static final int ERROR_URL_NOT_SET = 0x2312; // url is not set public static final int ERROR_INVALID_SQL = 0x2313; // invalid sql public static final int ERROR_NUMERIC_VALUE_OUT_OF_RANGE = 0x2314; // numeric value out of range - public static final int ERROR_UNKNOWN_TAOS_TYPE_IN_TDENGINE = 0x2315; //unknown taos type in tdengine - public static final int ERROR_UNKNOWN_TIMESTAMP_PERCISION = 0x2316; // unknown timestamp precision + public static final int ERROR_UNKNOWN_TAOS_TYPE = 0x2315; //unknown taos type in tdengine + public static final int ERROR_UNKNOWN_TIMESTAMP_PRECISION = 0x2316; // unknown timestamp precision + public static final int ERROR_RESTFul_Client_Protocol_Exception = 0x2317; + public static final int ERROR_RESTFul_Client_IOException = 0x2318; public static final int ERROR_UNKNOWN = 0x2350; //unknown error @@ -62,8 +64,11 @@ public class TSDBErrorNumbers { errorNumbers.add(ERROR_URL_NOT_SET); errorNumbers.add(ERROR_INVALID_SQL); errorNumbers.add(ERROR_NUMERIC_VALUE_OUT_OF_RANGE); - errorNumbers.add(ERROR_UNKNOWN_TAOS_TYPE_IN_TDENGINE); - errorNumbers.add(ERROR_UNKNOWN_TIMESTAMP_PERCISION); + errorNumbers.add(ERROR_UNKNOWN_TAOS_TYPE); + errorNumbers.add(ERROR_UNKNOWN_TIMESTAMP_PRECISION); + errorNumbers.add(ERROR_RESTFul_Client_IOException); + + errorNumbers.add(ERROR_RESTFul_Client_Protocol_Exception); errorNumbers.add(ERROR_SUBSCRIBE_FAILED); errorNumbers.add(ERROR_UNSUPPORTED_ENCODING); diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java index f0ea03638f..1ea39236b6 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java @@ -213,7 +213,7 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { long nanoAdjustment = Integer.parseInt(value.substring(20)); return Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment)); } - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_TIMESTAMP_PERCISION); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_TIMESTAMP_PRECISION); } } } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/HttpClientPoolUtil.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/HttpClientPoolUtil.java index e10bdb5aa9..de26ab7f1f 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/HttpClientPoolUtil.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/HttpClientPoolUtil.java @@ -1,15 +1,18 @@ package com.taosdata.jdbc.utils; +import com.taosdata.jdbc.TSDBError; +import com.taosdata.jdbc.TSDBErrorNumbers; import org.apache.http.HeaderElement; 
import org.apache.http.HeaderElementIterator; import org.apache.http.HttpEntity; +import org.apache.http.client.ClientProtocolException; +import org.apache.http.client.HttpRequestRetryHandler; import org.apache.http.client.config.RequestConfig; import org.apache.http.client.methods.*; import org.apache.http.client.protocol.HttpClientContext; import org.apache.http.conn.ConnectionKeepAliveStrategy; import org.apache.http.entity.StringEntity; import org.apache.http.impl.client.CloseableHttpClient; -import org.apache.http.impl.client.DefaultHttpRequestRetryHandler; import org.apache.http.impl.client.HttpClients; import org.apache.http.impl.conn.PoolingHttpClientConnectionManager; import org.apache.http.message.BasicHeaderElementIterator; @@ -17,35 +20,24 @@ import org.apache.http.protocol.HTTP; import org.apache.http.protocol.HttpContext; import org.apache.http.util.EntityUtils; +import javax.net.ssl.SSLException; +import java.io.IOException; +import java.io.InterruptedIOException; +import java.net.UnknownHostException; import java.nio.charset.StandardCharsets; - +import java.sql.SQLException; public class HttpClientPoolUtil { private static final String DEFAULT_CONTENT_TYPE = "application/json"; + private static final int DEFAULT_MAX_TOTAL = 200; + private static final int DEFAULT_MAX_PER_ROUTE = 20; private static final int DEFAULT_TIME_OUT = 15000; - private static final int DEFAULT_MAX_PER_ROUTE = 32; - private static final int DEFAULT_MAX_TOTAL = 1000; private static final int DEFAULT_HTTP_KEEP_TIME = 15000; - - private static CloseableHttpClient httpClient; - - private static synchronized void initPools() { - if (httpClient == null) { - PoolingHttpClientConnectionManager connectionManager = new PoolingHttpClientConnectionManager(); - connectionManager.setDefaultMaxPerRoute(DEFAULT_MAX_PER_ROUTE); - connectionManager.setMaxTotal(DEFAULT_MAX_TOTAL); - httpClient = HttpClients.custom() - .setKeepAliveStrategy(DEFAULT_KEEP_ALIVE_STRATEGY) - .setConnectionManager(connectionManager) - .setRetryHandler(new DefaultHttpRequestRetryHandler(3, true)) - .build(); - } - } + private static final int DEFAULT_MAX_RETRY_COUNT = 5; private static final ConnectionKeepAliveStrategy DEFAULT_KEEP_ALIVE_STRATEGY = (response, context) -> { HeaderElementIterator it = new BasicHeaderElementIterator(response.headerIterator(HTTP.CONN_KEEP_ALIVE)); - int keepTime = DEFAULT_HTTP_KEEP_TIME * 1000; while (it.hasNext()) { HeaderElement headerElement = it.nextElement(); String param = headerElement.getName(); @@ -53,34 +45,73 @@ public class HttpClientPoolUtil { if (value != null && param.equalsIgnoreCase("timeout")) { try { return Long.parseLong(value) * 1000; - } catch (Exception e) { - new Exception("format KeepAlive timeout exception, exception:" + e.toString()).printStackTrace(); + } catch (NumberFormatException ignore) { } } } - return keepTime; + return DEFAULT_HTTP_KEEP_TIME * 1000; }; - /** - * 执行http post请求 - * 默认采用Content-Type:application/json,Accept:application/json - * - * @param uri 请求地址 - * @param data 请求数据 - * @return responseBody - */ - public static String execute(String uri, String data, String token) { - long startTime = System.currentTimeMillis(); + private static final HttpRequestRetryHandler retryHandler = (exception, executionCount, httpContext) -> { + if (executionCount >= DEFAULT_MAX_RETRY_COUNT) + // do not retry if over max retry count + return false; + if (exception instanceof InterruptedIOException) + // timeout + return false; + if (exception instanceof UnknownHostException) + // unknown 
host + return false; + if (exception instanceof SSLException) + // SSL handshake exception + return false; + return true; + }; + + private static CloseableHttpClient httpClient; + + static { + PoolingHttpClientConnectionManager connectionManager = new PoolingHttpClientConnectionManager(); + connectionManager.setMaxTotal(DEFAULT_MAX_TOTAL); + connectionManager.setDefaultMaxPerRoute(DEFAULT_MAX_PER_ROUTE); + httpClient = HttpClients.custom().setKeepAliveStrategy(DEFAULT_KEEP_ALIVE_STRATEGY).setConnectionManager(connectionManager).setRetryHandler(retryHandler).build(); + } + + /*** execute GET request ***/ + public static String execute(String uri) throws SQLException { HttpEntity httpEntity = null; - HttpEntityEnclosingRequestBase method = null; String responseBody = ""; try { - if (httpClient == null) { - initPools(); + HttpRequestBase method = getRequest(uri, HttpGet.METHOD_NAME); + HttpContext context = HttpClientContext.create(); + CloseableHttpResponse httpResponse = httpClient.execute(method, context); + httpEntity = httpResponse.getEntity(); + if (httpEntity != null) { + responseBody = EntityUtils.toString(httpEntity, StandardCharsets.UTF_8); } - method = (HttpEntityEnclosingRequestBase) getRequest(uri, HttpPost.METHOD_NAME, DEFAULT_CONTENT_TYPE, 0); - method.setHeader("Content-Type", "text/plain"); - method.setHeader("Connection", "keep-alive"); + } catch (ClientProtocolException e) { + e.printStackTrace(); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESTFul_Client_Protocol_Exception, e.getMessage()); + } catch (IOException exception) { + exception.printStackTrace(); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESTFul_Client_IOException, exception.getMessage()); + } finally { + if (httpEntity != null) { + EntityUtils.consumeQuietly(httpEntity); + } + } + return responseBody; + } + + + /*** execute POST request ***/ + public static String execute(String uri, String data, String token) throws SQLException { + HttpEntity httpEntity = null; + String responseBody = ""; + try { + HttpEntityEnclosingRequestBase method = (HttpEntityEnclosingRequestBase) getRequest(uri, HttpPost.METHOD_NAME); + method.setHeader(HTTP.CONTENT_TYPE, "text/plain"); + method.setHeader(HTTP.CONN_DIRECTIVE, HTTP.CONN_KEEP_ALIVE); method.setHeader("Authorization", "Taosd " + token); method.setEntity(new StringEntity(data, StandardCharsets.UTF_8)); @@ -88,46 +119,31 @@ public class HttpClientPoolUtil { CloseableHttpResponse httpResponse = httpClient.execute(method, context); httpEntity = httpResponse.getEntity(); if (httpEntity != null) { - responseBody = EntityUtils.toString(httpEntity, "UTF-8"); + responseBody = EntityUtils.toString(httpEntity, StandardCharsets.UTF_8); } - } catch (Exception e) { - if (method != null) { - method.abort(); - } - new Exception("execute post request exception, url:" + uri + ", exception:" + e.toString() + ", cost time(ms):" + (System.currentTimeMillis() - startTime)).printStackTrace(); + } catch (ClientProtocolException e) { + e.printStackTrace(); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESTFul_Client_Protocol_Exception, e.getMessage()); + } catch (IOException exception) { + exception.printStackTrace(); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESTFul_Client_IOException, exception.getMessage()); } finally { if (httpEntity != null) { - try { - EntityUtils.consumeQuietly(httpEntity); - } catch (Exception e) { - new Exception("close response exception, url:" + uri + ", exception:" + e.toString() + ", cost time(ms):" + 
(System.currentTimeMillis() - startTime)).printStackTrace(); - } + EntityUtils.consumeQuietly(httpEntity); } } return responseBody; } - /** - * * 创建请求 - * - * @param uri 请求url - * @param methodName 请求的方法类型 - * @param contentType contentType类型 - * @param timeout 超时时间 - * @return HttpRequestBase 返回类型 - * @author lisc - */ - private static HttpRequestBase getRequest(String uri, String methodName, String contentType, int timeout) { - if (httpClient == null) { - initPools(); - } + /*** create http request ***/ + private static HttpRequestBase getRequest(String uri, String methodName) { HttpRequestBase method; - if (timeout <= 0) { - timeout = DEFAULT_TIME_OUT; - } - RequestConfig requestConfig = RequestConfig.custom().setSocketTimeout(timeout * 1000) - .setConnectTimeout(timeout * 1000).setConnectionRequestTimeout(timeout * 1000) - .setExpectContinueEnabled(false).build(); + RequestConfig requestConfig = RequestConfig.custom() + .setSocketTimeout(DEFAULT_TIME_OUT * 1000) + .setConnectTimeout(DEFAULT_TIME_OUT * 1000) + .setConnectionRequestTimeout(DEFAULT_TIME_OUT * 1000) + .setExpectContinueEnabled(false) + .build(); if (HttpPut.METHOD_NAME.equalsIgnoreCase(methodName)) { method = new HttpPut(uri); } else if (HttpPost.METHOD_NAME.equalsIgnoreCase(methodName)) { @@ -137,52 +153,10 @@ public class HttpClientPoolUtil { } else { method = new HttpPost(uri); } - - if (contentType == null || contentType.isEmpty() || contentType.replaceAll("\\s", "").isEmpty()) { - contentType = DEFAULT_CONTENT_TYPE; - } - method.addHeader("Content-Type", contentType); - method.addHeader("Accept", contentType); + method.addHeader(HTTP.CONTENT_TYPE, DEFAULT_CONTENT_TYPE); + method.addHeader("Accept", DEFAULT_CONTENT_TYPE); method.setConfig(requestConfig); return method; } - /** - * 执行GET 请求 - * - * @param uri 网址 - * @return responseBody - */ - public static String execute(String uri) { - long startTime = System.currentTimeMillis(); - HttpEntity httpEntity = null; - HttpRequestBase method = null; - String responseBody = ""; - try { - if (httpClient == null) { - initPools(); - } - method = getRequest(uri, HttpGet.METHOD_NAME, DEFAULT_CONTENT_TYPE, 0); - HttpContext context = HttpClientContext.create(); - CloseableHttpResponse httpResponse = httpClient.execute(method, context); - httpEntity = httpResponse.getEntity(); - if (httpEntity != null) { - responseBody = EntityUtils.toString(httpEntity, "UTF-8"); - } - } catch (Exception e) { - if (method != null) { - method.abort(); - } - e.printStackTrace(); - } finally { - if (httpEntity != null) { - try { - EntityUtils.consumeQuietly(httpEntity); - } catch (Exception e) { - new Exception("close response exception, url:" + uri + ", exception:" + e.toString() + ",cost time(ms):" + (System.currentTimeMillis() - startTime)).printStackTrace(); - } - } - } - return responseBody; - } } \ No newline at end of file From 09e52f9d95322ad4cc407e31500f1142b18bda05 Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Tue, 27 Jul 2021 14:27:41 +0800 Subject: [PATCH 59/81] [TD-5497]: add test case --- tests/pytest/query/nestedQuery/queryWithOrderLimit.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tests/pytest/query/nestedQuery/queryWithOrderLimit.py b/tests/pytest/query/nestedQuery/queryWithOrderLimit.py index 26eda1120b..692b5b7d36 100644 --- a/tests/pytest/query/nestedQuery/queryWithOrderLimit.py +++ b/tests/pytest/query/nestedQuery/queryWithOrderLimit.py @@ -70,6 +70,14 @@ class TDTestCase: tdSql.query("select * from (select avg(value), sum(value) from st group by tbname slimit 5 
soffset 7)") tdSql.checkRows(3) + # https://jira.taosdata.com:18080/browse/TD-5497 + tdSql.execute("create table tt(ts timestamp ,i int)") + tdSql.execute("insert into tt values(now, 11)(now + 1s, -12)") + tdSql.query("select * from (select max(i),0-min(i) from tt)") + tdSql.checkRows(1); + tdSql.checkData(0, 0, 11); + tdSql.checkData(0, 1, 12.0); + def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) From f89a8ea6d63bf7fb0b5f96f5927352e69bae308f Mon Sep 17 00:00:00 2001 From: lichuang Date: Tue, 27 Jul 2021 14:33:30 +0800 Subject: [PATCH 60/81] [TD-5554]fix memory heap runtime overflow --- src/tsdb/src/tsdbMemTable.c | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/src/tsdb/src/tsdbMemTable.c b/src/tsdb/src/tsdbMemTable.c index 9ac5503e5f..ee9dadd9b7 100644 --- a/src/tsdb/src/tsdbMemTable.c +++ b/src/tsdb/src/tsdbMemTable.c @@ -1019,7 +1019,7 @@ static void updateTableLatestColumn(STsdbRepo *pRepo, STable *pTable, SMemRow ro if (isDataRow) { value = tdGetRowDataOfCol(memRowDataBody(row), (int8_t)pTCol->type, - TD_DATA_ROW_HEAD_SIZE + pSchema->columns[j].offset); + TD_DATA_ROW_HEAD_SIZE + pTCol->offset); } else { // SKVRow SColIdx *pColIdx = tdGetKVRowIdxOfCol(memRowKvBody(row), pTCol->colId); @@ -1034,14 +1034,17 @@ static void updateTableLatestColumn(STsdbRepo *pRepo, STable *pTable, SMemRow ro SDataCol *pDataCol = &(pLatestCols[idx]); if (pDataCol->pData == NULL) { - pDataCol->pData = malloc(pSchema->columns[j].bytes); - pDataCol->bytes = pSchema->columns[j].bytes; - } else if (pDataCol->bytes < pSchema->columns[j].bytes) { - pDataCol->pData = realloc(pDataCol->pData, pSchema->columns[j].bytes); - pDataCol->bytes = pSchema->columns[j].bytes; + pDataCol->pData = malloc(pTCol->bytes); + pDataCol->bytes = pTCol->bytes; + } else if (pDataCol->bytes < pTCol->bytes) { + pDataCol->pData = realloc(pDataCol->pData, pTCol->bytes); + pDataCol->bytes = pTCol->bytes; } - - memcpy(pDataCol->pData, value, pDataCol->bytes); + // the actual value size + uint16_t bytes = IS_VAR_DATA_TYPE(pTCol->type) ? varDataTLen(value) : pTCol->bytes; + // the actual data size CANNOT larger than column size + assert(pTCol->bytes >= bytes); + memcpy(pDataCol->pData, value, bytes); //tsdbInfo("updateTableLatestColumn vgId:%d cache column %d for %d,%s", REPO_ID(pRepo), j, pDataCol->bytes, (char*)pDataCol->pData); pDataCol->ts = memRowKey(row); } From 7557df65eca4b4cdf89a60764d3294fd40c40eaa Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Tue, 27 Jul 2021 14:49:46 +0800 Subject: [PATCH 61/81] [TD-5555] : remove arm32 official support for server side. 
--- documentation20/cn/02.getting-started/docs.md | 14 ++++++-------- documentation20/en/02.getting-started/docs.md | 8 +++----- 2 files changed, 9 insertions(+), 13 deletions(-) diff --git a/documentation20/cn/02.getting-started/docs.md b/documentation20/cn/02.getting-started/docs.md index c67b9e61b9..ab10b28fd3 100644 --- a/documentation20/cn/02.getting-started/docs.md +++ b/documentation20/cn/02.getting-started/docs.md @@ -179,17 +179,15 @@ taos> select avg(current), max(voltage), min(phase) from test.d10 interval(10s); | | **CentOS 6/7/8** | **Ubuntu 16/18/20** | **Other Linux** | **统信 UOS** | **银河/中标麒麟** | **凝思 V60/V80** | **华为 EulerOS** | | -------------- | --------------------- | ------------------------ | --------------- | --------------- | ------------------------- | --------------------- | --------------------- | | X64 | ● | ● | | ○ | ● | ● | ● | -| 树莓派 ARM32 | | ● | ● | | | | | | 龙芯 MIPS64 | | | ● | | | | | -| 鲲鹏 ARM64 | | ○ | ○ | | ● | | | -| 申威 Alpha64 | | | ○ | ● | | | | +| 鲲鹏 ARM64 | | ○ | ○ | | ● | | | +| 申威 Alpha64 | | | ○ | ● | | | | | 飞腾 ARM64 | | ○ 优麒麟 | | | | | | | 海光 X64 | ● | ● | ● | ○ | ● | ● | | -| 瑞芯微 ARM64/32 | | | ○ | | | | | -| 全志 ARM64/32 | | | ○ | | | | | -| 炬力 ARM64/32 | | | ○ | | | | | -| TI ARM32 | | | ○ | | | | | -| 华为云 ARM64 | | | | | | | ● | +| 瑞芯微 ARM64 | | | ○ | | | | | +| 全志 ARM64 | | | ○ | | | | | +| 炬力 ARM64 | | | ○ | | | | | +| 华为云 ARM64 | | | | | | | ● | 注: ● 表示经过官方测试验证, ○ 表示非官方测试验证。 diff --git a/documentation20/en/02.getting-started/docs.md b/documentation20/en/02.getting-started/docs.md index 19af3b5f31..3c9d9ac6af 100644 --- a/documentation20/en/02.getting-started/docs.md +++ b/documentation20/en/02.getting-started/docs.md @@ -188,16 +188,14 @@ List of platforms supported by TDengine server | | **CentOS 6/7/8** | **Ubuntu 16/18/20** | **Other Linux** | UnionTech UOS | NeoKylin | LINX V60/V80 | | ------------------ | ---------------- | ------------------- | --------------- | ------------- | -------- | ------------ | | X64 | ● | ● | | ○ | ● | ● | -| Raspberry ARM32 | | ● | ● | | | | | Loongson MIPS64 | | | ● | | | | | Kunpeng ARM64 | | ○ | ○ | | ● | | | SWCPU Alpha64 | | | ○ | ● | | | | FT ARM64 | | ○Ubuntu Kylin | | | | | | Hygon X64 | ● | ● | ● | ○ | ● | ● | -| Rockchip ARM64/32 | | | ○ | | | | -| Allwinner ARM64/32 | | | ○ | | | | -| Actions ARM64/32 | | | ○ | | | | -| TI ARM32 | | | ○ | | | | +| Rockchip ARM64 | | | ○ | | | | +| Allwinner ARM64 | | | ○ | | | | +| Actions ARM64 | | | ○ | | | | Note: ● has been verified by official tests; ○ has been verified by unofficial tests. 
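The TD-5504 and TD-5554 fixes earlier in this series share one sizing rule: when caching a column value, the number of bytes to copy must come from the value itself for variable-length types (its length prefix), not from the declared column width. A minimal, self-contained C sketch of that rule follows; all names here (DemoCol, demoValueBytes, demoCacheLastValue, the 2-byte length prefix) are illustrative assumptions, not TDengine's actual structs or macros.

/* Illustrative sketch only -- hypothetical names, not TDengine's API. */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

enum { DEMO_TYPE_INT = 1, DEMO_TYPE_BINARY = 2 };   /* BINARY stands in for any var-length type */

typedef struct {
    int8_t   type;      /* column type tag */
    uint16_t maxBytes;  /* declared column width: an upper bound for var-length values */
} DemoCol;

/* Var-length payloads are assumed to begin with a 2-byte length prefix. */
static uint16_t demoValueBytes(const DemoCol *col, const void *value) {
    if (col->type == DEMO_TYPE_BINARY) {
        uint16_t len;
        memcpy(&len, value, sizeof(len));      /* read the length prefix */
        return (uint16_t)(sizeof(len) + len);  /* prefix + payload, i.e. the value's true size */
    }
    return col->maxBytes;                      /* fixed-size types use the full declared width */
}

/* Cache a last-seen value: allocate for the worst case, copy only what the value occupies. */
static void *demoCacheLastValue(const DemoCol *col, const void *value, uint16_t *cachedBytes) {
    uint16_t bytes = demoValueBytes(col, value);
    if (bytes > col->maxBytes) return NULL;    /* a valid row never exceeds the column width */
    void *dst = malloc(col->maxBytes);
    if (dst != NULL) {
        /* copying col->maxBytes from a shorter var-length source is the heap overflow the patches remove */
        memcpy(dst, value, bytes);
        *cachedBytes = bytes;
    }
    return dst;
}

The same pattern appears three times above: twice in tsdbRestoreLastColumns (patches 53 and 54) and once in updateTableLatestColumn (patch 60), where copying the full declared width from a shorter variable-length source was the reported heap overflow.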
From 84cbdeb8a7d75c787149ca10a915223347d99a78 Mon Sep 17 00:00:00 2001 From: wenzhouwww Date: Tue, 27 Jul 2021 15:40:24 +0800 Subject: [PATCH 62/81] [TD-5370]:new branch for test case about taosdump nano support --- tests/pytest/fulltest.sh | 2 +- tests/pytest/tools/taosdumpTestNanoSupport.py | 362 ++++++++++++++++++ tests/pytest/util/sql.py | 15 +- 3 files changed, 377 insertions(+), 2 deletions(-) create mode 100644 tests/pytest/tools/taosdumpTestNanoSupport.py diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index cd93025666..a805323d78 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -163,7 +163,7 @@ python3 test.py -f tools/taosdemoTestSampleData.py python3 test.py -f tools/taosdemoTestInterlace.py python3 test.py -f tools/taosdemoTestQuery.py - +python3 test.py -f tools/taosdumpTestNanoSupport.py # update python3 ./test.py -f update/allow_update.py diff --git a/tests/pytest/tools/taosdumpTestNanoSupport.py b/tests/pytest/tools/taosdumpTestNanoSupport.py new file mode 100644 index 0000000000..ca8832170b --- /dev/null +++ b/tests/pytest/tools/taosdumpTestNanoSupport.py @@ -0,0 +1,362 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + self.ts = 1625068800000000000 # this is timestamp "2021-07-01 00:00:00" + self.numberOfTables = 10 + self.numberOfRecords = 100 + + def checkCommunity(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + if ("community" in selfPath): + return False + else: + return True + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosdump" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + + + def createdb(self, precision="ns"): + tb_nums = self.numberOfTables + per_tb_rows = self.numberOfRecords + + def build_db(precision, start_time): + tdSql.execute("drop database if exists timedb1") + tdSql.execute( + "create database timedb1 days 10 keep 365 blocks 8 precision "+"\""+precision+"\"") + + tdSql.execute("use timedb1") + tdSql.execute( + "create stable st(ts timestamp, c1 int, c2 nchar(10),c3 timestamp) tags(t1 int, t2 binary(10))") + for tb in range(tb_nums): + tbname = "t"+str(tb) + tdSql.execute("create table " + tbname + + " using st tags(1, 'beijing')") + sql = "insert into " + tbname + " values" + currts = start_time + if precision == "ns": + ts_seed = 1000000000 + elif precision == "us": + ts_seed = 1000000 + else: + ts_seed = 1000 + + for i in range(per_tb_rows): + sql += "(%d, %d, 'nchar%d',%d)" % (currts + i*ts_seed, i % + 100, i % 
100, currts + i*100) # currts +1000ms (1000000000ns) + tdSql.execute(sql) + + if precision == "ns": + start_time = 1625068800000000000 + build_db(precision, start_time) + + elif precision == "us": + start_time = 1625068800000000 + build_db(precision, start_time) + + elif precision == "ms": + start_time = 1625068800000 + build_db(precision, start_time) + + else: + print("other time precision not valid , please check! ") + + + def run(self): + + # clear envs + os.system("rm -rf ./taosdumptest/") + tdSql.execute("drop database if exists dumptmp1") + tdSql.execute("drop database if exists dumptmp2") + tdSql.execute("drop database if exists dumptmp3") + + if not os.path.exists("./taosdumptest/tmp1"): + os.makedirs("./taosdumptest/dumptmp1") + else: + print("path exist!") + + if not os.path.exists("./taosdumptest/dumptmp2"): + os.makedirs("./taosdumptest/dumptmp2") + + if not os.path.exists("./taosdumptest/dumptmp3"): + os.makedirs("./taosdumptest/dumptmp3") + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosdump not found!") + else: + tdLog.info("taosdump found in %s" % buildPath) + binPath = buildPath + "/build/bin/" + + # create nano second database + + self.createdb(precision="ns") + + # dump all data + + os.system( + "%staosdump --databases timedb1 -o ./taosdumptest/dumptmp1" % binPath) + + # dump part data with -S -E + os.system( + '%staosdump --databases timedb1 -S 1625068810000000000 -E 1625068860000000000 -C ns -o ./taosdumptest/dumptmp2 ' % + binPath) + os.system( + '%staosdump --databases timedb1 -S 1625068810000000000 -o ./taosdumptest/dumptmp3 ' % + binPath) + + # replace strings to dump in databases + os.system( + "sed -i \"s/timedb1/dumptmp1/g\" `grep timedb1 -rl ./taosdumptest/dumptmp1`") + os.system( + "sed -i \"s/timedb1/dumptmp2/g\" `grep timedb1 -rl ./taosdumptest/dumptmp2`") + os.system( + "sed -i \"s/timedb1/dumptmp3/g\" `grep timedb1 -rl ./taosdumptest/dumptmp3`") + + os.system( "%staosdump -i ./taosdumptest/dumptmp1" %binPath) + os.system( "%staosdump -i ./taosdumptest/dumptmp2" %binPath) + os.system( "%staosdump -i ./taosdumptest/dumptmp3" %binPath) + + # dump data and check for taosdump + tdSql.query("select count(*) from dumptmp1.st") + tdSql.checkData(0,0,1000) + + tdSql.query("select count(*) from dumptmp2.st") + tdSql.checkData(0,0,510) + + tdSql.query("select count(*) from dumptmp3.st") + tdSql.checkData(0,0,900) + + # check data + origin_res = tdSql.getResult("select * from timedb1.st") + dump_res = tdSql.getResult("select * from dumptmp1.st") + if origin_res == dump_res: + tdLog.info("test nano second : dump check data pass for all data!" ) + else: + tdLog.info("test nano second : dump check data failed for all data!" ) + + origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000000000 and ts <= 1625068860000000000") + dump_res = tdSql.getResult("select * from dumptmp2.st") + if origin_res == dump_res: + tdLog.info(" test nano second : dump check data pass for data! " ) + else: + tdLog.info(" test nano second : dump check data failed for data !" ) + + origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000000000 ") + dump_res = tdSql.getResult("select * from dumptmp3.st") + if origin_res == dump_res: + tdLog.info(" test nano second : dump check data pass for data! " ) + else: + tdLog.info(" test nano second : dump check data failed for data !" 
) + + + # us second support test case + + os.system("rm -rf ./taosdumptest/") + tdSql.execute("drop database if exists dumptmp1") + tdSql.execute("drop database if exists dumptmp2") + tdSql.execute("drop database if exists dumptmp3") + + if not os.path.exists("./taosdumptest/tmp1"): + os.makedirs("./taosdumptest/dumptmp1") + else: + print("path exits!") + + if not os.path.exists("./taosdumptest/dumptmp2"): + os.makedirs("./taosdumptest/dumptmp2") + + if not os.path.exists("./taosdumptest/dumptmp3"): + os.makedirs("./taosdumptest/dumptmp3") + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosdump not found!") + else: + tdLog.info("taosdump found in %s" % buildPath) + binPath = buildPath + "/build/bin/" + + self.createdb(precision="us") + + os.system( + "%staosdump --databases timedb1 -o ./taosdumptest/dumptmp1" % binPath) + + os.system( + '%staosdump --databases timedb1 -S 1625068810000000 -E 1625068860000000 -C us -o ./taosdumptest/dumptmp2 ' % + binPath) + os.system( + '%staosdump --databases timedb1 -S 1625068810000000 -o ./taosdumptest/dumptmp3 ' % + binPath) + + os.system( + "sed -i \"s/timedb1/dumptmp1/g\" `grep timedb1 -rl ./taosdumptest/dumptmp1`") + os.system( + "sed -i \"s/timedb1/dumptmp2/g\" `grep timedb1 -rl ./taosdumptest/dumptmp2`") + os.system( + "sed -i \"s/timedb1/dumptmp3/g\" `grep timedb1 -rl ./taosdumptest/dumptmp3`") + + os.system( "%staosdump -i ./taosdumptest/dumptmp1" %binPath) + os.system( "%staosdump -i ./taosdumptest/dumptmp2" %binPath) + os.system( "%staosdump -i ./taosdumptest/dumptmp3" %binPath) + + + tdSql.query("select count(*) from dumptmp1.st") + tdSql.checkData(0,0,1000) + + tdSql.query("select count(*) from dumptmp2.st") + tdSql.checkData(0,0,510) + + tdSql.query("select count(*) from dumptmp3.st") + tdSql.checkData(0,0,900) + + + origin_res = tdSql.getResult("select * from timedb1.st") + dump_res = tdSql.getResult("select * from dumptmp1.st") + if origin_res == dump_res: + tdLog.info("test us second : dump check data pass for all data!" ) + else: + tdLog.info("test us second : dump check data failed for all data!" ) + + origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000000 and ts <= 1625068860000000") + dump_res = tdSql.getResult("select * from dumptmp2.st") + if origin_res == dump_res: + tdLog.info(" test us second : dump check data pass for data! " ) + else: + tdLog.info(" test us second : dump check data failed for data!" ) + + origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000000 ") + dump_res = tdSql.getResult("select * from dumptmp3.st") + if origin_res == dump_res: + tdLog.info(" test us second : dump check data pass for data! " ) + else: + tdLog.info(" test us second : dump check data failed for data! 
" ) + + + # ms second support test case + + os.system("rm -rf ./taosdumptest/") + tdSql.execute("drop database if exists dumptmp1") + tdSql.execute("drop database if exists dumptmp2") + tdSql.execute("drop database if exists dumptmp3") + + if not os.path.exists("./taosdumptest/tmp1"): + os.makedirs("./taosdumptest/dumptmp1") + else: + print("path exits!") + + if not os.path.exists("./taosdumptest/dumptmp2"): + os.makedirs("./taosdumptest/dumptmp2") + + if not os.path.exists("./taosdumptest/dumptmp3"): + os.makedirs("./taosdumptest/dumptmp3") + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosdump not found!") + else: + tdLog.info("taosdump found in %s" % buildPath) + binPath = buildPath + "/build/bin/" + + self.createdb(precision="ms") + + os.system( + "%staosdump --databases timedb1 -o ./taosdumptest/dumptmp1" % binPath) + + os.system( + '%staosdump --databases timedb1 -S 1625068810000 -E 1625068860000 -C ms -o ./taosdumptest/dumptmp2 ' % + binPath) + os.system( + '%staosdump --databases timedb1 -S 1625068810000 -o ./taosdumptest/dumptmp3 ' % + binPath) + + os.system( + "sed -i \"s/timedb1/dumptmp1/g\" `grep timedb1 -rl ./taosdumptest/dumptmp1`") + os.system( + "sed -i \"s/timedb1/dumptmp2/g\" `grep timedb1 -rl ./taosdumptest/dumptmp2`") + os.system( + "sed -i \"s/timedb1/dumptmp3/g\" `grep timedb1 -rl ./taosdumptest/dumptmp3`") + + os.system( "%staosdump -i ./taosdumptest/dumptmp1" %binPath) + os.system( "%staosdump -i ./taosdumptest/dumptmp2" %binPath) + os.system( "%staosdump -i ./taosdumptest/dumptmp3" %binPath) + + + tdSql.query("select count(*) from dumptmp1.st") + tdSql.checkData(0,0,1000) + + tdSql.query("select count(*) from dumptmp2.st") + tdSql.checkData(0,0,510) + + tdSql.query("select count(*) from dumptmp3.st") + tdSql.checkData(0,0,900) + + + origin_res = tdSql.getResult("select * from timedb1.st") + dump_res = tdSql.getResult("select * from dumptmp1.st") + if origin_res == dump_res: + tdLog.info("test ms second : dump check data pass for all data!" ) + else: + tdLog.info("test ms second : dump check data failed for all data!" ) + + origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000 and ts <= 1625068860000") + dump_res = tdSql.getResult("select * from dumptmp2.st") + if origin_res == dump_res: + tdLog.info(" test ms second : dump check data pass for data! " ) + else: + tdLog.info(" test ms second : dump check data failed for data!" ) + + origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000 ") + dump_res = tdSql.getResult("select * from dumptmp3.st") + if origin_res == dump_res: + tdLog.info(" test ms second : dump check data pass for data! " ) + else: + tdLog.info(" test ms second : dump check data failed for data! 
" ) + + + os.system("rm -rf ./taosdumptest/") + os.system("rm -rf ./dump_result.txt") + os.system("rm -rf *.py.sql") + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/util/sql.py b/tests/pytest/util/sql.py index 4eb0c8f857..4a04552206 100644 --- a/tests/pytest/util/sql.py +++ b/tests/pytest/util/sql.py @@ -197,6 +197,19 @@ class TDSql: self.checkRowCol(row, col) return self.queryResult[row][col] + def getResult(self, sql): + self.sql = sql + try: + self.cursor.execute(sql) + self.queryResult = self.cursor.fetchall() + except Exception as e: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, sql, repr(e)) + tdLog.notice("%s(%d) failed: sql:%s, %s" % args) + raise Exception(repr(e)) + return self.queryResult + + def executeTimes(self, sql, times): for i in range(times): try: @@ -282,4 +295,4 @@ class TDSql: tdLog.info("dir: %s is created" %dir) pass -tdSql = TDSql() +tdSql = TDSql() \ No newline at end of file From 0cdb7c9e71ce020f425dad5c3a738f45c506eb0a Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Tue, 27 Jul 2021 18:08:50 +0800 Subject: [PATCH 63/81] [TD-5072] : fix description about underscore wildcard. --- documentation20/cn/12.taos-sql/docs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md index 55db543cc3..72f4876dcf 100644 --- a/documentation20/cn/12.taos-sql/docs.md +++ b/documentation20/cn/12.taos-sql/docs.md @@ -208,7 +208,7 @@ TDengine 缺省的时间戳是毫秒精度,但通过在 CREATE DATABASE 时传 说明:可在like中使用通配符进行名称的匹配,这一通配符字符串最长不能超过24字节。 - 通配符匹配:1)'%'(百分号)匹配0到任意个字符;2)'\_'下划线匹配单个任意字符。(如果希望匹配表名中带有的下划线,那么这里可以用反斜线进行转义,也就是说 '\\\_' 会被用于匹配表名中原始带有的下划线符号) + 通配符匹配:1)'%'(百分号)匹配0到任意个字符;2)'\_'下划线匹配单个任意字符。 - **显示一个数据表的创建语句** From 6faf1d5bb3e98af1b587299b9a14bd95ec30ebca Mon Sep 17 00:00:00 2001 From: haoranchen Date: Tue, 27 Jul 2021 18:26:10 +0800 Subject: [PATCH 64/81] Update sdbCompClusterReplica2.py --- tests/pytest/wal/sdbCompClusterReplica2.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/pytest/wal/sdbCompClusterReplica2.py b/tests/pytest/wal/sdbCompClusterReplica2.py index dd5a375151..ba80e3864a 100644 --- a/tests/pytest/wal/sdbCompClusterReplica2.py +++ b/tests/pytest/wal/sdbCompClusterReplica2.py @@ -86,7 +86,7 @@ class TwoClients: tdSql.execute("alter table stb2_0 add column col2 binary(4)") tdSql.execute("alter table stb2_0 drop column col1") tdSql.execute("insert into stb2_0 values(1614218422000,8638,'R')") - tdSql.execute("drop dnode 2") + tdSql.execute("drop dnode chenhaoran02") sleep(10) os.system("rm -rf /var/lib/taos/*") print("clear dnode chenhaoran02'data files") @@ -142,4 +142,4 @@ class TwoClients: clients = TwoClients() clients.initConnection() # clients.getBuildPath() -clients.run() \ No newline at end of file +clients.run() From e0046dbb25f032b71a1745254309480ca07a50da Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 27 Jul 2021 22:46:03 +0800 Subject: [PATCH 65/81] [td-5175]: refactor the retry procedure while the error caused by tablemeta cache occurs. 
--- src/client/inc/tscUtil.h | 2 + src/client/src/tscAsync.c | 102 +++++------------------------------ src/client/src/tscServer.c | 74 +++++++++++-------------- src/client/src/tscSubquery.c | 39 +++++++++++--- src/client/src/tscUtil.c | 16 ++++++ 5 files changed, 94 insertions(+), 139 deletions(-) diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h index a4df124fa2..d0aecfe1a2 100644 --- a/src/client/inc/tscUtil.h +++ b/src/client/inc/tscUtil.h @@ -354,6 +354,8 @@ char* strdup_throw(const char* str); bool vgroupInfoIdentical(SNewVgroupInfo *pExisted, SVgroupMsg* src); SNewVgroupInfo createNewVgroupInfo(SVgroupMsg *pVgroupMsg); +void tscRemoveTableMetaBuf(STableMetaInfo* pTableMetaInfo, uint64_t id); + #ifdef __cplusplus } #endif diff --git a/src/client/src/tscAsync.c b/src/client/src/tscAsync.c index d857d00e15..910a80d6af 100644 --- a/src/client/src/tscAsync.c +++ b/src/client/src/tscAsync.c @@ -325,61 +325,6 @@ void tscAsyncResultOnError(SSqlObj* pSql) { int tscSendMsgToServer(SSqlObj *pSql); -static int32_t updateMetaBeforeRetryQuery(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo, SQueryInfo* pQueryInfo) { - // handle the invalid table error code for super table. - // update the pExpr info, colList info, number of table columns - // TODO Re-parse this sql and issue the corresponding subquery as an alternative for this case. - if (pSql->retryReason == TSDB_CODE_TDB_INVALID_TABLE_ID) { - int32_t numOfExprs = (int32_t) tscNumOfExprs(pQueryInfo); - int32_t numOfCols = tscGetNumOfColumns(pTableMetaInfo->pTableMeta); - int32_t numOfTags = tscGetNumOfTags(pTableMetaInfo->pTableMeta); - - SSchema *pSchema = tscGetTableSchema(pTableMetaInfo->pTableMeta); - SSchema *pTagSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta); - - for (int32_t i = 0; i < numOfExprs; ++i) { - SSqlExpr *pExpr = &(tscExprGet(pQueryInfo, i)->base); - - // update the table uid - pExpr->uid = pTableMetaInfo->pTableMeta->id.uid; - - if (pExpr->colInfo.colIndex >= 0) { - int32_t index = pExpr->colInfo.colIndex; - - if ((TSDB_COL_IS_NORMAL_COL(pExpr->colInfo.flag) && index >= numOfCols) || - (TSDB_COL_IS_TAG(pExpr->colInfo.flag) && (index < 0 || index >= numOfTags))) { - return pSql->retryReason; - } - - if (TSDB_COL_IS_TAG(pExpr->colInfo.flag)) { - if ((pTagSchema[pExpr->colInfo.colIndex].colId != pExpr->colInfo.colId) && - strcasecmp(pExpr->colInfo.name, pTagSchema[pExpr->colInfo.colIndex].name) != 0) { - return pSql->retryReason; - } - } else if (TSDB_COL_IS_NORMAL_COL(pExpr->colInfo.flag)) { - if ((pSchema[pExpr->colInfo.colIndex].colId != pExpr->colInfo.colId) && - strcasecmp(pExpr->colInfo.name, pSchema[pExpr->colInfo.colIndex].name) != 0) { - return pSql->retryReason; - } - } else { // do nothing for udc - } - } - } - - // validate the table columns information - for (int32_t i = 0; i < taosArrayGetSize(pQueryInfo->colList); ++i) { - SColumn *pCol = taosArrayGetP(pQueryInfo->colList, i); - if (pCol->columnIndex >= numOfCols) { - return pSql->retryReason; - } - } - } else { - // do nothing - } - - return TSDB_CODE_SUCCESS; -} - void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { SSqlObj* pSql = (SSqlObj*)taosAcquireRef(tscObjRef, (int64_t)param); if (pSql == NULL) return; @@ -391,7 +336,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { pRes->code = code; SSqlObj *sub = (SSqlObj*) res; - const char* msg = (sub->cmd.command == TSDB_SQL_STABLEVGROUP)? "vgroup-list":"table-meta"; + const char* msg = (sub->cmd.command == TSDB_SQL_STABLEVGROUP)? 
"vgroup-list":"[multi-]tableMeta"; if (code != TSDB_CODE_SUCCESS) { tscError("0x%"PRIx64" get %s failed, code:%s", pSql->self, msg, tstrerror(code)); goto _error; @@ -401,31 +346,11 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { if (pSql->pStream == NULL) { SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd); - // check if it is a sub-query of super table query first, if true, enter another routine - if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, (TSDB_QUERY_TYPE_STABLE_SUBQUERY | TSDB_QUERY_TYPE_SUBQUERY | - TSDB_QUERY_TYPE_TAG_FILTER_QUERY))) { - tscDebug("0x%" PRIx64 " update cached table-meta, continue to process sql and send the corresponding query", pSql->self); - STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); +// assert(TSDB_QUERY_HAS_TYPE(pQueryInfo->type, (TSDB_QUERY_TYPE_STABLE_SUBQUERY | TSDB_QUERY_TYPE_SUBQUERY | TSDB_QUERY_TYPE_TAG_FILTER_QUERY)) == 0); - code = tscGetTableMeta(pSql, pTableMetaInfo); - assert(code == TSDB_CODE_TSC_ACTION_IN_PROGRESS || code == TSDB_CODE_SUCCESS); - - if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { - taosReleaseRef(tscObjRef, pSql->self); - return; - } - - assert((tscGetNumOfTags(pTableMetaInfo->pTableMeta) != 0)); - code = updateMetaBeforeRetryQuery(pSql, pTableMetaInfo, pQueryInfo); - if (code != TSDB_CODE_SUCCESS) { - goto _error; - } - - // tscBuildAndSendRequest can add error into async res - tscBuildAndSendRequest(pSql, NULL); - taosReleaseRef(tscObjRef, pSql->self); - return; - } else { // continue to process normal async query + // super table subquery failure will be ignored +// if (!TSDB_QUERY_HAS_TYPE(pQueryInfo->type, (TSDB_QUERY_TYPE_STABLE_SUBQUERY | TSDB_QUERY_TYPE_SUBQUERY | +// TSDB_QUERY_TYPE_TAG_FILTER_QUERY))) { if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_INSERT)) { tscDebug("0x%" PRIx64 " continue parse sql after get table-meta", pSql->self); @@ -437,7 +362,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { goto _error; } - if (TSDB_QUERY_HAS_TYPE(pCmd->insertParam.insertType, TSDB_QUERY_TYPE_STMT_INSERT)) { + if (TSDB_QUERY_HAS_TYPE(pCmd->insertParam.insertType, TSDB_QUERY_TYPE_STMT_INSERT)) { // stmt insert STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); code = tscGetTableMeta(pSql, pTableMetaInfo); if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { @@ -448,17 +373,14 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { } (*pSql->fp)(pSql->param, pSql, code); - } else { - if (TSDB_QUERY_HAS_TYPE(pCmd->insertParam.insertType, TSDB_QUERY_TYPE_FILE_INSERT)) { - tscImportDataFromFile(pSql); - } else { - tscHandleMultivnodeInsert(pSql); - } + } else if (TSDB_QUERY_HAS_TYPE(pCmd->insertParam.insertType, TSDB_QUERY_TYPE_FILE_INSERT)) { // file insert + tscImportDataFromFile(pSql); + } else { // sql string insert + tscHandleMultivnodeInsert(pSql); } } else { if (pSql->retryReason != TSDB_CODE_SUCCESS) { - tscDebug("0x%" PRIx64 " update cached table-meta, re-validate sql statement and send query again", - pSql->self); + tscDebug("0x%" PRIx64 " update cached table-meta, re-validate sql statement and send query again", pSql->self); tscResetSqlCmd(pCmd, false); pSql->retryReason = TSDB_CODE_SUCCESS; } else { @@ -479,7 +401,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { taosReleaseRef(tscObjRef, pSql->self); return; - } +// } } else { // stream computing tscDebug("0x%"PRIx64" stream:%p meta is updated, start new query, command:%d", pSql->self, pSql->pStream, pCmd->command); diff --git a/src/client/src/tscServer.c 
b/src/client/src/tscServer.c index b60a28b9fd..dd17e25f58 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -390,33 +390,40 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) { pSql->cmd.insertParam.schemaAttached = 1; } + // single table query error need to be handled here. if ((cmd == TSDB_SQL_SELECT || cmd == TSDB_SQL_UPDATE_TAGS_VAL) && - (rpcMsg->code == TSDB_CODE_TDB_INVALID_TABLE_ID || // change the retry procedure - /*(*/rpcMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID || + (((rpcMsg->code == TSDB_CODE_TDB_INVALID_TABLE_ID || // change the retry procedure + rpcMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID)) || rpcMsg->code == TSDB_CODE_RPC_NETWORK_UNAVAIL || // change the retry procedure rpcMsg->code == TSDB_CODE_APP_NOT_READY)) { - - pSql->retry++; - tscWarn("0x%"PRIx64" it shall renew table meta, code:%s, retry:%d", pSql->self, tstrerror(rpcMsg->code), pSql->retry); - pSql->res.code = rpcMsg->code; // keep the previous error code - if (pSql->retry > pSql->maxRetry) { - tscError("0x%"PRIx64" max retry %d reached, give up", pSql->self, pSql->maxRetry); - } else { - // wait for a little bit moment and then retry - // todo do not sleep in rpc callback thread, add this process into queue to process - if (rpcMsg->code == TSDB_CODE_APP_NOT_READY || rpcMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID) { - int32_t duration = getWaitingTimeInterval(pSql->retry); - taosMsleep(duration); - } + if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, (TSDB_QUERY_TYPE_STABLE_SUBQUERY | TSDB_QUERY_TYPE_SUBQUERY | + TSDB_QUERY_TYPE_TAG_FILTER_QUERY)) && + !TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_PROJECTION_QUERY)) { + // do nothing in case of super table subquery + } else { + pSql->retry += 1; + tscWarn("0x%" PRIx64 " it shall renew table meta, code:%s, retry:%d", pSql->self, tstrerror(rpcMsg->code), pSql->retry); - pSql->retryReason = rpcMsg->code; - rpcMsg->code = tscRenewTableMeta(pSql, 0); - // if there is an error occurring, proceed to the following error handling procedure. - if (rpcMsg->code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { - taosReleaseRef(tscObjRef, handle); - rpcFreeCont(rpcMsg->pCont); - return; + pSql->res.code = rpcMsg->code; // keep the previous error code + if (pSql->retry > pSql->maxRetry) { + tscError("0x%" PRIx64 " max retry %d reached, give up", pSql->self, pSql->maxRetry); + } else { + // wait for a little bit moment and then retry + // todo do not sleep in rpc callback thread, add this process into queue to process + if (rpcMsg->code == TSDB_CODE_APP_NOT_READY || rpcMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID) { + int32_t duration = getWaitingTimeInterval(pSql->retry); + taosMsleep(duration); + } + + pSql->retryReason = rpcMsg->code; + rpcMsg->code = tscRenewTableMeta(pSql, 0); + // if there is an error occurring, proceed to the following error handling procedure. 
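+          // TSDB_CODE_TSC_ACTION_IN_PROGRESS is not a failure here: the tableMeta request
+          // has been sent asynchronously and the query is resumed from the meta callback,
+          // so release the reference, free the message and stop handling this response.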
+ if (rpcMsg->code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { + taosReleaseRef(tscObjRef, handle); + rpcFreeCont(rpcMsg->pCont); + return; + } } } } @@ -2521,14 +2528,7 @@ int tscProcessDropDbRsp(SSqlObj *pSql) { int tscProcessDropTableRsp(SSqlObj *pSql) { STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0); - - //The cached tableMeta is expired in this case, so clean it in hash table - char name[TSDB_TABLE_FNAME_LEN] = {0}; - tNameExtractFullName(&pTableMetaInfo->name, name); - - taosHashRemove(tscTableMetaMap, name, strnlen(name, TSDB_TABLE_FNAME_LEN)); - tscDebug("0x%"PRIx64" remove table meta after drop table:%s, numOfRemain:%d", pSql->self, name, (int32_t) taosHashGetSize(tscTableMetaMap)); - + tscRemoveTableMetaBuf(pTableMetaInfo, pSql->self); tfree(pTableMetaInfo->pTableMeta); return 0; } @@ -2915,9 +2915,7 @@ static void freeElem(void* p) { * @return status code */ int tscRenewTableMeta(SSqlObj *pSql, int32_t tableIndex) { - SSqlCmd *pCmd = &pSql->cmd; - - SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd); + SQueryInfo *pQueryInfo = tscGetQueryInfo(&pSql->cmd); STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, tableIndex); char name[TSDB_TABLE_FNAME_LEN] = {0}; @@ -2934,15 +2932,7 @@ int tscRenewTableMeta(SSqlObj *pSql, int32_t tableIndex) { } // remove stored tableMeta info in hash table - size_t len = strlen(name); - taosHashRemove(tscTableMetaMap, name, len); - - if (pTableMeta->tableType == TSDB_SUPER_TABLE) { - void* pv = taosCacheAcquireByKey(tscVgroupListBuf, name, len); - if (pv != NULL) { - taosCacheRelease(tscVgroupListBuf, &pv, true); - } - } + tscRemoveTableMetaBuf(pTableMetaInfo, pSql->self); SArray* pNameList = taosArrayInit(1, POINTER_BYTES); SArray* vgroupList = taosArrayInit(1, POINTER_BYTES); diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index b98ffd7638..7c10114cfd 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2704,8 +2704,7 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO tstrerror(pParentSql->res.code)); // release allocated resource - tscDestroyGlobalMergerEnv(trsupport->pExtMemBuffer, trsupport->pOrderDescriptor, - pState->numOfSub); + tscDestroyGlobalMergerEnv(trsupport->pExtMemBuffer, trsupport->pOrderDescriptor, pState->numOfSub); tscFreeRetrieveSup(pSql); @@ -2713,7 +2712,35 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO SQueryInfo *pQueryInfo = tscGetQueryInfo(&pParentSql->cmd); if (!TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_SEC_STAGE)) { - (*pParentSql->fp)(pParentSql->param, pParentSql, pParentSql->res.code); + + int32_t code = pParentSql->res.code; + if ((code == TSDB_CODE_TDB_INVALID_TABLE_ID || code == TSDB_CODE_VND_INVALID_VGROUP_ID) && pParentSql->retry < pParentSql->maxRetry) { + // remove the cached tableMeta and vgroup id list, and then parse the sql again + STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pParentSql->cmd, 0); + tscRemoveTableMetaBuf(pTableMetaInfo, pParentSql->self); + + tscResetSqlCmd(&pParentSql->cmd, true); + pParentSql->res.code = TSDB_CODE_SUCCESS; + pParentSql->retry++; + + tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", pParentSql->self, + tstrerror(code), pParentSql->retry); + + code = tsParseSql(pParentSql, true); + if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { + return; + } + + if (code != TSDB_CODE_SUCCESS) { + pParentSql->res.code = code; + tscAsyncResultOnError(pParentSql); + return; + } + + 
executeQuery(pParentSql, pQueryInfo); + } else { + (*pParentSql->fp)(pParentSql->param, pParentSql, pParentSql->res.code); + } } else { // regular super table query if (pParentSql->res.code != TSDB_CODE_SUCCESS) { tscAsyncResultOnError(pParentSql); @@ -2996,7 +3023,7 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) { if (taos_errno(pSql) != TSDB_CODE_SUCCESS) { assert(code == taos_errno(pSql)); - if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY) { + if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY && (code != TSDB_CODE_TDB_INVALID_TABLE_ID)) { tscError("0x%"PRIx64" sub:0x%"PRIx64" failed code:%s, retry:%d", pParentSql->self, pSql->self, tstrerror(code), trsupport->numOfRetry); int32_t sent = 0; @@ -3005,7 +3032,7 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) { return; } } else { - tscError("0x%"PRIx64" sub:0x%"PRIx64" reach the max retry times, set global code:%s", pParentSql->self, pSql->self, tstrerror(code)); + tscError("0x%"PRIx64" sub:0x%"PRIx64" reach the max retry times or no need to retry, set global code:%s", pParentSql->self, pSql->self, tstrerror(code)); atomic_val_compare_exchange_32(&pParentSql->res.code, TSDB_CODE_SUCCESS, code); // set global code and abort } @@ -3129,8 +3156,6 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows) } pParentObj->res.code = TSDB_CODE_SUCCESS; -// pParentObj->cmd.parseFinished = false; - tscResetSqlCmd(&pParentObj->cmd, false); // in case of insert, redo parsing the sql string and build new submit data block for two reasons: diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 4454844ea0..ae5a7a69a8 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -4857,3 +4857,19 @@ SNewVgroupInfo createNewVgroupInfo(SVgroupMsg *pVgroupMsg) { return info; } + +void tscRemoveTableMetaBuf(STableMetaInfo* pTableMetaInfo, uint64_t id) { + char fname[TSDB_TABLE_FNAME_LEN] = {0}; + tNameExtractFullName(&pTableMetaInfo->name, fname); + + int32_t len = (int32_t) strnlen(fname, TSDB_TABLE_FNAME_LEN); + if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { + void* pv = taosCacheAcquireByKey(tscVgroupListBuf, fname, len); + if (pv != NULL) { + taosCacheRelease(tscVgroupListBuf, &pv, true); + } + } + + taosHashRemove(tscTableMetaMap, fname, len); + tscDebug("0x%"PRIx64" remove table meta %s, numOfRemain:%d", id, fname, (int32_t) taosHashGetSize(tscTableMetaMap)); +} \ No newline at end of file From 7aaa16c88d18cc965ab79af1b0e1b014699de5cd Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Wed, 28 Jul 2021 11:51:33 +0800 Subject: [PATCH 66/81] [TD-5589] : fix picture url about replica restore. --- documentation20/cn/03.architecture/02.replica/docs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation20/cn/03.architecture/02.replica/docs.md b/documentation20/cn/03.architecture/02.replica/docs.md index 59192ee0cc..183306eed8 100644 --- a/documentation20/cn/03.architecture/02.replica/docs.md +++ b/documentation20/cn/03.architecture/02.replica/docs.md @@ -140,7 +140,7 @@ TDengine采取的是Master-Slave模式进行同步,与流行的RAFT一致性 整个数据恢复流程分为两大步骤,第一步,先恢复archived data(file), 然后恢复wal。具体流程如下: -![replica-forward.png](page://images/architecture/replica-forward.png) +![replica-restore.png](page://images/architecture/replica-restore.png) 1. 通过已经建立的TCP连接,发送sync req给master节点 2. 
master收到sync req后,以client的身份,向vnode B主动建立一新的专用于同步的TCP连接(syncFd) From 7845fc96821ec8f29fd5ca813e67cbaa38e17f9c Mon Sep 17 00:00:00 2001 From: happyguoxy Date: Wed, 28 Jul 2021 14:04:59 +0800 Subject: [PATCH 67/81] [TD-5074]:test operator cost --- tests/pytest/query/operator_cost.py | 539 ++++++++++++++++++++++++++++ 1 file changed, 539 insertions(+) create mode 100644 tests/pytest/query/operator_cost.py diff --git a/tests/pytest/query/operator_cost.py b/tests/pytest/query/operator_cost.py new file mode 100644 index 0000000000..b94d5fa3b3 --- /dev/null +++ b/tests/pytest/query/operator_cost.py @@ -0,0 +1,539 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql +import random +import time + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + self.ts = 1600000000000 + self.num = 10 + + def run(self): + tdSql.prepare() + # test case for https://jira.taosdata.com:18080/browse/TD-5074 + + startTime = time.time() + + tdSql.execute('''create stable stable_1 + (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint, + q_bool bool , q_binary binary(20) , q_nchar nchar(20) , + q_float float , q_double double , q_ts timestamp) + tags(loc nchar(20) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, + t_bool bool , t_binary binary(20) , t_nchar nchar(20) , + t_float float , t_double double , t_ts timestamp);''') + tdSql.execute('''create stable stable_2 + (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint, + q_bool bool , q_binary binary(20) , q_nchar nchar(20) , + q_float float , q_double double , q_ts timestamp) + tags(loc nchar(20) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, + t_bool bool , t_binary binary(20) , t_nchar nchar(20) , + t_float float , t_double double , t_ts timestamp);''') + tdSql.execute('''create table table_0 using stable_1 + tags('table_0' , '0' , '0' , '0' , '0' , 0 , '0' , '0' , '0' , '0' ,'0')''') + tdSql.execute('''create table table_1 using stable_1 + tags('table_1' , '2147483647' , '9223372036854775807' , '32767' , '127' , 1 , + 'binary1' , 'nchar1' , '1' , '11' , \'1999-09-09 09:09:09.090\')''') + tdSql.execute('''create table table_2 using stable_1 + tags('table_2' , '-2147483647' , '-9223372036854775807' , '-32767' , '-127' , false , + 'binary2' , 'nchar2nchar2' , '-2.2' , '-22.22' , \'2099-09-09 09:09:09.090\')''') + tdSql.execute('''create table table_3 using stable_1 + tags('table_3' , '3' , '3' , '3' , '3' , true , 'binary3' , 'nchar3' , '33.33' , '3333.3333' , '0')''') + tdSql.execute('''create table table_4 using stable_1 + tags('table_4' , '4' , '4' , '4' , '4' , false , 'binary4' , 'nchar4' , '-444.444' , '-444444.444444' , '0')''') + tdSql.execute('''create table table_5 using stable_1 + tags('table_5' , '5' , '5' , '5' , '5' , true , 'binary5' , 'nchar5' , '5555.5555' , '55555555.55555555' , '0')''') + 
tdSql.execute('''create table table_21 using stable_2 + tags('table_5' , '5' , '5' , '5' , '5' , true , 'binary5' , 'nchar5' , '5555.5555' , '55555555.55555555' , '0')''') + #regular table + tdSql.execute('''create table regular_table_1 + (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint, + q_bool bool , q_binary binary(20) , q_nchar nchar(20) , + q_float float , q_double double , q_ts timestamp) ;''') + + for i in range(self.num): + tdSql.execute('''insert into table_0 values(%d, %d, %d, %d, %d, 0, 'binary.%s', 'nchar.%s', %f, %f, %d)''' + % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) + tdSql.execute('''insert into table_1 values(%d, %d, %d, %d, %d, 1, 'binary1.%s', 'nchar1.%s', %f, %f, %d)''' + % (self.ts + i, 2147483647-i, 9223372036854775807-i, 32767-i, 127-i, + i, i, random.random(), random.random(), 1262304000001 + i)) + tdSql.execute('''insert into table_2 values(%d, %d, %d, %d, %d, true, 'binary2.%s', 'nchar2nchar2.%s', %f, %f, %d)''' + % (self.ts + i, -2147483647+i, -9223372036854775807+i, -32767+i, -127+i, + i, i, random.uniform(-1,0), random.uniform(-1,0), 1577836800001 + i)) + tdSql.execute('''insert into table_3 values(%d, %d, %d, %d, %d, false, 'binary3.%s', 'nchar3.%s', %f, %f, %d)''' + % (self.ts + i, random.randint(-2147483647, 2147483647), + random.randint(-9223372036854775807, 9223372036854775807), random.randint(-32767, 32767), + random.randint(-127, 127), random.randint(-100, 100), random.randint(-10000, 10000), + random.uniform(-100000,100000), random.uniform(-1000000000,1000000000), self.ts + i)) + tdSql.execute('''insert into table_4 values(%d, %d, %d, %d, %d, true, 'binary4.%s', 'nchar4.%s', %f, %f, %d)''' + % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) + tdSql.execute('''insert into table_5 values(%d, %d, %d, %d, %d, false, 'binary5.%s', 'nchar5.%s', %f, %f, %d)''' + % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) + tdSql.execute('''insert into table_21 values(%d, %d, %d, %d, %d, false, 'binary5.%s', 'nchar5.%s', %f, %f, %d)''' + % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) + + tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, 0, 'binary.%s', 'nchar.%s', %f, %f, %d)''' + % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) + tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, 1, 'binary1.%s', 'nchar1.%s', %f, %f, %d)''' + % (self.ts + 100 + i, 2147483647-i, 9223372036854775807-i, 32767-i, 127-i, + i, i, random.random(), random.random(), 1262304000001 + i)) + tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, true, 'binary2.%s', 'nchar2nchar2.%s', %f, %f, %d)''' + % (self.ts + 200 + i, -2147483647+i, -9223372036854775807+i, -32767+i, -127+i, + i, i, random.uniform(-1,0), random.uniform(-1,0), 1577836800001 + i)) + tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, false, 'binary3.%s', 'nchar3.%s', %f, %f, %d)''' + % (self.ts + 300 + i, random.randint(-2147483647, 2147483647), + random.randint(-9223372036854775807, 9223372036854775807), random.randint(-32767, 32767), + random.randint(-127, 127), random.randint(-100, 100), random.randint(-10000, 10000), + random.uniform(-100000,100000), random.uniform(-1000000000,1000000000), self.ts + i)) + tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, true, 'binary4.%s', 'nchar4.%s', %f, %f, %d)''' + % (self.ts + 400 + i, i, i, i, i, i, i, i, i, self.ts + i)) + tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, false, 'binary5.%s', 
'nchar5.%s', %f, %f, %d)''' + % (self.ts + 500 + i, i, i, i, i, i, i, i, i, self.ts + i)) + + tdLog.info("========== operator=1(OP_TableScan) ==========") + tdLog.info("========== operator=7(OP_Project) ==========") + sql = '''select * from stable_1''' + tdSql.query(sql) + tdSql.checkRows(6*self.num) + sql = '''select * from regular_table_1''' + tdSql.query(sql) + tdSql.checkRows(6*self.num) + + tdLog.info("========== operator=14(OP_MultiTableAggregate ) ==========") + sql = '''select last_row(*) from stable_1;''' + tdSql.query(sql) + tdSql.checkData(0,1,self.num-1) + + tdLog.info("========== operator=6(OP_Aggregate) ==========") + sql = '''select last_row(*) from regular_table_1;''' + tdSql.query(sql) + tdSql.checkData(0,1,self.num-1) + + tdLog.info("========== operator=9(OP_Limit) ==========") + sql = '''select * from stable_1 where loc = 'table_0' limit 5;''' + tdSql.query(sql) + tdSql.checkRows(5) + sql = '''select last_row(*) from (select * from stable_1 where loc = 'table_0');''' + tdSql.query(sql) + tdSql.checkRows(1) + + sql = '''select * from regular_table_1 ;''' + tdSql.query(sql) + tdSql.checkRows(6*self.num) + sql = '''select last_row(*) from (select * from regular_table_1);''' + tdSql.query(sql) + tdSql.checkRows(1) + tdSql.checkData(0,1,self.num-1) + + + sql = '''select last_row(*) from + ((select * from table_0) union all + (select * from table_1) union all + (select * from table_2));''' + tdSql.error(sql) + + tdLog.info("========== operator=16(OP_DummyInput) ==========") + sql = '''select last_row(*) from + ((select last_row(*) from table_0) union all + (select last_row(*) from table_1) union all + (select last_row(*) from table_2));''' + tdSql.error(sql) + + sql = '''select last_row(*) from + ((select * from table_0 limit 5 offset 5) union all + (select * from table_1 limit 5 offset 5) union all + (select * from regular_table_1 limit 5 offset 5));''' + tdSql.error(sql) + + tdLog.info("========== operator=10(OP_SLimit) ==========") + sql = '''select count(*) from stable_1 group by loc slimit 3 soffset 2 ;''' + tdSql.query(sql) + tdSql.checkRows(3) + + sql = '''select last_row(*) from + ((select * from table_0) union all + (select * from table_1) union all + (select * from table_2));''' + tdSql.error(sql) + + tdLog.info("========== operator=20(OP_Distinct) ==========") + tdLog.info("========== operator=4(OP_TagScan) ==========") + sql = '''select distinct(t_bool) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(2) + sql = '''select distinct(loc) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_int) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_bigint) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_smallint) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_tinyint) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_binary) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_nchar) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_float) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_double) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_ts) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(3) + sql = '''select distinct(tbname) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + + tdLog.info("========== 
operator=2(OP_DataBlocksOptScan) ==========") + sql = '''select last(q_int),first(q_int) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_bigint),first(q_bigint) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_smallint),first(q_smallint) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_tinyint),first(q_tinyint) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_bool),first(q_bool) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_binary),first(q_binary) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_nchar),first(q_nchar) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_float),first(q_float) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_double),first(q_double) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_ts),first(q_ts) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint),last(q_bool),last(q_binary),last(q_nchar), + last(q_float),last(q_double),last(q_ts),first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint), + first(q_bool),first(q_binary),first(q_nchar),first(q_float),first(q_float),first(q_double),first(q_ts) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint),last(q_bool),last(q_binary),last(q_nchar), + last(q_float),last(q_double),last(q_ts),first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint),first(q_bool), + first(q_binary),first(q_nchar),first(q_float),first(q_float),first(q_double),first(q_ts) from regular_table_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + + tdLog.info("========== operator=8(OP_Groupby) ==========") + sql = '''select stddev(q_int) from table_0 group by q_int;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select stddev(q_int),stddev(q_bigint),stddev(q_smallint),stddev(q_tinyint),stddev(q_float),stddev(q_double) from stable_1 group by q_int;''' + tdSql.query(sql) + sql = '''select stddev(q_int),stddev(q_bigint),stddev(q_smallint),stddev(q_tinyint),stddev(q_float),stddev(q_double) from table_1 group by q_bigint;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select stddev(q_int),stddev(q_bigint),stddev(q_smallint),stddev(q_tinyint),stddev(q_float),stddev(q_double) from regular_table_1 group by q_smallint;''' + tdSql.query(sql) + + tdLog.info("========== operator=11(OP_TimeWindow) ==========") + sql = '''select last(q_int) from table_0 interval(1m);''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint), + first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint) from table_1 interval(1m);''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint), + first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint) from stable_1 interval(1m);''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint), + first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint) from regular_table_1 interval(1m);''' + tdSql.query(sql) + tdSql.checkRows(1) + + tdLog.info("========== operator=12(OP_SessionWindow) ==========") + sql = '''select count(*) from table_1 
session(ts,1s);''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select count(*) from regular_table_1 session(ts,1s);''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select count(*),sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from table_1 session(ts,1s);''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select count(*),sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from regular_table_1 session(ts,1s);''' + tdSql.query(sql) + tdSql.checkRows(1) + + tdLog.info("========== operator=13(OP_Fill) ==========") + sql = '''select sum(q_int) from table_0 + where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' + tdSql.query(sql) + tdSql.checkData(0,1,'None') + sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from stable_1 where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' + tdSql.query(sql) + tdSql.checkData(0,1,'None') + sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from regular_table_1 where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' + tdSql.query(sql) + tdSql.checkData(0,1,'None') + sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), 
last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from table_0 where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' + tdSql.query(sql) + tdSql.checkData(0,1,'None') + #TD-5190 + sql = '''select sum(q_tinyint),stddev(q_float) from stable_1 + where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' + tdSql.query(sql) + tdSql.checkData(0,1,'None') + + tdLog.info("========== operator=15(OP_MultiTableTimeInterval) ==========") + sql = '''select avg(q_int) from stable_1 where ts=0;''' + tdSql.query(sql) + tdSql.checkData(0,0,'table_0') + sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from stable_1 group by loc having sum(q_int)>=0;''' + tdSql.query(sql) + tdSql.checkData(0,0,'table_0') + sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from stable_1 group by loc having avg(q_int)>=0;''' + tdSql.query(sql) + tdSql.checkData(0,0,'table_0') + sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from stable_1 group by loc having min(q_int)>=0;''' + tdSql.query(sql) + tdSql.checkData(0,0,'table_0') + sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from stable_1 group by loc having max(q_int)>=0;''' + tdSql.query(sql) + tdSql.checkData(0,0,'table_0') + sql = '''select loc, sum(q_int), avg(q_int), min(q_int), 
max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from stable_1 group by loc having first(q_int)>=0;''' + tdSql.query(sql) + tdSql.checkData(0,0,'table_0') + sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from stable_1 group by loc having last(q_int)>=0;''' + tdSql.query(sql) + tdSql.checkData(0,0,'table_0') + + tdLog.info("========== operator=21(OP_Join) ==========") + sql = '''select t1.q_int,t2.q_int from + (select ts,q_int from table_1) t1 , (select ts,q_int from table_2) t2 + where t2.ts = t1.ts;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select t1.*,t2.* from + (select * from table_1) t1 , (select * from table_2) t2 + where t2.ts = t1.ts;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select t1.*,t2.* from + (select * from regular_table_1) t1 , (select * from table_0) t2 + where t2.ts = t1.ts;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select t1.*,t2.* from + (select * from stable_1) t1 , (select * from table_2) t2 + where t2.ts = t1.ts;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select t1.*,t2.* from + (select * from regular_table_1) t1 , (select * from stable_1) t2 + where t2.ts = t1.ts;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select t1.*,t2.*,t3.* from + (select * from regular_table_1) t1 , (select * from stable_1) t2, (select * from table_0) t3 + where t2.ts = t1.ts and t3.ts = t1.ts and t2.ts = t3.ts;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + + tdLog.info("========== operator=22(OP_StateWindow) ==========") + sql = '''select avg(q_int),sum(q_smallint) from table_1 state_window(q_int);''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from table_1 state_window(q_bigint);''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + 
sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from regular_table_1 state_window(q_smallint);''' + tdSql.query(sql) + tdSql.checkRows(6*self.num) + + endTime = time.time() + print("total time %ds" % (endTime - startTime)) + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file From 8b6d976e86db51c9106c054747bc48aacc243b05 Mon Sep 17 00:00:00 2001 From: happyguoxy Date: Wed, 28 Jul 2021 14:05:16 +0800 Subject: [PATCH 68/81] [TD-5074]:test operator cost --- tests/pytest/query/operator.py | 539 --------------------------------- 1 file changed, 539 deletions(-) delete mode 100644 tests/pytest/query/operator.py diff --git a/tests/pytest/query/operator.py b/tests/pytest/query/operator.py deleted file mode 100644 index b94d5fa3b3..0000000000 --- a/tests/pytest/query/operator.py +++ /dev/null @@ -1,539 +0,0 @@ -################################################################### -# Copyright (c) 2016 by TAOS Technologies, Inc. -# All rights reserved. -# -# This file is proprietary and confidential to TAOS Technologies. -# No part of this file may be reproduced, stored, transmitted, -# disclosed or used in any form or by any means other than as -# expressly provided by the written permission from Jianhui Tao -# -################################################################### - -# -*- coding: utf-8 -*- - -import sys -import taos -from util.log import tdLog -from util.cases import tdCases -from util.sql import tdSql -import random -import time - - -class TDTestCase: - def init(self, conn, logSql): - tdLog.debug("start to execute %s" % __file__) - tdSql.init(conn.cursor(), logSql) - - self.ts = 1600000000000 - self.num = 10 - - def run(self): - tdSql.prepare() - # test case for https://jira.taosdata.com:18080/browse/TD-5074 - - startTime = time.time() - - tdSql.execute('''create stable stable_1 - (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint, - q_bool bool , q_binary binary(20) , q_nchar nchar(20) , - q_float float , q_double double , q_ts timestamp) - tags(loc nchar(20) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, - t_bool bool , t_binary binary(20) , t_nchar nchar(20) , - t_float float , t_double double , t_ts timestamp);''') - tdSql.execute('''create stable stable_2 - (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint, - q_bool bool , q_binary binary(20) , q_nchar nchar(20) , - q_float float , q_double double , q_ts timestamp) - tags(loc nchar(20) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, - t_bool bool , t_binary binary(20) , t_nchar nchar(20) , - t_float float , t_double double , t_ts timestamp);''') - tdSql.execute('''create table table_0 using stable_1 - tags('table_0' , '0' , '0' , '0' , '0' , 0 , '0' , '0' , '0' , '0' ,'0')''') - tdSql.execute('''create table table_1 using stable_1 - tags('table_1' , '2147483647' , '9223372036854775807' , '32767' , '127' , 1 , - 'binary1' , 'nchar1' , '1' , '11' , \'1999-09-09 09:09:09.090\')''') - tdSql.execute('''create table table_2 
using stable_1 - tags('table_2' , '-2147483647' , '-9223372036854775807' , '-32767' , '-127' , false , - 'binary2' , 'nchar2nchar2' , '-2.2' , '-22.22' , \'2099-09-09 09:09:09.090\')''') - tdSql.execute('''create table table_3 using stable_1 - tags('table_3' , '3' , '3' , '3' , '3' , true , 'binary3' , 'nchar3' , '33.33' , '3333.3333' , '0')''') - tdSql.execute('''create table table_4 using stable_1 - tags('table_4' , '4' , '4' , '4' , '4' , false , 'binary4' , 'nchar4' , '-444.444' , '-444444.444444' , '0')''') - tdSql.execute('''create table table_5 using stable_1 - tags('table_5' , '5' , '5' , '5' , '5' , true , 'binary5' , 'nchar5' , '5555.5555' , '55555555.55555555' , '0')''') - tdSql.execute('''create table table_21 using stable_2 - tags('table_5' , '5' , '5' , '5' , '5' , true , 'binary5' , 'nchar5' , '5555.5555' , '55555555.55555555' , '0')''') - #regular table - tdSql.execute('''create table regular_table_1 - (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint, - q_bool bool , q_binary binary(20) , q_nchar nchar(20) , - q_float float , q_double double , q_ts timestamp) ;''') - - for i in range(self.num): - tdSql.execute('''insert into table_0 values(%d, %d, %d, %d, %d, 0, 'binary.%s', 'nchar.%s', %f, %f, %d)''' - % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) - tdSql.execute('''insert into table_1 values(%d, %d, %d, %d, %d, 1, 'binary1.%s', 'nchar1.%s', %f, %f, %d)''' - % (self.ts + i, 2147483647-i, 9223372036854775807-i, 32767-i, 127-i, - i, i, random.random(), random.random(), 1262304000001 + i)) - tdSql.execute('''insert into table_2 values(%d, %d, %d, %d, %d, true, 'binary2.%s', 'nchar2nchar2.%s', %f, %f, %d)''' - % (self.ts + i, -2147483647+i, -9223372036854775807+i, -32767+i, -127+i, - i, i, random.uniform(-1,0), random.uniform(-1,0), 1577836800001 + i)) - tdSql.execute('''insert into table_3 values(%d, %d, %d, %d, %d, false, 'binary3.%s', 'nchar3.%s', %f, %f, %d)''' - % (self.ts + i, random.randint(-2147483647, 2147483647), - random.randint(-9223372036854775807, 9223372036854775807), random.randint(-32767, 32767), - random.randint(-127, 127), random.randint(-100, 100), random.randint(-10000, 10000), - random.uniform(-100000,100000), random.uniform(-1000000000,1000000000), self.ts + i)) - tdSql.execute('''insert into table_4 values(%d, %d, %d, %d, %d, true, 'binary4.%s', 'nchar4.%s', %f, %f, %d)''' - % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) - tdSql.execute('''insert into table_5 values(%d, %d, %d, %d, %d, false, 'binary5.%s', 'nchar5.%s', %f, %f, %d)''' - % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) - tdSql.execute('''insert into table_21 values(%d, %d, %d, %d, %d, false, 'binary5.%s', 'nchar5.%s', %f, %f, %d)''' - % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) - - tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, 0, 'binary.%s', 'nchar.%s', %f, %f, %d)''' - % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) - tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, 1, 'binary1.%s', 'nchar1.%s', %f, %f, %d)''' - % (self.ts + 100 + i, 2147483647-i, 9223372036854775807-i, 32767-i, 127-i, - i, i, random.random(), random.random(), 1262304000001 + i)) - tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, true, 'binary2.%s', 'nchar2nchar2.%s', %f, %f, %d)''' - % (self.ts + 200 + i, -2147483647+i, -9223372036854775807+i, -32767+i, -127+i, - i, i, random.uniform(-1,0), random.uniform(-1,0), 1577836800001 + i)) - tdSql.execute('''insert into 
regular_table_1 values(%d, %d, %d, %d, %d, false, 'binary3.%s', 'nchar3.%s', %f, %f, %d)''' - % (self.ts + 300 + i, random.randint(-2147483647, 2147483647), - random.randint(-9223372036854775807, 9223372036854775807), random.randint(-32767, 32767), - random.randint(-127, 127), random.randint(-100, 100), random.randint(-10000, 10000), - random.uniform(-100000,100000), random.uniform(-1000000000,1000000000), self.ts + i)) - tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, true, 'binary4.%s', 'nchar4.%s', %f, %f, %d)''' - % (self.ts + 400 + i, i, i, i, i, i, i, i, i, self.ts + i)) - tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, false, 'binary5.%s', 'nchar5.%s', %f, %f, %d)''' - % (self.ts + 500 + i, i, i, i, i, i, i, i, i, self.ts + i)) - - tdLog.info("========== operator=1(OP_TableScan) ==========") - tdLog.info("========== operator=7(OP_Project) ==========") - sql = '''select * from stable_1''' - tdSql.query(sql) - tdSql.checkRows(6*self.num) - sql = '''select * from regular_table_1''' - tdSql.query(sql) - tdSql.checkRows(6*self.num) - - tdLog.info("========== operator=14(OP_MultiTableAggregate ) ==========") - sql = '''select last_row(*) from stable_1;''' - tdSql.query(sql) - tdSql.checkData(0,1,self.num-1) - - tdLog.info("========== operator=6(OP_Aggregate) ==========") - sql = '''select last_row(*) from regular_table_1;''' - tdSql.query(sql) - tdSql.checkData(0,1,self.num-1) - - tdLog.info("========== operator=9(OP_Limit) ==========") - sql = '''select * from stable_1 where loc = 'table_0' limit 5;''' - tdSql.query(sql) - tdSql.checkRows(5) - sql = '''select last_row(*) from (select * from stable_1 where loc = 'table_0');''' - tdSql.query(sql) - tdSql.checkRows(1) - - sql = '''select * from regular_table_1 ;''' - tdSql.query(sql) - tdSql.checkRows(6*self.num) - sql = '''select last_row(*) from (select * from regular_table_1);''' - tdSql.query(sql) - tdSql.checkRows(1) - tdSql.checkData(0,1,self.num-1) - - - sql = '''select last_row(*) from - ((select * from table_0) union all - (select * from table_1) union all - (select * from table_2));''' - tdSql.error(sql) - - tdLog.info("========== operator=16(OP_DummyInput) ==========") - sql = '''select last_row(*) from - ((select last_row(*) from table_0) union all - (select last_row(*) from table_1) union all - (select last_row(*) from table_2));''' - tdSql.error(sql) - - sql = '''select last_row(*) from - ((select * from table_0 limit 5 offset 5) union all - (select * from table_1 limit 5 offset 5) union all - (select * from regular_table_1 limit 5 offset 5));''' - tdSql.error(sql) - - tdLog.info("========== operator=10(OP_SLimit) ==========") - sql = '''select count(*) from stable_1 group by loc slimit 3 soffset 2 ;''' - tdSql.query(sql) - tdSql.checkRows(3) - - sql = '''select last_row(*) from - ((select * from table_0) union all - (select * from table_1) union all - (select * from table_2));''' - tdSql.error(sql) - - tdLog.info("========== operator=20(OP_Distinct) ==========") - tdLog.info("========== operator=4(OP_TagScan) ==========") - sql = '''select distinct(t_bool) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(2) - sql = '''select distinct(loc) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(6) - sql = '''select distinct(t_int) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(6) - sql = '''select distinct(t_bigint) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(6) - sql = '''select distinct(t_smallint) from stable_1;''' - tdSql.query(sql) - 
tdSql.checkRows(6) - sql = '''select distinct(t_tinyint) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(6) - sql = '''select distinct(t_binary) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(6) - sql = '''select distinct(t_nchar) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(6) - sql = '''select distinct(t_float) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(6) - sql = '''select distinct(t_double) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(6) - sql = '''select distinct(t_ts) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(3) - sql = '''select distinct(tbname) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(6) - - tdLog.info("========== operator=2(OP_DataBlocksOptScan) ==========") - sql = '''select last(q_int),first(q_int) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select last(q_bigint),first(q_bigint) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select last(q_smallint),first(q_smallint) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select last(q_tinyint),first(q_tinyint) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select last(q_bool),first(q_bool) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select last(q_binary),first(q_binary) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select last(q_nchar),first(q_nchar) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select last(q_float),first(q_float) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select last(q_double),first(q_double) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select last(q_ts),first(q_ts) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint),last(q_bool),last(q_binary),last(q_nchar), - last(q_float),last(q_double),last(q_ts),first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint), - first(q_bool),first(q_binary),first(q_nchar),first(q_float),first(q_float),first(q_double),first(q_ts) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint),last(q_bool),last(q_binary),last(q_nchar), - last(q_float),last(q_double),last(q_ts),first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint),first(q_bool), - first(q_binary),first(q_nchar),first(q_float),first(q_float),first(q_double),first(q_ts) from regular_table_1;''' - tdSql.query(sql) - tdSql.checkRows(1) - - tdLog.info("========== operator=8(OP_Groupby) ==========") - sql = '''select stddev(q_int) from table_0 group by q_int;''' - tdSql.query(sql) - tdSql.checkRows(self.num) - sql = '''select stddev(q_int),stddev(q_bigint),stddev(q_smallint),stddev(q_tinyint),stddev(q_float),stddev(q_double) from stable_1 group by q_int;''' - tdSql.query(sql) - sql = '''select stddev(q_int),stddev(q_bigint),stddev(q_smallint),stddev(q_tinyint),stddev(q_float),stddev(q_double) from table_1 group by q_bigint;''' - tdSql.query(sql) - tdSql.checkRows(self.num) - sql = '''select stddev(q_int),stddev(q_bigint),stddev(q_smallint),stddev(q_tinyint),stddev(q_float),stddev(q_double) from regular_table_1 group by q_smallint;''' - tdSql.query(sql) - - tdLog.info("========== operator=11(OP_TimeWindow) ==========") - sql = '''select last(q_int) from table_0 interval(1m);''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select last(q_int),last(q_bigint), 
last(q_smallint),last(q_tinyint), - first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint) from table_1 interval(1m);''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint), - first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint) from stable_1 interval(1m);''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint), - first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint) from regular_table_1 interval(1m);''' - tdSql.query(sql) - tdSql.checkRows(1) - - tdLog.info("========== operator=12(OP_SessionWindow) ==========") - sql = '''select count(*) from table_1 session(ts,1s);''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select count(*) from regular_table_1 session(ts,1s);''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select count(*),sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), - sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), - sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), - sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), - sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), - sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) - from table_1 session(ts,1s);''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select count(*),sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), - sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), - sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), - sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), - sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), - sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) - from regular_table_1 session(ts,1s);''' - tdSql.query(sql) - tdSql.checkRows(1) - - tdLog.info("========== operator=13(OP_Fill) ==========") - sql = '''select sum(q_int) from table_0 - where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' - tdSql.query(sql) - tdSql.checkData(0,1,'None') - sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), - sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), - sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), - sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), - sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), - sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) - from stable_1 where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' - tdSql.query(sql) - tdSql.checkData(0,1,'None') - sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), - sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), - sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), - sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), 
last(q_tinyint), - sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), - sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) - from regular_table_1 where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' - tdSql.query(sql) - tdSql.checkData(0,1,'None') - sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), - sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), - sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), - sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), - sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), - sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) - from table_0 where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' - tdSql.query(sql) - tdSql.checkData(0,1,'None') - #TD-5190 - sql = '''select sum(q_tinyint),stddev(q_float) from stable_1 - where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' - tdSql.query(sql) - tdSql.checkData(0,1,'None') - - tdLog.info("========== operator=15(OP_MultiTableTimeInterval) ==========") - sql = '''select avg(q_int) from stable_1 where ts=0;''' - tdSql.query(sql) - tdSql.checkData(0,0,'table_0') - sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), - sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), - sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), - sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), - sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), - sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) - from stable_1 group by loc having sum(q_int)>=0;''' - tdSql.query(sql) - tdSql.checkData(0,0,'table_0') - sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), - sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), - sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), - sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), - sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), - sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) - from stable_1 group by loc having avg(q_int)>=0;''' - tdSql.query(sql) - tdSql.checkData(0,0,'table_0') - sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), - sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), - sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), - sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), - sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), - sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) - from stable_1 group by loc having min(q_int)>=0;''' - tdSql.query(sql) - tdSql.checkData(0,0,'table_0') - sql = '''select loc, 
sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), - sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), - sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), - sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), - sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), - sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) - from stable_1 group by loc having max(q_int)>=0;''' - tdSql.query(sql) - tdSql.checkData(0,0,'table_0') - sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), - sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), - sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), - sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), - sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), - sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) - from stable_1 group by loc having first(q_int)>=0;''' - tdSql.query(sql) - tdSql.checkData(0,0,'table_0') - sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), - sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), - sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), - sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), - sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), - sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) - from stable_1 group by loc having last(q_int)>=0;''' - tdSql.query(sql) - tdSql.checkData(0,0,'table_0') - - tdLog.info("========== operator=21(OP_Join) ==========") - sql = '''select t1.q_int,t2.q_int from - (select ts,q_int from table_1) t1 , (select ts,q_int from table_2) t2 - where t2.ts = t1.ts;''' - tdSql.query(sql) - tdSql.checkRows(self.num) - sql = '''select t1.*,t2.* from - (select * from table_1) t1 , (select * from table_2) t2 - where t2.ts = t1.ts;''' - tdSql.query(sql) - tdSql.checkRows(self.num) - sql = '''select t1.*,t2.* from - (select * from regular_table_1) t1 , (select * from table_0) t2 - where t2.ts = t1.ts;''' - tdSql.query(sql) - tdSql.checkRows(self.num) - sql = '''select t1.*,t2.* from - (select * from stable_1) t1 , (select * from table_2) t2 - where t2.ts = t1.ts;''' - tdSql.query(sql) - tdSql.checkRows(self.num) - sql = '''select t1.*,t2.* from - (select * from regular_table_1) t1 , (select * from stable_1) t2 - where t2.ts = t1.ts;''' - tdSql.query(sql) - tdSql.checkRows(self.num) - sql = '''select t1.*,t2.*,t3.* from - (select * from regular_table_1) t1 , (select * from stable_1) t2, (select * from table_0) t3 - where t2.ts = t1.ts and t3.ts = t1.ts and t2.ts = t3.ts;''' - tdSql.query(sql) - tdSql.checkRows(self.num) - - tdLog.info("========== operator=22(OP_StateWindow) ==========") - sql = '''select avg(q_int),sum(q_smallint) from table_1 state_window(q_int);''' - tdSql.query(sql) - tdSql.checkRows(self.num) - sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), - sum(q_bigint), avg(q_bigint), min(q_bigint), 
max(q_bigint), first(q_bigint), last(q_bigint), - sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), - sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), - sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), - sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) - from table_1 state_window(q_bigint);''' - tdSql.query(sql) - tdSql.checkRows(self.num) - sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), - sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), - sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), - sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), - sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), - sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) - from regular_table_1 state_window(q_smallint);''' - tdSql.query(sql) - tdSql.checkRows(6*self.num) - - endTime = time.time() - print("total time %ds" % (endTime - startTime)) - - - def stop(self): - tdSql.close() - tdLog.success("%s successfully executed" % __file__) - - -tdCases.addWindows(__file__, TDTestCase()) -tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file From 3647f7defcd52e1ce374e6af3c504e50383cc441 Mon Sep 17 00:00:00 2001 From: happyguoxy Date: Wed, 28 Jul 2021 14:05:42 +0800 Subject: [PATCH 69/81] [TD-5074]:test operator cost --- tests/pytest/fulltest.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index eb068b6585..d54c413fba 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -241,7 +241,7 @@ python3 ./test.py -f query/queryStateWindow.py python3 ./test.py -f query/nestedQuery/queryWithOrderLimit.py python3 ./test.py -f query/nestquery_last_row.py python3 ./test.py -f query/queryCnameDisplay.py -python3 ./test.py -f query/operator.py +python3 ./test.py -f query/operator_cost.py python3 test.py -f query/nestedQuery/queryWithSpread.py #stream From e4a67f8046445013895402e47352f55a308a3044 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Wed, 28 Jul 2021 14:22:18 +0800 Subject: [PATCH 70/81] update local CI --- tests/Jenkinsfile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/Jenkinsfile b/tests/Jenkinsfile index c75427b5f4..fb96b669ff 100644 --- a/tests/Jenkinsfile +++ b/tests/Jenkinsfile @@ -48,7 +48,7 @@ pipeline { } } stage('test_b1') { - agent{label 'master'} + agent{label 'slad2'} steps { pre_test() @@ -62,7 +62,7 @@ pipeline { } stage('test_crash_gen') { - agent{label "slad2"} + agent{label "slad3"} steps { pre_test() sh ''' @@ -141,7 +141,7 @@ pipeline { } stage('test_valgrind') { - agent{label "slad3"} + agent{label "slad4"} steps { pre_test() From 1d3df4a9661b4c44660b364796dfdd20222db80c Mon Sep 17 00:00:00 2001 From: Zhiyu Yang <69311263+zyyang-taosdata@users.noreply.github.com> Date: Wed, 28 Jul 2021 14:31:16 +0800 Subject: [PATCH 71/81] [TD-5576]: fix localtime is unsafe in multi threads processing (#7045) * [TD-5576]: fix localtime is unsafe in multi threads processing * remove unecessary debug line --- src/plugins/http/src/httpJson.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/plugins/http/src/httpJson.c 
b/src/plugins/http/src/httpJson.c index 10300e9367..4bd66a17a3 100644 --- a/src/plugins/http/src/httpJson.c +++ b/src/plugins/http/src/httpJson.c @@ -264,8 +264,7 @@ void httpJsonUInt64(JsonBuf* buf, uint64_t num) { void httpJsonTimestamp(JsonBuf* buf, int64_t t, int32_t timePrecision) { char ts[35] = {0}; - struct tm* ptm; - + int32_t fractionLen; char* format = NULL; time_t quot = 0; @@ -301,8 +300,9 @@ void httpJsonTimestamp(JsonBuf* buf, int64_t t, int32_t timePrecision) { assert(false); } - ptm = localtime(&quot); - int32_t length = (int32_t)strftime(ts, 35, "%Y-%m-%d %H:%M:%S", ptm); + struct tm ptm = {0}; + localtime_r(&quot, &ptm); + int32_t length = (int32_t)strftime(ts, 35, "%Y-%m-%d %H:%M:%S", &ptm); length += snprintf(ts + length, fractionLen, format, mod); httpJsonString(buf, ts, length); From 6e743d53e6f8b2e0e9ba32a91ba7b9f384953b09 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 28 Jul 2021 14:58:25 +0800 Subject: [PATCH 72/81] [td-225]fix the bug found by regression test. --- src/client/inc/tsclient.h | 2 + src/client/src/tscAsync.c | 102 ++++++++++++++--------------- src/client/src/tscServer.c | 10 ++- src/client/src/tscUtil.c | 22 +++++-- tests/pytest/insert/line_insert.py | 2 + 5 files changed, 79 insertions(+), 59 deletions(-) diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index 9a627d5cd6..904f5d4503 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -376,6 +376,8 @@ void tscResetSqlCmd(SSqlCmd *pCmd, bool removeMeta); */ void tscFreeSqlResult(SSqlObj *pSql); +void* tscCleanupTableMetaMap(SHashObj* pTableMetaMap); + /** * free sql object, release allocated resource * @param pObj */ diff --git a/src/client/src/tscAsync.c b/src/client/src/tscAsync.c index 910a80d6af..c8c9fe85e3 100644 --- a/src/client/src/tscAsync.c +++ b/src/client/src/tscAsync.c @@ -336,7 +336,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { pRes->code = code; SSqlObj *sub = (SSqlObj*) res; - const char* msg = (sub->cmd.command == TSDB_SQL_STABLEVGROUP)? "vgroup-list":"[multi-]tableMeta"; + const char* msg = (sub->cmd.command == TSDB_SQL_STABLEVGROUP)? 
"vgroup-list":"multi-tableMeta"; if (code != TSDB_CODE_SUCCESS) { tscError("0x%"PRIx64" get %s failed, code:%s", pSql->self, msg, tstrerror(code)); goto _error; @@ -346,62 +346,56 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { if (pSql->pStream == NULL) { SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd); -// assert(TSDB_QUERY_HAS_TYPE(pQueryInfo->type, (TSDB_QUERY_TYPE_STABLE_SUBQUERY | TSDB_QUERY_TYPE_SUBQUERY | TSDB_QUERY_TYPE_TAG_FILTER_QUERY)) == 0); + if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_INSERT)) { + tscDebug("0x%" PRIx64 " continue parse sql after get table-meta", pSql->self); - // super table subquery failure will be ignored -// if (!TSDB_QUERY_HAS_TYPE(pQueryInfo->type, (TSDB_QUERY_TYPE_STABLE_SUBQUERY | TSDB_QUERY_TYPE_SUBQUERY | -// TSDB_QUERY_TYPE_TAG_FILTER_QUERY))) { - if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_INSERT)) { - tscDebug("0x%" PRIx64 " continue parse sql after get table-meta", pSql->self); - - code = tsParseSql(pSql, false); - if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { - taosReleaseRef(tscObjRef, pSql->self); - return; - } else if (code != TSDB_CODE_SUCCESS) { - goto _error; - } - - if (TSDB_QUERY_HAS_TYPE(pCmd->insertParam.insertType, TSDB_QUERY_TYPE_STMT_INSERT)) { // stmt insert - STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); - code = tscGetTableMeta(pSql, pTableMetaInfo); - if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { - taosReleaseRef(tscObjRef, pSql->self); - return; - } else { - assert(code == TSDB_CODE_SUCCESS); - } - - (*pSql->fp)(pSql->param, pSql, code); - } else if (TSDB_QUERY_HAS_TYPE(pCmd->insertParam.insertType, TSDB_QUERY_TYPE_FILE_INSERT)) { // file insert - tscImportDataFromFile(pSql); - } else { // sql string insert - tscHandleMultivnodeInsert(pSql); - } - } else { - if (pSql->retryReason != TSDB_CODE_SUCCESS) { - tscDebug("0x%" PRIx64 " update cached table-meta, re-validate sql statement and send query again", pSql->self); - tscResetSqlCmd(pCmd, false); - pSql->retryReason = TSDB_CODE_SUCCESS; - } else { - tscDebug("0x%" PRIx64 " cached table-meta, continue validate sql statement and send query", pSql->self); - } - - code = tsParseSql(pSql, true); - if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { - taosReleaseRef(tscObjRef, pSql->self); - return; - } else if (code != TSDB_CODE_SUCCESS) { - goto _error; - } - - SQueryInfo *pQueryInfo1 = tscGetQueryInfo(pCmd); - executeQuery(pSql, pQueryInfo1); + code = tsParseSql(pSql, false); + if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { + taosReleaseRef(tscObjRef, pSql->self); + return; + } else if (code != TSDB_CODE_SUCCESS) { + goto _error; } - taosReleaseRef(tscObjRef, pSql->self); - return; -// } + if (TSDB_QUERY_HAS_TYPE(pCmd->insertParam.insertType, TSDB_QUERY_TYPE_STMT_INSERT)) { // stmt insert + STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); + code = tscGetTableMeta(pSql, pTableMetaInfo); + if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { + taosReleaseRef(tscObjRef, pSql->self); + return; + } else { + assert(code == TSDB_CODE_SUCCESS); + } + + (*pSql->fp)(pSql->param, pSql, code); + } else if (TSDB_QUERY_HAS_TYPE(pCmd->insertParam.insertType, TSDB_QUERY_TYPE_FILE_INSERT)) { // file insert + tscImportDataFromFile(pSql); + } else { // sql string insert + tscHandleMultivnodeInsert(pSql); + } + } else { + if (pSql->retryReason != TSDB_CODE_SUCCESS) { + tscDebug("0x%" PRIx64 " update cached table-meta, re-validate sql statement and send query again", pSql->self); + tscResetSqlCmd(pCmd, false); + 
pSql->retryReason = TSDB_CODE_SUCCESS; + } else { + tscDebug("0x%" PRIx64 " cached table-meta, continue validate sql statement and send query", pSql->self); + } + + code = tsParseSql(pSql, true); + if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { + taosReleaseRef(tscObjRef, pSql->self); + return; + } else if (code != TSDB_CODE_SUCCESS) { + goto _error; + } + + SQueryInfo *pQueryInfo1 = tscGetQueryInfo(pCmd); + executeQuery(pSql, pQueryInfo1); + } + + taosReleaseRef(tscObjRef, pSql->self); + return; } else { // stream computing tscDebug("0x%"PRIx64" stream:%p meta is updated, start new query, command:%d", pSql->self, pSql->pStream, pCmd->command); diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index dd17e25f58..d631dfad4d 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -2224,6 +2224,9 @@ int tscProcessMultiTableMetaRsp(SSqlObj *pSql) { if (pMultiMeta->metaClone == 1 || pTableMeta->tableType == TSDB_SUPER_TABLE) { STableMetaVgroupInfo p = {.pTableMeta = pTableMeta,}; size_t keyLen = strnlen(pMetaMsg->tableFname, TSDB_TABLE_FNAME_LEN); + void* t = taosHashGet(pParentCmd->pTableMetaMap, pMetaMsg->tableFname, keyLen); + assert(t == NULL); + taosHashPut(pParentCmd->pTableMetaMap, pMetaMsg->tableFname, keyLen, &p, sizeof(STableMetaVgroupInfo)); } else { freeMeta = true; @@ -2915,7 +2918,9 @@ static void freeElem(void* p) { * @return status code */ int tscRenewTableMeta(SSqlObj *pSql, int32_t tableIndex) { - SQueryInfo *pQueryInfo = tscGetQueryInfo(&pSql->cmd); + SSqlCmd* pCmd = &pSql->cmd; + + SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd); STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, tableIndex); char name[TSDB_TABLE_FNAME_LEN] = {0}; @@ -2934,6 +2939,9 @@ int tscRenewTableMeta(SSqlObj *pSql, int32_t tableIndex) { // remove stored tableMeta info in hash table tscRemoveTableMetaBuf(pTableMetaInfo, pSql->self); + pCmd->pTableMetaMap = tscCleanupTableMetaMap(pCmd->pTableMetaMap); + pCmd->pTableMetaMap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); + SArray* pNameList = taosArrayInit(1, POINTER_BYTES); SArray* vgroupList = taosArrayInit(1, POINTER_BYTES); diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index ae5a7a69a8..299f60a50f 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -1398,6 +1398,22 @@ void tscResetSqlCmd(SSqlCmd* pCmd, bool clearCachedMeta) { } } +void* tscCleanupTableMetaMap(SHashObj* pTableMetaMap) { + if (pTableMetaMap == NULL) { + return NULL; + } + + STableMetaVgroupInfo* p = taosHashIterate(pTableMetaMap, NULL); + while (p) { + taosArrayDestroy(p->vgroupIdList); + tfree(p->pTableMeta); + p = taosHashIterate(pTableMetaMap, p); + } + + taosHashCleanup(pTableMetaMap); + return NULL; +} + void tscFreeSqlResult(SSqlObj* pSql) { SSqlRes* pRes = &pSql->res; @@ -3481,11 +3497,9 @@ SSqlObj* createSimpleSubObj(SSqlObj* pSql, __async_cb_func_t fp, void* param, in SSqlCmd* pCmd = &pNew->cmd; pCmd->command = cmd; + tsem_init(&pNew->rspSem, 0 ,0); + if (tscAddQueryInfo(pCmd) != TSDB_CODE_SUCCESS) { -#ifdef __APPLE__ - // to satisfy later tsem_destroy in taos_free_result - tsem_init(&pNew->rspSem, 0, 0); -#endif // __APPLE__ tscFreeSqlObj(pNew); return NULL; } diff --git a/tests/pytest/insert/line_insert.py b/tests/pytest/insert/line_insert.py index ff3a32b0f7..53eaa55aa5 100644 --- a/tests/pytest/insert/line_insert.py +++ b/tests/pytest/insert/line_insert.py @@ -77,6 +77,8 @@ class TDTestCase: "sth,t1=4i64,t2=5f64,t4=5f64,ID=\"childtable\" 
c1=3i64,c3=L\"passitagin_stf\",c2=false,c5=5f64,c6=7u64 1626006933641ms", "sth,t1=4i64,t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin_stf\",c2=false,c5=5f64,c6=7u64 1626006933654ms" ]) + tdSql.execute('reset query cache') + tdSql.query('select tbname, * from sth') tdSql.checkRows(2) From 8f0be342a8599ac2eb0fdc349d6f0d9d1078f142 Mon Sep 17 00:00:00 2001 From: happyguoxy Date: Wed, 28 Jul 2021 15:06:30 +0800 Subject: [PATCH 73/81] [TD-5074]:test operator cost --- tests/pytest/query/operator_cost.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/tests/pytest/query/operator_cost.py b/tests/pytest/query/operator_cost.py index b94d5fa3b3..774a1e5f42 100644 --- a/tests/pytest/query/operator_cost.py +++ b/tests/pytest/query/operator_cost.py @@ -195,9 +195,6 @@ class TDTestCase: sql = '''select distinct(t_tinyint) from stable_1;''' tdSql.query(sql) tdSql.checkRows(6) - sql = '''select distinct(t_binary) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(6) sql = '''select distinct(t_nchar) from stable_1;''' tdSql.query(sql) tdSql.checkRows(6) From 0b1c661fff52c9ba301edbab66009b50c226edca Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Wed, 28 Jul 2021 16:03:18 +0800 Subject: [PATCH 74/81] [TD-5592]: fix change KV row value coredump --- src/common/inc/tdataformat.h | 3 +- src/common/src/tdataformat.c | 83 +++++++++++++----------------------- 2 files changed, 32 insertions(+), 54 deletions(-) diff --git a/src/common/inc/tdataformat.h b/src/common/inc/tdataformat.h index 829e771c70..f8394dc271 100644 --- a/src/common/inc/tdataformat.h +++ b/src/common/inc/tdataformat.h @@ -55,7 +55,7 @@ extern "C" { typedef struct { int8_t type; // Column type int16_t colId; // column ID - uint16_t bytes; // column bytes + int16_t bytes; // column bytes (restore to int16_t in case of misuse) uint16_t offset; // point offset in SDataRow after the header part. } STColumn; @@ -366,6 +366,7 @@ typedef struct { #define kvRowColIdxAt(r, i) (kvRowColIdx(r) + (i)) #define kvRowFree(r) tfree(r) #define kvRowEnd(r) POINTER_SHIFT(r, kvRowLen(r)) +#define kvRowValLen(r) (kvRowLen(r) - TD_KV_ROW_HEAD_SIZE - sizeof(SColIdx) * kvRowNCols(r)) #define kvRowTKey(r) (*(TKEY *)(kvRowValues(r))) #define kvRowKey(r) tdGetKey(kvRowTKey(r)) #define kvRowDeleted(r) TKEY_IS_DELETED(kvRowTKey(r)) diff --git a/src/common/src/tdataformat.c b/src/common/src/tdataformat.c index 9f7432c90d..5a63588611 100644 --- a/src/common/src/tdataformat.c +++ b/src/common/src/tdataformat.c @@ -634,42 +634,28 @@ int tdSetKVRowDataOfCol(SKVRow *orow, int16_t colId, int8_t type, void *value) { SKVRow nrow = NULL; void * ptr = taosbsearch(&colId, kvRowColIdx(row), kvRowNCols(row), sizeof(SColIdx), comparTagId, TD_GE); - if (ptr == NULL || ((SColIdx *)ptr)->colId > colId) { // need to add a column value to the row + if (ptr == NULL || ((SColIdx *)ptr)->colId > colId) { // need to add a column value to the row int diff = IS_VAR_DATA_TYPE(type) ? 
varDataTLen(value) : TYPE_BYTES[type]; - nrow = malloc(kvRowLen(row) + sizeof(SColIdx) + diff); + int nRowLen = kvRowLen(row) + sizeof(SColIdx) + diff; + int oRowCols = kvRowNCols(row); + + ASSERT(diff > 0); + nrow = malloc(nRowLen); if (nrow == NULL) return -1; - kvRowSetLen(nrow, kvRowLen(row) + (uint16_t)sizeof(SColIdx) + diff); - kvRowSetNCols(nrow, kvRowNCols(row) + 1); + kvRowSetLen(nrow, nRowLen); + kvRowSetNCols(nrow, oRowCols + 1); - if (ptr == NULL) { - memcpy(kvRowColIdx(nrow), kvRowColIdx(row), sizeof(SColIdx) * kvRowNCols(row)); - memcpy(kvRowValues(nrow), kvRowValues(row), POINTER_DISTANCE(kvRowEnd(row), kvRowValues(row))); - int colIdx = kvRowNCols(nrow) - 1; - kvRowColIdxAt(nrow, colIdx)->colId = colId; - kvRowColIdxAt(nrow, colIdx)->offset = (int16_t)(POINTER_DISTANCE(kvRowEnd(row), kvRowValues(row))); - memcpy(kvRowColVal(nrow, kvRowColIdxAt(nrow, colIdx)), value, diff); - } else { - int16_t tlen = (int16_t)(POINTER_DISTANCE(ptr, kvRowColIdx(row))); - if (tlen > 0) { - memcpy(kvRowColIdx(nrow), kvRowColIdx(row), tlen); - memcpy(kvRowValues(nrow), kvRowValues(row), ((SColIdx *)ptr)->offset); - } + memcpy(kvRowColIdx(nrow), kvRowColIdx(row), sizeof(SColIdx) * oRowCols); + memcpy(kvRowValues(nrow), kvRowValues(row), kvRowValLen(row)); - int colIdx = tlen / sizeof(SColIdx); - kvRowColIdxAt(nrow, colIdx)->colId = colId; - kvRowColIdxAt(nrow, colIdx)->offset = ((SColIdx *)ptr)->offset; - memcpy(kvRowColVal(nrow, kvRowColIdxAt(nrow, colIdx)), value, diff); + pColIdx = kvRowColIdxAt(nrow, oRowCols); + pColIdx->colId = colId; + pColIdx->offset = kvRowValLen(row); - for (int i = colIdx; i < kvRowNCols(row); i++) { - kvRowColIdxAt(nrow, i + 1)->colId = kvRowColIdxAt(row, i)->colId; - kvRowColIdxAt(nrow, i + 1)->offset = kvRowColIdxAt(row, i)->offset + diff; - } - memcpy(kvRowColVal(nrow, kvRowColIdxAt(nrow, colIdx + 1)), kvRowColVal(row, kvRowColIdxAt(row, colIdx)), - POINTER_DISTANCE(kvRowEnd(row), kvRowColVal(row, kvRowColIdxAt(row, colIdx))) + memcpy(kvRowColVal(nrow, pColIdx), value, diff); // copy new value - ); - } + tdSortKVRowByColIdx(nrow); *orow = nrow; free(row); @@ -680,9 +666,8 @@ int tdSetKVRowDataOfCol(SKVRow *orow, int16_t colId, int8_t type, void *value) { if (varDataTLen(value) == varDataTLen(pOldVal)) { // just update the column value in place memcpy(pOldVal, value, varDataTLen(value)); - } else { // need to reallocate the memory - uint16_t diff = varDataTLen(value) - varDataTLen(pOldVal); - uint16_t nlen = kvRowLen(row) + diff; + } else { // need to reallocate the memory + int16_t nlen = kvRowLen(row) + (varDataTLen(value) - varDataTLen(pOldVal)); ASSERT(nlen > 0); nrow = malloc(nlen); if (nrow == NULL) return -1; @@ -690,30 +675,22 @@ int tdSetKVRowDataOfCol(SKVRow *orow, int16_t colId, int8_t type, void *value) { kvRowSetLen(nrow, nlen); kvRowSetNCols(nrow, kvRowNCols(row)); - // Copy part ahead - nlen = (int16_t)(POINTER_DISTANCE(ptr, kvRowColIdx(row))); - ASSERT(nlen % sizeof(SColIdx) == 0); - if (nlen > 0) { - ASSERT(((SColIdx *)ptr)->offset > 0); - memcpy(kvRowColIdx(nrow), kvRowColIdx(row), nlen); - memcpy(kvRowValues(nrow), kvRowValues(row), ((SColIdx *)ptr)->offset); + int zsize = sizeof(SColIdx) * kvRowNCols(row) + ((SColIdx *)ptr)->offset; + memcpy(kvRowColIdx(nrow), kvRowColIdx(row), zsize); + memcpy(kvRowColVal(nrow, ((SColIdx *)ptr)), value, varDataTLen(value)); + // Copy left value part + int lsize = kvRowLen(row) - TD_KV_ROW_HEAD_SIZE - zsize - varDataTLen(pOldVal); + if (lsize > 0) { + memcpy(POINTER_SHIFT(nrow, TD_KV_ROW_HEAD_SIZE + zsize + 
varDataTLen(value)), + POINTER_SHIFT(row, TD_KV_ROW_HEAD_SIZE + zsize + varDataTLen(pOldVal)), lsize); } - // Construct current column value - int colIdx = nlen / sizeof(SColIdx); - pColIdx = kvRowColIdxAt(nrow, colIdx); - pColIdx->colId = ((SColIdx *)ptr)->colId; - pColIdx->offset = ((SColIdx *)ptr)->offset; - memcpy(kvRowColVal(nrow, pColIdx), value, varDataTLen(value)); - - // Construct columns after - if (kvRowNCols(nrow) - colIdx - 1 > 0) { - for (int i = colIdx + 1; i < kvRowNCols(nrow); i++) { - kvRowColIdxAt(nrow, i)->colId = kvRowColIdxAt(row, i)->colId; - kvRowColIdxAt(nrow, i)->offset = kvRowColIdxAt(row, i)->offset + diff; + for (int i = 0; i < kvRowNCols(nrow); i++) { + pColIdx = kvRowColIdxAt(nrow, i); + + if (pColIdx->offset > ((SColIdx *)ptr)->offset) { + pColIdx->offset = pColIdx->offset - varDataTLen(pOldVal) + varDataTLen(value); } - memcpy(kvRowColVal(nrow, kvRowColIdxAt(nrow, colIdx + 1)), kvRowColVal(row, kvRowColIdxAt(row, colIdx + 1)), - POINTER_DISTANCE(kvRowEnd(row), kvRowColVal(row, kvRowColIdxAt(row, colIdx + 1)))); } *orow = nrow; From aac0aeddfdf81725e9deb21b79301c3816cbf4bb Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Wed, 28 Jul 2021 16:10:06 +0800 Subject: [PATCH 75/81] add SANITIZER parameter --- tests/Jenkinsfile | 29 ++++++++++++++++++++++++++++- tests/mas/Jenkinsfile | 29 ++++++++++++++++++++++++++++- 2 files changed, 56 insertions(+), 2 deletions(-) diff --git a/tests/Jenkinsfile b/tests/Jenkinsfile index fb96b669ff..ed54515ba8 100644 --- a/tests/Jenkinsfile +++ b/tests/Jenkinsfile @@ -1,5 +1,32 @@ def pre_test(){ + sh ''' + sudo rmtaos||echo 'no taosd installed' + ''' + sh ''' + cd ${WKC} + git reset --hard + git checkout $BRANCH_NAME + git pull + git submodule update + cd ${WK} + git reset --hard + git checkout $BRANCH_NAME + git pull + export TZ=Asia/Harbin + date + rm -rf ${WK}/debug + mkdir debug + cd debug + cmake -DMEMORY_SANITIZER=true .. > /dev/null + make > /dev/null + make install > /dev/null + pip3 install ${WKC}/src/connector/python + ''' + return 1 +} +def pre_test_p(){ + sh ''' sudo rmtaos||echo 'no taosd installed' ''' @@ -39,7 +66,7 @@ pipeline { stage('pytest') { agent{label 'slad1'} steps { - pre_test() + pre_test_p() sh ''' cd ${WKC}/tests find pytest -name '*'sql|xargs rm -rf diff --git a/tests/mas/Jenkinsfile b/tests/mas/Jenkinsfile index ae2286298f..4dcc1d9c18 100644 --- a/tests/mas/Jenkinsfile +++ b/tests/mas/Jenkinsfile @@ -1,5 +1,32 @@ def pre_test(){ + sh ''' + sudo rmtaos||echo 'no taosd installed' + ''' + sh ''' + cd ${WKC} + git reset --hard + git checkout $BRANCH_NAME + git pull + git submodule update + cd ${WK} + git reset --hard + git checkout $BRANCH_NAME + git pull + export TZ=Asia/Harbin + date + rm -rf ${WK}/debug + mkdir debug + cd debug + cmake -DMEMORY_SANITIZER=true .. > /dev/null + make > /dev/null + make install > /dev/null + pip3 install ${WKC}/src/connector/python/ || echo 0 + ''' + return 1 +} +def pre_test_p(){ + sh ''' sudo rmtaos||echo 'no taosd installed' ''' @@ -39,7 +66,7 @@ pipeline { stage('pytest') { agent{label 'slam1'} steps { - pre_test() + pre_test_p() sh ''' cd ${WKC}/tests find pytest -name '*'sql|xargs rm -rf From 2892d2624c953b0a2c2fa6865016a791c611ce49 Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Wed, 28 Jul 2021 17:38:38 +0800 Subject: [PATCH 76/81] [TD-2639] : fix typo. 
--- documentation20/cn/03.architecture/02.replica/docs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation20/cn/03.architecture/02.replica/docs.md b/documentation20/cn/03.architecture/02.replica/docs.md index 183306eed8..27ac7f123c 100644 --- a/documentation20/cn/03.architecture/02.replica/docs.md +++ b/documentation20/cn/03.architecture/02.replica/docs.md @@ -111,7 +111,7 @@ TDengine采取的是Master-Slave模式进行同步,与流行的RAFT一致性 2. 应用将打上版本号的写请求封装一个WAL Head, 写入WAL(Write Ahead Log) 3. 应用调用API syncForwardToPeer,如果vnode B是slave状态,sync模块将包含WAL Head的数据包通过Forward消息发送给vnode B,否则就不转发。 4. vnode B收到Forward消息后,调用回调函数writeToCache, 交给应用处理 -5. vnode B应用在写入成功后,都需要调用syncAckForward通知sync模块已经写入成功。 +5. vnode B应用在写入成功后,都需要调用syncConfirmForward通知sync模块已经写入成功。 6. 如果quorum大于1,vnode B需要等待应用的回复确认,收到确认后,vnode B发送Forward Response消息给node A。 7. 如果quorum大于1,vnode A需要等待vnode B或其他副本对Forward消息的确认。 8. 如果quorum大于1,vnode A收到quorum-1条确认消息后,调用回调函数confirmForward,通知应用写入成功。 From 2e7e28781848510446e33f5e122bcaeb28c90f89 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Wed, 28 Jul 2021 17:53:59 +0800 Subject: [PATCH 77/81] [TD-5591]: taosdemo coredump when query 4096 columns. (#7054) --- src/kit/taosdemo/taosdemo.c | 31 +++++++++++++++---------------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index c4dec6a231..32bad230cf 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -92,7 +92,6 @@ extern char configDir[]; #define MAX_SUPER_TABLE_COUNT 200 #define MAX_QUERY_SQL_COUNT 100 -#define MAX_QUERY_SQL_LENGTH BUFFER_SIZE #define MAX_DATABASE_COUNT 256 #define INPUT_BUF_LEN 256 @@ -383,7 +382,7 @@ typedef struct SpecifiedQueryInfo_S { uint64_t queryTimes; bool subscribeRestart; int subscribeKeepProgress; - char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1]; + char sql[MAX_QUERY_SQL_COUNT][BUFFER_SIZE+1]; char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN]; int resubAfterConsume[MAX_QUERY_SQL_COUNT]; int endAfterConsume[MAX_QUERY_SQL_COUNT]; @@ -406,7 +405,7 @@ typedef struct SuperQueryInfo_S { int64_t childTblCount; char childTblPrefix[TBNAME_PREFIX_LEN]; // 20 characters reserved for seq int sqlCount; - char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1]; + char sql[MAX_QUERY_SQL_COUNT][BUFFER_SIZE+1]; char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN]; int resubAfterConsume; int endAfterConsume; @@ -1253,14 +1252,14 @@ static void fetchResult(TAOS_RES *res, threadInfo* pThreadInfo) { // fetch the records row by row while((row = taos_fetch_row(res))) { - if (totalLen >= 100*1024*1024 - 32000) { + if (totalLen >= (100*1024*1024 - HEAD_BUFF_LEN*2)) { if (strlen(pThreadInfo->filePath) > 0) appendResultBufToFile(databuf, pThreadInfo); totalLen = 0; memset(databuf, 0, 100*1024*1024); } num_rows++; - char temp[16000] = {0}; + char temp[HEAD_BUFF_LEN] = {0}; int len = taos_print_row(temp, row, fields, num_fields); len += sprintf(temp + len, "\n"); //printf("query result:%s\n", temp); @@ -2165,15 +2164,15 @@ static void printfDbInfoForQueryToFile( } static void printfQuerySystemInfo(TAOS * taos) { - char filename[MAX_QUERY_SQL_LENGTH+1] = {0}; - char buffer[MAX_QUERY_SQL_LENGTH+1] = {0}; + char filename[BUFFER_SIZE+1] = {0}; + char buffer[BUFFER_SIZE+1] = {0}; TAOS_RES* res; time_t t; struct tm* lt; time(&t); lt = localtime(&t); - snprintf(filename, MAX_QUERY_SQL_LENGTH, "querySystemInfo-%d-%d-%d %d:%d:%d", + snprintf(filename, BUFFER_SIZE, "querySystemInfo-%d-%d-%d %d:%d:%d", lt->tm_year+1900, lt->tm_mon, 
lt->tm_mday, lt->tm_hour, lt->tm_min, lt->tm_sec); @@ -2205,12 +2204,12 @@ static void printfQuerySystemInfo(TAOS * taos) { printfDbInfoForQueryToFile(filename, dbInfos[i], i); // show db.vgroups - snprintf(buffer, MAX_QUERY_SQL_LENGTH, "show %s.vgroups;", dbInfos[i]->name); + snprintf(buffer, BUFFER_SIZE, "show %s.vgroups;", dbInfos[i]->name); res = taos_query(taos, buffer); xDumpResultToFile(filename, res); // show db.stables - snprintf(buffer, MAX_QUERY_SQL_LENGTH, "show %s.stables;", dbInfos[i]->name); + snprintf(buffer, BUFFER_SIZE, "show %s.stables;", dbInfos[i]->name); res = taos_query(taos, buffer); xDumpResultToFile(filename, res); free(dbInfos[i]); @@ -4549,7 +4548,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { goto PARSE_OVER; } tstrncpy(g_queryInfo.specifiedQueryInfo.sql[j], - sqlStr->valuestring, MAX_QUERY_SQL_LENGTH); + sqlStr->valuestring, BUFFER_SIZE); // default value is -1, which mean infinite loop g_queryInfo.specifiedQueryInfo.endAfterConsume[j] = -1; @@ -4771,7 +4770,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { goto PARSE_OVER; } tstrncpy(g_queryInfo.superQueryInfo.sql[j], sqlStr->valuestring, - MAX_QUERY_SQL_LENGTH); + BUFFER_SIZE); cJSON *result = cJSON_GetObjectItem(sql, "result"); if (result != NULL && result->type == cJSON_String @@ -7425,14 +7424,14 @@ static void replaceChildTblName(char* inSql, char* outSql, int tblIndex) { tstrncpy(outSql, inSql, pos - inSql + 1); //printf("1: %s\n", outSql); - strncat(outSql, subTblName, MAX_QUERY_SQL_LENGTH - 1); + strncat(outSql, subTblName, BUFFER_SIZE - 1); //printf("2: %s\n", outSql); - strncat(outSql, pos+strlen(sourceString), MAX_QUERY_SQL_LENGTH - 1); + strncat(outSql, pos+strlen(sourceString), BUFFER_SIZE - 1); //printf("3: %s\n", outSql); } static void *superTableQuery(void *sarg) { - char sqlstr[MAX_QUERY_SQL_LENGTH]; + char sqlstr[BUFFER_SIZE]; threadInfo *pThreadInfo = (threadInfo *)sarg; setThreadName("superTableQuery"); @@ -7735,7 +7734,7 @@ static TAOS_SUB* subscribeImpl( static void *superSubscribe(void *sarg) { threadInfo *pThreadInfo = (threadInfo *)sarg; - char subSqlstr[MAX_QUERY_SQL_LENGTH]; + char subSqlstr[BUFFER_SIZE]; TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT] = {0}; uint64_t tsubSeq; From a9c51e251a3f3c78a67168e63065a6e12283cb7b Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Wed, 28 Jul 2021 18:28:35 +0800 Subject: [PATCH 78/81] remove systemctl --- tests/Jenkinsfile | 4 ++-- tests/mas/Jenkinsfile | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/Jenkinsfile b/tests/Jenkinsfile index ed54515ba8..b5b068381d 100644 --- a/tests/Jenkinsfile +++ b/tests/Jenkinsfile @@ -119,7 +119,7 @@ pipeline { } sh''' - systemctl start taosd + nohup taosd >/dev/null & sleep 10 ''' catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { @@ -155,7 +155,7 @@ pipeline { ''' } sh ''' - systemctl stop taosd + pkill -9 taosd cd ${WKC}/tests ./test-all.sh b2 date diff --git a/tests/mas/Jenkinsfile b/tests/mas/Jenkinsfile index 4dcc1d9c18..52b29da92e 100644 --- a/tests/mas/Jenkinsfile +++ b/tests/mas/Jenkinsfile @@ -119,7 +119,7 @@ pipeline { } sh''' - systemctl start taosd + nohup taosd >/dev/null & sleep 10 ''' catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { @@ -163,7 +163,7 @@ pipeline { ''' } sh ''' - systemctl stop taosd + pkill -9 taosd cd ${WKC}/tests ./test-all.sh b2 date From 175fd1c93d7b4073a0ffe93e4ea236f1cdb32235 Mon Sep 17 00:00:00 2001 From: happyguoxy Date: Wed, 28 Jul 2021 18:32:34 +0800 Subject: [PATCH 79/81] [TD-5074]:test operator cost --- 
tests/pytest/query/operator_cost.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/pytest/query/operator_cost.py b/tests/pytest/query/operator_cost.py index 774a1e5f42..27de3531eb 100644 --- a/tests/pytest/query/operator_cost.py +++ b/tests/pytest/query/operator_cost.py @@ -207,9 +207,9 @@ class TDTestCase: sql = '''select distinct(t_ts) from stable_1;''' tdSql.query(sql) tdSql.checkRows(3) - sql = '''select distinct(tbname) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(6) + # sql = '''select distinct(tbname) from stable_1;''' + # tdSql.query(sql) + # tdSql.checkRows(6) tdLog.info("========== operator=2(OP_DataBlocksOptScan) ==========") sql = '''select last(q_int),first(q_int) from stable_1;''' From 21bdbb9e507a496cd8e7b68b8f2a67ed35fd6be6 Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Thu, 29 Jul 2021 10:32:20 +0800 Subject: [PATCH 80/81] [TD-3666] : fix description about binary storage length. --- documentation20/cn/12.taos-sql/docs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md index 72f4876dcf..6d39c25565 100644 --- a/documentation20/cn/12.taos-sql/docs.md +++ b/documentation20/cn/12.taos-sql/docs.md @@ -48,7 +48,7 @@ TDengine 缺省的时间戳是毫秒精度,但通过在 CREATE DATABASE 时传 | 3 | BIGINT | 8 | 长整型,范围 [-2^63+1, 2^63-1], -2^63 用于 NULL | | 4 | FLOAT | 4 | 浮点型,有效位数 6-7,范围 [-3.4E38, 3.4E38] | | 5 | DOUBLE | 8 | 双精度浮点型,有效位数 15-16,范围 [-1.7E308, 1.7E308] | -| 6 | BINARY | 自定义 | 记录单字节字符串,建议只用于处理 ASCII 可见字符,中文等多字节字符需使用 nchar。理论上,最长可以有 16374 字节,但由于每行数据最多 16K 字节,实际上限一般小于理论值。binary 仅支持字符串输入,字符串两端需使用单引号引用。使用时须指定大小,如 binary(20) 定义了最长为 20 个单字节字符的字符串,每个字符占 1 byte 的存储空间,此时如果用户字符串超出 20 字节将会报错。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示,即 `\’`。 | +| 6 | BINARY | 自定义 | 记录单字节字符串,建议只用于处理 ASCII 可见字符,中文等多字节字符需使用 nchar。理论上,最长可以有 16374 字节,但由于每行数据最多 16K 字节,实际上限一般小于理论值。binary 仅支持字符串输入,字符串两端需使用单引号引用。使用时须指定大小,如 binary(20) 定义了最长为 20 个单字节字符的字符串,每个字符占 1 byte 的存储空间,总共固定占用 20 bytes 的空间,此时如果用户字符串超出 20 字节将会报错。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示,即 `\’`。 | | 7 | SMALLINT | 2 | 短整型, 范围 [-32767, 32767], -32768 用于 NULL | | 8 | TINYINT | 1 | 单字节整型,范围 [-127, 127], -128 用于 NULL | | 9 | BOOL | 1 | 布尔型,{true, false} | From 61fb774237bb4d6d9695d4f2f0c5cc1c542a52d1 Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Thu, 29 Jul 2021 14:03:59 +0800 Subject: [PATCH 81/81] [TD-5062] Feature/row partial update (#6970) * [TD-5062]: support partial update --- src/client/src/tscUtil.c | 4 +- src/common/inc/tdataformat.h | 180 ++++++++++-- src/common/src/tdataformat.c | 129 ++++++++- src/common/src/ttypes.c | 32 +-- src/inc/taosdef.h | 8 +- src/inc/tsdb.h | 2 +- src/inc/ttype.h | 8 +- src/tsdb/inc/tsdbBuffer.h | 4 +- src/tsdb/inc/tsdbReadImpl.h | 9 +- src/tsdb/inc/tsdbRowMergeBuf.h | 43 +++ src/tsdb/inc/tsdbint.h | 5 +- src/tsdb/src/tsdbBuffer.c | 4 +- src/tsdb/src/tsdbCommit.c | 4 +- src/tsdb/src/tsdbCommitQueue.c | 2 +- src/tsdb/src/tsdbCompact.c | 2 +- src/tsdb/src/tsdbMain.c | 7 +- src/tsdb/src/tsdbMemTable.c | 366 +++++++++++------------- src/tsdb/src/tsdbRead.c | 507 ++++++++++++++++++++------------- src/tsdb/src/tsdbReadImpl.c | 8 +- src/tsdb/src/tsdbRowMergeBuf.c | 30 ++ src/util/inc/tfunctional.h | 56 ++++ src/util/inc/tskiplist.h | 18 +- src/util/src/tfunctional.c | 48 ++++ src/util/src/tskiplist.c | 49 +++- 24 files changed, 1045 insertions(+), 480 deletions(-) create mode 100644 src/tsdb/inc/tsdbRowMergeBuf.h create mode 100644 src/tsdb/src/tsdbRowMergeBuf.c create mode 100644 src/util/inc/tfunctional.h create mode 100644 
src/util/src/tfunctional.c diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 9dc541b1a6..1c610b67a5 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -1864,8 +1864,8 @@ static SMemRow tdGenMemRowFromBuilder(SMemRowBuilder* pBuilder) { while (i < nColsBound) { int16_t colId = payloadColId(p); uint8_t colType = payloadColType(p); - tdAppendKvColVal(kvRow, POINTER_SHIFT(pVals,payloadColOffset(p)), colId, colType, toffset); - toffset += sizeof(SColIdx); + tdAppendKvColVal(kvRow, POINTER_SHIFT(pVals,payloadColOffset(p)), colId, colType, &toffset); + //toffset += sizeof(SColIdx); p = payloadNextCol(p); ++i; } diff --git a/src/common/inc/tdataformat.h b/src/common/inc/tdataformat.h index f8394dc271..47bd8a72b2 100644 --- a/src/common/inc/tdataformat.h +++ b/src/common/inc/tdataformat.h @@ -232,6 +232,83 @@ static FORCE_INLINE void *tdGetRowDataOfCol(SDataRow row, int8_t type, int32_t o } } +static FORCE_INLINE void *tdGetPtrToCol(SDataRow row, STSchema *pSchema, int idx) { + return POINTER_SHIFT(row, TD_DATA_ROW_HEAD_SIZE + pSchema->columns[idx].offset); +} + +static FORCE_INLINE void *tdGetColOfRowBySchema(SDataRow row, STSchema *pSchema, int idx) { + int16_t offset = TD_DATA_ROW_HEAD_SIZE + pSchema->columns[idx].offset; + int8_t type = pSchema->columns[idx].type; + + return tdGetRowDataOfCol(row, type, offset); +} + +static FORCE_INLINE bool tdIsColOfRowNullBySchema(SDataRow row, STSchema *pSchema, int idx) { + int16_t offset = TD_DATA_ROW_HEAD_SIZE + pSchema->columns[idx].offset; + int8_t type = pSchema->columns[idx].type; + + return isNull(tdGetRowDataOfCol(row, type, offset), type); +} + +static FORCE_INLINE void tdSetColOfRowNullBySchema(SDataRow row, STSchema *pSchema, int idx) { + int16_t offset = TD_DATA_ROW_HEAD_SIZE + pSchema->columns[idx].offset; + int8_t type = pSchema->columns[idx].type; + int16_t bytes = pSchema->columns[idx].bytes; + + setNull(tdGetRowDataOfCol(row, type, offset), type, bytes); +} + +static FORCE_INLINE void tdCopyColOfRowBySchema(SDataRow dst, STSchema *pDstSchema, int dstIdx, SDataRow src, STSchema *pSrcSchema, int srcIdx) { + int8_t type = pDstSchema->columns[dstIdx].type; + ASSERT(type == pSrcSchema->columns[srcIdx].type); + void *pData = tdGetPtrToCol(dst, pDstSchema, dstIdx); + void *value = tdGetPtrToCol(src, pSrcSchema, srcIdx); + + switch (type) { + case TSDB_DATA_TYPE_BINARY: + case TSDB_DATA_TYPE_NCHAR: + *(VarDataOffsetT *)pData = *(VarDataOffsetT *)value; + pData = POINTER_SHIFT(dst, *(VarDataOffsetT *)pData); + value = POINTER_SHIFT(src, *(VarDataOffsetT *)value); + memcpy(pData, value, varDataTLen(value)); + break; + case TSDB_DATA_TYPE_NULL: + case TSDB_DATA_TYPE_BOOL: + case TSDB_DATA_TYPE_TINYINT: + case TSDB_DATA_TYPE_UTINYINT: + *(uint8_t *)pData = *(uint8_t *)value; + break; + case TSDB_DATA_TYPE_SMALLINT: + case TSDB_DATA_TYPE_USMALLINT: + *(uint16_t *)pData = *(uint16_t *)value; + break; + case TSDB_DATA_TYPE_INT: + case TSDB_DATA_TYPE_UINT: + *(uint32_t *)pData = *(uint32_t *)value; + break; + case TSDB_DATA_TYPE_BIGINT: + case TSDB_DATA_TYPE_UBIGINT: + *(uint64_t *)pData = *(uint64_t *)value; + break; + case TSDB_DATA_TYPE_FLOAT: + SET_FLOAT_PTR(pData, value); + break; + case TSDB_DATA_TYPE_DOUBLE: + SET_DOUBLE_PTR(pData, value); + break; + case TSDB_DATA_TYPE_TIMESTAMP: + if (pSrcSchema->columns[srcIdx].colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) { + *(TSKEY *)pData = tdGetKey(*(TKEY *)value); + } else { + *(TSKEY *)pData = *(TSKEY *)value; + } + break; + default: + memcpy(pData, value, 
pSrcSchema->columns[srcIdx].bytes); + } +} + + // ----------------- Data column structure typedef struct SDataCol { int8_t type; // column type @@ -335,7 +412,7 @@ void tdResetDataCols(SDataCols *pCols); int tdInitDataCols(SDataCols *pCols, STSchema *pSchema); SDataCols *tdDupDataCols(SDataCols *pCols, bool keepData); SDataCols *tdFreeDataCols(SDataCols *pCols); -int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int *pOffset); +int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int *pOffset, bool forceSetNull); // ----------------- K-V data row structure /* |<-------------------------------------- len -------------------------------------------->| @@ -398,9 +475,9 @@ static FORCE_INLINE void *tdGetKVRowIdxOfCol(SKVRow row, int16_t colId) { } // offset here not include kvRow header length -static FORCE_INLINE int tdAppendKvColVal(SKVRow row, const void *value, int16_t colId, int8_t type, int32_t offset) { +static FORCE_INLINE int tdAppendKvColVal(SKVRow row, const void *value, int16_t colId, int8_t type, int32_t *offset) { ASSERT(value != NULL); - int32_t toffset = offset + TD_KV_ROW_HEAD_SIZE; + int32_t toffset = *offset + TD_KV_ROW_HEAD_SIZE; SColIdx *pColIdx = (SColIdx *)POINTER_SHIFT(row, toffset); char * ptr = (char *)POINTER_SHIFT(row, kvRowLen(row)); @@ -411,7 +488,7 @@ static FORCE_INLINE int tdAppendKvColVal(SKVRow row, const void *value, int16_t memcpy(ptr, value, varDataTLen(value)); kvRowLen(row) += varDataTLen(value); } else { - if (offset == 0) { + if (*offset == 0) { ASSERT(type == TSDB_DATA_TYPE_TIMESTAMP); TKEY tvalue = tdGetTKEY(*(TSKEY *)value); memcpy(ptr, (void *)(&tvalue), TYPE_BYTES[type]); @@ -420,9 +497,27 @@ static FORCE_INLINE int tdAppendKvColVal(SKVRow row, const void *value, int16_t } kvRowLen(row) += TYPE_BYTES[type]; } + *offset += sizeof(SColIdx); return 0; } +// NOTE: offset here including the header size +static FORCE_INLINE void *tdGetKvRowDataOfCol(void *row, int32_t offset) { return POINTER_SHIFT(row, offset); } + +static FORCE_INLINE void *tdGetKVRowValOfColEx(SKVRow row, int16_t colId, int32_t *nIdx) { + while (*nIdx < kvRowNCols(row)) { + SColIdx *pColIdx = kvRowColIdxAt(row, *nIdx); + if (pColIdx->colId == colId) { + ++(*nIdx); + return tdGetKvRowDataOfCol(row, pColIdx->offset); + } else if (pColIdx->colId > colId) { + return NULL; + } else { + ++(*nIdx); + } + } + return NULL; +} // ----------------- K-V data row builder typedef struct { @@ -495,7 +590,7 @@ typedef void *SMemRow; #define TD_MEM_ROW_KV_VER_SIZE sizeof(int16_t) #define TD_MEM_ROW_KV_TYPE_VER_SIZE (TD_MEM_ROW_TYPE_SIZE + TD_MEM_ROW_KV_VER_SIZE) #define TD_MEM_ROW_DATA_HEAD_SIZE (TD_MEM_ROW_TYPE_SIZE + TD_DATA_ROW_HEAD_SIZE) -// #define TD_MEM_ROW_KV_HEAD_SIZE (TD_MEM_ROW_TYPE_SIZE + TD_MEM_ROW_KV_VER_SIZE + TD_KV_ROW_HEAD_SIZE) +#define TD_MEM_ROW_KV_HEAD_SIZE (TD_MEM_ROW_TYPE_SIZE + TD_MEM_ROW_KV_VER_SIZE + TD_KV_ROW_HEAD_SIZE) #define SMEM_ROW_DATA 0U // SDataRow #define SMEM_ROW_KV 1U // SKVRow @@ -538,27 +633,80 @@ typedef void *SMemRow; #define memRowSetType(r, t) (memRowType(r) = (t)) #define memRowSetLen(r, l) (isDataRow(r) ? memRowDataLen(r) = (l) : memRowKvLen(r) = (l)) -#define memRowSetVersion(r, v) (isDataRow(r) ? dataRowSetVersion(memRowDataBody(r), v) : memRowKvSetVersion(r, v)) +#define memRowSetVersion(r, v) (isDataRow(r) ? 
dataRowSetVersion(memRowDataBody(r), v) : memRowSetKvVersion(r, v)) #define memRowCpy(dst, r) memcpy((dst), (r), memRowTLen(r)) #define memRowMaxBytesFromSchema(s) (schemaTLen(s) + TD_MEM_ROW_DATA_HEAD_SIZE) #define memRowDeleted(r) TKEY_IS_DELETED(memRowTKey(r)) SMemRow tdMemRowDup(SMemRow row); -void tdAppendMemRowToDataCol(SMemRow row, STSchema *pSchema, SDataCols *pCols); +void tdAppendMemRowToDataCol(SMemRow row, STSchema *pSchema, SDataCols *pCols, bool forceSetNull); + // NOTE: offset here including the header size -static FORCE_INLINE void *tdGetKvRowDataOfCol(void *row, int32_t offset) { return POINTER_SHIFT(row, offset); } -// NOTE: offset here including the header size -static FORCE_INLINE void *tdGetMemRowDataOfCol(void *row, int8_t type, int32_t offset) { +static FORCE_INLINE void *tdGetMemRowDataOfCol(void *row, int16_t colId, int8_t colType, uint16_t offset) { if (isDataRow(row)) { - return tdGetRowDataOfCol(row, type, offset); - } else if (isKvRow(row)) { - return tdGetKvRowDataOfCol(row, offset); + return tdGetRowDataOfCol(memRowDataBody(row), colType, offset); } else { - ASSERT(0); + return tdGetKVRowValOfCol(memRowKvBody(row), colId); } - return NULL; } +/** + * NOTE: + * 1. Applicable to scan columns one by one + * 2. offset here including the header size + */ +static FORCE_INLINE void *tdGetMemRowDataOfColEx(void *row, int16_t colId, int8_t colType, int32_t offset, + int32_t *kvNIdx) { + if (isDataRow(row)) { + return tdGetRowDataOfCol(memRowDataBody(row), colType, offset); + } else { + return tdGetKVRowValOfColEx(memRowKvBody(row), colId, kvNIdx); + } +} + +static FORCE_INLINE int tdAppendMemColVal(SMemRow row, const void *value, int16_t colId, int8_t type, int32_t offset, + int32_t *kvOffset) { + if (isDataRow(row)) { + tdAppendColVal(memRowDataBody(row), value, type, offset); + } else { + tdAppendKvColVal(memRowKvBody(row), value, colId, type, kvOffset); + } + return 0; +} + +// make sure schema->flen appended for SDataRow +static FORCE_INLINE int32_t tdGetColAppendLen(uint8_t rowType, const void *value, int8_t colType) { + int32_t len = 0; + if (IS_VAR_DATA_TYPE(colType)) { + len += varDataTLen(value); + if (rowType == SMEM_ROW_KV) { + len += sizeof(SColIdx); + } + } else { + if (rowType == SMEM_ROW_KV) { + len += TYPE_BYTES[colType]; + len += sizeof(SColIdx); + } + } + return len; +} + + +typedef struct { + int16_t colId; + uint8_t colType; + char* colVal; +} SColInfo; + +static FORCE_INLINE void setSColInfo(SColInfo* colInfo, int16_t colId, uint8_t colType, char* colVal) { + colInfo->colId = colId; + colInfo->colType = colType; + colInfo->colVal = colVal; +} + +SMemRow mergeTwoMemRows(void *buffer, SMemRow row1, SMemRow row2, STSchema *pSchema1, STSchema *pSchema2); + + // ----------------- Raw payload structure for row: /* |<------------ Head ------------->|<----------- body of column data tuple ------------------->| * | |<----------------- flen ------------->|<--- value part --->| @@ -608,4 +756,4 @@ static FORCE_INLINE char *payloadNextCol(char *pCol) { return (char *)POINTER_SH } #endif -#endif // _TD_DATA_FORMAT_H_ \ No newline at end of file +#endif // _TD_DATA_FORMAT_H_ diff --git a/src/common/src/tdataformat.c b/src/common/src/tdataformat.c index 5a63588611..8ef3d083c7 100644 --- a/src/common/src/tdataformat.c +++ b/src/common/src/tdataformat.c @@ -17,9 +17,10 @@ #include "talgo.h" #include "tcoding.h" #include "wchar.h" +#include "tarray.h" static void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, int limit1, SDataCols *src2, int 
*iter2, - int limit2, int tRows); + int limit2, int tRows, bool forceSetNull); /** * Duplicate the schema and return a new object @@ -418,7 +419,8 @@ void tdResetDataCols(SDataCols *pCols) { } } } -static void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols *pCols) { + +static void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols *pCols, bool forceSetNull) { ASSERT(pCols->numOfRows == 0 || dataColsKeyLast(pCols) < dataRowKey(row)); int rcol = 0; @@ -452,8 +454,10 @@ static void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols } else if (pRowCol->colId < pDataCol->colId) { rcol++; } else { - // dataColSetNullAt(pDataCol, pCols->numOfRows); - dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints); + if(forceSetNull) { + //dataColSetNullAt(pDataCol, pCols->numOfRows); + dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints); + } dcol++; } } @@ -461,7 +465,7 @@ static void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols pCols->numOfRows++; } -static void tdAppendKvRowToDataCol(SKVRow row, STSchema *pSchema, SDataCols *pCols) { +static void tdAppendKvRowToDataCol(SKVRow row, STSchema *pSchema, SDataCols *pCols, bool forceSetNull) { ASSERT(pCols->numOfRows == 0 || dataColsKeyLast(pCols) < kvRowKey(row)); int rcol = 0; @@ -498,8 +502,10 @@ static void tdAppendKvRowToDataCol(SKVRow row, STSchema *pSchema, SDataCols *pCo } else if (colIdx->colId < pDataCol->colId) { ++rcol; } else { - // dataColSetNullAt(pDataCol, pCols->numOfRows); - dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints); + if (forceSetNull) { + // dataColSetNullAt(pDataCol, pCols->numOfRows); + dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints); + } ++dcol; } } @@ -507,17 +513,17 @@ static void tdAppendKvRowToDataCol(SKVRow row, STSchema *pSchema, SDataCols *pCo pCols->numOfRows++; } -void tdAppendMemRowToDataCol(SMemRow row, STSchema *pSchema, SDataCols *pCols) { +void tdAppendMemRowToDataCol(SMemRow row, STSchema *pSchema, SDataCols *pCols, bool forceSetNull) { if (isDataRow(row)) { - tdAppendDataRowToDataCol(memRowDataBody(row), pSchema, pCols); + tdAppendDataRowToDataCol(memRowDataBody(row), pSchema, pCols, forceSetNull); } else if (isKvRow(row)) { - tdAppendKvRowToDataCol(memRowKvBody(row), pSchema, pCols); + tdAppendKvRowToDataCol(memRowKvBody(row), pSchema, pCols, forceSetNull); } else { ASSERT(0); } } -int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int *pOffset) { +int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int *pOffset, bool forceSetNull) { ASSERT(rowsToMerge > 0 && rowsToMerge <= source->numOfRows); ASSERT(target->numOfCols == source->numOfCols); int offset = 0; @@ -546,7 +552,7 @@ int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int * int iter1 = 0; tdMergeTwoDataCols(target, pTarget, &iter1, pTarget->numOfRows, source, pOffset, source->numOfRows, - pTarget->numOfRows + rowsToMerge); + pTarget->numOfRows + rowsToMerge, forceSetNull); } tdFreeDataCols(pTarget); @@ -559,7 +565,7 @@ _err: // src2 data has more priority than src1 static void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, int limit1, SDataCols *src2, int *iter2, - int limit2, int tRows) { + int limit2, int tRows, bool forceSetNull) { tdResetDataCols(target); ASSERT(limit1 <= src1->numOfRows && limit2 <= src2->numOfRows); @@ 
-588,7 +594,7 @@ static void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, i
     if ((key1 > key2) || (key1 == key2 && !TKEY_IS_DELETED(tkey2))) {
       for (int i = 0; i < src2->numOfCols; i++) {
         ASSERT(target->cols[i].type == src2->cols[i].type);
-        if (src2->cols[i].len > 0) {
+        if (src2->cols[i].len > 0 && (forceSetNull || (!forceSetNull && !isNull(src2->cols[i].pData, src2->cols[i].type)))) {
           dataColAppendVal(&(target->cols[i]), tdGetColDataOfRow(src2->cols + i, *iter2), target->numOfRows,
                            target->maxPoints);
         }
@@ -761,4 +767,97 @@ SKVRow tdGetKVRowFromBuilder(SKVRowBuilder *pBuilder) {
   memcpy(kvRowValues(row), pBuilder->buf, pBuilder->size);
 
   return row;
-}
\ No newline at end of file
+}
+
+SMemRow mergeTwoMemRows(void *buffer, SMemRow row1, SMemRow row2, STSchema *pSchema1, STSchema *pSchema2) {
+#if 0
+  ASSERT(memRowKey(row1) == memRowKey(row2));
+  ASSERT(schemaVersion(pSchema1) == memRowVersion(row1));
+  ASSERT(schemaVersion(pSchema2) == memRowVersion(row2));
+  ASSERT(schemaVersion(pSchema1) >= schemaVersion(pSchema2));
+#endif
+
+  SArray *stashRow = taosArrayInit(pSchema1->numOfCols, sizeof(SColInfo));
+  if (stashRow == NULL) {
+    return NULL;
+  }
+
+  SMemRow  pRow = buffer;
+  SDataRow dataRow = memRowDataBody(pRow);
+  memRowSetType(pRow, SMEM_ROW_DATA);
+  dataRowSetVersion(dataRow, schemaVersion(pSchema1));  // use latest schema version
+  dataRowSetLen(dataRow, (TDRowLenT)(TD_DATA_ROW_HEAD_SIZE + pSchema1->flen));
+
+  TDRowTLenT dataLen = 0, kvLen = TD_MEM_ROW_KV_HEAD_SIZE;
+
+  int32_t  i = 0;  // row1
+  int32_t  j = 0;  // row2
+  int32_t  nCols1 = schemaNCols(pSchema1);
+  int32_t  nCols2 = schemaNCols(pSchema2);
+  SColInfo colInfo = {0};
+  int32_t  kvIdx1 = 0, kvIdx2 = 0;
+
+  while (i < nCols1) {
+    STColumn *pCol = schemaColAt(pSchema1, i);
+    void *val1 = tdGetMemRowDataOfColEx(row1, pCol->colId, pCol->type, TD_DATA_ROW_HEAD_SIZE + pCol->offset, &kvIdx1);
+    // if val1 != NULL, use val1;
+    if (val1 != NULL && !isNull(val1, pCol->type)) {
+      tdAppendColVal(dataRow, val1, pCol->type, pCol->offset);
+      kvLen += tdGetColAppendLen(SMEM_ROW_KV, val1, pCol->type);
+      setSColInfo(&colInfo, pCol->colId, pCol->type, val1);
+      taosArrayPush(stashRow, &colInfo);
+      ++i;  // next col
+      continue;
+    }
+
+    void *val2 = NULL;
+    while (j < nCols2) {
+      STColumn *tCol = schemaColAt(pSchema2, j);
+      if (tCol->colId < pCol->colId) {
+        ++j;
+        continue;
+      }
+      if (tCol->colId == pCol->colId) {
+        val2 = tdGetMemRowDataOfColEx(row2, tCol->colId, tCol->type, TD_DATA_ROW_HEAD_SIZE + tCol->offset, &kvIdx2);
+      } else if (tCol->colId > pCol->colId) {
+        // set NULL
+      }
+      break;
+    }  // end of while(j<nCols2)
+    if (val2 == NULL) {
+      val2 = getNullValue(pCol->type);
+    }
+    tdAppendColVal(dataRow, val2, pCol->type, pCol->offset);
+    if (!isNull(val2, pCol->type)) {
+      kvLen += tdGetColAppendLen(SMEM_ROW_KV, val2, pCol->type);
+      setSColInfo(&colInfo, pCol->colId, pCol->type, val2);
+      taosArrayPush(stashRow, &colInfo);
+    }
+
+    ++i;  // next col
+  }
+
+  dataLen = memRowTLen(pRow);
+
+  if (kvLen < dataLen) {
+    // scan stashRow and generate SKVRow
+    memset(buffer, 0, sizeof(dataLen));
+    SMemRow tRow = buffer;
+    memRowSetType(tRow, SMEM_ROW_KV);
+    SKVRow kvRow = (SKVRow)memRowKvBody(tRow);
+    int16_t nKvNCols = (int16_t) taosArrayGetSize(stashRow);
+    kvRowSetLen(kvRow, (TDRowLenT)(TD_KV_ROW_HEAD_SIZE + sizeof(SColIdx) * nKvNCols));
+    kvRowSetNCols(kvRow, nKvNCols);
+    memRowSetKvVersion(tRow, pSchema1->version);
+
+    int32_t toffset = 0;
+    int16_t k;
+    for (k = 0; k < nKvNCols; ++k) {
+      SColInfo *pColInfo = taosArrayGet(stashRow, k);
+      tdAppendKvColVal(kvRow, pColInfo->colVal,
pColInfo->colId, pColInfo->colType, &toffset); + } + ASSERT(kvLen == memRowTLen(tRow)); + } + taosArrayDestroy(stashRow); + return buffer; +} diff --git a/src/common/src/ttypes.c b/src/common/src/ttypes.c index 13bcfe6480..eeffe49adc 100644 --- a/src/common/src/ttypes.c +++ b/src/common/src/ttypes.c @@ -409,7 +409,7 @@ bool isValidDataType(int32_t type) { return type >= TSDB_DATA_TYPE_NULL && type <= TSDB_DATA_TYPE_UBIGINT; } -void setVardataNull(char* val, int32_t type) { +void setVardataNull(void* val, int32_t type) { if (type == TSDB_DATA_TYPE_BINARY) { varDataSetLen(val, sizeof(int8_t)); *(uint8_t*) varDataVal(val) = TSDB_DATA_BINARY_NULL; @@ -421,75 +421,75 @@ void setVardataNull(char* val, int32_t type) { } } -void setNull(char *val, int32_t type, int32_t bytes) { setNullN(val, type, bytes, 1); } +void setNull(void *val, int32_t type, int32_t bytes) { setNullN(val, type, bytes, 1); } -void setNullN(char *val, int32_t type, int32_t bytes, int32_t numOfElems) { +void setNullN(void *val, int32_t type, int32_t bytes, int32_t numOfElems) { switch (type) { case TSDB_DATA_TYPE_BOOL: for (int32_t i = 0; i < numOfElems; ++i) { - *(uint8_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_BOOL_NULL; + *(uint8_t *)(POINTER_SHIFT(val, i * tDataTypes[type].bytes)) = TSDB_DATA_BOOL_NULL; } break; case TSDB_DATA_TYPE_TINYINT: for (int32_t i = 0; i < numOfElems; ++i) { - *(uint8_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_TINYINT_NULL; + *(uint8_t *)(POINTER_SHIFT(val, i * tDataTypes[type].bytes)) = TSDB_DATA_TINYINT_NULL; } break; case TSDB_DATA_TYPE_SMALLINT: for (int32_t i = 0; i < numOfElems; ++i) { - *(uint16_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_SMALLINT_NULL; + *(uint16_t *)(POINTER_SHIFT(val, i * tDataTypes[type].bytes)) = TSDB_DATA_SMALLINT_NULL; } break; case TSDB_DATA_TYPE_INT: for (int32_t i = 0; i < numOfElems; ++i) { - *(uint32_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_INT_NULL; + *(uint32_t *)(POINTER_SHIFT(val, i * tDataTypes[type].bytes)) = TSDB_DATA_INT_NULL; } break; case TSDB_DATA_TYPE_BIGINT: case TSDB_DATA_TYPE_TIMESTAMP: for (int32_t i = 0; i < numOfElems; ++i) { - *(uint64_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_BIGINT_NULL; + *(uint64_t *)(POINTER_SHIFT(val, i * tDataTypes[type].bytes)) = TSDB_DATA_BIGINT_NULL; } break; case TSDB_DATA_TYPE_UTINYINT: for (int32_t i = 0; i < numOfElems; ++i) { - *(uint8_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_UTINYINT_NULL; + *(uint8_t *)(POINTER_SHIFT(val, i * tDataTypes[type].bytes)) = TSDB_DATA_UTINYINT_NULL; } break; case TSDB_DATA_TYPE_USMALLINT: for (int32_t i = 0; i < numOfElems; ++i) { - *(uint16_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_USMALLINT_NULL; + *(uint16_t *)(POINTER_SHIFT(val, i * tDataTypes[type].bytes)) = TSDB_DATA_USMALLINT_NULL; } break; case TSDB_DATA_TYPE_UINT: for (int32_t i = 0; i < numOfElems; ++i) { - *(uint32_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_UINT_NULL; + *(uint32_t *)(POINTER_SHIFT(val, i * tDataTypes[type].bytes)) = TSDB_DATA_UINT_NULL; } break; case TSDB_DATA_TYPE_UBIGINT: for (int32_t i = 0; i < numOfElems; ++i) { - *(uint64_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_UBIGINT_NULL; + *(uint64_t *)(POINTER_SHIFT(val, i * tDataTypes[type].bytes)) = TSDB_DATA_UBIGINT_NULL; } break; case TSDB_DATA_TYPE_FLOAT: for (int32_t i = 0; i < numOfElems; ++i) { - *(uint32_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_FLOAT_NULL; + *(uint32_t *)(POINTER_SHIFT(val, i * tDataTypes[type].bytes)) = TSDB_DATA_FLOAT_NULL; } break; case 
TSDB_DATA_TYPE_DOUBLE: for (int32_t i = 0; i < numOfElems; ++i) { - *(uint64_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_DOUBLE_NULL; + *(uint64_t *)(POINTER_SHIFT(val, i * tDataTypes[type].bytes)) = TSDB_DATA_DOUBLE_NULL; } break; case TSDB_DATA_TYPE_NCHAR: case TSDB_DATA_TYPE_BINARY: for (int32_t i = 0; i < numOfElems; ++i) { - setVardataNull(val + i * bytes, type); + setVardataNull(POINTER_SHIFT(val, i * bytes), type); } break; default: { for (int32_t i = 0; i < numOfElems; ++i) { - *(uint32_t *)(val + i * tDataTypes[TSDB_DATA_TYPE_INT].bytes) = TSDB_DATA_INT_NULL; + *(uint32_t *)(POINTER_SHIFT(val, i * tDataTypes[TSDB_DATA_TYPE_INT].bytes)) = TSDB_DATA_INT_NULL; } break; } diff --git a/src/inc/taosdef.h b/src/inc/taosdef.h index ca8ad3cc09..4a5497795f 100644 --- a/src/inc/taosdef.h +++ b/src/inc/taosdef.h @@ -307,7 +307,7 @@ do { \ #define TSDB_DEFAULT_WAL_LEVEL 1 #define TSDB_MIN_DB_UPDATE 0 -#define TSDB_MAX_DB_UPDATE 1 +#define TSDB_MAX_DB_UPDATE 2 #define TSDB_DEFAULT_DB_UPDATE_OPTION 0 #define TSDB_MIN_DB_CACHE_LAST_ROW 0 @@ -435,6 +435,12 @@ typedef enum { TSDB_CHECK_ITEM_MAX } ECheckItemType; +typedef enum { + TD_ROW_DISCARD_UPDATE = 0, + TD_ROW_OVERWRITE_UPDATE = 1, + TD_ROW_PARTIAL_UPDATE = 2 +} TDUpdateConfig; + extern char *qtypeStr[]; #ifdef __cplusplus diff --git a/src/inc/tsdb.h b/src/inc/tsdb.h index 79d9029dbc..7fd6d8c10c 100644 --- a/src/inc/tsdb.h +++ b/src/inc/tsdb.h @@ -111,7 +111,7 @@ typedef struct { uint64_t superUid; STSchema * schema; STSchema * tagSchema; - SDataRow tagValues; + SKVRow tagValues; char * sql; } STableCfg; diff --git a/src/inc/ttype.h b/src/inc/ttype.h index fcd2651c70..44e666106a 100644 --- a/src/inc/ttype.h +++ b/src/inc/ttype.h @@ -141,7 +141,7 @@ typedef struct { #define IS_VALID_FLOAT(_t) ((_t) >= -FLT_MAX && (_t) <= FLT_MAX) #define IS_VALID_DOUBLE(_t) ((_t) >= -DBL_MAX && (_t) <= DBL_MAX) -static FORCE_INLINE bool isNull(const char *val, int32_t type) { +static FORCE_INLINE bool isNull(const void *val, int32_t type) { switch (type) { case TSDB_DATA_TYPE_BOOL: return *(uint8_t *)val == TSDB_DATA_BOOL_NULL; @@ -193,9 +193,9 @@ extern tDataTypeDescriptor tDataTypes[15]; bool isValidDataType(int32_t type); -void setVardataNull(char* val, int32_t type); -void setNull(char *val, int32_t type, int32_t bytes); -void setNullN(char *val, int32_t type, int32_t bytes, int32_t numOfElems); +void setVardataNull(void* val, int32_t type); +void setNull(void *val, int32_t type, int32_t bytes); +void setNullN(void *val, int32_t type, int32_t bytes, int32_t numOfElems); const void *getNullValue(int32_t type); void assignVal(char *val, const char *src, int32_t len, int32_t type); diff --git a/src/tsdb/inc/tsdbBuffer.h b/src/tsdb/inc/tsdbBuffer.h index 4e18ac711a..ec6b057aef 100644 --- a/src/tsdb/inc/tsdbBuffer.h +++ b/src/tsdb/inc/tsdbBuffer.h @@ -40,7 +40,7 @@ void tsdbFreeBufPool(STsdbBufPool* pBufPool); int tsdbOpenBufPool(STsdbRepo* pRepo); void tsdbCloseBufPool(STsdbRepo* pRepo); SListNode* tsdbAllocBufBlockFromPool(STsdbRepo* pRepo); -int tsdbExpendPool(STsdbRepo* pRepo, int32_t oldTotalBlocks); +int tsdbExpandPool(STsdbRepo* pRepo, int32_t oldTotalBlocks); void tsdbRecycleBufferBlock(STsdbBufPool* pPool, SListNode *pNode); -#endif /* _TD_TSDB_BUFFER_H_ */ \ No newline at end of file +#endif /* _TD_TSDB_BUFFER_H_ */ diff --git a/src/tsdb/inc/tsdbReadImpl.h b/src/tsdb/inc/tsdbReadImpl.h index e7840d9723..814c4d1305 100644 --- a/src/tsdb/inc/tsdbReadImpl.h +++ b/src/tsdb/inc/tsdbReadImpl.h @@ -16,6 +16,13 @@ #ifndef _TD_TSDB_READ_IMPL_H_ #define 
_TD_TSDB_READ_IMPL_H_ +#include "tfs.h" +#include "tsdb.h" +#include "os.h" +#include "tsdbFile.h" +#include "tskiplist.h" +#include "tsdbMeta.h" + typedef struct SReadH SReadH; typedef struct { @@ -143,4 +150,4 @@ static FORCE_INLINE int tsdbMakeRoom(void **ppBuf, size_t size) { return 0; } -#endif /*_TD_TSDB_READ_IMPL_H_*/ \ No newline at end of file +#endif /*_TD_TSDB_READ_IMPL_H_*/ diff --git a/src/tsdb/inc/tsdbRowMergeBuf.h b/src/tsdb/inc/tsdbRowMergeBuf.h new file mode 100644 index 0000000000..302bf25750 --- /dev/null +++ b/src/tsdb/inc/tsdbRowMergeBuf.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TSDB_ROW_MERGE_BUF_H +#define TSDB_ROW_MERGE_BUF_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include "tsdb.h" +#include "tchecksum.h" +#include "tsdbReadImpl.h" + +typedef void* SMergeBuf; + +SDataRow tsdbMergeTwoRows(SMergeBuf *pBuf, SMemRow row1, SMemRow row2, STSchema *pSchema1, STSchema *pSchema2); + +static FORCE_INLINE int tsdbMergeBufMakeSureRoom(SMergeBuf *pBuf, STSchema* pSchema1, STSchema* pSchema2) { + return tsdbMakeRoom(pBuf, MAX(dataRowMaxBytesFromSchema(pSchema1), dataRowMaxBytesFromSchema(pSchema2))); +} + +static FORCE_INLINE void tsdbFreeMergeBuf(SMergeBuf buf) { + taosTZfree(buf); +} + +#ifdef __cplusplus +} +#endif + +#endif /* ifndef TSDB_ROW_MERGE_BUF_H */ diff --git a/src/tsdb/inc/tsdbint.h b/src/tsdb/inc/tsdbint.h index dd43e39310..757a0951e8 100644 --- a/src/tsdb/inc/tsdbint.h +++ b/src/tsdb/inc/tsdbint.h @@ -68,6 +68,8 @@ extern "C" { #include "tsdbCompact.h" // Commit Queue #include "tsdbCommitQueue.h" + +#include "tsdbRowMergeBuf.h" // Main definitions struct STsdbRepo { uint8_t state; @@ -92,6 +94,7 @@ struct STsdbRepo { pthread_mutex_t mutex; bool repoLocked; int32_t code; // Commit code + SMergeBuf mergeBuf; //used when update=2 bool inCompact; // is in compact process? 
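The TDUpdateConfig enum introduced in taosdef.h (together with TSDB_MAX_DB_UPDATE moving from 1 to 2) gives the per-database `update` option three meanings: 0 discards a row whose timestamp already exists, 1 overwrites the stored row, and 2 merges column by column so that NULL columns in the incoming row keep their previously stored values. A minimal standalone sketch of those semantics, using toy types rather than TDengine's SMemRow format:

#include <stdio.h>

/* Same values as the TDUpdateConfig enum added in taosdef.h. */
typedef enum {
  TD_ROW_DISCARD_UPDATE   = 0,
  TD_ROW_OVERWRITE_UPDATE = 1,
  TD_ROW_PARTIAL_UPDATE   = 2
} TDUpdateConfig;

#define COL_NULL (-1)                      /* toy NULL marker for this sketch */

typedef struct { int c1, c2; } ToyRow;     /* toy two-column row, not SMemRow */

/* Resolve an incoming row whose timestamp already exists in the table. */
static ToyRow resolveDuplicateKey(ToyRow stored, ToyRow incoming, TDUpdateConfig update) {
  switch (update) {
    case TD_ROW_DISCARD_UPDATE:   return stored;    /* update=0: keep the old row */
    case TD_ROW_OVERWRITE_UPDATE: return incoming;  /* update=1: new row replaces it */
    case TD_ROW_PARTIAL_UPDATE:                     /* update=2: non-NULL columns win */
    default: {
      ToyRow merged = stored;
      if (incoming.c1 != COL_NULL) merged.c1 = incoming.c1;
      if (incoming.c2 != COL_NULL) merged.c2 = incoming.c2;
      return merged;
    }
  }
}

int main(void) {
  ToyRow stored = {10, 20}, incoming = {COL_NULL, 99};
  for (int mode = 0; mode <= 2; ++mode) {
    ToyRow r = resolveDuplicateKey(stored, incoming, (TDUpdateConfig)mode);
    printf("update=%d -> (%d, %d)\n", mode, r.c1, r.c2);
  }
  return 0;
}

Compiled on its own, the sketch prints (10, 20), (-1, 99) and (10, 99) for modes 0, 1 and 2 respectively, which is the behavior the rest of this patch threads through the commit, compact and read paths.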
}; @@ -139,4 +142,4 @@ static FORCE_INLINE int tsdbGetNextMaxTables(int tid) { } #endif -#endif /* _TD_TSDB_INT_H_ */ \ No newline at end of file +#endif /* _TD_TSDB_INT_H_ */ diff --git a/src/tsdb/src/tsdbBuffer.c b/src/tsdb/src/tsdbBuffer.c index 429ea8e0ce..e675bf6f9d 100644 --- a/src/tsdb/src/tsdbBuffer.c +++ b/src/tsdb/src/tsdbBuffer.c @@ -159,7 +159,7 @@ _err: static void tsdbFreeBufBlock(STsdbBufBlock *pBufBlock) { tfree(pBufBlock); } -int tsdbExpendPool(STsdbRepo* pRepo, int32_t oldTotalBlocks) { +int tsdbExpandPool(STsdbRepo* pRepo, int32_t oldTotalBlocks) { if (oldTotalBlocks == pRepo->config.totalBlocks) { return TSDB_CODE_SUCCESS; } @@ -199,4 +199,4 @@ void tsdbRecycleBufferBlock(STsdbBufPool* pPool, SListNode *pNode) { tsdbFreeBufBlock(pBufBlock); free(pNode); pPool->nBufBlocks--; -} \ No newline at end of file +} diff --git a/src/tsdb/src/tsdbCommit.c b/src/tsdb/src/tsdbCommit.c index 6330da6058..6c98283189 100644 --- a/src/tsdb/src/tsdbCommit.c +++ b/src/tsdb/src/tsdbCommit.c @@ -1290,7 +1290,7 @@ static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIt ASSERT(pSchema != NULL); } - tdAppendMemRowToDataCol(row, pSchema, pTarget); + tdAppendMemRowToDataCol(row, pSchema, pTarget, true); } tSkipListIterNext(pCommitIter->pIter); @@ -1302,7 +1302,7 @@ static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIt ASSERT(pSchema != NULL); } - tdAppendMemRowToDataCol(row, pSchema, pTarget); + tdAppendMemRowToDataCol(row, pSchema, pTarget, update == TD_ROW_OVERWRITE_UPDATE); } } else { ASSERT(!isRowDel); diff --git a/src/tsdb/src/tsdbCommitQueue.c b/src/tsdb/src/tsdbCommitQueue.c index e45ac05e97..59fb4f334d 100644 --- a/src/tsdb/src/tsdbCommitQueue.c +++ b/src/tsdb/src/tsdbCommitQueue.c @@ -138,7 +138,7 @@ static void tsdbApplyRepoConfig(STsdbRepo *pRepo) { pSaveCfg->compression, pSaveCfg->keep,pSaveCfg->keep1, pSaveCfg->keep2, pSaveCfg->totalBlocks, oldCfg.cacheLastRow, pSaveCfg->cacheLastRow, oldTotalBlocks, pSaveCfg->totalBlocks); - int err = tsdbExpendPool(pRepo, oldTotalBlocks); + int err = tsdbExpandPool(pRepo, oldTotalBlocks); if (!TAOS_SUCCEEDED(err)) { tsdbError("vgId:%d expand pool from %d to %d fail,reason:%s", REPO_ID(pRepo), oldTotalBlocks, pSaveCfg->totalBlocks, tstrerror(err)); diff --git a/src/tsdb/src/tsdbCompact.c b/src/tsdb/src/tsdbCompact.c index 5211ee3c61..0490f26b5e 100644 --- a/src/tsdb/src/tsdbCompact.c +++ b/src/tsdb/src/tsdbCompact.c @@ -455,7 +455,7 @@ static int tsdbCompactMeta(STsdbRepo *pRepo) { if (pReadh->pDCols[0]->numOfRows - ridx == 0) break; int rowsToMerge = MIN(pReadh->pDCols[0]->numOfRows - ridx, defaultRows - pComph->pDataCols->numOfRows); - tdMergeDataCols(pComph->pDataCols, pReadh->pDCols[0], rowsToMerge, &ridx); + tdMergeDataCols(pComph->pDataCols, pReadh->pDCols[0], rowsToMerge, &ridx, pCfg->update != TD_ROW_PARTIAL_UPDATE); if (pComph->pDataCols->numOfRows < defaultRows) { break; diff --git a/src/tsdb/src/tsdbMain.c b/src/tsdb/src/tsdbMain.c index f3a7c4b7ee..c877bfc7af 100644 --- a/src/tsdb/src/tsdbMain.c +++ b/src/tsdb/src/tsdbMain.c @@ -14,6 +14,7 @@ */ // no test file errors here +#include "taosdef.h" #include "tsdbint.h" #define IS_VALID_PRECISION(precision) \ @@ -106,6 +107,8 @@ STsdbRepo *tsdbOpenRepo(STsdbCfg *pCfg, STsdbAppH *pAppH) { return NULL; } + pRepo->mergeBuf = NULL; + tsdbStartStream(pRepo); tsdbDebug("vgId:%d, TSDB repository opened", REPO_ID(pRepo)); @@ -518,7 +521,8 @@ static int32_t tsdbCheckAndSetDefaultCfg(STsdbCfg *pCfg) { } // update check - if (pCfg->update != 0) 
pCfg->update = 1; + if (pCfg->update < TD_ROW_DISCARD_UPDATE || pCfg->update > TD_ROW_PARTIAL_UPDATE) + pCfg->update = TD_ROW_DISCARD_UPDATE; // update cacheLastRow if (pCfg->cacheLastRow != 0) { @@ -597,6 +601,7 @@ static void tsdbFreeRepo(STsdbRepo *pRepo) { tsdbFreeFS(pRepo->fs); tsdbFreeBufPool(pRepo->pPool); tsdbFreeMeta(pRepo->tsdbMeta); + tsdbFreeMergeBuf(pRepo->mergeBuf); // tsdbFreeMemTable(pRepo->mem); // tsdbFreeMemTable(pRepo->imem); tsem_destroy(&(pRepo->readyToCommit)); diff --git a/src/tsdb/src/tsdbMemTable.c b/src/tsdb/src/tsdbMemTable.c index ee9dadd9b7..8bb2d1c44e 100644 --- a/src/tsdb/src/tsdbMemTable.c +++ b/src/tsdb/src/tsdbMemTable.c @@ -13,7 +13,11 @@ * along with this program. If not, see . */ +#include "tdataformat.h" +#include "tfunctional.h" #include "tsdbint.h" +#include "tskiplist.h" +#include "tsdbRowMergeBuf.h" #define TSDB_DATA_SKIPLIST_LEVEL 5 #define TSDB_MAX_INSERT_BATCH 512 @@ -30,24 +34,22 @@ typedef struct { void * pMsg; } SSubmitMsgIter; -static SMemTable * tsdbNewMemTable(STsdbRepo *pRepo); -static void tsdbFreeMemTable(SMemTable *pMemTable); -static STableData *tsdbNewTableData(STsdbCfg *pCfg, STable *pTable); -static void tsdbFreeTableData(STableData *pTableData); -static char * tsdbGetTsTupleKey(const void *data); +static SMemTable * tsdbNewMemTable(STsdbRepo *pRepo); +static void tsdbFreeMemTable(SMemTable *pMemTable); +static STableData* tsdbNewTableData(STsdbCfg *pCfg, STable *pTable); +static void tsdbFreeTableData(STableData *pTableData); +static char * tsdbGetTsTupleKey(const void *data); static int tsdbAdjustMemMaxTables(SMemTable *pMemTable, int maxTables); -static int tsdbAppendTableRowToCols(STable *pTable, SDataCols *pCols, STSchema **ppSchema, SMemRow row); +static int tsdbAppendTableRowToCols(STable *pTable, SDataCols *pCols, STSchema **ppSchema, SMemRow row); static int tsdbInitSubmitBlkIter(SSubmitBlk *pBlock, SSubmitBlkIter *pIter); -static SMemRow tsdbGetSubmitBlkNext(SSubmitBlkIter *pIter); +static SMemRow tsdbGetSubmitBlkNext(SSubmitBlkIter *pIter); static int tsdbScanAndConvertSubmitMsg(STsdbRepo *pRepo, SSubmitMsg *pMsg); static int tsdbInsertDataToTable(STsdbRepo *pRepo, SSubmitBlk *pBlock, int32_t *affectedrows); -static int tsdbCopyRowToMem(STsdbRepo *pRepo, SMemRow row, STable *pTable, void **ppRow); static int tsdbInitSubmitMsgIter(SSubmitMsg *pMsg, SSubmitMsgIter *pIter); static int tsdbGetSubmitMsgNext(SSubmitMsgIter *pIter, SSubmitBlk **pPBlock); static int tsdbCheckTableSchema(STsdbRepo *pRepo, SSubmitBlk *pBlock, STable *pTable); -static int tsdbInsertDataToTableImpl(STsdbRepo *pRepo, STable *pTable, void **rows, int rowCounter); -static void tsdbFreeRows(STsdbRepo *pRepo, void **rows, int rowCounter); -static int tsdbUpdateTableLatestInfo(STsdbRepo *pRepo, STable *pTable, SMemRow row); +static int tsdbUpdateTableLatestInfo(STsdbRepo *pRepo, STable *pTable, SMemRow row); + static FORCE_INLINE int tsdbCheckRowRange(STsdbRepo *pRepo, STable *pTable, SMemRow row, TSKEY minKey, TSKEY maxKey, TSKEY now); @@ -342,7 +344,7 @@ int tsdbSyncCommit(STsdbRepo *repo) { * 3. rowsIncreased = rowsInserted - rowsDeleteSucceed >= maxRowsToRead * 4. operations in pCols not exceeds its max capacity if pCols is given * - * The function tries to procceed AS MUSH AS POSSIBLE. + * The function tries to procceed AS MUCH AS POSSIBLE. 
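tsdbCheckAndSetDefaultCfg now clamps any `update` value outside [TD_ROW_DISCARD_UPDATE, TD_ROW_PARTIAL_UPDATE] back to TD_ROW_DISCARD_UPDATE, and the compact and sub-block merge call sites above turn the mode into the new forceSetNull flag, since only partial update must preserve previously stored column values. A small self-contained sketch of those two rules; the helper names are illustrative, not TDengine APIs:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum { TD_ROW_DISCARD_UPDATE = 0, TD_ROW_OVERWRITE_UPDATE = 1, TD_ROW_PARTIAL_UPDATE = 2 };

/* Mirrors the range check in tsdbCheckAndSetDefaultCfg: anything outside [0,2]
 * falls back to discard mode instead of being coerced to 1 as before. */
static int8_t checkUpdateCfg(int8_t update) {
  if (update < TD_ROW_DISCARD_UPDATE || update > TD_ROW_PARTIAL_UPDATE) {
    return TD_ROW_DISCARD_UPDATE;
  }
  return update;
}

/* Mirrors the tsdbCompact.c / tsdbReadImpl.c call sites: only partial update
 * needs old column values kept, so only then is forceSetNull false. */
static bool forceSetNullFor(int8_t update) {
  return update != TD_ROW_PARTIAL_UPDATE;
}

int main(void) {
  for (int u = -1; u <= 3; ++u) {
    int8_t eff = checkUpdateCfg((int8_t)u);
    printf("update=%d -> effective=%d forceSetNull=%d\n", u, eff, (int)forceSetNullFor(eff));
  }
  return 0;
}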
*/ int tsdbLoadDataFromCache(STable *pTable, SSkipListIterator *pIter, TSKEY maxKey, int maxRowsToRead, SDataCols *pCols, TKEY *filterKeys, int nFilterKeys, bool keepDup, SMergeInfo *pMergeInfo) { @@ -523,9 +525,15 @@ static STableData *tsdbNewTableData(STsdbCfg *pCfg, STable *pTable) { pTableData->keyLast = 0; pTableData->numOfRows = 0; + uint8_t skipListCreateFlags; + if(pCfg->update == TD_ROW_DISCARD_UPDATE) + skipListCreateFlags = SL_DISCARD_DUP_KEY; + else + skipListCreateFlags = SL_UPDATE_DUP_KEY; + pTableData->pData = tSkipListCreate(TSDB_DATA_SKIPLIST_LEVEL, TSDB_DATA_TYPE_TIMESTAMP, TYPE_BYTES[TSDB_DATA_TYPE_TIMESTAMP], - tkeyComparFn, pCfg->update ? SL_UPDATE_DUP_KEY : SL_DISCARD_DUP_KEY, tsdbGetTsTupleKey); + tkeyComparFn, skipListCreateFlags, tsdbGetTsTupleKey); if (pTableData->pData == NULL) { terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; free(pTableData); @@ -581,7 +589,7 @@ static int tsdbAppendTableRowToCols(STable *pTable, SDataCols *pCols, STSchema * } } - tdAppendMemRowToDataCol(row, *ppSchema, pCols); + tdAppendMemRowToDataCol(row, *ppSchema, pCols, true); } return 0; @@ -693,45 +701,152 @@ static int tsdbScanAndConvertSubmitMsg(STsdbRepo *pRepo, SSubmitMsg *pMsg) { return 0; } -static int tsdbInsertDataToTable(STsdbRepo *pRepo, SSubmitBlk *pBlock, int32_t *affectedrows) { - STsdbMeta * pMeta = pRepo->tsdbMeta; - int64_t points = 0; - STable * pTable = NULL; - SSubmitBlkIter blkIter = {0}; - SMemRow row = NULL; - void * rows[TSDB_MAX_INSERT_BATCH] = {0}; - int rowCounter = 0; +//row1 has higher priority +static SMemRow tsdbInsertDupKeyMerge(SMemRow row1, SMemRow row2, STsdbRepo* pRepo, STSchema **ppSchema1, STSchema **ppSchema2, STable* pTable, int32_t* pAffectedRows, int64_t* pPoints, SMemRow* pLastRow) { + + //for compatiblity, duplicate key inserted when update=0 should be also calculated as affected rows! 
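In the rewritten memtable path the skip list itself resolves duplicate timestamps: only update=0 creates it with SL_DISCARD_DUP_KEY, and, as the comment above notes, a duplicate key inserted under update=0 is still counted as an affected row for compatibility. A toy, self-contained model of that insert accounting (a plain array stands in for the skip list, -1 stands in for NULL):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum { TD_ROW_DISCARD_UPDATE = 0, TD_ROW_OVERWRITE_UPDATE = 1, TD_ROW_PARTIAL_UPDATE = 2 };

#define MAX_ROWS 16
#define TOY_NULL (-1)   /* toy NULL marker */

typedef struct { int64_t key; int val; } ToyRow;
typedef struct { ToyRow rows[MAX_ROWS]; int n; } ToyTable;

/* Insert one row; a duplicate timestamp is resolved by the update mode.
 * As in the new insert path, even a discarded duplicate counts as an affected row. */
static void toyInsert(ToyTable *t, ToyRow r, int update, int32_t *affected) {
  for (int i = 0; i < t->n; ++i) {
    if (t->rows[i].key == r.key) {                        /* duplicate timestamp */
      if (update == TD_ROW_OVERWRITE_UPDATE) {
        t->rows[i] = r;                                   /* new row wins */
      } else if (update == TD_ROW_PARTIAL_UPDATE && r.val != TOY_NULL) {
        t->rows[i].val = r.val;                           /* merge non-NULL columns */
      }                                                   /* discard: keep stored row */
      (*affected)++;
      return;
    }
  }
  if (t->n < MAX_ROWS) {                                  /* new timestamp: append */
    t->rows[t->n++] = r;
    (*affected)++;
  }
}

int main(void) {
  ToyTable t;
  memset(&t, 0, sizeof(t));
  int32_t affected = 0;
  ToyRow first = {1000, 7}, dup = {1000, 9};
  toyInsert(&t, first, TD_ROW_DISCARD_UPDATE, &affected);
  toyInsert(&t, dup, TD_ROW_DISCARD_UPDATE, &affected);   /* dropped, but still counted */
  printf("rows=%d affected=%d stored=%d\n", t.n, affected, t.rows[0].val);
  return 0;
}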
+ if(row1 == NULL && row2 == NULL && pRepo->config.update == TD_ROW_DISCARD_UPDATE) { + (*pAffectedRows)++; + (*pPoints)++; + return NULL; + } - ASSERT(pBlock->tid < pMeta->maxTables); - pTable = pMeta->tables[pBlock->tid]; - ASSERT(pTable != NULL && TABLE_UID(pTable) == pBlock->uid); + if(row2 == NULL || pRepo->config.update != TD_ROW_PARTIAL_UPDATE) { + void* pMem = tsdbAllocBytes(pRepo, memRowTLen(row1)); + if(pMem == NULL) return NULL; + memRowCpy(pMem, row1); + (*pAffectedRows)++; + (*pPoints)++; + *pLastRow = pMem; + return pMem; + } - tsdbInitSubmitBlkIter(pBlock, &blkIter); - while ((row = tsdbGetSubmitBlkNext(&blkIter)) != NULL) { - if (tsdbCopyRowToMem(pRepo, row, pTable, &(rows[rowCounter])) < 0) { - tsdbFreeRows(pRepo, rows, rowCounter); - goto _err; + STSchema *pSchema1 = *ppSchema1; + STSchema *pSchema2 = *ppSchema2; + SMergeBuf * pBuf = &pRepo->mergeBuf; + int dv1 = memRowVersion(row1); + int dv2 = memRowVersion(row2); + if(pSchema1 == NULL || schemaVersion(pSchema1) != dv1) { + if(pSchema2 != NULL && schemaVersion(pSchema2) == dv1) { + *ppSchema1 = pSchema2; + } else { + *ppSchema1 = tsdbGetTableSchemaImpl(pTable, false, false, memRowVersion(row1)); } + pSchema1 = *ppSchema1; + } - (*affectedrows)++; - points++; - - if (rows[rowCounter] != NULL) { - rowCounter++; - } - - if (rowCounter == TSDB_MAX_INSERT_BATCH) { - if (tsdbInsertDataToTableImpl(pRepo, pTable, rows, rowCounter) < 0) { - goto _err; - } - - rowCounter = 0; - memset(rows, 0, sizeof(rows)); + if(pSchema2 == NULL || schemaVersion(pSchema2) != dv2) { + if(schemaVersion(pSchema1) == dv2) { + pSchema2 = pSchema1; + } else { + *ppSchema2 = tsdbGetTableSchemaImpl(pTable, false, false, memRowVersion(row2)); + pSchema2 = *ppSchema2; } } - if (rowCounter > 0 && tsdbInsertDataToTableImpl(pRepo, pTable, rows, rowCounter) < 0) { - goto _err; + SMemRow tmp = tsdbMergeTwoRows(pBuf, row1, row2, pSchema1, pSchema2); + + void* pMem = tsdbAllocBytes(pRepo, memRowTLen(tmp)); + if(pMem == NULL) return NULL; + memRowCpy(pMem, tmp); + + (*pAffectedRows)++; + (*pPoints)++; + + *pLastRow = pMem; + return pMem; +} + +static void* tsdbInsertDupKeyMergePacked(void** args) { + return tsdbInsertDupKeyMerge(args[0], args[1], args[2], (STSchema**)&args[3], (STSchema**)&args[4], args[5], args[6], args[7], args[8]); +} + +static void tsdbSetupSkipListHookFns(SSkipList* pSkipList, STsdbRepo *pRepo, STable *pTable, int32_t* pAffectedRows, int64_t* pPoints, SMemRow* pLastRow) { + + if(pSkipList->insertHandleFn == NULL) { + tGenericSavedFunc *dupHandleSavedFunc = genericSavedFuncInit((GenericVaFunc)&tsdbInsertDupKeyMergePacked, 9); + dupHandleSavedFunc->args[2] = pRepo; + dupHandleSavedFunc->args[3] = NULL; + dupHandleSavedFunc->args[4] = NULL; + dupHandleSavedFunc->args[5] = pTable; + dupHandleSavedFunc->args[6] = pAffectedRows; + dupHandleSavedFunc->args[7] = pPoints; + dupHandleSavedFunc->args[8] = pLastRow; + pSkipList->insertHandleFn = dupHandleSavedFunc; + } +} + +static int tsdbInsertDataToTable(STsdbRepo* pRepo, SSubmitBlk* pBlock, int32_t *pAffectedRows) { + + STsdbMeta *pMeta = pRepo->tsdbMeta; + int64_t points = 0; + STable *pTable = NULL; + SSubmitBlkIter blkIter = {0}; + SMemTable *pMemTable = NULL; + STableData *pTableData = NULL; + STsdbCfg *pCfg = &(pRepo->config); + + tsdbInitSubmitBlkIter(pBlock, &blkIter); + if(blkIter.row == NULL) return 0; + TSKEY firstRowKey = memRowKey(blkIter.row); + + tsdbAllocBytes(pRepo, 0); + pMemTable = pRepo->mem; + + ASSERT(pMemTable != NULL); + ASSERT(pBlock->tid < pMeta->maxTables); + + pTable = 
pMeta->tables[pBlock->tid]; + + ASSERT(pTable != NULL && TABLE_UID(pTable) == pBlock->uid); + + + if (TABLE_TID(pTable) >= pMemTable->maxTables) { + if (tsdbAdjustMemMaxTables(pMemTable, pMeta->maxTables) < 0) { + return -1; + } + } + pTableData = pMemTable->tData[TABLE_TID(pTable)]; + + if (pTableData == NULL || pTableData->uid != TABLE_UID(pTable)) { + if (pTableData != NULL) { + taosWLockLatch(&(pMemTable->latch)); + pMemTable->tData[TABLE_TID(pTable)] = NULL; + tsdbFreeTableData(pTableData); + taosWUnLockLatch(&(pMemTable->latch)); + } + + pTableData = tsdbNewTableData(pCfg, pTable); + if (pTableData == NULL) { + tsdbError("vgId:%d failed to insert data to table %s uid %" PRId64 " tid %d since %s", REPO_ID(pRepo), + TABLE_CHAR_NAME(pTable), TABLE_UID(pTable), TABLE_TID(pTable), tstrerror(terrno)); + return -1; + } + + pRepo->mem->tData[TABLE_TID(pTable)] = pTableData; + } + + ASSERT((pTableData != NULL) && pTableData->uid == TABLE_UID(pTable)); + + SMemRow lastRow = NULL; + int64_t osize = SL_SIZE(pTableData->pData); + tsdbSetupSkipListHookFns(pTableData->pData, pRepo, pTable, pAffectedRows, &points, &lastRow); + tSkipListPutBatchByIter(pTableData->pData, &blkIter, (iter_next_fn_t)tsdbGetSubmitBlkNext); + int64_t dsize = SL_SIZE(pTableData->pData) - osize; + + + if(lastRow != NULL) { + TSKEY lastRowKey = memRowKey(lastRow); + if (pMemTable->keyFirst > firstRowKey) pMemTable->keyFirst = firstRowKey; + pMemTable->numOfRows += dsize; + + if (pTableData->keyFirst > firstRowKey) pTableData->keyFirst = firstRowKey; + pTableData->numOfRows += dsize; + if (pMemTable->keyLast < lastRowKey) pMemTable->keyLast = lastRowKey; + if (pTableData->keyLast < lastRowKey) pTableData->keyLast = lastRowKey; + if (tsdbUpdateTableLatestInfo(pRepo, pTable, lastRow) < 0) { + return -1; + } } STSchema *pSchema = tsdbGetTableSchemaByVersion(pTable, pBlock->sversion); @@ -739,48 +854,8 @@ static int tsdbInsertDataToTable(STsdbRepo *pRepo, SSubmitBlk *pBlock, int32_t * pRepo->stat.totalStorage += points * schemaVLen(pSchema); return 0; - -_err: - return -1; } -static int tsdbCopyRowToMem(STsdbRepo *pRepo, SMemRow row, STable *pTable, void **ppRow) { - STsdbCfg * pCfg = &pRepo->config; - TKEY tkey = memRowTKey(row); - TSKEY key = memRowKey(row); - bool isRowDelete = TKEY_IS_DELETED(tkey); - - if (isRowDelete) { - if (!pCfg->update) { - tsdbWarn("vgId:%d vnode is not allowed to update but try to delete a data row", REPO_ID(pRepo)); - terrno = TSDB_CODE_TDB_INVALID_ACTION; - return -1; - } - - TSKEY lastKey = tsdbGetTableLastKeyImpl(pTable); - if (key > lastKey) { - tsdbTrace("vgId:%d skip to delete row key %" PRId64 " which is larger than table lastKey %" PRId64, - REPO_ID(pRepo), key, lastKey); - return 0; - } - } - - void *pRow = tsdbAllocBytes(pRepo, memRowTLen(row)); - if (pRow == NULL) { - tsdbError("vgId:%d failed to insert row with key %" PRId64 " to table %s while allocate %" PRIu32 " bytes since %s", - REPO_ID(pRepo), key, TABLE_CHAR_NAME(pTable), memRowTLen(row), tstrerror(terrno)); - return -1; - } - - memRowCpy(pRow, row); - ppRow[0] = pRow; // save the memory address of data rows - - tsdbTrace("vgId:%d a row is %s table %s tid %d uid %" PRIu64 " key %" PRIu64, REPO_ID(pRepo), - isRowDelete ? 
"deleted from" : "updated in", TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable), - key); - - return 0; -} static int tsdbInitSubmitMsgIter(SSubmitMsg *pMsg, SSubmitMsgIter *pIter) { if (pMsg == NULL) { @@ -889,106 +964,6 @@ static int tsdbCheckTableSchema(STsdbRepo *pRepo, SSubmitBlk *pBlock, STable *pT return 0; } -static int tsdbInsertDataToTableImpl(STsdbRepo *pRepo, STable *pTable, void **rows, int rowCounter) { - if (rowCounter < 1) return 0; - - SMemTable * pMemTable = NULL; - STableData *pTableData = NULL; - STsdbMeta * pMeta = pRepo->tsdbMeta; - STsdbCfg * pCfg = &(pRepo->config); - - ASSERT(pRepo->mem != NULL); - pMemTable = pRepo->mem; - - if (TABLE_TID(pTable) >= pMemTable->maxTables) { - if (tsdbAdjustMemMaxTables(pMemTable, pMeta->maxTables) < 0) { - tsdbFreeRows(pRepo, rows, rowCounter); - return -1; - } - } - pTableData = pMemTable->tData[TABLE_TID(pTable)]; - - if (pTableData == NULL || pTableData->uid != TABLE_UID(pTable)) { - if (pTableData != NULL) { - taosWLockLatch(&(pMemTable->latch)); - pMemTable->tData[TABLE_TID(pTable)] = NULL; - tsdbFreeTableData(pTableData); - taosWUnLockLatch(&(pMemTable->latch)); - } - - pTableData = tsdbNewTableData(pCfg, pTable); - if (pTableData == NULL) { - tsdbError("vgId:%d failed to insert data to table %s uid %" PRId64 " tid %d since %s", REPO_ID(pRepo), - TABLE_CHAR_NAME(pTable), TABLE_UID(pTable), TABLE_TID(pTable), tstrerror(terrno)); - tsdbFreeRows(pRepo, rows, rowCounter); - return -1; - } - - pRepo->mem->tData[TABLE_TID(pTable)] = pTableData; - } - - ASSERT((pTableData != NULL) && pTableData->uid == TABLE_UID(pTable)); - - int64_t osize = SL_SIZE(pTableData->pData); - tSkipListPutBatch(pTableData->pData, rows, rowCounter); - int64_t dsize = SL_SIZE(pTableData->pData) - osize; - TSKEY keyFirstRow = memRowKey(rows[0]); - TSKEY keyLastRow = memRowKey(rows[rowCounter - 1]); - - if (pMemTable->keyFirst > keyFirstRow) pMemTable->keyFirst = keyFirstRow; - if (pMemTable->keyLast < keyLastRow) pMemTable->keyLast = keyLastRow; - pMemTable->numOfRows += dsize; - - if (pTableData->keyFirst > keyFirstRow) pTableData->keyFirst = keyFirstRow; - if (pTableData->keyLast < keyLastRow) pTableData->keyLast = keyLastRow; - pTableData->numOfRows += dsize; - - // update table latest info - if (tsdbUpdateTableLatestInfo(pRepo, pTable, rows[rowCounter - 1]) < 0) { - return -1; - } - - return 0; -} - -static void tsdbFreeRows(STsdbRepo *pRepo, void **rows, int rowCounter) { - ASSERT(pRepo->mem != NULL); - STsdbBufPool *pBufPool = pRepo->pPool; - - for (int i = rowCounter - 1; i >= 0; --i) { - SMemRow row = (SMemRow)rows[i]; - int bytes = (int)memRowTLen(row); - - if (pRepo->mem->extraBuffList == NULL) { - STsdbBufBlock *pBufBlock = tsdbGetCurrBufBlock(pRepo); - ASSERT(pBufBlock != NULL && pBufBlock->offset >= bytes); - - pBufBlock->offset -= bytes; - pBufBlock->remain += bytes; - ASSERT(row == POINTER_SHIFT(pBufBlock->data, pBufBlock->offset)); - tsdbTrace("vgId:%d free %d bytes to TSDB buffer pool, nBlocks %d offset %d remain %d", REPO_ID(pRepo), bytes, - listNEles(pRepo->mem->bufBlockList), pBufBlock->offset, pBufBlock->remain); - - if (pBufBlock->offset == 0) { // return the block to buffer pool - if (tsdbLockRepo(pRepo) < 0) return; - SListNode *pNode = tdListPopTail(pRepo->mem->bufBlockList); - tdListPrependNode(pBufPool->bufBlockList, pNode); - if (tsdbUnlockRepo(pRepo) < 0) return; - } - } else { - ASSERT(listNEles(pRepo->mem->extraBuffList) > 0); - SListNode *pNode = tdListPopTail(pRepo->mem->extraBuffList); - ASSERT(row == 
pNode->data); - free(pNode); - tsdbTrace("vgId:%d free %d bytes to SYSTEM buffer pool", REPO_ID(pRepo), bytes); - - if (listNEles(pRepo->mem->extraBuffList) == 0) { - tdListFree(pRepo->mem->extraBuffList); - pRepo->mem->extraBuffList = NULL; - } - } - } -} static void updateTableLatestColumn(STsdbRepo *pRepo, STable *pTable, SMemRow row) { tsdbDebug("vgId:%d updateTableLatestColumn, %s row version:%d", REPO_ID(pRepo), pTable->name->data, @@ -1005,8 +980,8 @@ static void updateTableLatestColumn(STsdbRepo *pRepo, STable *pTable, SMemRow ro } SDataCol *pLatestCols = pTable->lastCols; + int32_t kvIdx = 0; - bool isDataRow = isDataRow(row); for (int16_t j = 0; j < schemaNCols(pSchema); j++) { STColumn *pTCol = schemaColAt(pSchema, j); // ignore not exist colId @@ -1017,16 +992,8 @@ static void updateTableLatestColumn(STsdbRepo *pRepo, STable *pTable, SMemRow ro void *value = NULL; - if (isDataRow) { - value = tdGetRowDataOfCol(memRowDataBody(row), (int8_t)pTCol->type, - TD_DATA_ROW_HEAD_SIZE + pTCol->offset); - } else { - // SKVRow - SColIdx *pColIdx = tdGetKVRowIdxOfCol(memRowKvBody(row), pTCol->colId); - if (pColIdx) { - value = tdGetKvRowDataOfCol(memRowKvBody(row), pColIdx->offset); - } - } + value = tdGetMemRowDataOfColEx(row, pTCol->colId, (int8_t)pTCol->type, + TD_DATA_ROW_HEAD_SIZE + pSchema->columns[j].offset, &kvIdx); if ((value == NULL) || isNull(value, pTCol->type)) { continue; @@ -1055,13 +1022,14 @@ static int tsdbUpdateTableLatestInfo(STsdbRepo *pRepo, STable *pTable, SMemRow r // if cacheLastRow config has been reset, free the lastRow if (!pCfg->cacheLastRow && pTable->lastRow != NULL) { - taosTZfree(pTable->lastRow); + SMemRow cachedLastRow = pTable->lastRow; TSDB_WLOCK_TABLE(pTable); pTable->lastRow = NULL; TSDB_WUNLOCK_TABLE(pTable); + taosTZfree(cachedLastRow); } - if (tsdbGetTableLastKeyImpl(pTable) < memRowKey(row)) { + if (tsdbGetTableLastKeyImpl(pTable) <= memRowKey(row)) { if (CACHE_LAST_ROW(pCfg) || pTable->lastRow != NULL) { SMemRow nrow = pTable->lastRow; if (taosTSizeof(nrow) < memRowTLen(row)) { diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index 7a595da39a..f5c01d86e7 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -14,11 +14,14 @@ */ #include "os.h" +#include "tdataformat.h" +#include "tskiplist.h" #include "tulog.h" #include "talgo.h" #include "tcompare.h" #include "exception.h" +#include "taosdef.h" #include "tlosertree.h" #include "tsdbint.h" #include "texpr.h" @@ -68,6 +71,12 @@ typedef struct SLoadCompBlockInfo { int32_t fileId; } SLoadCompBlockInfo; +enum { + CHECKINFO_CHOSEN_MEM = 0, + CHECKINFO_CHOSEN_IMEM = 1, + CHECKINFO_CHOSEN_BOTH = 2 //for update=2(merge case) +}; + typedef struct STableCheckInfo { STableId tableId; @@ -76,7 +85,7 @@ typedef struct STableCheckInfo { SBlockInfo* pCompInfo; int32_t compSize; int32_t numOfBlocks:29; // number of qualified data blocks not the original blocks - int8_t chosen:2; // indicate which iterator should move forward + uint8_t chosen:2; // indicate which iterator should move forward bool initBuf; // whether to initialize the in-memory skip list iterator or not SSkipListIterator* iter; // mem buffer skip list iterator SSkipListIterator* iiter; // imem buffer skip list iterator @@ -781,7 +790,62 @@ static void destroyTableMemIterator(STableCheckInfo* pCheckInfo) { tSkipListDestroyIter(pCheckInfo->iiter); } -static SMemRow getSDataRowInTableMem(STableCheckInfo* pCheckInfo, int32_t order, int32_t update) { +static TSKEY extractFirstTraverseKey(STableCheckInfo* pCheckInfo, int32_t 
order, int32_t update) { + SMemRow rmem = NULL, rimem = NULL; + if (pCheckInfo->iter) { + SSkipListNode* node = tSkipListIterGet(pCheckInfo->iter); + if (node != NULL) { + rmem = (SMemRow)SL_GET_NODE_DATA(node); + } + } + + if (pCheckInfo->iiter) { + SSkipListNode* node = tSkipListIterGet(pCheckInfo->iiter); + if (node != NULL) { + rimem = (SMemRow)SL_GET_NODE_DATA(node); + } + } + + if (rmem == NULL && rimem == NULL) { + return TSKEY_INITIAL_VAL; + } + + if (rmem != NULL && rimem == NULL) { + pCheckInfo->chosen = CHECKINFO_CHOSEN_MEM; + return memRowKey(rmem); + } + + if (rmem == NULL && rimem != NULL) { + pCheckInfo->chosen = CHECKINFO_CHOSEN_IMEM; + return memRowKey(rimem); + } + + TSKEY r1 = memRowKey(rmem); + TSKEY r2 = memRowKey(rimem); + + if (r1 == r2) { + if(update == TD_ROW_DISCARD_UPDATE){ + pCheckInfo->chosen = CHECKINFO_CHOSEN_IMEM; + tSkipListIterNext(pCheckInfo->iter); + } + else if(update == TD_ROW_OVERWRITE_UPDATE) { + pCheckInfo->chosen = CHECKINFO_CHOSEN_MEM; + tSkipListIterNext(pCheckInfo->iiter); + } else { + pCheckInfo->chosen = CHECKINFO_CHOSEN_BOTH; + } + return r1; + } else if (r1 < r2 && ASCENDING_TRAVERSE(order)) { + pCheckInfo->chosen = CHECKINFO_CHOSEN_MEM; + return r1; + } + else { + pCheckInfo->chosen = CHECKINFO_CHOSEN_IMEM; + return r2; + } +} + +static SMemRow getSMemRowInTableMem(STableCheckInfo* pCheckInfo, int32_t order, int32_t update, SMemRow* extraRow) { SMemRow rmem = NULL, rimem = NULL; if (pCheckInfo->iter) { SSkipListNode* node = tSkipListIterGet(pCheckInfo->iter); @@ -814,31 +878,35 @@ static SMemRow getSDataRowInTableMem(STableCheckInfo* pCheckInfo, int32_t order, TSKEY r1 = memRowKey(rmem); TSKEY r2 = memRowKey(rimem); - if (r1 == r2) { // data ts are duplicated, ignore the data in mem - if (!update) { + if (r1 == r2) { + if (update == TD_ROW_DISCARD_UPDATE) { tSkipListIterNext(pCheckInfo->iter); - pCheckInfo->chosen = 1; + pCheckInfo->chosen = CHECKINFO_CHOSEN_IMEM; return rimem; - } else { + } else if(update == TD_ROW_OVERWRITE_UPDATE){ tSkipListIterNext(pCheckInfo->iiter); - pCheckInfo->chosen = 0; + pCheckInfo->chosen = CHECKINFO_CHOSEN_MEM; + return rmem; + } else { + pCheckInfo->chosen = CHECKINFO_CHOSEN_BOTH; + extraRow = rimem; return rmem; } } else { if (ASCENDING_TRAVERSE(order)) { if (r1 < r2) { - pCheckInfo->chosen = 0; + pCheckInfo->chosen = CHECKINFO_CHOSEN_MEM; return rmem; } else { - pCheckInfo->chosen = 1; + pCheckInfo->chosen = CHECKINFO_CHOSEN_IMEM; return rimem; } } else { if (r1 < r2) { - pCheckInfo->chosen = 1; + pCheckInfo->chosen = CHECKINFO_CHOSEN_IMEM; return rimem; } else { - pCheckInfo->chosen = 0; + pCheckInfo->chosen = CHECKINFO_CHOSEN_IMEM; return rmem; } } @@ -847,7 +915,7 @@ static SMemRow getSDataRowInTableMem(STableCheckInfo* pCheckInfo, int32_t order, static bool moveToNextRowInMem(STableCheckInfo* pCheckInfo) { bool hasNext = false; - if (pCheckInfo->chosen == 0) { + if (pCheckInfo->chosen == CHECKINFO_CHOSEN_MEM) { if (pCheckInfo->iter != NULL) { hasNext = tSkipListIterNext(pCheckInfo->iter); } @@ -859,7 +927,7 @@ static bool moveToNextRowInMem(STableCheckInfo* pCheckInfo) { if (pCheckInfo->iiter != NULL) { return tSkipListIterGet(pCheckInfo->iiter) != NULL; } - } else { //pCheckInfo->chosen == 1 + } else if (pCheckInfo->chosen == CHECKINFO_CHOSEN_IMEM){ if (pCheckInfo->iiter != NULL) { hasNext = tSkipListIterNext(pCheckInfo->iiter); } @@ -871,6 +939,13 @@ static bool moveToNextRowInMem(STableCheckInfo* pCheckInfo) { if (pCheckInfo->iter != NULL) { return tSkipListIterGet(pCheckInfo->iter) != NULL; } + } 
else { + if (pCheckInfo->iter != NULL) { + hasNext = tSkipListIterNext(pCheckInfo->iter); + } + if (pCheckInfo->iiter != NULL) { + hasNext = tSkipListIterNext(pCheckInfo->iiter) || hasNext; + } } return hasNext; @@ -891,7 +966,7 @@ static bool hasMoreDataInCache(STsdbQueryHandle* pHandle) { initTableMemIterator(pHandle, pCheckInfo); } - SMemRow row = getSDataRowInTableMem(pCheckInfo, pHandle->order, pCfg->update); + SMemRow row = getSMemRowInTableMem(pCheckInfo, pHandle->order, pCfg->update, NULL); if (row == NULL) { return false; } @@ -1147,25 +1222,28 @@ static int32_t doCopyRowsFromFileBlock(STsdbQueryHandle* pQueryHandle, int32_t c static void moveDataToFront(STsdbQueryHandle* pQueryHandle, int32_t numOfRows, int32_t numOfCols); static void doCheckGeneratedBlockRange(STsdbQueryHandle* pQueryHandle); static void copyAllRemainRowsFromFileBlock(STsdbQueryHandle* pQueryHandle, STableCheckInfo* pCheckInfo, SDataBlockInfo* pBlockInfo, int32_t endPos); +static TSKEY extractFirstTraverseKey(STableCheckInfo* pCheckInfo, int32_t order, int32_t update); static int32_t handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SBlock* pBlock, STableCheckInfo* pCheckInfo){ SQueryFilePos* cur = &pQueryHandle->cur; STsdbCfg* pCfg = &pQueryHandle->pTsdb->config; SDataBlockInfo binfo = GET_FILE_DATA_BLOCK_INFO(pCheckInfo, pBlock); + TSKEY key; int32_t code = TSDB_CODE_SUCCESS; /*bool hasData = */ initTableMemIterator(pQueryHandle, pCheckInfo); - SMemRow row = getSDataRowInTableMem(pCheckInfo, pQueryHandle->order, pCfg->update); assert(cur->pos >= 0 && cur->pos <= binfo.rows); - TSKEY key = (row != NULL) ? memRowKey(row) : TSKEY_INITIAL_VAL; + key = extractFirstTraverseKey(pCheckInfo, pQueryHandle->order, pCfg->update); + if (key != TSKEY_INITIAL_VAL) { tsdbDebug("%p key in mem:%"PRId64", 0x%"PRIx64, pQueryHandle, key, pQueryHandle->qId); } else { tsdbDebug("%p no data in mem, 0x%"PRIx64, pQueryHandle, pQueryHandle->qId); } + if ((ASCENDING_TRAVERSE(pQueryHandle->order) && (key != TSKEY_INITIAL_VAL && key <= binfo.window.ekey)) || (!ASCENDING_TRAVERSE(pQueryHandle->order) && (key != TSKEY_INITIAL_VAL && key >= binfo.window.skey))) { @@ -1190,6 +1268,7 @@ static int32_t handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SBlock* p return code; } + // return error, add test cases if ((code = doLoadFileDataBlock(pQueryHandle, pBlock, pCheckInfo, cur->slot)) != TSDB_CODE_SUCCESS) { return code; @@ -1452,186 +1531,204 @@ int32_t doCopyRowsFromFileBlock(STsdbQueryHandle* pQueryHandle, int32_t capacity return numOfRows + num; } -static void copyOneRowFromMem(STsdbQueryHandle* pQueryHandle, int32_t capacity, int32_t numOfRows, SMemRow row, - int32_t numOfCols, STable* pTable, STSchema* pSchema) { +// Note: row1 always has high priority +static void mergeTwoRowFromMem(STsdbQueryHandle* pQueryHandle, int32_t capacity, int32_t numOfRows, + SMemRow row1, SMemRow row2, int32_t numOfCols, STable* pTable, + STSchema* pSchema1, STSchema* pSchema2, bool forceSetNull) { char* pData = NULL; + STSchema* pSchema; + SMemRow row; + int16_t colId; + int16_t offset; - // the schema version info is embedded in SDataRow, and use latest schema version for SKVRow - int32_t numOfRowCols = 0; - if (pSchema == NULL) { - pSchema = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row)); - numOfRowCols = schemaNCols(pSchema); + bool isRow1DataRow = isDataRow(row1); + bool isRow2DataRow; + bool isChosenRowDataRow; + int32_t chosen_itr; + void *value; + + // the schema version info is embeded in SDataRow + int32_t numOfColsOfRow1 = 0; + 
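extractFirstTraverseKey and getSMemRowInTableMem above resolve a timestamp that exists in both mem and imem according to the update mode: discard keeps the imem row, overwrite keeps the mem row, and partial update keeps both so they can be merged downstream. A standalone sketch of that decision for ascending traversal only (the real code also handles descending order):

#include <stdio.h>

enum { TD_ROW_DISCARD_UPDATE = 0, TD_ROW_OVERWRITE_UPDATE = 1, TD_ROW_PARTIAL_UPDATE = 2 };
enum { CHECKINFO_CHOSEN_MEM = 0, CHECKINFO_CHOSEN_IMEM = 1, CHECKINFO_CHOSEN_BOTH = 2 };

/* Given the next timestamps from the mem and imem skip lists (ascending traversal),
 * return which buffer the reader should take its row from. */
static int chooseIterator(long long keyMem, long long keyImem, int update) {
  if (keyMem != keyImem) {
    return (keyMem < keyImem) ? CHECKINFO_CHOSEN_MEM : CHECKINFO_CHOSEN_IMEM;
  }
  /* duplicate timestamp in both buffers */
  if (update == TD_ROW_DISCARD_UPDATE)   return CHECKINFO_CHOSEN_IMEM; /* keep the older row */
  if (update == TD_ROW_OVERWRITE_UPDATE) return CHECKINFO_CHOSEN_MEM;  /* keep the newer row */
  return CHECKINFO_CHOSEN_BOTH;                                        /* update=2: merge both */
}

int main(void) {
  printf("discard=%d overwrite=%d partial=%d\n",
         chooseIterator(5, 5, TD_ROW_DISCARD_UPDATE),
         chooseIterator(5, 5, TD_ROW_OVERWRITE_UPDATE),
         chooseIterator(5, 5, TD_ROW_PARTIAL_UPDATE));
  return 0;
}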
+ if (pSchema1 == NULL) { + pSchema1 = tsdbGetTableSchemaByVersion(pTable, dataRowVersion(row1)); + } + if(isRow1DataRow) { + numOfColsOfRow1 = schemaNCols(pSchema1); } else { - numOfRowCols = schemaNCols(pSchema); + numOfColsOfRow1 = kvRowNCols(memRowKvBody(row1)); } - int32_t i = 0; - - if (isDataRow(row)) { - SDataRow dataRow = memRowDataBody(row); - int32_t j = 0; - while (i < numOfCols && j < numOfRowCols) { - SColumnInfoData* pColInfo = taosArrayGet(pQueryHandle->pColumns, i); - if (pSchema->columns[j].colId < pColInfo->info.colId) { - j++; - continue; - } - - if (ASCENDING_TRAVERSE(pQueryHandle->order)) { - pData = (char*)pColInfo->pData + numOfRows * pColInfo->info.bytes; - } else { - pData = (char*)pColInfo->pData + (capacity - numOfRows - 1) * pColInfo->info.bytes; - } - - if (pSchema->columns[j].colId == pColInfo->info.colId) { - void* value = - tdGetRowDataOfCol(dataRow, (int8_t)pColInfo->info.type, TD_DATA_ROW_HEAD_SIZE + pSchema->columns[j].offset); - switch (pColInfo->info.type) { - case TSDB_DATA_TYPE_BINARY: - case TSDB_DATA_TYPE_NCHAR: - memcpy(pData, value, varDataTLen(value)); - break; - case TSDB_DATA_TYPE_NULL: - case TSDB_DATA_TYPE_BOOL: - case TSDB_DATA_TYPE_TINYINT: - case TSDB_DATA_TYPE_UTINYINT: - *(uint8_t*)pData = *(uint8_t*)value; - break; - case TSDB_DATA_TYPE_SMALLINT: - case TSDB_DATA_TYPE_USMALLINT: - *(uint16_t*)pData = *(uint16_t*)value; - break; - case TSDB_DATA_TYPE_INT: - case TSDB_DATA_TYPE_UINT: - *(uint32_t*)pData = *(uint32_t*)value; - break; - case TSDB_DATA_TYPE_BIGINT: - case TSDB_DATA_TYPE_UBIGINT: - *(uint64_t*)pData = *(uint64_t*)value; - break; - case TSDB_DATA_TYPE_FLOAT: - SET_FLOAT_PTR(pData, value); - break; - case TSDB_DATA_TYPE_DOUBLE: - SET_DOUBLE_PTR(pData, value); - break; - case TSDB_DATA_TYPE_TIMESTAMP: - if (pColInfo->info.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) { - *(TSKEY*)pData = tdGetKey(*(TKEY*)value); - } else { - *(TSKEY*)pData = *(TSKEY*)value; - } - break; - default: - memcpy(pData, value, pColInfo->info.bytes); - } - - j++; - i++; - } else { // pColInfo->info.colId < pSchema->columns[j].colId, it is a NULL data - if (pColInfo->info.type == TSDB_DATA_TYPE_BINARY || pColInfo->info.type == TSDB_DATA_TYPE_NCHAR) { - setVardataNull(pData, pColInfo->info.type); - } else { - setNull(pData, pColInfo->info.type, pColInfo->info.bytes); - } - i++; - } + int32_t numOfColsOfRow2 = 0; + if(row2) { + isRow2DataRow = isDataRow(row2); + if (pSchema2 == NULL) { + pSchema2 = tsdbGetTableSchemaByVersion(pTable, dataRowVersion(row2)); } - } else if (isKvRow(row)) { - SKVRow kvRow = memRowKvBody(row); - int32_t k = 0; - int32_t nKvRowCols = kvRowNCols(kvRow); - - while (i < numOfCols && k < nKvRowCols) { - SColumnInfoData* pColInfo = taosArrayGet(pQueryHandle->pColumns, i); - SColIdx* pColIdx = kvRowColIdxAt(kvRow, k); - - if (pColIdx->colId < pColInfo->info.colId) { - ++k; - continue; - } - - if (ASCENDING_TRAVERSE(pQueryHandle->order)) { - pData = (char*)pColInfo->pData + numOfRows * pColInfo->info.bytes; - } else { - pData = (char*)pColInfo->pData + (capacity - numOfRows - 1) * pColInfo->info.bytes; - } - - if (pColIdx->colId == pColInfo->info.colId) { - // offset of pColIdx for SKVRow including the TD_KV_ROW_HEAD_SIZE - void* value = tdGetKvRowDataOfCol(kvRow, pColIdx->offset); - switch (pColInfo->info.type) { - case TSDB_DATA_TYPE_BINARY: - case TSDB_DATA_TYPE_NCHAR: - memcpy(pData, value, varDataTLen(value)); - break; - case TSDB_DATA_TYPE_NULL: - case TSDB_DATA_TYPE_BOOL: - case TSDB_DATA_TYPE_TINYINT: - case 
TSDB_DATA_TYPE_UTINYINT: - *(uint8_t*)pData = *(uint8_t*)value; - break; - case TSDB_DATA_TYPE_SMALLINT: - case TSDB_DATA_TYPE_USMALLINT: - *(uint16_t*)pData = *(uint16_t*)value; - break; - case TSDB_DATA_TYPE_INT: - case TSDB_DATA_TYPE_UINT: - *(uint32_t*)pData = *(uint32_t*)value; - break; - case TSDB_DATA_TYPE_BIGINT: - case TSDB_DATA_TYPE_UBIGINT: - *(uint64_t*)pData = *(uint64_t*)value; - break; - case TSDB_DATA_TYPE_FLOAT: - SET_FLOAT_PTR(pData, value); - break; - case TSDB_DATA_TYPE_DOUBLE: - SET_DOUBLE_PTR(pData, value); - break; - case TSDB_DATA_TYPE_TIMESTAMP: - if (pColInfo->info.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) { - *(TSKEY*)pData = tdGetKey(*(TKEY*)value); - } else { - *(TSKEY*)pData = *(TSKEY*)value; - } - break; - default: - memcpy(pData, value, pColInfo->info.bytes); - } - ++k; - ++i; - continue; - } - // If (pColInfo->info.colId < pColIdx->colId), it is NULL data - if (pColInfo->info.type == TSDB_DATA_TYPE_BINARY || pColInfo->info.type == TSDB_DATA_TYPE_NCHAR) { - setVardataNull(pData, pColInfo->info.type); - } else { - setNull(pData, pColInfo->info.type, pColInfo->info.bytes); - } - ++i; + if(isRow2DataRow) { + numOfColsOfRow2 = schemaNCols(pSchema2); + } else { + numOfColsOfRow2 = kvRowNCols(memRowKvBody(row2)); } - } else { - ASSERT(0); } - while (i < numOfCols) { // the remain columns are all null data + + int32_t i = 0, j = 0, k = 0; + while(i < numOfCols && (j < numOfColsOfRow1 || k < numOfColsOfRow2)) { SColumnInfoData* pColInfo = taosArrayGet(pQueryHandle->pColumns, i); + if (ASCENDING_TRAVERSE(pQueryHandle->order)) { pData = (char*)pColInfo->pData + numOfRows * pColInfo->info.bytes; } else { pData = (char*)pColInfo->pData + (capacity - numOfRows - 1) * pColInfo->info.bytes; } - if (pColInfo->info.type == TSDB_DATA_TYPE_BINARY || pColInfo->info.type == TSDB_DATA_TYPE_NCHAR) { - setVardataNull(pData, pColInfo->info.type); + int32_t colIdOfRow1; + if(j >= numOfColsOfRow1) { + colIdOfRow1 = INT32_MAX; + } else if(isRow1DataRow) { + colIdOfRow1 = pSchema1->columns[j].colId; } else { - setNull(pData, pColInfo->info.type, pColInfo->info.bytes); + void *rowBody = memRowKvBody(row1); + SColIdx *pColIdx = kvRowColIdxAt(rowBody, j); + colIdOfRow1 = pColIdx->colId; } - i++; + int32_t colIdOfRow2; + if(k >= numOfColsOfRow2) { + colIdOfRow2 = INT32_MAX; + } else if(isRow2DataRow) { + colIdOfRow2 = pSchema2->columns[k].colId; + } else { + void *rowBody = memRowKvBody(row2); + SColIdx *pColIdx = kvRowColIdxAt(rowBody, k); + colIdOfRow2 = pColIdx->colId; + } + + if(colIdOfRow1 == colIdOfRow2) { + if(colIdOfRow1 < pColInfo->info.colId) { + j++; + k++; + continue; + } + row = row1; + pSchema = pSchema1; + isChosenRowDataRow = isRow1DataRow; + chosen_itr = j; + } else if(colIdOfRow1 < colIdOfRow2) { + if(colIdOfRow1 < pColInfo->info.colId) { + j++; + continue; + } + row = row1; + pSchema = pSchema1; + isChosenRowDataRow = isRow1DataRow; + chosen_itr = j; + } else { + if(colIdOfRow2 < pColInfo->info.colId) { + k++; + continue; + } + row = row2; + pSchema = pSchema2; + chosen_itr = k; + isChosenRowDataRow = isRow2DataRow; + } + if(isChosenRowDataRow) { + colId = pSchema->columns[chosen_itr].colId; + offset = pSchema->columns[chosen_itr].offset; + void *rowBody = memRowDataBody(row); + value = tdGetRowDataOfCol(rowBody, (int8_t)pColInfo->info.type, TD_DATA_ROW_HEAD_SIZE + offset); + } else { + void *rowBody = memRowKvBody(row); + SColIdx *pColIdx = kvRowColIdxAt(rowBody, chosen_itr); + colId = pColIdx->colId; + offset = pColIdx->offset; + value = tdGetKvRowDataOfCol(rowBody, 
pColIdx->offset); + } + + + if (colId == pColInfo->info.colId) { + if(forceSetNull || (!isNull(value, (int8_t)pColInfo->info.type))) { + switch (pColInfo->info.type) { + case TSDB_DATA_TYPE_BINARY: + case TSDB_DATA_TYPE_NCHAR: + memcpy(pData, value, varDataTLen(value)); + break; + case TSDB_DATA_TYPE_NULL: + case TSDB_DATA_TYPE_BOOL: + case TSDB_DATA_TYPE_TINYINT: + case TSDB_DATA_TYPE_UTINYINT: + *(uint8_t *)pData = *(uint8_t *)value; + break; + case TSDB_DATA_TYPE_SMALLINT: + case TSDB_DATA_TYPE_USMALLINT: + *(uint16_t *)pData = *(uint16_t *)value; + break; + case TSDB_DATA_TYPE_INT: + case TSDB_DATA_TYPE_UINT: + *(uint32_t *)pData = *(uint32_t *)value; + break; + case TSDB_DATA_TYPE_BIGINT: + case TSDB_DATA_TYPE_UBIGINT: + *(uint64_t *)pData = *(uint64_t *)value; + break; + case TSDB_DATA_TYPE_FLOAT: + SET_FLOAT_PTR(pData, value); + break; + case TSDB_DATA_TYPE_DOUBLE: + SET_DOUBLE_PTR(pData, value); + break; + case TSDB_DATA_TYPE_TIMESTAMP: + if (pColInfo->info.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) { + *(TSKEY *)pData = tdGetKey(*(TKEY *)value); + } else { + *(TSKEY *)pData = *(TSKEY *)value; + } + break; + default: + memcpy(pData, value, pColInfo->info.bytes); + } + } + i++; + + if(row == row1) { + j++; + } else { + k++; + } + } else { + if(forceSetNull) { + if (pColInfo->info.type == TSDB_DATA_TYPE_BINARY || pColInfo->info.type == TSDB_DATA_TYPE_NCHAR) { + setVardataNull(pData, pColInfo->info.type); + } else { + setNull(pData, pColInfo->info.type, pColInfo->info.bytes); + } + } + i++; + } + } + + if(forceSetNull) { + while (i < numOfCols) { // the remain columns are all null data + SColumnInfoData* pColInfo = taosArrayGet(pQueryHandle->pColumns, i); + if (ASCENDING_TRAVERSE(pQueryHandle->order)) { + pData = (char*)pColInfo->pData + numOfRows * pColInfo->info.bytes; + } else { + pData = (char*)pColInfo->pData + (capacity - numOfRows - 1) * pColInfo->info.bytes; + } + + if (pColInfo->info.type == TSDB_DATA_TYPE_BINARY || pColInfo->info.type == TSDB_DATA_TYPE_NCHAR) { + setVardataNull(pData, pColInfo->info.type); + } else { + setNull(pData, pColInfo->info.type, pColInfo->info.bytes); + } + + i++; + } } } + static void moveDataToFront(STsdbQueryHandle* pQueryHandle, int32_t numOfRows, int32_t numOfCols) { if (numOfRows == 0 || ASCENDING_TRAVERSE(pQueryHandle->order)) { return; @@ -1798,8 +1895,10 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* // compared with the data from in-memory buffer, to generate the correct timestamp array list int32_t numOfRows = 0; - int16_t rv = -1; - STSchema* pSchema = NULL; + int16_t rv1 = -1; + int16_t rv2 = -1; + STSchema* pSchema1 = NULL; + STSchema* pSchema2 = NULL; int32_t pos = cur->pos; cur->win = TSWINDOW_INITIALIZER; @@ -1811,12 +1910,13 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* } else if (pCheckInfo->iter != NULL || pCheckInfo->iiter != NULL) { SSkipListNode* node = NULL; do { - SMemRow row = getSDataRowInTableMem(pCheckInfo, pQueryHandle->order, pCfg->update); - if (row == NULL) { + SMemRow row2 = NULL; + SMemRow row1 = getSMemRowInTableMem(pCheckInfo, pQueryHandle->order, pCfg->update, &row2); + if (row1 == NULL) { break; } - TSKEY key = memRowKey(row); + TSKEY key = memRowKey(row1); if ((key > pQueryHandle->window.ekey && ASCENDING_TRAVERSE(pQueryHandle->order)) || (key < pQueryHandle->window.ekey && !ASCENDING_TRAVERSE(pQueryHandle->order))) { break; @@ -1829,12 +1929,16 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* if ((key < 
tsArray[pos] && ASCENDING_TRAVERSE(pQueryHandle->order)) || (key > tsArray[pos] && !ASCENDING_TRAVERSE(pQueryHandle->order))) { - if (rv != memRowVersion(row)) { - pSchema = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row)); - rv = memRowVersion(row); + if (rv1 != memRowVersion(row1)) { + pSchema1 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row1)); + rv1 = memRowVersion(row1); } - - copyOneRowFromMem(pQueryHandle, pQueryHandle->outputCapacity, numOfRows, row, numOfCols, pTable, pSchema); + if(row2 && rv2 != memRowVersion(row2)) { + pSchema2 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row2)); + rv2 = memRowVersion(row2); + } + + mergeTwoRowFromMem(pQueryHandle, pQueryHandle->outputCapacity, numOfRows, row1, row2, numOfCols, pTable, pSchema1, pSchema2, true); numOfRows += 1; if (cur->win.skey == TSKEY_INITIAL_VAL) { cur->win.skey = key; @@ -1847,12 +1951,20 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* moveToNextRowInMem(pCheckInfo); } else if (key == tsArray[pos]) { // data in buffer has the same timestamp of data in file block, ignore it if (pCfg->update) { - if (rv != memRowVersion(row)) { - pSchema = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row)); - rv = memRowVersion(row); + if(pCfg->update == TD_ROW_PARTIAL_UPDATE) { + doCopyRowsFromFileBlock(pQueryHandle, pQueryHandle->outputCapacity, numOfRows, pos, pos); } - - copyOneRowFromMem(pQueryHandle, pQueryHandle->outputCapacity, numOfRows, row, numOfCols, pTable, pSchema); + if (rv1 != memRowVersion(row1)) { + pSchema1 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row1)); + rv1 = memRowVersion(row1); + } + if(row2 && rv2 != memRowVersion(row2)) { + pSchema2 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row2)); + rv2 = memRowVersion(row2); + } + + bool forceSetNull = pCfg->update != TD_ROW_PARTIAL_UPDATE; + mergeTwoRowFromMem(pQueryHandle, pQueryHandle->outputCapacity, numOfRows, row1, row2, numOfCols, pTable, pSchema1, pSchema2, forceSetNull); numOfRows += 1; if (cur->win.skey == TSKEY_INITIAL_VAL) { cur->win.skey = key; @@ -1877,7 +1989,7 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* assert(end != -1); if (tsArray[end] == key) { // the value of key in cache equals to the end timestamp value, ignore it - if (!pCfg->update) { + if (pCfg->update == TD_ROW_DISCARD_UPDATE) { moveToNextRowInMem(pCheckInfo); } else { end -= step; @@ -2490,7 +2602,7 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int STSchema* pSchema = NULL; do { - SMemRow row = getSDataRowInTableMem(pCheckInfo, pQueryHandle->order, pCfg->update); + SMemRow row = getSMemRowInTableMem(pCheckInfo, pQueryHandle->order, pCfg->update, NULL); if (row == NULL) { break; } @@ -2512,7 +2624,7 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int pSchema = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row)); rv = memRowVersion(row); } - copyOneRowFromMem(pQueryHandle, maxRowsToRead, numOfRows, row, numOfCols, pTable, pSchema); + mergeTwoRowFromMem(pQueryHandle, maxRowsToRead, numOfRows, row, NULL, numOfCols, pTable, pSchema, NULL, true); if (++numOfRows >= maxRowsToRead) { moveToNextRowInMem(pCheckInfo); @@ -2637,7 +2749,7 @@ static bool loadCachedLastRow(STsdbQueryHandle* pQueryHandle) { if (ret != TSDB_CODE_SUCCESS) { return false; } - copyOneRowFromMem(pQueryHandle, pQueryHandle->outputCapacity, 0, pRow, numOfCols, pCheckInfo->pTableObj, NULL); + mergeTwoRowFromMem(pQueryHandle, pQueryHandle->outputCapacity, 
diff --git a/src/tsdb/src/tsdbReadImpl.c b/src/tsdb/src/tsdbReadImpl.c
index dd14dc700f..666a2d3571 100644
--- a/src/tsdb/src/tsdbReadImpl.c
+++ b/src/tsdb/src/tsdbReadImpl.c
@@ -244,6 +244,7 @@ int tsdbLoadBlockInfo(SReadH *pReadh, void *pTarget) {

 int tsdbLoadBlockData(SReadH *pReadh, SBlock *pBlock, SBlockInfo *pBlkInfo) {
   ASSERT(pBlock->numOfSubBlocks > 0);
+  int8_t update = pReadh->pRepo->config.update;

   SBlock *iBlock = pBlock;
   if (pBlock->numOfSubBlocks > 1) {
@@ -258,7 +259,7 @@ int tsdbLoadBlockData(SReadH *pReadh, SBlock *pBlock, SBlockInfo *pBlkInfo) {
     for (int i = 1; i < pBlock->numOfSubBlocks; i++) {
       iBlock++;
       if (tsdbLoadBlockDataImpl(pReadh, iBlock, pReadh->pDCols[1]) < 0) return -1;
-      if (tdMergeDataCols(pReadh->pDCols[0], pReadh->pDCols[1], pReadh->pDCols[1]->numOfRows, NULL) < 0) return -1;
+      if (tdMergeDataCols(pReadh->pDCols[0], pReadh->pDCols[1], pReadh->pDCols[1]->numOfRows, NULL, update != TD_ROW_PARTIAL_UPDATE) < 0) return -1;
     }
   }

   ASSERT(pReadh->pDCols[0]->numOfRows == pBlock->numOfRows);
@@ -270,6 +271,7 @@ int tsdbLoadBlockData(SReadH *pReadh, SBlock *pBlock, SBlockInfo *pBlkInfo) {

 int tsdbLoadBlockDataCols(SReadH *pReadh, SBlock *pBlock, SBlockInfo *pBlkInfo, int16_t *colIds, int numOfColsIds) {
   ASSERT(pBlock->numOfSubBlocks > 0);
+  int8_t update = pReadh->pRepo->config.update;

   SBlock *iBlock = pBlock;
   if (pBlock->numOfSubBlocks > 1) {
@@ -284,7 +286,7 @@ int tsdbLoadBlockDataCols(SReadH *pReadh, SBlock *pBlock, SBlockInfo *pBlkInfo,
     for (int i = 1; i < pBlock->numOfSubBlocks; i++) {
       iBlock++;
       if (tsdbLoadBlockDataColsImpl(pReadh, iBlock, pReadh->pDCols[1], colIds, numOfColsIds) < 0) return -1;
-      if (tdMergeDataCols(pReadh->pDCols[0], pReadh->pDCols[1], pReadh->pDCols[1]->numOfRows, NULL) < 0) return -1;
+      if (tdMergeDataCols(pReadh->pDCols[0], pReadh->pDCols[1], pReadh->pDCols[1]->numOfRows, NULL, update != TD_ROW_PARTIAL_UPDATE) < 0) return -1;
     }
   }

   ASSERT(pReadh->pDCols[0]->numOfRows == pBlock->numOfRows);
@@ -657,4 +659,4 @@ static int tsdbLoadColData(SReadH *pReadh, SDFile *pDFile, SBlock *pBlock, SBloc
   }

   return 0;
-}
\ No newline at end of file
+}
diff --git a/src/tsdb/src/tsdbRowMergeBuf.c b/src/tsdb/src/tsdbRowMergeBuf.c
new file mode 100644
index 0000000000..5ce580f70f
--- /dev/null
+++ b/src/tsdb/src/tsdbRowMergeBuf.c
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+#include "tsdbRowMergeBuf.h"
+#include "tdataformat.h"
+
+// row1 has higher priority
+SMemRow tsdbMergeTwoRows(SMergeBuf *pBuf, SMemRow row1, SMemRow row2, STSchema *pSchema1, STSchema *pSchema2) {
+  if(row2 == NULL) return row1;
+  if(row1 == NULL) return row2;
+  ASSERT(pSchema1->version == memRowVersion(row1));
+  ASSERT(pSchema2->version == memRowVersion(row2));
+
+  if(tsdbMergeBufMakeSureRoom(pBuf, pSchema1, pSchema2) < 0) {
+    return NULL;
+  }
+  return mergeTwoMemRows(*pBuf, row1, row2, pSchema1, pSchema2);
+}
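tsdbMergeTwoRows() above gives row1 priority, short-circuits when either row is NULL, asserts that each schema matches its row's version, and returns NULL when the merge buffer cannot be grown. A hypothetical call site might look like the sketch below; SMergeBuf and tsdbMergeBufMakeSureRoom() live in tsdbRowMergeBuf.h, which is not part of this hunk, and mergeBufferedRows() itself is invented for illustration.

// Sketch only; assumes the tsdb-internal types and helpers referenced above.
static SMemRow mergeBufferedRows(SMergeBuf *pBuf, STable *pTable, SMemRow rowMem, SMemRow rowIMem) {
  STSchema *pSchema1 = rowMem  ? tsdbGetTableSchemaByVersion(pTable, memRowVersion(rowMem))  : NULL;
  STSchema *pSchema2 = rowIMem ? tsdbGetTableSchemaByVersion(pTable, memRowVersion(rowIMem)) : NULL;

  // rowMem is passed as row1, so its columns win over rowIMem's on conflict.
  SMemRow merged = tsdbMergeTwoRows(pBuf, rowMem, rowIMem, pSchema1, pSchema2);
  if (merged == NULL && rowMem != NULL && rowIMem != NULL) {
    // growing the merge buffer failed; the caller should treat this as out of memory
  }
  return merged;
}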
diff --git a/src/util/inc/tfunctional.h b/src/util/inc/tfunctional.h
new file mode 100644
index 0000000000..70f54e921d
--- /dev/null
+++ b/src/util/inc/tfunctional.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+#ifndef TD_TFUNCTIONAL_H
+#define TD_TFUNCTIONAL_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "os.h"
+
+//TODO: hard to use, trying to rewrite it using va_list
+
+typedef void* (*GenericVaFunc)(void* args[]);
+typedef int32_t (*I32VaFunc) (void* args[]);
+typedef void (*VoidVaFunc) (void* args[]);
+
+typedef struct GenericSavedFunc {
+  GenericVaFunc func;
+  void * args[];
+} tGenericSavedFunc;
+
+typedef struct I32SavedFunc {
+  I32VaFunc func;
+  void * args[];
+} tI32SavedFunc;
+
+typedef struct VoidSavedFunc {
+  VoidVaFunc func;
+  void * args[];
+} tVoidSavedFunc;
+
+tGenericSavedFunc* genericSavedFuncInit(GenericVaFunc func, int numOfArgs);
+tI32SavedFunc* i32SavedFuncInit(I32VaFunc func, int numOfArgs);
+tVoidSavedFunc* voidSavedFuncInit(VoidVaFunc func, int numOfArgs);
+void* genericInvoke(tGenericSavedFunc* const pSavedFunc);
+int32_t i32Invoke(tI32SavedFunc* const pSavedFunc);
+void voidInvoke(tVoidSavedFunc* const pSavedFunc);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
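The saved-function helpers declared above end in a flexible args[] array sized by the numOfArgs passed to the *Init() functions, and the *Invoke() helpers simply forward args[] to the stored callback. A minimal usage sketch follows; sumTwoInts and the surrounding main() are invented for illustration, and note that genericSavedFuncInit() itself does not check for malloc() failure.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include "tfunctional.h"

// Toy callback matching GenericVaFunc: reads two int* slots from args[].
static void* sumTwoInts(void* args[]) {
  int *a = args[0], *b = args[1];
  return (void*)(intptr_t)(*a + *b);
}

int main(void) {
  int x = 40, y = 2;
  tGenericSavedFunc *pSaved = genericSavedFuncInit(sumTwoInts, 2);  // allocates struct + 2 arg slots
  pSaved->args[0] = &x;
  pSaved->args[1] = &y;
  printf("%d\n", (int)(intptr_t)genericInvoke(pSaved));             // prints 42
  free(pSaved);                                                     // caller owns the allocation
  return 0;
}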
diff --git a/src/util/inc/tskiplist.h b/src/util/inc/tskiplist.h
index 17f5940b49..d9dc001ccd 100644
--- a/src/util/inc/tskiplist.h
+++ b/src/util/inc/tskiplist.h
@@ -23,6 +23,7 @@ extern "C" {
 #include "os.h"
 #include "taosdef.h"
 #include "tarray.h"
+#include "tfunctional.h"

 #define MAX_SKIP_LIST_LEVEL 15
 #define SKIP_LIST_RECORD_PERFORMANCE 0
@@ -30,13 +31,17 @@ extern "C" {
 // For key property setting
 #define SL_ALLOW_DUP_KEY (uint8_t)0x0    // Allow duplicate key exists (for tag index usage)
 #define SL_DISCARD_DUP_KEY (uint8_t)0x1  // Discard duplicate key (for data update=0 case)
-#define SL_UPDATE_DUP_KEY (uint8_t)0x2   // Update duplicate key by remove/insert (for data update=1 case)
+#define SL_UPDATE_DUP_KEY (uint8_t)0x2   // Update duplicate key by remove/insert (for data update!=0 case)
+
 // For thread safety setting
 #define SL_THREAD_SAFE (uint8_t)0x4

 typedef char *SSkipListKey;
 typedef char *(*__sl_key_fn_t)(const void *);
+typedef void (*sl_patch_row_fn_t)(void * pDst, const void * pSrc);
+typedef void* (*iter_next_fn_t)(void *iter);
+
 typedef struct SSkipListNode {
   uint8_t level;
   void * pData;
@@ -95,6 +100,12 @@ typedef struct tSkipListState {
   uint64_t nTotalElapsedTimeForInsert;
 } tSkipListState;

+typedef enum {
+  SSkipListPutSuccess = 0,
+  SSkipListPutEarlyStop = 1,
+  SSkipListPutSkipOne = 2
+} SSkipListPutStatus;
+
 typedef struct SSkipList {
   unsigned int seed;
   __compar_fn_t comparFn;
@@ -111,6 +122,7 @@ typedef struct SSkipList {
 #if SKIP_LIST_RECORD_PERFORMANCE
   tSkipListState state;  // skiplist state
 #endif
+  tGenericSavedFunc* insertHandleFn;
 } SSkipList;

 typedef struct SSkipListIterator {
@@ -118,7 +130,7 @@ typedef struct SSkipListIterator {
   SSkipListNode *cur;
   int32_t step;   // the number of nodes that have been checked already
   int32_t order;  // order of the iterator
-  SSkipListNode *next;  // next points to the true qualified node in skip list
+  SSkipListNode *next;  // next points to the true qualified node in skiplist
 } SSkipListIterator;

 #define SL_IS_THREAD_SAFE(s) (((s)->flags) & SL_THREAD_SAFE)
@@ -132,7 +144,7 @@ SSkipList *tSkipListCreate(uint8_t maxLevel, uint8_t keyType, uint16_t keyLen, _
                            __sl_key_fn_t fn);
 void tSkipListDestroy(SSkipList *pSkipList);
 SSkipListNode * tSkipListPut(SSkipList *pSkipList, void *pData);
-void tSkipListPutBatch(SSkipList *pSkipList, void **ppData, int ndata);
+void tSkipListPutBatchByIter(SSkipList *pSkipList, void *iter, iter_next_fn_t iterate);
 SArray * tSkipListGet(SSkipList *pSkipList, SSkipListKey pKey);
 void tSkipListPrint(SSkipList *pSkipList, int16_t nlevel);
 SSkipListIterator *tSkipListCreateIter(SSkipList *pSkipList);
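tSkipListPutBatchByIter(), declared above, replaces the pointer-array batch put with an opaque iterator plus an iter_next_fn_t that returns the next payload or NULL to stop; judging by the replaced tSkipListPutBatch(), the payloads are presumably still expected in ascending key order. A sketch of an array-backed adapter (SRowArrayIter and rowArrayIterNext are invented names):

#include <stddef.h>
#include "tskiplist.h"

// Hypothetical adapter: walk a pre-sorted array of payload pointers.
typedef struct {
  void **rows;  // payloads, sorted by key in ascending order
  int    num;
  int    pos;
} SRowArrayIter;

static void *rowArrayIterNext(void *iter) {
  SRowArrayIter *pIter = (SRowArrayIter *)iter;
  return (pIter->pos < pIter->num) ? pIter->rows[pIter->pos++] : NULL;  // NULL ends the batch
}

// usage:
//   SRowArrayIter it = { .rows = rows, .num = numOfRows, .pos = 0 };
//   tSkipListPutBatchByIter(pSkipList, &it, rowArrayIterNext);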
diff --git a/src/util/src/tfunctional.c b/src/util/src/tfunctional.c
new file mode 100644
index 0000000000..c470a2b8ae
--- /dev/null
+++ b/src/util/src/tfunctional.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+#include "tfunctional.h"
+#include "tarray.h"
+
+
+tGenericSavedFunc* genericSavedFuncInit(GenericVaFunc func, int numOfArgs) {
+  tGenericSavedFunc* pSavedFunc = malloc(sizeof(tGenericSavedFunc) + numOfArgs * (sizeof(void*)));
+  pSavedFunc->func = func;
+  return pSavedFunc;
+}
+
+tI32SavedFunc* i32SavedFuncInit(I32VaFunc func, int numOfArgs) {
+  tI32SavedFunc* pSavedFunc = malloc(sizeof(tI32SavedFunc) + numOfArgs * sizeof(void *));
+  pSavedFunc->func = func;
+  return pSavedFunc;
+}
+
+tVoidSavedFunc* voidSavedFuncInit(VoidVaFunc func, int numOfArgs) {
+  tVoidSavedFunc* pSavedFunc = malloc(sizeof(tVoidSavedFunc) + numOfArgs * sizeof(void*));
+  pSavedFunc->func = func;
+  return pSavedFunc;
+}
+
+FORCE_INLINE void* genericInvoke(tGenericSavedFunc* const pSavedFunc) {
+  return pSavedFunc->func(pSavedFunc->args);
+}
+
+FORCE_INLINE int32_t i32Invoke(tI32SavedFunc* const pSavedFunc) {
+  return pSavedFunc->func(pSavedFunc->args);
+}
+
+FORCE_INLINE void voidInvoke(tVoidSavedFunc* const pSavedFunc) {
+  if(pSavedFunc) pSavedFunc->func(pSavedFunc->args);
+}
diff --git a/src/util/src/tskiplist.c b/src/util/src/tskiplist.c
index 082b454bb5..b464519ba6 100644
--- a/src/util/src/tskiplist.c
+++ b/src/util/src/tskiplist.c
@@ -16,6 +16,7 @@
 #include "tskiplist.h"
 #include "os.h"
 #include "tcompare.h"
+#include "tdataformat.h"
 #include "tulog.h"
 #include "tutil.h"
@@ -31,6 +32,7 @@ static SSkipListNode *tSkipListNewNode(uint8_t level);
 static SSkipListNode *tSkipListPutImpl(SSkipList *pSkipList, void *pData, SSkipListNode **direction, bool isForward, bool hasDup);
+
 static FORCE_INLINE int tSkipListWLock(SSkipList *pSkipList);
 static FORCE_INLINE int tSkipListRLock(SSkipList *pSkipList);
 static FORCE_INLINE int tSkipListUnlock(SSkipList *pSkipList);
@@ -80,6 +82,7 @@ SSkipList *tSkipListCreate(uint8_t maxLevel, uint8_t keyType, uint16_t keyLen, _
 #if SKIP_LIST_RECORD_PERFORMANCE
   pSkipList->state.nTotalMemSize += sizeof(SSkipList);
 #endif
+  pSkipList->insertHandleFn = NULL;

   return pSkipList;
 }
@@ -97,6 +100,8 @@ void tSkipListDestroy(SSkipList *pSkipList) {
     tSkipListFreeNode(pTemp);
   }

+  tfree(pSkipList->insertHandleFn);
+
   tSkipListUnlock(pSkipList);
   if (pSkipList->lock != NULL) {
     pthread_rwlock_destroy(pSkipList->lock);
@@ -124,8 +129,7 @@ SSkipListNode *tSkipListPut(SSkipList *pSkipList, void *pData) {
   return pNode;
 }

-// Put a batch of data into skiplist. The batch of data must be in ascending order
-void tSkipListPutBatch(SSkipList *pSkipList, void **ppData, int ndata) {
+void tSkipListPutBatchByIter(SSkipList *pSkipList, void *iter, iter_next_fn_t iterate) {
   SSkipListNode *backward[MAX_SKIP_LIST_LEVEL] = {0};
   SSkipListNode *forward[MAX_SKIP_LIST_LEVEL] = {0};
   bool           hasDup = false;
@@ -135,17 +139,21 @@ void tSkipListPutBatch(SSkipList *pSkipList, void **ppData, int ndata) {

   tSkipListWLock(pSkipList);

+  void* pData = iterate(iter);
+  if(pData == NULL) return;
+
   // backward to put the first data
-  hasDup = tSkipListGetPosToPut(pSkipList, backward, ppData[0]);
-  tSkipListPutImpl(pSkipList, ppData[0], backward, false, hasDup);
+  hasDup = tSkipListGetPosToPut(pSkipList, backward, pData);
+
+  tSkipListPutImpl(pSkipList, pData, backward, false, hasDup);

   for (int level = 0; level < pSkipList->maxLevel; level++) {
     forward[level] = SL_NODE_GET_BACKWARD_POINTER(backward[level], level);
   }

   // forward to put the rest of data
-  for (int idata = 1; idata < ndata; idata++) {
-    pDataKey = pSkipList->keyFn(ppData[idata]);
+  while ((pData = iterate(iter)) != NULL) {
+    pDataKey = pSkipList->keyFn(pData);
     hasDup = false;

     // Compare max key
@@ -186,9 +194,8 @@ void tSkipListPutBatch(SSkipList *pSkipList, void **ppData, int ndata) {
       }
     }

-    tSkipListPutImpl(pSkipList, ppData[idata], forward, true, hasDup);
+    tSkipListPutImpl(pSkipList, pData, forward, true, hasDup);
   }
-
   tSkipListUnlock(pSkipList);
 }
@@ -661,18 +668,40 @@ static SSkipListNode *tSkipListPutImpl(SSkipList *pSkipList, void *pData, SSkipL
   uint8_t dupMode = SL_DUP_MODE(pSkipList);
   SSkipListNode *pNode = NULL;

-  if (hasDup && (dupMode == SL_DISCARD_DUP_KEY || dupMode == SL_UPDATE_DUP_KEY)) {
+  if (hasDup && (dupMode != SL_ALLOW_DUP_KEY)) {
     if (dupMode == SL_UPDATE_DUP_KEY) {
       if (isForward) {
         pNode = SL_NODE_GET_FORWARD_POINTER(direction[0], 0);
       } else {
         pNode = SL_NODE_GET_BACKWARD_POINTER(direction[0], 0);
       }
-      atomic_store_ptr(&(pNode->pData), pData);
+      if (pSkipList->insertHandleFn) {
+        pSkipList->insertHandleFn->args[0] = pData;
+        pSkipList->insertHandleFn->args[1] = pNode->pData;
+        pData = genericInvoke(pSkipList->insertHandleFn);
+      }
+      if(pData) {
+        atomic_store_ptr(&(pNode->pData), pData);
+      }
+    } else {
+      // for compatibility, a duplicate key inserted when update=0 should also be counted as an affected row!
+      if(pSkipList->insertHandleFn) {
+        pSkipList->insertHandleFn->args[0] = NULL;
+        pSkipList->insertHandleFn->args[1] = NULL;
+        genericInvoke(pSkipList->insertHandleFn);
+      }
     }
   } else {
     pNode = tSkipListNewNode(getSkipListRandLevel(pSkipList));
     if (pNode != NULL) {
+      // insertHandleFn will be assigned only for timeseries data,
+      // in which case pData points to memory that will be freed later;
+      // for metadata, no such allocation is performed.
+      if (pSkipList->insertHandleFn) {
+        pSkipList->insertHandleFn->args[0] = pData;
+        pSkipList->insertHandleFn->args[1] = NULL;
+        pData = genericInvoke(pSkipList->insertHandleFn);
+      }
       pNode->pData = pData;

       tSkipListDoInsert(pSkipList, direction, pNode, isForward);