From 9c4c8493ad66330c64fce694ec12ac352e8303e5 Mon Sep 17 00:00:00 2001
From: SunShine Chan
Date: Sat, 17 Jul 2021 12:11:32 +0800
Subject: [PATCH 01/38] Update docs.md
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

REMOVE "vnode之间的同步复制仅仅企业版支持" (synchronous replication between vnodes is only supported in the Enterprise Edition)
---
 documentation20/cn/03.architecture/docs.md | 2 --
 1 file changed, 2 deletions(-)

diff --git a/documentation20/cn/03.architecture/docs.md b/documentation20/cn/03.architecture/docs.md
index d22855198a..b481bea9f8 100644
--- a/documentation20/cn/03.architecture/docs.md
+++ b/documentation20/cn/03.architecture/docs.md
@@ -323,8 +323,6 @@ Vnode会保持一个数据版本号(Version),对内存数据进行持久化存
 
 采用同步复制,系统的性能会有所下降,而且latency会增加。因为元数据要强一致,mnode之间的数据同步缺省就是采用的同步复制。
 
-注:vnode之间的同步复制仅仅企业版支持
-
 ## 缓存与持久化
 
 ### 缓存

From 5cdbcdafc98aab69b595ee8c100f42fad0428479 Mon Sep 17 00:00:00 2001
From: SunShine Chan
Date: Sat, 17 Jul 2021 12:13:08 +0800
Subject: [PATCH 02/38] Update docs.md

REMOVE "Note: synchronous replication between vnodes is only supported in Enterprise Edition"
---
 documentation20/en/03.architecture/docs.md | 2 --
 1 file changed, 2 deletions(-)

diff --git a/documentation20/en/03.architecture/docs.md b/documentation20/en/03.architecture/docs.md
index 2e5fc7bd18..92813da2c4 100644
--- a/documentation20/en/03.architecture/docs.md
+++ b/documentation20/en/03.architecture/docs.md
@@ -322,8 +322,6 @@ For scenarios with higher data consistency requirements, asynchronous data repli
 
 With synchronous replication, performance of system will decrease and latency will increase. Because metadata needs strong consistent, the default for data synchronization between mnodes is synchronous replication.
 
-Note: synchronous replication between vnodes is only supported in Enterprise Edition
-
 ## Caching and Persistence
 
 ### Caching

From 1bdd6838724d0fa00f4948bd3c702c59b97fe44a Mon Sep 17 00:00:00 2001
From: wenzhouwww
Date: Thu, 22 Jul 2021 19:59:36 +0800
Subject: [PATCH 03/38] [TD-5369] taosdemo test case for nano support, the sql files must be here!
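
The new cases exercise taosdemo against a nanosecond-precision database through the
JSON configs added in this patch. A minimal sketch of the invocation, taken from the
added Python tests; the binPath value here is only an illustrative assumption, since
the tests resolve it from the build tree via getBuildPath():

    import os
    # assumption for illustration; the tests derive this from the build directory
    binPath = "/path/to/build/bin/"
    # create the "ns" precision database and insert rows (config added by this patch)
    os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoTestNanoDatabase.json -y " % binPath)
    # run the nanosecond query workload against the data just inserted
    os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.json -y " % binPath)

The same scripts are wired into tests/pytest/fulltest.sh below, so they run as part of
the regular Python test pass.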
--- tests/pytest/fulltest.sh | 8 + .../tools/taosdemoAllTest/nano_samples.csv | 100 +++++++++++ .../tools/taosdemoAllTest/nano_sampletags.csv | 100 +++++++++++ .../taosdemoTestNanoCreateDB.sql | 7 + .../taosdemoTestNanoDatabase.json | 88 ++++++++++ .../taosdemoTestNanoDatabaseInsertForSub.json | 84 +++++++++ .../taosdemoTestNanoDatabaseNow.json | 62 +++++++ .../taosdemoTestNanoDatabasecsv.json | 84 +++++++++ .../taosdemoTestSupportNanoInsert.py | 162 ++++++++++++++++++ .../taosdemoTestSupportNanoQuery.json | 92 ++++++++++ .../taosdemoTestSupportNanoQuery.py | 157 +++++++++++++++++ .../taosdemoTestSupportNanoQuerycsv.json | 110 ++++++++++++ .../taosdemoTestSupportNanoSubscribe.json | 32 ++++ .../taosdemoTestSupportNanosubscribe.py | 123 +++++++++++++ 14 files changed, 1209 insertions(+) create mode 100644 tests/pytest/tools/taosdemoAllTest/nano_samples.csv create mode 100644 tests/pytest/tools/taosdemoAllTest/nano_sampletags.csv create mode 100644 tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoCreateDB.sql create mode 100644 tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabase.json create mode 100644 tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseInsertForSub.json create mode 100644 tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseNow.json create mode 100644 tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabasecsv.json create mode 100644 tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py create mode 100644 tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.json create mode 100644 tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.py create mode 100644 tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuerycsv.json create mode 100644 tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoSubscribe.json create mode 100644 tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index 80d45fbc31..99a15e2e71 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -361,4 +361,12 @@ python3 test.py -f alter/alter_keep.py python3 test.py -f alter/alter_cacheLastRow.py python3 ./test.py -f query/querySession.py python3 test.py -f alter/alter_create_exception.py + +# nano support +python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py +python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.py +python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py + + #======================p4-end=============== +python3 test.py -f tools/taosdemoAllTest/pytest.py \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/nano_samples.csv b/tests/pytest/tools/taosdemoAllTest/nano_samples.csv new file mode 100644 index 0000000000..5fc779b41b --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/nano_samples.csv @@ -0,0 +1,100 @@ +8.855,"binary_str0" ,1626870128248246976 +8.75,"binary_str1" ,1626870128249060032 +5.44,"binary_str2" ,1626870128249067968 +8.45,"binary_str3" ,1626870128249072064 +4.07,"binary_str4" ,1626870128249075904 +6.97,"binary_str5" ,1626870128249078976 +6.86,"binary_str6" ,1626870128249082048 +1.585,"binary_str7" ,1626870128249085120 +1.4,"binary_str8" ,1626870128249087936 +5.135,"binary_str9" ,1626870128249092032 +3.15,"binary_str10" ,1626870128249095104 +1.765,"binary_str11" ,1626870128249097920 +7.71,"binary_str12" ,1626870128249100992 +3.91,"binary_str13" ,1626870128249104064 +5.615,"binary_str14" ,1626870128249106880 
+9.495,"binary_str15" ,1626870128249109952 +3.825,"binary_str16" ,1626870128249113024 +1.94,"binary_str17" ,1626870128249117120 +5.385,"binary_str18" ,1626870128249119936 +7.075,"binary_str19" ,1626870128249123008 +5.715,"binary_str20" ,1626870128249126080 +1.83,"binary_str21" ,1626870128249128896 +6.365,"binary_str22" ,1626870128249131968 +6.55,"binary_str23" ,1626870128249135040 +6.315,"binary_str24" ,1626870128249138112 +3.82,"binary_str25" ,1626870128249140928 +2.455,"binary_str26" ,1626870128249145024 +7.795,"binary_str27" ,1626870128249148096 +2.47,"binary_str28" ,1626870128249150912 +1.37,"binary_str29" ,1626870128249155008 +5.39,"binary_str30" ,1626870128249158080 +5.13,"binary_str31" ,1626870128249160896 +4.09,"binary_str32" ,1626870128249163968 +5.855,"binary_str33" ,1626870128249167040 +0.17,"binary_str34" ,1626870128249170112 +1.955,"binary_str35" ,1626870128249173952 +0.585,"binary_str36" ,1626870128249178048 +0.33,"binary_str37" ,1626870128249181120 +7.925,"binary_str38" ,1626870128249183936 +9.685,"binary_str39" ,1626870128249187008 +2.6,"binary_str40" ,1626870128249191104 +5.705,"binary_str41" ,1626870128249193920 +3.965,"binary_str42" ,1626870128249196992 +4.43,"binary_str43" ,1626870128249200064 +8.73,"binary_str44" ,1626870128249202880 +3.105,"binary_str45" ,1626870128249205952 +9.39,"binary_str46" ,1626870128249209024 +2.825,"binary_str47" ,1626870128249212096 +9.675,"binary_str48" ,1626870128249214912 +9.99,"binary_str49" ,1626870128249217984 +4.51,"binary_str50" ,1626870128249221056 +4.94,"binary_str51" ,1626870128249223872 +7.72,"binary_str52" ,1626870128249226944 +4.135,"binary_str53" ,1626870128249231040 +2.325,"binary_str54" ,1626870128249234112 +4.585,"binary_str55" ,1626870128249236928 +8.76,"binary_str56" ,1626870128249240000 +4.715,"binary_str57" ,1626870128249243072 +0.56,"binary_str58" ,1626870128249245888 +5.35,"binary_str59" ,1626870128249249984 +5.075,"binary_str60" ,1626870128249253056 +6.665,"binary_str61" ,1626870128249256128 +7.13,"binary_str62" ,1626870128249258944 +2.775,"binary_str63" ,1626870128249262016 +5.775,"binary_str64" ,1626870128249265088 +1.62,"binary_str65" ,1626870128249267904 +1.625,"binary_str66" ,1626870128249270976 +8.15,"binary_str67" ,1626870128249274048 +0.75,"binary_str68" ,1626870128249277120 +3.265,"binary_str69" ,1626870128249280960 +8.585,"binary_str70" ,1626870128249284032 +1.88,"binary_str71" ,1626870128249287104 +8.44,"binary_str72" ,1626870128249289920 +5.12,"binary_str73" ,1626870128249295040 +2.58,"binary_str74" ,1626870128249298112 +9.42,"binary_str75" ,1626870128249300928 +1.765,"binary_str76" ,1626870128249304000 +2.66,"binary_str77" ,1626870128249308096 +1.405,"binary_str78" ,1626870128249310912 +5.595,"binary_str79" ,1626870128249315008 +2.28,"binary_str80" ,1626870128249318080 +9.24,"binary_str81" ,1626870128249320896 +9.03,"binary_str82" ,1626870128249323968 +6.055,"binary_str83" ,1626870128249327040 +1.74,"binary_str84" ,1626870128249330112 +5.77,"binary_str85" ,1626870128249332928 +1.97,"binary_str86" ,1626870128249336000 +0.3,"binary_str87" ,1626870128249339072 +7.145,"binary_str88" ,1626870128249342912 +0.88,"binary_str89" ,1626870128249345984 +8.025,"binary_str90" ,1626870128249349056 +4.81,"binary_str91" ,1626870128249351872 +0.725,"binary_str92" ,1626870128249355968 +3.85,"binary_str93" ,1626870128249359040 +9.455,"binary_str94" ,1626870128249362112 +2.265,"binary_str95" ,1626870128249364928 +3.985,"binary_str96" ,1626870128249368000 +9.375,"binary_str97" ,1626870128249371072 +0.2,"binary_str98" 
,1626870128249373888 +6.95,"binary_str99" ,1626870128249377984 diff --git a/tests/pytest/tools/taosdemoAllTest/nano_sampletags.csv b/tests/pytest/tools/taosdemoAllTest/nano_sampletags.csv new file mode 100644 index 0000000000..18fb855d6d --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/nano_sampletags.csv @@ -0,0 +1,100 @@ +"string0",7,8.615 +"string1",4,9.895 +"string2",3,2.92 +"string3",3,5.62 +"string4",7,1.615 +"string5",6,1.45 +"string6",5,7.48 +"string7",7,3.01 +"string8",5,4.76 +"string9",10,7.09 +"string10",2,8.38 +"string11",7,8.65 +"string12",5,5.025 +"string13",10,5.765 +"string14",2,4.57 +"string15",2,1.03 +"string16",7,6.98 +"string17",10,0.23 +"string18",7,5.815 +"string19",1,2.37 +"string20",10,8.865 +"string21",3,1.235 +"string22",2,8.62 +"string23",9,1.045 +"string24",8,4.34 +"string25",1,5.455 +"string26",2,4.475 +"string27",1,6.95 +"string28",2,3.39 +"string29",3,6.79 +"string30",7,9.735 +"string31",1,9.79 +"string32",10,9.955 +"string33",1,5.095 +"string34",3,3.86 +"string35",9,5.105 +"string36",10,4.22 +"string37",1,2.78 +"string38",9,6.345 +"string39",1,0.975 +"string40",5,6.16 +"string41",4,7.735 +"string42",5,6.6 +"string43",8,2.845 +"string44",1,0.655 +"string45",3,2.995 +"string46",9,3.6 +"string47",8,3.47 +"string48",3,7.98 +"string49",6,2.225 +"string50",9,5.44 +"string51",4,6.335 +"string52",3,2.955 +"string53",1,0.565 +"string54",6,5.575 +"string55",6,9.905 +"string56",9,6.025 +"string57",8,0.94 +"string58",10,0.15 +"string59",8,1.555 +"string60",4,2.28 +"string61",2,8.29 +"string62",9,6.22 +"string63",6,3.35 +"string64",10,6.7 +"string65",3,9.345 +"string66",7,9.815 +"string67",1,5.365 +"string68",10,3.81 +"string69",1,6.405 +"string70",8,2.715 +"string71",3,8.58 +"string72",8,6.34 +"string73",2,7.49 +"string74",4,8.64 +"string75",3,8.995 +"string76",7,3.465 +"string77",1,7.64 +"string78",6,3.65 +"string79",6,1.4 +"string80",6,5.875 +"string81",2,1.22 +"string82",5,7.87 +"string83",9,8.41 +"string84",9,8.9 +"string85",9,3.89 +"string86",2,5.0 +"string87",2,4.495 +"string88",4,2.835 +"string89",3,5.895 +"string90",7,8.41 +"string91",5,5.125 +"string92",7,9.165 +"string93",5,8.315 +"string94",10,7.485 +"string95",7,4.635 +"string96",2,6.015 +"string97",8,0.595 +"string98",3,8.79 +"string99",4,1.72 diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoCreateDB.sql b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoCreateDB.sql new file mode 100644 index 0000000000..e79e09592c --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoCreateDB.sql @@ -0,0 +1,7 @@ +drop database if exists nsdbsql; +create database nsdbsql precision "ns" keep 36 days 6 update 1; +use nsdbsql; +CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupdId int); +CREATE TABLE d1001 USING meters TAGS ("Beijing.Chaoyang", 2); +INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 10.2, 219, 0.32); +INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 85, 32, 0.76); diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabase.json b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabase.json new file mode 100644 index 0000000000..9010415fe6 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabase.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 10, + "thread_count_create_tbl": 10, + "result_file": 
"./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "nsdb", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ns", + "keep": 36, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 1000, + "childtable_prefix": "tb0_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 10000, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10000000, + "start_timestamp": "2021-07-01 00:00:00.000", + "sample_format": "", + "sample_file": "", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2}, + {"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1}, + {"type": "BOOL"},{"type": "NCHAR","len":16}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 1000, + "childtable_prefix": "tb1_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 10000, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 10, + "disorder_range": 1000, + "timestamp_step": 10000000, + "start_timestamp": "2021-07-01 00:00:00.000", + "sample_format": "", + "sample_file": "", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2}, + {"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1}, + {"type": "BOOL"},{"type": "NCHAR","len":16}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseInsertForSub.json b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseInsertForSub.json new file mode 100644 index 0000000000..0726f3905d --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseInsertForSub.json @@ -0,0 +1,84 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 10, + "thread_count_create_tbl": 10, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "subnsdb", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ns", + "keep": 36, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": 
"stb0", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "tb0_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "samples", + "insert_mode": "taosc", + "insert_rows": 10, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10000000, + "start_timestamp": "2021-07-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./tools/taosdemoAllTest/nano_samples.csv", + "tags_file": "./tools/taosdemoAllTest/nano_sampletags.csv", + "columns": [{"type": "DOUBLE"}, {"type": "BINARY", "len": 64, "count":1}, {"type": "TIMESTAMP", "count":1}], + "tags": [{"type": "BINARY", "len": 16, "count":1},{"type": "INT"},{"type": "DOUBLE"}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "tb1_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "samples", + "insert_mode": "taosc", + "insert_rows": 10, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 10, + "disorder_range": 1000, + "timestamp_step": 10000000, + "start_timestamp": "2021-07-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./tools/taosdemoAllTest/nano_samples.csv", + "tags_file": "./tools/taosdemoAllTest/nano_sampletags.csv", + "columns": [{"type": "DOUBLE"}, {"type": "BINARY", "len": 64, "count":1}, {"type": "TIMESTAMP", "count":1}], + "tags": [{"type": "BINARY", "len": 16, "count":1},{"type": "INT"},{"type": "DOUBLE"}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseNow.json b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseNow.json new file mode 100644 index 0000000000..d2542a0eba --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseNow.json @@ -0,0 +1,62 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 10, + "thread_count_create_tbl": 10, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "nsdb2", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ns", + "keep": 36, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 1000, + "childtable_prefix": "tb0_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 100, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "now", + "sample_format": "", + "sample_file": "", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2}, + {"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1}, + {"type": "BOOL"},{"type": "NCHAR","len":16}], + "tags": [{"type": "TINYINT", "count":2}, 
{"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabasecsv.json b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabasecsv.json new file mode 100644 index 0000000000..867619ed8c --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabasecsv.json @@ -0,0 +1,84 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 10, + "thread_count_create_tbl": 10, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "nsdbcsv", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ns", + "keep": 36, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "tb0_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "samples", + "insert_mode": "taosc", + "insert_rows": 100, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10000000, + "start_timestamp": "2021-07-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./tools/taosdemoAllTest/nano_samples.csv", + "tags_file": "./tools/taosdemoAllTest/nano_sampletags.csv", + "columns": [{"type": "DOUBLE"}, {"type": "BINARY", "len": 64, "count":1}, {"type": "TIMESTAMP", "count":1}], + "tags": [{"type": "BINARY", "len": 16, "count":1},{"type": "INT"},{"type": "DOUBLE"}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "tb1_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "samples", + "insert_mode": "taosc", + "insert_rows": 100, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 10, + "disorder_range": 1000, + "timestamp_step": 10000000, + "start_timestamp": "2021-07-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./tools/taosdemoAllTest/nano_samples.csv", + "tags_file": "./tools/taosdemoAllTest/nano_sampletags.csv", + "columns": [{"type": "DOUBLE"}, {"type": "BINARY", "len": 64, "count":1}, {"type": "TIMESTAMP", "count":1}], + "tags": [{"type": "BINARY", "len": 16, "count":1},{"type": "INT"},{"type": "DOUBLE"}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py new file mode 100644 index 0000000000..010308d037 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py @@ -0,0 +1,162 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + def run(self): + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + binPath = buildPath+ "/build/bin/" + + + # insert: create one or mutiple tables per sql and insert multiple rows per sql + # insert data from a special timestamp + # check stable stb0 + + os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoTestNanoDatabase.json -y " % binPath) + tdSql.execute("use nsdb") + tdSql.query("show stables") + tdSql.checkData(0, 4, 1000) + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0, 1000) + tdSql.query("select count(*) from tb0_0") + tdSql.checkData(0, 0, 10000) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 10000000) + tdSql.query("describe stb0") + tdSql.getData(8, 1) + tdSql.checkDataType(8, 1,"TIMESTAMP") + tdSql.query("select last(ts) from stb0") + tdSql.getData(0, 0) + + # check stable stb1 which is insert with disord + + tdSql.query("select count (tbname) from stb1") + tdSql.checkData(0, 0, 1000) + tdSql.query("select count(*) from tb1_0") + tdSql.checkData(0, 0, 10000) + tdSql.query("select count(*) from stb1") + tdSql.checkData(0, 0, 10000000) + # check c8 is an nano timestamp + tdSql.query("describe stb1") + tdSql.checkDataType(8, 1,"TIMESTAMP") + # check insert timestamp_step is nano_second + tdSql.query("select last(ts) from stb1") + tdSql.checkData(0, 0,"2021-07-01 00:01:39.990000000") + + # insert data from now time + + # check stable stb0 + os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoTestNanoDatabaseNow.json -y " % binPath) + tdSql.execute("use nsdb2") + tdSql.query("show stables") + tdSql.checkData(0, 4, 1000) + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0, 1000) + tdSql.query("select count(*) from tb0_0") + tdSql.checkData(0, 0, 100) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 100000) + # check c8 is an nano timestamp + tdSql.query("describe stb0") + tdSql.checkDataType(8,1,"TIMESTAMP") + + # insert by csv files and timetamp is long int , strings in ts and cols + + os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoTestNanoDatabasecsv.json -y " % binPath) + tdSql.execute("use nsdbcsv") + tdSql.query("show stables") + tdSql.checkData(0, 4, 100) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 10000) + tdSql.query("describe stb0") + tdSql.checkDataType(3, 1, "TIMESTAMP") + tdSql.query("select count(*) from stb0 where ts > \"2021-07-01 
00:00:00.490000000\"") + tdSql.checkData(0, 0, 5000) + tdSql.query("select count(*) from stb0 where ts >now -22d-1h-3s") + tdSql.checkData(0, 0, 10000) + tdSql.query("select count(*) from stb0 where ts >now -22d-1h-3s") + tdSql.checkData(0, 0, 10000) + tdSql.query("select count(*) from stb0 where ts < 1626918583000000000") + tdSql.checkData(0, 0, 10000) + + os.system("rm -rf ./insert_res.txt") + os.system("rm -rf tools/taosdemoAllTest/taosdemoTestSupportNano*.py.sql") + + + + + + + + + + + # taosdemo test insert with command and parameter , detals show taosdemo --help + + os.system("%staosdemo -u root -P taosdata -p 6030 -h vm84 -a 1 -m pre -n 10 -T 20 -t 60 -o res.txt -y " % binPath) + tdSql.query("select count(*) from test.meters") + tdSql.checkData(0, 0, 600) + + os.system("%staosdemo -u root -P taosdata -p 6030 -h vm84 -a 1 -m pre -n 10 -T 20 -t 60 -o res.txt -y " % binPath) + tdSql.query("select count(*) from test.meters") + tdSql.checkData(0, 0, 600) + + # check taosdemo -s + + os.system("%staosdemo -s tools/taosdemoAllTest/taosdemoTestNanoCreateDB.sql -y " % binPath) + tdSql.query("select count(*) from nsdbsql.meters") + tdSql.checkData(0, 0, 2) + + + + os.system("rm -rf ./res.txt") + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.json b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.json new file mode 100644 index 0000000000..fff1017588 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.json @@ -0,0 +1,92 @@ +{ + "filetype": "query", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "confirm_parameter_prompt": "no", + "databases": "nsdb", + "query_times": 10, + "query_mode": "taosc", + "specified_table_query": { + "query_interval": 1, + "concurrent": 2, + "sqls": [ + { + "sql": "select count(*) from stb0 where ts>\"2021-07-01 00:01:00.000000000 \" ;", + "result": "./query_res0.txt" + }, + { + "sql": "select count(*) from stb0 where ts>\"2021-07-01 00:01:00.000000000\" and ts <=\"2021-07-01 00:01:10.000000000\" ;", + "result": "./query_res1.txt" + }, + { + "sql": "select count(*) from stb0 where ts>now-20d ;", + "result": "./query_res2.txt" + }, + { + "sql": "select max(c10) from stb0;", + "result": "./query_res3.txt" + }, + { + "sql": "select min(c1) from stb0;", + "result": "./query_res4.txt" + }, + { + "sql": "select avg(c1) from stb0;", + "result": "./query_res5.txt" + }, + { + "sql":"select count(*) from stb0 group by tbname;", + "result":"./query_res6.txt" + } + + ] + }, + "super_table_query": { + "stblname": "stb0", + "query_interval": 0, + "threads": 4, + "sqls": [ + { + "sql": "select count(*) from xxxx where ts>\"2021-07-01 00:01:00.000000000 \" ;", + "result": "./query_res_tb0.txt" + }, + { + "sql":"select count(*) from xxxx where ts>\"2021-07-01 00:01:00.000000000\" and ts <=\"2021-07-01 00:01:10.000000000\" ;", + "result": "./query_res_tb1.txt" + }, + { + "sql":"select first(*) from xxxx ;", + "result": "./query_res_tb2.txt" + }, + { + "sql":"select last(*) from xxxx;", + "result": "./query_res_tb3.txt" + + }, + { + "sql":"select last_row(*) from xxxx ;", + "result": "./query_res_tb4.txt" + + }, + { + "sql":"select max(c10) from xxxx ;", + "result": "./query_res_tb5.txt" + + }, + { + "sql":"select min(c1) from xxxx ;", + "result": "./query_res_tb6.txt" + 
+ }, + { + "sql":"select avg(c10) from xxxx ;", + "result": "./query_res_tb7.txt" + + } + + ] + } + } \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.py new file mode 100644 index 0000000000..21a7037ce6 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.py @@ -0,0 +1,157 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + def run(self): + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + binPath = buildPath+ "/build/bin/" + + # query: query test for nanoSecond with where and max min groupby order + os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoTestNanoDatabase.json -y " % binPath) + + tdSql.execute("use nsdb") + + # use where to filter + + tdSql.query("select count(*) from stb0 where ts>\"2021-07-01 00:01:00.000000000 \" ;") + tdSql.checkData(0, 0, 3999000) + tdSql.query("select count(*) from stb0 where ts>\"2021-07-01 00:01:00.000000000\" and ts <=\"2021-07-01 00:01:10.000000000\" ;") + tdSql.checkData(0, 0, 1000000) + + tdSql.query("select count(*) from tb0_0 where ts>\"2021-07-01 00:01:00.000000000 \" ;") + tdSql.checkData(0, 0, 3999) + tdSql.query("select count(*) from tb0_0 where ts>\"2021-07-01 00:01:00.000000000\" and ts <=\"2021-07-01 00:01:10.000000000\" ;") + tdSql.checkData(0, 0, 1000) + + + # select max min avg from special col + tdSql.query("select max(c10) from stb0;") + print("select max(c10) from stb0 : " , tdSql.getData(0, 0)) + + tdSql.query("select max(c10) from tb0_0;") + print("select max(c10) from tb0_0 : " , tdSql.getData(0, 0)) + + + tdSql.query("select min(c1) from stb0;") + print( "select min(c1) from stb0 : " , tdSql.getData(0, 0)) + + tdSql.query("select min(c1) from tb0_0;") + print( "select min(c1) from tb0_0 : " , tdSql.getData(0, 0)) + + tdSql.query("select avg(c1) from stb0;") + print( "select avg(c1) from stb0 : " , tdSql.getData(0, 0)) + + tdSql.query("select avg(c1) from tb0_0;") + print( "select avg(c1) from tb0_0 : " , tdSql.getData(0, 0)) + + tdSql.query("select count(*) from stb0 group by tbname;") + tdSql.checkData(0, 0, 10000) + tdSql.checkData(100, 0, 10000) + + # query : query above sqls by taosdemo and continuously + 
+ os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.json -y " % binPath) + + + os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoTestNanoDatabasecsv.json -y " % binPath) + tdSql.execute("use nsdbcsv") + tdSql.query("show stables") + tdSql.checkData(0, 4, 100) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 10000) + tdSql.query("describe stb0") + tdSql.checkDataType(3, 1, "TIMESTAMP") + tdSql.query("select count(*) from stb0 where ts >\"2021-07-01 00:00:00.490000000\"") + tdSql.checkData(0, 0, 5000) + tdSql.query("select count(*) from stb0 where ts >now -22d-1h-3s") + tdSql.checkData(0, 0, 10000) + tdSql.query("select count(*) from stb0 where ts < 1626918583000000000") + tdSql.checkData(0, 0, 10000) + tdSql.execute('select count(*) from stb0 where c2 > 162687012800000000') + tdSql.execute('select count(*) from stb0 where c2 < 162687012800000000') + tdSql.execute('select count(*) from stb0 where c2 = 162687012800000000') + tdSql.execute('select count(*) from stb0 where c2 != 162687012800000000') + tdSql.execute('select count(*) from stb0 where c2 <> 162687012800000000') + tdSql.execute('select count(*) from stb0 where c2 > "2021-07-21 20:22:08.248246976"') + tdSql.execute('select count(*) from stb0 where c2 < "2021-07-21 20:22:08.248246976"') + tdSql.execute('select count(*) from stb0 where c2 = "2021-07-21 20:22:08.248246976"') + tdSql.execute('select count(*) from stb0 where c2 != "2021-07-21 20:22:08.248246976"') + tdSql.execute('select count(*) from stb0 where c2 <> "2021-07-21 20:22:08.248246976"') + tdSql.execute('select count(*) from stb0 where ts between "2021-07-01 00:00:00.000000000" and "2021-07-01 00:00:00.990000000"') + tdSql.execute('select count(*) from stb0 where ts between 1625068800000000000 and 1625068801000000000') + tdSql.query('select avg(c0) from stb0 interval(5000000000b)') + tdSql.checkRows(1) + + tdSql.query('select avg(c0) from stb0 interval(100000000b)') + tdSql.checkRows(10) + + tdSql.error('select avg(c0) from stb0 interval(1b)') + tdSql.error('select avg(c0) from stb0 interval(999b)') + + tdSql.query('select avg(c0) from stb0 interval(1000b)') + tdSql.checkRows(100) + + tdSql.query('select avg(c0) from stb0 interval(1u)') + tdSql.checkRows(100) + + tdSql.query('select avg(c0) from stb0 interval(100000000b) sliding (100000000b)') + tdSql.checkRows(10) + + # query : query above sqls by taosdemo and continuously + os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoTestSupportNanoQuerycsv.json -y " % binPath) + + os.system("rm -rf ./query_res*.txt*") + os.system("rm -rf tools/taosdemoAllTest/*.py.sql") + + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuerycsv.json b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuerycsv.json new file mode 100644 index 0000000000..aa95837a33 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuerycsv.json @@ -0,0 +1,110 @@ +{ + "filetype": "query", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "confirm_parameter_prompt": "no", + "databases": "nsdbcsv", + "query_times": 10, + "query_mode": "taosc", + "specified_table_query": { + "query_interval": 1, + "concurrent": 2, + "sqls": [ + { + "sql": "select count(*) from stb0 where ts> \"2021-07-01 00:00:00.490000000\" ;", + 
"result": "./query_res0.txt" + }, + { + "sql": "select count(*) from stb0 where ts > now -22d-1h-3s ;", + "result": "./query_res1.txt" + }, + { + "sql": "select count(*) from stb0 where ts < 1626918583000000000 ;", + "result": "./query_res2.txt" + }, + { + "sql": "select count(*) from stb0 where c2 <> 162687012800000000';", + "result": "./query_res3.txt" + }, + { + "sql": "select count(*) from stb0 where c2 != \"2021-07-21 20:22:08.248246976\";", + "result": "./query_res4.txt" + }, + { + "sql": "select count(*) from stb0 where ts between \"2021-07-01 00:00:00.000000000\" and \"2021-07-01 00:00:00.990000000\";", + "result": "./query_res5.txt" + }, + { + "sql":"select count(*) from stb0 group by tbname;", + "result":"./query_res6.txt" + }, + { + "sql":"select count(*) from stb0 where ts between 1625068800000000000 and 1625068801000000000;", + "result":"./query_res7.txt" + }, + { + "sql":"select avg(c0) from stb0 interval(5000000000b);", + "result":"./query_res8.txt" + }, + { + "sql":"select avg(c0) from stb0 interval(100000000b) sliding (100000000b);", + "result":"./query_res9.txt" + } + + ] + }, + "super_table_query": { + "stblname": "stb0", + "query_interval": 0, + "threads": 4, + "sqls": [ + { + "sql": "select count(*) from xxxx where ts > \"2021-07-01 00:00:00.490000000\" ;", + "result": "./query_res_tb0.txt" + }, + { + "sql":"select count(*) from xxxx where ts between \"2021-07-01 00:00:00.000000000\" and \"2021-07-01 00:00:00.990000000\" ;", + "result": "./query_res_tb1.txt" + }, + { + "sql":"select first(*) from xxxx ;", + "result": "./query_res_tb2.txt" + }, + { + "sql":"select last(*) from xxxx;", + "result": "./query_res_tb3.txt" + + }, + { + "sql":"select last_row(*) from xxxx ;", + "result": "./query_res_tb4.txt" + + }, + { + "sql":"select max(c0) from xxxx ;", + "result": "./query_res_tb5.txt" + + }, + { + "sql":"select min(c0) from xxxx ;", + "result": "./query_res_tb6.txt" + + }, + { + "sql":"select avg(c0) from xxxx ;", + "result": "./query_res_tb7.txt" + + }, + { + "sql":"select avg(c0) from xxxx interval(100000000b) sliding (100000000b) ;", + "result": "./query_res_tb8.txt" + + } + + + ] + } + } \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoSubscribe.json b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoSubscribe.json new file mode 100644 index 0000000000..26d405b65b --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoSubscribe.json @@ -0,0 +1,32 @@ +{ + "filetype":"subscribe", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "databases": "subnsdb", + "confirm_parameter_prompt": "no", + "specified_table_query": + { + "concurrent":2, + "mode":"sync", + "interval":10000, + "restart":"yes", + "keepProgress":"yes", + "sqls": [ + { + "sql": "select * from stb0 where ts>= \"2021-07-01 00:00:00.000000000\" ;", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb0 where ts > now -20d-1h-3s ;", + "result": "./subscribe_res1.txt" + }, + { + "sql": "select * from stb0 where ts < 1626918583000000000 ;", + "result": "./subscribe_res2.txt" + }] + + } +} diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py new file mode 100644 index 0000000000..f6324577c1 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py @@ -0,0 +1,123 @@ 
+################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * +import time +from datetime import datetime +import subprocess + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + # get the number of subscriptions + def subTimes(self,filename): + self.filename = filename + command = 'cat %s |wc -l'% filename + times = int(subprocess.getstatusoutput(command)[1]) + return times + + # assert results + def assertCheck(self,filename,subResult,expectResult): + self.filename = filename + self.subResult = subResult + self.expectResult = expectResult + args0 = (filename, subResult, expectResult) + assert subResult == expectResult , "Queryfile:%s ,result is %s != expect: %s" % args0 + + def run(self): + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + binPath = buildPath+ "/build/bin/" + + # clear env + os.system("ps -ef |grep 'taosdemoAllTest/taosdemoTestSupportNanoSubscribe.json' |grep -v 'grep' |awk '{print $2}'|xargs kill -9") + os.system("rm -rf ./subscribe_res*") + os.system("rm -rf ./all_subscribe_res*") + + + # insert data + os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoTestNanoDatabaseInsertForSub.json" % binPath) + os.system("nohup %staosdemo -f tools/taosdemoAllTest/taosdemoTestSupportNanoSubscribe.json &" % binPath) + query_pid = int(subprocess.getstatusoutput('ps aux|grep "taosdemoAllTest/taosdemoTestSupportNanoSubscribe.json" |grep -v "grep"|awk \'{print $2}\'')[1]) + + + # merge result files + sleep(20) + os.system("cat subscribe_res0.txt* > all_subscribe_res0.txt") + os.system("cat subscribe_res1.txt* > all_subscribe_res1.txt") + os.system("cat subscribe_res2.txt* > all_subscribe_res2.txt") + + + # correct subscribeTimes testcase + subTimes0 = self.subTimes("all_subscribe_res0.txt") + self.assertCheck("all_subscribe_res0.txt",subTimes0 ,200) + + subTimes1 = self.subTimes("all_subscribe_res1.txt") + self.assertCheck("all_subscribe_res1.txt",subTimes1 ,0) + + subTimes2 = self.subTimes("all_subscribe_res2.txt") + self.assertCheck("all_subscribe_res2.txt",subTimes2 ,200) + + + # insert extral data + tdSql.execute("use subnsdb") + tdSql.execute("insert into tb0_0 values(now,100.1000,'subtest1',now-1s)") + sleep(1) + + os.system("cat subscribe_res0.txt* > all_subscribe_res0.txt") + subTimes0 = self.subTimes("all_subscribe_res0.txt") + 
self.assertCheck("all_subscribe_res0.txt",subTimes0 ,202) + + + + # correct data testcase + os.system("kill -9 %d" % query_pid) + sleep(3) + os.system("rm -rf ./subscribe_res*") + os.system("rm -rf ./all_subscribe*") + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) From c133eb1cfea4803c8b851eb4540de82f2bf5daf8 Mon Sep 17 00:00:00 2001 From: wenzhouwww Date: Fri, 23 Jul 2021 10:07:02 +0800 Subject: [PATCH 04/38] [TD-5369] modify an error about insert --- .../tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py index 010308d037..76b5871de6 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py @@ -113,10 +113,6 @@ class TDTestCase: tdSql.checkDataType(3, 1, "TIMESTAMP") tdSql.query("select count(*) from stb0 where ts > \"2021-07-01 00:00:00.490000000\"") tdSql.checkData(0, 0, 5000) - tdSql.query("select count(*) from stb0 where ts >now -22d-1h-3s") - tdSql.checkData(0, 0, 10000) - tdSql.query("select count(*) from stb0 where ts >now -22d-1h-3s") - tdSql.checkData(0, 0, 10000) tdSql.query("select count(*) from stb0 where ts < 1626918583000000000") tdSql.checkData(0, 0, 10000) From ea0cc35af4d2db6f92f0a0b74e511deb01d2633c Mon Sep 17 00:00:00 2001 From: happyguoxy Date: Fri, 23 Jul 2021 14:49:15 +0800 Subject: [PATCH 05/38] [TD-5074]test operator --- tests/pytest/fulltest.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index e452ed3de4..eb068b6585 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -241,6 +241,7 @@ python3 ./test.py -f query/queryStateWindow.py python3 ./test.py -f query/nestedQuery/queryWithOrderLimit.py python3 ./test.py -f query/nestquery_last_row.py python3 ./test.py -f query/queryCnameDisplay.py +python3 ./test.py -f query/operator.py python3 test.py -f query/nestedQuery/queryWithSpread.py #stream From 11d5b3231d91d78609a0141ef3f3200579f1813d Mon Sep 17 00:00:00 2001 From: happyguoxy Date: Fri, 23 Jul 2021 14:49:27 +0800 Subject: [PATCH 06/38] [TD-5074]test operator --- tests/pytest/query/operator.py | 539 +++++++++++++++++++++++++++++++++ 1 file changed, 539 insertions(+) create mode 100644 tests/pytest/query/operator.py diff --git a/tests/pytest/query/operator.py b/tests/pytest/query/operator.py new file mode 100644 index 0000000000..b94d5fa3b3 --- /dev/null +++ b/tests/pytest/query/operator.py @@ -0,0 +1,539 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql +import random +import time + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + self.ts = 1600000000000 + self.num = 10 + + def run(self): + tdSql.prepare() + # test case for https://jira.taosdata.com:18080/browse/TD-5074 + + startTime = time.time() + + tdSql.execute('''create stable stable_1 + (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint, + q_bool bool , q_binary binary(20) , q_nchar nchar(20) , + q_float float , q_double double , q_ts timestamp) + tags(loc nchar(20) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, + t_bool bool , t_binary binary(20) , t_nchar nchar(20) , + t_float float , t_double double , t_ts timestamp);''') + tdSql.execute('''create stable stable_2 + (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint, + q_bool bool , q_binary binary(20) , q_nchar nchar(20) , + q_float float , q_double double , q_ts timestamp) + tags(loc nchar(20) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, + t_bool bool , t_binary binary(20) , t_nchar nchar(20) , + t_float float , t_double double , t_ts timestamp);''') + tdSql.execute('''create table table_0 using stable_1 + tags('table_0' , '0' , '0' , '0' , '0' , 0 , '0' , '0' , '0' , '0' ,'0')''') + tdSql.execute('''create table table_1 using stable_1 + tags('table_1' , '2147483647' , '9223372036854775807' , '32767' , '127' , 1 , + 'binary1' , 'nchar1' , '1' , '11' , \'1999-09-09 09:09:09.090\')''') + tdSql.execute('''create table table_2 using stable_1 + tags('table_2' , '-2147483647' , '-9223372036854775807' , '-32767' , '-127' , false , + 'binary2' , 'nchar2nchar2' , '-2.2' , '-22.22' , \'2099-09-09 09:09:09.090\')''') + tdSql.execute('''create table table_3 using stable_1 + tags('table_3' , '3' , '3' , '3' , '3' , true , 'binary3' , 'nchar3' , '33.33' , '3333.3333' , '0')''') + tdSql.execute('''create table table_4 using stable_1 + tags('table_4' , '4' , '4' , '4' , '4' , false , 'binary4' , 'nchar4' , '-444.444' , '-444444.444444' , '0')''') + tdSql.execute('''create table table_5 using stable_1 + tags('table_5' , '5' , '5' , '5' , '5' , true , 'binary5' , 'nchar5' , '5555.5555' , '55555555.55555555' , '0')''') + tdSql.execute('''create table table_21 using stable_2 + tags('table_5' , '5' , '5' , '5' , '5' , true , 'binary5' , 'nchar5' , '5555.5555' , '55555555.55555555' , '0')''') + #regular table + tdSql.execute('''create table regular_table_1 + (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint, + q_bool bool , q_binary binary(20) , q_nchar nchar(20) , + q_float float , q_double double , q_ts timestamp) ;''') + + for i in range(self.num): + tdSql.execute('''insert into table_0 values(%d, %d, %d, %d, %d, 0, 'binary.%s', 'nchar.%s', %f, %f, %d)''' + % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) + tdSql.execute('''insert into table_1 values(%d, %d, %d, %d, %d, 1, 'binary1.%s', 'nchar1.%s', %f, %f, %d)''' + % (self.ts + i, 2147483647-i, 9223372036854775807-i, 32767-i, 127-i, + i, i, 
random.random(), random.random(), 1262304000001 + i)) + tdSql.execute('''insert into table_2 values(%d, %d, %d, %d, %d, true, 'binary2.%s', 'nchar2nchar2.%s', %f, %f, %d)''' + % (self.ts + i, -2147483647+i, -9223372036854775807+i, -32767+i, -127+i, + i, i, random.uniform(-1,0), random.uniform(-1,0), 1577836800001 + i)) + tdSql.execute('''insert into table_3 values(%d, %d, %d, %d, %d, false, 'binary3.%s', 'nchar3.%s', %f, %f, %d)''' + % (self.ts + i, random.randint(-2147483647, 2147483647), + random.randint(-9223372036854775807, 9223372036854775807), random.randint(-32767, 32767), + random.randint(-127, 127), random.randint(-100, 100), random.randint(-10000, 10000), + random.uniform(-100000,100000), random.uniform(-1000000000,1000000000), self.ts + i)) + tdSql.execute('''insert into table_4 values(%d, %d, %d, %d, %d, true, 'binary4.%s', 'nchar4.%s', %f, %f, %d)''' + % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) + tdSql.execute('''insert into table_5 values(%d, %d, %d, %d, %d, false, 'binary5.%s', 'nchar5.%s', %f, %f, %d)''' + % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) + tdSql.execute('''insert into table_21 values(%d, %d, %d, %d, %d, false, 'binary5.%s', 'nchar5.%s', %f, %f, %d)''' + % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) + + tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, 0, 'binary.%s', 'nchar.%s', %f, %f, %d)''' + % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) + tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, 1, 'binary1.%s', 'nchar1.%s', %f, %f, %d)''' + % (self.ts + 100 + i, 2147483647-i, 9223372036854775807-i, 32767-i, 127-i, + i, i, random.random(), random.random(), 1262304000001 + i)) + tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, true, 'binary2.%s', 'nchar2nchar2.%s', %f, %f, %d)''' + % (self.ts + 200 + i, -2147483647+i, -9223372036854775807+i, -32767+i, -127+i, + i, i, random.uniform(-1,0), random.uniform(-1,0), 1577836800001 + i)) + tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, false, 'binary3.%s', 'nchar3.%s', %f, %f, %d)''' + % (self.ts + 300 + i, random.randint(-2147483647, 2147483647), + random.randint(-9223372036854775807, 9223372036854775807), random.randint(-32767, 32767), + random.randint(-127, 127), random.randint(-100, 100), random.randint(-10000, 10000), + random.uniform(-100000,100000), random.uniform(-1000000000,1000000000), self.ts + i)) + tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, true, 'binary4.%s', 'nchar4.%s', %f, %f, %d)''' + % (self.ts + 400 + i, i, i, i, i, i, i, i, i, self.ts + i)) + tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, false, 'binary5.%s', 'nchar5.%s', %f, %f, %d)''' + % (self.ts + 500 + i, i, i, i, i, i, i, i, i, self.ts + i)) + + tdLog.info("========== operator=1(OP_TableScan) ==========") + tdLog.info("========== operator=7(OP_Project) ==========") + sql = '''select * from stable_1''' + tdSql.query(sql) + tdSql.checkRows(6*self.num) + sql = '''select * from regular_table_1''' + tdSql.query(sql) + tdSql.checkRows(6*self.num) + + tdLog.info("========== operator=14(OP_MultiTableAggregate ) ==========") + sql = '''select last_row(*) from stable_1;''' + tdSql.query(sql) + tdSql.checkData(0,1,self.num-1) + + tdLog.info("========== operator=6(OP_Aggregate) ==========") + sql = '''select last_row(*) from regular_table_1;''' + tdSql.query(sql) + tdSql.checkData(0,1,self.num-1) + + tdLog.info("========== operator=9(OP_Limit) ==========") + sql = '''select * from 
stable_1 where loc = 'table_0' limit 5;''' + tdSql.query(sql) + tdSql.checkRows(5) + sql = '''select last_row(*) from (select * from stable_1 where loc = 'table_0');''' + tdSql.query(sql) + tdSql.checkRows(1) + + sql = '''select * from regular_table_1 ;''' + tdSql.query(sql) + tdSql.checkRows(6*self.num) + sql = '''select last_row(*) from (select * from regular_table_1);''' + tdSql.query(sql) + tdSql.checkRows(1) + tdSql.checkData(0,1,self.num-1) + + + sql = '''select last_row(*) from + ((select * from table_0) union all + (select * from table_1) union all + (select * from table_2));''' + tdSql.error(sql) + + tdLog.info("========== operator=16(OP_DummyInput) ==========") + sql = '''select last_row(*) from + ((select last_row(*) from table_0) union all + (select last_row(*) from table_1) union all + (select last_row(*) from table_2));''' + tdSql.error(sql) + + sql = '''select last_row(*) from + ((select * from table_0 limit 5 offset 5) union all + (select * from table_1 limit 5 offset 5) union all + (select * from regular_table_1 limit 5 offset 5));''' + tdSql.error(sql) + + tdLog.info("========== operator=10(OP_SLimit) ==========") + sql = '''select count(*) from stable_1 group by loc slimit 3 soffset 2 ;''' + tdSql.query(sql) + tdSql.checkRows(3) + + sql = '''select last_row(*) from + ((select * from table_0) union all + (select * from table_1) union all + (select * from table_2));''' + tdSql.error(sql) + + tdLog.info("========== operator=20(OP_Distinct) ==========") + tdLog.info("========== operator=4(OP_TagScan) ==========") + sql = '''select distinct(t_bool) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(2) + sql = '''select distinct(loc) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_int) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_bigint) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_smallint) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_tinyint) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_binary) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_nchar) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_float) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_double) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_ts) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(3) + sql = '''select distinct(tbname) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + + tdLog.info("========== operator=2(OP_DataBlocksOptScan) ==========") + sql = '''select last(q_int),first(q_int) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_bigint),first(q_bigint) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_smallint),first(q_smallint) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_tinyint),first(q_tinyint) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_bool),first(q_bool) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_binary),first(q_binary) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_nchar),first(q_nchar) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_float),first(q_float) from 
stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_double),first(q_double) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_ts),first(q_ts) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint),last(q_bool),last(q_binary),last(q_nchar), + last(q_float),last(q_double),last(q_ts),first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint), + first(q_bool),first(q_binary),first(q_nchar),first(q_float),first(q_float),first(q_double),first(q_ts) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint),last(q_bool),last(q_binary),last(q_nchar), + last(q_float),last(q_double),last(q_ts),first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint),first(q_bool), + first(q_binary),first(q_nchar),first(q_float),first(q_float),first(q_double),first(q_ts) from regular_table_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + + tdLog.info("========== operator=8(OP_Groupby) ==========") + sql = '''select stddev(q_int) from table_0 group by q_int;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select stddev(q_int),stddev(q_bigint),stddev(q_smallint),stddev(q_tinyint),stddev(q_float),stddev(q_double) from stable_1 group by q_int;''' + tdSql.query(sql) + sql = '''select stddev(q_int),stddev(q_bigint),stddev(q_smallint),stddev(q_tinyint),stddev(q_float),stddev(q_double) from table_1 group by q_bigint;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select stddev(q_int),stddev(q_bigint),stddev(q_smallint),stddev(q_tinyint),stddev(q_float),stddev(q_double) from regular_table_1 group by q_smallint;''' + tdSql.query(sql) + + tdLog.info("========== operator=11(OP_TimeWindow) ==========") + sql = '''select last(q_int) from table_0 interval(1m);''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint), + first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint) from table_1 interval(1m);''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint), + first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint) from stable_1 interval(1m);''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint), + first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint) from regular_table_1 interval(1m);''' + tdSql.query(sql) + tdSql.checkRows(1) + + tdLog.info("========== operator=12(OP_SessionWindow) ==========") + sql = '''select count(*) from table_1 session(ts,1s);''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select count(*) from regular_table_1 session(ts,1s);''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select count(*),sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from table_1 session(ts,1s);''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = 
'''select count(*),sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from regular_table_1 session(ts,1s);''' + tdSql.query(sql) + tdSql.checkRows(1) + + tdLog.info("========== operator=13(OP_Fill) ==========") + sql = '''select sum(q_int) from table_0 + where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' + tdSql.query(sql) + tdSql.checkData(0,1,'None') + sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from stable_1 where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' + tdSql.query(sql) + tdSql.checkData(0,1,'None') + sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from regular_table_1 where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' + tdSql.query(sql) + tdSql.checkData(0,1,'None') + sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from table_0 where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' + tdSql.query(sql) + tdSql.checkData(0,1,'None') + #TD-5190 + sql = '''select sum(q_tinyint),stddev(q_float) from stable_1 + where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' + tdSql.query(sql) + tdSql.checkData(0,1,'None') + + tdLog.info("========== operator=15(OP_MultiTableTimeInterval) ==========") + sql = '''select avg(q_int) from stable_1 where ts=0;''' + tdSql.query(sql) + tdSql.checkData(0,0,'table_0') + sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), 
avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from stable_1 group by loc having sum(q_int)>=0;''' + tdSql.query(sql) + tdSql.checkData(0,0,'table_0') + sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from stable_1 group by loc having avg(q_int)>=0;''' + tdSql.query(sql) + tdSql.checkData(0,0,'table_0') + sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from stable_1 group by loc having min(q_int)>=0;''' + tdSql.query(sql) + tdSql.checkData(0,0,'table_0') + sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from stable_1 group by loc having max(q_int)>=0;''' + tdSql.query(sql) + tdSql.checkData(0,0,'table_0') + sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from stable_1 group by loc having first(q_int)>=0;''' + tdSql.query(sql) + tdSql.checkData(0,0,'table_0') + sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), 
avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from stable_1 group by loc having last(q_int)>=0;''' + tdSql.query(sql) + tdSql.checkData(0,0,'table_0') + + tdLog.info("========== operator=21(OP_Join) ==========") + sql = '''select t1.q_int,t2.q_int from + (select ts,q_int from table_1) t1 , (select ts,q_int from table_2) t2 + where t2.ts = t1.ts;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select t1.*,t2.* from + (select * from table_1) t1 , (select * from table_2) t2 + where t2.ts = t1.ts;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select t1.*,t2.* from + (select * from regular_table_1) t1 , (select * from table_0) t2 + where t2.ts = t1.ts;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select t1.*,t2.* from + (select * from stable_1) t1 , (select * from table_2) t2 + where t2.ts = t1.ts;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select t1.*,t2.* from + (select * from regular_table_1) t1 , (select * from stable_1) t2 + where t2.ts = t1.ts;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select t1.*,t2.*,t3.* from + (select * from regular_table_1) t1 , (select * from stable_1) t2, (select * from table_0) t3 + where t2.ts = t1.ts and t3.ts = t1.ts and t2.ts = t3.ts;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + + tdLog.info("========== operator=22(OP_StateWindow) ==========") + sql = '''select avg(q_int),sum(q_smallint) from table_1 state_window(q_int);''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from table_1 state_window(q_bigint);''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from regular_table_1 state_window(q_smallint);''' + tdSql.query(sql) + tdSql.checkRows(6*self.num) + + endTime = time.time() + print("total time %ds" % (endTime - startTime)) + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file From 8c62d84a3d5f1d52191bcf4a99a9b4d75b21eca9 Mon Sep 17 00:00:00 2001 From: 
wenzhouwww Date: Fri, 23 Jul 2021 20:05:08 +0800 Subject: [PATCH 07/38] [TD-5369] add test case about taosdemo params 'time_step' --- tests/pytest/fulltest.sh | 1 + .../taosdemoAllTest/taosdemoInsertMSDB.json | 63 ++++++++++ .../taosdemoAllTest/taosdemoInsertNanoDB.json | 63 ++++++++++ .../taosdemoAllTest/taosdemoInsertUSDB.json | 63 ++++++++++ .../taosdemoTestInsertTime_step.py | 115 ++++++++++++++++++ .../taosdemoTestNanoCreateDB.sql | 7 -- .../taosdemoTestSupportNanoInsert.py | 36 +++--- 7 files changed, 324 insertions(+), 24 deletions(-) create mode 100644 tests/pytest/tools/taosdemoAllTest/taosdemoInsertMSDB.json create mode 100644 tests/pytest/tools/taosdemoAllTest/taosdemoInsertNanoDB.json create mode 100644 tests/pytest/tools/taosdemoAllTest/taosdemoInsertUSDB.json create mode 100644 tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertTime_step.py delete mode 100644 tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoCreateDB.sql diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index c9dd6d8e79..ab7dedc959 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -368,6 +368,7 @@ python3 test.py -f alter/alter_create_exception.py python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.py python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py +python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertTime_step.py python3 ./test.py -f insert/flushwhiledrop.py #======================p4-end=============== diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoInsertMSDB.json b/tests/pytest/tools/taosdemoAllTest/taosdemoInsertMSDB.json new file mode 100644 index 0000000000..49ab6f3a43 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoInsertMSDB.json @@ -0,0 +1,63 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 10, + "thread_count_create_tbl": 10, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "testdb3", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 36, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "tb0_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 100, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1000, + "start_timestamp": "2021-07-01 00:00:00.000", + "sample_format": "", + "sample_file": "", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2}, + {"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1}, + {"type": "BOOL"},{"type": "NCHAR","len":16}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}] + }] + }] +} 
+ diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoInsertNanoDB.json b/tests/pytest/tools/taosdemoAllTest/taosdemoInsertNanoDB.json new file mode 100644 index 0000000000..9a35df917d --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoInsertNanoDB.json @@ -0,0 +1,63 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 10, + "thread_count_create_tbl": 10, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "testdb1", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ns", + "keep": 36, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "tb0_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 100, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1000, + "start_timestamp": "2021-07-01 00:00:00.000", + "sample_format": "", + "sample_file": "", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2}, + {"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1}, + {"type": "BOOL"},{"type": "NCHAR","len":16}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}] + }] + }] +} + diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoInsertUSDB.json b/tests/pytest/tools/taosdemoAllTest/taosdemoInsertUSDB.json new file mode 100644 index 0000000000..631179dbae --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoInsertUSDB.json @@ -0,0 +1,63 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 10, + "thread_count_create_tbl": 10, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "testdb2", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "us", + "keep": 36, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "tb0_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 100, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1000, + "start_timestamp": "2021-07-01 00:00:00.000", + "sample_format": "", + "sample_file": "", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", 
"count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2}, + {"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1}, + {"type": "BOOL"},{"type": "NCHAR","len":16}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}] + }] + }] +} + diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertTime_step.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertTime_step.py new file mode 100644 index 0000000000..7b3b865df9 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertTime_step.py @@ -0,0 +1,115 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + def run(self): + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + binPath = buildPath+ "/build/bin/" + + # insert: create one or mutiple tables per sql and insert multiple rows per sql + + # check the params of taosdemo about time_step is nano + os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoInsertNanoDB.json -y " % binPath) + tdSql.execute("use testdb1") + tdSql.query("show stables") + tdSql.checkData(0, 4, 100) + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0, 100) + tdSql.query("select count(*) from tb0_0") + tdSql.checkData(0, 0, 100) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 10000) + tdSql.query("describe stb0") + tdSql.getData(9, 1) + tdSql.checkDataType(9, 1,"TIMESTAMP") + tdSql.query("select last(ts) from stb0") + tdSql.checkData(0, 0,"2021-07-01 00:00:00.000099000") + + # check the params of taosdemo about time_step is us + os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoInsertUSDB.json -y " % binPath) + tdSql.execute("use testdb2") + tdSql.query("show stables") + tdSql.checkData(0, 4, 100) + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0, 100) + tdSql.query("select count(*) from tb0_0") + tdSql.checkData(0, 0, 100) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 10000) + tdSql.query("describe stb0") + tdSql.getData(9, 1) + tdSql.checkDataType(9, 1,"TIMESTAMP") + tdSql.query("select last(ts) from stb0") + tdSql.checkData(0, 0,"2021-07-01 
00:00:00.099000") + + # check the params of taosdemo about time_step is ms + os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoInsertMSDB.json -y " % binPath) + tdSql.execute("use testdb3") + tdSql.query("show stables") + tdSql.checkData(0, 4, 100) + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0, 100) + tdSql.query("select count(*) from tb0_0") + tdSql.checkData(0, 0, 100) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 10000) + tdSql.query("describe stb0") + tdSql.checkDataType(9, 1,"TIMESTAMP") + tdSql.query("select last(ts) from stb0") + tdSql.checkData(0, 0,"2021-07-01 00:01:39.000") + + + os.system("rm -rf ./res.txt") + os.system("rm -rf ./*.py.sql") + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoCreateDB.sql b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoCreateDB.sql deleted file mode 100644 index e79e09592c..0000000000 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoCreateDB.sql +++ /dev/null @@ -1,7 +0,0 @@ -drop database if exists nsdbsql; -create database nsdbsql precision "ns" keep 36 days 6 update 1; -use nsdbsql; -CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupdId int); -CREATE TABLE d1001 USING meters TAGS ("Beijing.Chaoyang", 2); -INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 10.2, 219, 0.32); -INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 85, 32, 0.76); diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py index 76b5871de6..88a917da85 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py @@ -64,8 +64,7 @@ class TDTestCase: tdSql.query("select count(*) from stb0") tdSql.checkData(0, 0, 10000000) tdSql.query("describe stb0") - tdSql.getData(8, 1) - tdSql.checkDataType(8, 1,"TIMESTAMP") + tdSql.checkDataType(9, 1,"TIMESTAMP") tdSql.query("select last(ts) from stb0") tdSql.getData(0, 0) @@ -79,7 +78,7 @@ class TDTestCase: tdSql.checkData(0, 0, 10000000) # check c8 is an nano timestamp tdSql.query("describe stb1") - tdSql.checkDataType(8, 1,"TIMESTAMP") + tdSql.checkDataType(9, 1,"TIMESTAMP") # check insert timestamp_step is nano_second tdSql.query("select last(ts) from stb1") tdSql.checkData(0, 0,"2021-07-01 00:01:39.990000000") @@ -99,7 +98,7 @@ class TDTestCase: tdSql.checkData(0, 0, 100000) # check c8 is an nano timestamp tdSql.query("describe stb0") - tdSql.checkDataType(8,1,"TIMESTAMP") + tdSql.checkDataType(9,1,"TIMESTAMP") # insert by csv files and timetamp is long int , strings in ts and cols @@ -118,16 +117,6 @@ class TDTestCase: os.system("rm -rf ./insert_res.txt") os.system("rm -rf tools/taosdemoAllTest/taosdemoTestSupportNano*.py.sql") - - - - - - - - - - # taosdemo test insert with command and parameter , detals show taosdemo --help os.system("%staosdemo -u root -P taosdata -p 6030 -h vm84 -a 1 -m pre -n 10 -T 20 -t 60 -o res.txt -y " % binPath) @@ -140,13 +129,26 @@ class TDTestCase: # check taosdemo -s - os.system("%staosdemo -s tools/taosdemoAllTest/taosdemoTestNanoCreateDB.sql -y " % binPath) + sqls_ls = ['drop database if exists nsdbsql;','create database nsdbsql precision "ns" keep 36 days 6 
update 1;', + 'use nsdbsql;','CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupdId int);', + 'CREATE TABLE d1001 USING meters TAGS ("Beijing.Chaoyang", 2);', + 'INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 10.2, 219, 0.32);', + 'INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 85, 32, 0.76);'] + + with open("./taosdemoTestNanoCreateDB.sql",mode ="a" ) as sql_files: + for sql in sqls_ls: + sql_files.write(sql+"\n") + sql_files.close() + + sleep(10) + + os.system("%staosdemo -s taosdemoTestNanoCreateDB.sql -y " % binPath) tdSql.query("select count(*) from nsdbsql.meters") tdSql.checkData(0, 0, 2) - - os.system("rm -rf ./res.txt") + os.system("rm -rf ./*.py.sql") + os.system("rm -rf ./taosdemoTestNanoCreateDB.sql") def stop(self): From 2323d5a22868649cb109c0c938cbab510881eb88 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 24 Jul 2021 14:33:54 +0800 Subject: [PATCH 08/38] [td-225] update the test script. --- tests/pytest/tools/taosdemoAllTest/querrThreads0.json | 4 ++-- tests/pytest/tools/taosdemoAllTest/querrThreadsless0.json | 4 ++-- tests/script/sh/stop_dnodes.sh | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/pytest/tools/taosdemoAllTest/querrThreads0.json b/tests/pytest/tools/taosdemoAllTest/querrThreads0.json index 69557a7841..3999845dec 100644 --- a/tests/pytest/tools/taosdemoAllTest/querrThreads0.json +++ b/tests/pytest/tools/taosdemoAllTest/querrThreads0.json @@ -7,7 +7,7 @@ "password": "taosdata", "confirm_parameter_prompt": "no", "databases": "db", - "query_times":3, + "query_times": 3, "specified_table_query": { "query_interval": 0, "concurrent": 1, @@ -34,4 +34,4 @@ ] } } - \ No newline at end of file + diff --git a/tests/pytest/tools/taosdemoAllTest/querrThreadsless0.json b/tests/pytest/tools/taosdemoAllTest/querrThreadsless0.json index 9074ae8fd1..646cbcfbe2 100644 --- a/tests/pytest/tools/taosdemoAllTest/querrThreadsless0.json +++ b/tests/pytest/tools/taosdemoAllTest/querrThreadsless0.json @@ -7,7 +7,7 @@ "password": "taosdata", "confirm_parameter_prompt": "no", "databases": "db", - "query_times":3, + "query_times": 3, "specified_table_query": { "query_interval": 0, "concurrent": 1, @@ -34,4 +34,4 @@ ] } } - \ No newline at end of file + diff --git a/tests/script/sh/stop_dnodes.sh b/tests/script/sh/stop_dnodes.sh index 430f39901e..4c6d8e0351 100755 --- a/tests/script/sh/stop_dnodes.sh +++ b/tests/script/sh/stop_dnodes.sh @@ -14,7 +14,7 @@ while [ -n "$PID" ]; do echo kill -9 $PID pkill -9 taosd echo "Killing processes locking on port 6030" - if [[ "$OS_TYPE" != "Darwin" ]]; then + if [ "$OS_TYPE" != "Darwin" ]; then fuser -k -n tcp 6030 else lsof -nti:6030 | xargs kill -9 @@ -26,7 +26,7 @@ PID=`ps -ef|grep -w tarbitrator | grep -v grep | awk '{print $2}'` while [ -n "$PID" ]; do echo kill -9 $PID pkill -9 tarbitrator - if [[ "$OS_TYPE" != "Darwin" ]]; then + if [ "$OS_TYPE" != "Darwin" ]; then fuser -k -n tcp 6040 else lsof -nti:6040 | xargs kill -9 From 74f0030fc9e4062fc3327af5e50c4d87b78e9fb9 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 24 Jul 2021 14:38:05 +0800 Subject: [PATCH 09/38] [td-225] fix cache log error bug. 
--- src/util/src/tcache.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c index 4aa5b4378f..69b3741e13 100644 --- a/src/util/src/tcache.c +++ b/src/util/src/tcache.c @@ -132,11 +132,11 @@ static FORCE_INLINE void taosCacheReleaseNode(SCacheObj *pCacheObj, SCacheDataNo return; } - pCacheObj->totalSize -= pNode->size; + atomic_sub_fetch_64(&pCacheObj->totalSize, pNode->size); int32_t size = (int32_t)taosHashGetSize(pCacheObj->pHashTable); assert(size > 0); - uDebug("cache:%s, key:%p, %p is destroyed from cache, size:%dbytes, totalNum:%d size:%" PRId64 "bytes", + uDebug("cache:%s, key:%p, %p is destroyed from cache, size:%dbytes, total num:%d size:%" PRId64 "bytes", pCacheObj->name, pNode->key, pNode->data, pNode->size, size - 1, pCacheObj->totalSize); if (pCacheObj->freeFp) { @@ -252,6 +252,7 @@ void *taosCachePut(SCacheObj *pCacheObj, const void *key, size_t keyLen, const v pCacheObj->freeFp(p->data); } + atomic_sub_fetch_64(&pCacheObj->totalSize, p->size); tfree(p); } else { taosAddToTrashcan(pCacheObj, p); @@ -302,7 +303,7 @@ void *taosCacheAcquireByKey(SCacheObj *pCacheObj, const void *key, size_t keyLen } SCacheDataNode* ptNode = NULL; - taosHashGetClone(pCacheObj->pHashTable, key, keyLen, incRefFn, &ptNode, sizeof(void*)); + taosHashGetClone(pCacheObj->pHashTable, key, keyLen, incRefFn, &ptNode); void* pData = (ptNode != NULL)? ptNode->data:NULL; @@ -679,7 +680,7 @@ void* taosCacheTimedRefresh(void *handle) { assert(pCacheArrayList != NULL); uDebug("cache refresh thread starts"); - setThreadName("cacheTimedRefre"); + setThreadName("cacheRefresh"); const int32_t SLEEP_DURATION = 500; //500 ms int64_t count = 0; From 7409fda3a266f9090067ae8beb95163bed4b1d68 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 24 Jul 2021 14:47:37 +0800 Subject: [PATCH 10/38] [td-225] update the thread name --- src/balance/src/bnThread.c | 2 +- src/dnode/src/dnodeTelemetry.c | 2 +- src/dnode/src/dnodeVRead.c | 7 ++++--- src/dnode/src/dnodeVnodes.c | 1 - src/plugins/monitor/src/monMain.c | 2 +- src/query/src/qExecutor.c | 2 ++ src/rpc/src/rpcTcp.c | 5 ++--- src/rpc/test/rclient.c | 2 -- src/rpc/test/rsclient.c | 4 +--- src/sync/src/syncRetrieve.c | 1 - src/sync/test/syncClient.c | 2 -- src/sync/test/syncServer.c | 2 +- src/util/src/tlog.c | 7 +------ src/util/src/tsched.c | 4 +++- src/wal/src/walMgmt.c | 2 +- 15 files changed, 18 insertions(+), 27 deletions(-) diff --git a/src/balance/src/bnThread.c b/src/balance/src/bnThread.c index c5dca2da85..20da83ccba 100644 --- a/src/balance/src/bnThread.c +++ b/src/balance/src/bnThread.c @@ -23,7 +23,7 @@ static SBnThread tsBnThread; static void *bnThreadFunc(void *arg) { - setThreadName("bnThreadd"); + setThreadName("balance"); while (1) { pthread_mutex_lock(&tsBnThread.mutex); diff --git a/src/dnode/src/dnodeTelemetry.c b/src/dnode/src/dnodeTelemetry.c index 59b66879d4..22a6dc5b19 100644 --- a/src/dnode/src/dnodeTelemetry.c +++ b/src/dnode/src/dnodeTelemetry.c @@ -245,7 +245,7 @@ static void* telemetryThread(void* param) { clock_gettime(CLOCK_REALTIME, &end); end.tv_sec += 300; // wait 5 minutes before send first report - setThreadName("telemetryThrd"); + setThreadName("telemetry"); while (!tsExit) { int r = 0; diff --git a/src/dnode/src/dnodeVRead.c b/src/dnode/src/dnodeVRead.c index e8003a8fe7..c404ab1a55 100644 --- a/src/dnode/src/dnodeVRead.c +++ b/src/dnode/src/dnodeVRead.c @@ -118,10 +118,11 @@ static void *dnodeProcessReadQueue(void *wparam) { SVReadMsg * pRead; int32_t qtype; void * 
pVnode; - char name[16]; - memset(name, 0, 16); - snprintf(name, 16, "%s-dnReadQ", pPool->name); + char* threadname = strcmp(pPool->name, "vquery") == 0? "dnodeQueryQ":"dnodeFetchQ"; + + char name[16] = {0}; + snprintf(name, tListLen(name), "%s", threadname); setThreadName(name); while (1) { diff --git a/src/dnode/src/dnodeVnodes.c b/src/dnode/src/dnodeVnodes.c index 8ea8e280de..a5b0e9fe30 100644 --- a/src/dnode/src/dnodeVnodes.c +++ b/src/dnode/src/dnodeVnodes.c @@ -90,7 +90,6 @@ static void *dnodeOpenVnode(void *param) { char stepDesc[TSDB_STEP_DESC_LEN] = {0}; dDebug("thread:%d, start to open %d vnodes", pThread->threadIndex, pThread->vnodeNum); - setThreadName("dnodeOpenVnode"); for (int32_t v = 0; v < pThread->vnodeNum; ++v) { diff --git a/src/plugins/monitor/src/monMain.c b/src/plugins/monitor/src/monMain.c index 960a097f5d..6e583fe0df 100644 --- a/src/plugins/monitor/src/monMain.c +++ b/src/plugins/monitor/src/monMain.c @@ -114,7 +114,7 @@ int32_t monStartSystem() { static void *monThreadFunc(void *param) { monDebug("starting to initialize monitor module ..."); - setThreadName("monThrd"); + setThreadName("monitor"); while (1) { static int32_t accessTimes = 0; diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 982c45c441..f14b83101b 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -6554,9 +6554,11 @@ static SSDataBlock* hashDistinct(void* param, bool* newgroup) { return NULL; } + SDistinctOperatorInfo* pInfo = pOperator->info; SSDataBlock* pRes = pInfo->pRes; + pRes->info.rows = 0; SSDataBlock* pBlock = NULL; while(1) { diff --git a/src/rpc/src/rpcTcp.c b/src/rpc/src/rpcTcp.c index e9feeef9d3..0449ecac8b 100644 --- a/src/rpc/src/rpcTcp.c +++ b/src/rpc/src/rpcTcp.c @@ -529,10 +529,9 @@ static void *taosProcessTcpData(void *param) { SFdObj *pFdObj; struct epoll_event events[maxEvents]; SRecvInfo recvInfo; - char name[16]; - memset(name, 0, sizeof(name)); - snprintf(name, 16, "%s-tcpData", pThreadObj->label); + char name[16] = {0}; + snprintf(name, tListLen(name), "%s-tcp", pThreadObj->label); setThreadName(name); while (1) { diff --git a/src/rpc/test/rclient.c b/src/rpc/test/rclient.c index de30114bd1..2f4433f1bb 100644 --- a/src/rpc/test/rclient.c +++ b/src/rpc/test/rclient.c @@ -48,8 +48,6 @@ static void *sendRequest(void *param) { SInfo *pInfo = (SInfo *)param; SRpcMsg rpcMsg = {0}; - setThreadName("sendCliReq"); - tDebug("thread:%d, start to send request", pInfo->index); while ( pInfo->numOfReqs == 0 || pInfo->num < pInfo->numOfReqs) { diff --git a/src/rpc/test/rsclient.c b/src/rpc/test/rsclient.c index 3e94a56efb..65170d4abb 100644 --- a/src/rpc/test/rsclient.c +++ b/src/rpc/test/rsclient.c @@ -40,9 +40,7 @@ static int terror = 0; static void *sendRequest(void *param) { SInfo *pInfo = (SInfo *)param; SRpcMsg rpcMsg, rspMsg; - - setThreadName("sendSrvReq"); - + tDebug("thread:%d, start to send request", pInfo->index); while ( pInfo->numOfReqs == 0 || pInfo->num < pInfo->numOfReqs) { diff --git a/src/sync/src/syncRetrieve.c b/src/sync/src/syncRetrieve.c index 89fdda0686..c86ab85499 100644 --- a/src/sync/src/syncRetrieve.c +++ b/src/sync/src/syncRetrieve.c @@ -415,7 +415,6 @@ static int32_t syncRetrieveDataStepByStep(SSyncPeer *pPeer) { } void *syncRetrieveData(void *param) { - setThreadName("syncRetrievData"); int64_t rid = (int64_t)param; SSyncPeer *pPeer = syncAcquirePeer(rid); if (pPeer == NULL) { diff --git a/src/sync/test/syncClient.c b/src/sync/test/syncClient.c index 303d2376ef..23ea54ee0c 100644 --- a/src/sync/test/syncClient.c 
+++ b/src/sync/test/syncClient.c @@ -48,8 +48,6 @@ void *sendRequest(void *param) { SInfo * pInfo = (SInfo *)param; SRpcMsg rpcMsg = {0}; - setThreadName("sendCliReq"); - uDebug("thread:%d, start to send request", pInfo->index); while (pInfo->numOfReqs == 0 || pInfo->num < pInfo->numOfReqs) { diff --git a/src/sync/test/syncServer.c b/src/sync/test/syncServer.c index a3d0696648..4598e16a9d 100644 --- a/src/sync/test/syncServer.c +++ b/src/sync/test/syncServer.c @@ -178,7 +178,7 @@ void *processWriteQueue(void *param) { int type; void *item; - setThreadName("writeQ"); + setThreadName("syncWrite"); while (1) { int ret = taosReadQitem(qhandle, &type, &item); diff --git a/src/util/src/tlog.c b/src/util/src/tlog.c index 88f57e8ac2..1ce3eadf58 100644 --- a/src/util/src/tlog.c +++ b/src/util/src/tlog.c @@ -178,8 +178,6 @@ static void *taosThreadToOpenNewFile(void *param) { char keepName[LOG_FILE_NAME_LEN + 20]; sprintf(keepName, "%s.%d", tsLogObj.logName, tsLogObj.flag); - setThreadName("openNewFile"); - tsLogObj.flag ^= 1; tsLogObj.lines = 0; char name[LOG_FILE_NAME_LEN + 20]; @@ -689,12 +687,9 @@ static void taosWriteLog(SLogBuff *tLogBuff) { static void *taosAsyncOutputLog(void *param) { SLogBuff *tLogBuff = (SLogBuff *)param; - - setThreadName("asyncOutputLog"); + setThreadName("log"); while (1) { - //tsem_wait(&(tLogBuff->buffNotEmpty)); - taosMsleep(writeInterval); // Polling the buffer diff --git a/src/util/src/tsched.c b/src/util/src/tsched.c index 3d3dfd9899..b86ebb38bc 100644 --- a/src/util/src/tsched.c +++ b/src/util/src/tsched.c @@ -122,7 +122,9 @@ void *taosProcessSchedQueue(void *scheduler) { SSchedQueue *pSched = (SSchedQueue *)scheduler; int ret = 0; - setThreadName("schedQ"); + char name[16] = {0}; + snprintf(name, tListLen(name), "%s-taskQ", pSched->label); + setThreadName(name); while (1) { if ((ret = tsem_wait(&pSched->fullSem)) != 0) { diff --git a/src/wal/src/walMgmt.c b/src/wal/src/walMgmt.c index 45f65b2c2f..05324d31ee 100644 --- a/src/wal/src/walMgmt.c +++ b/src/wal/src/walMgmt.c @@ -192,7 +192,7 @@ static void walFsyncAll() { static void *walThreadFunc(void *param) { int stop = 0; - setThreadName("walThrd"); + setThreadName("wal"); while (1) { walUpdateSeq(); walFsyncAll(); From e422c53df76e1cec85945f30b6d5baa0ec261923 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 24 Jul 2021 14:49:55 +0800 Subject: [PATCH 11/38] [td-225] fix a typo. 
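
The typo being corrected is the misspelled vnode worker action in the EVMWorkerAction enum. As a minimal sketch (assuming no other translation unit references the old spelling), the corrected definition reads:

    typedef enum {
      VNODE_WORKER_ACTION_CLEANUP,
      VNODE_WORKER_ACTION_DESTROY   /* was misspelled VNODE_WORKER_ACTION_DESTROUY */
    } EVMWorkerAction;

The same patch also drops the per-thread setThreadName() calls from the tref test helpers.
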
--- src/util/tests/trefTest.c | 8 -------- src/vnode/src/vnodeWorker.c | 6 +++--- 2 files changed, 3 insertions(+), 11 deletions(-) diff --git a/src/util/tests/trefTest.c b/src/util/tests/trefTest.c index fe3dcab201..e01da070af 100644 --- a/src/util/tests/trefTest.c +++ b/src/util/tests/trefTest.c @@ -35,8 +35,6 @@ void *addRef(void *param) { SRefSpace *pSpace = (SRefSpace *)param; int id; - setThreadName("addRef"); - for (int i=0; i < pSpace->steps; ++i) { printf("a"); id = random() % pSpace->refNum; @@ -54,8 +52,6 @@ void *removeRef(void *param) { SRefSpace *pSpace = (SRefSpace *)param; int id, code; - setThreadName("removeRef"); - for (int i=0; i < pSpace->steps; ++i) { printf("d"); id = random() % pSpace->refNum; @@ -74,8 +70,6 @@ void *acquireRelease(void *param) { SRefSpace *pSpace = (SRefSpace *)param; int id; - setThreadName("acquireRelease"); - for (int i=0; i < pSpace->steps; ++i) { printf("a"); @@ -97,8 +91,6 @@ void myfree(void *p) { void *openRefSpace(void *param) { SRefSpace *pSpace = (SRefSpace *)param; - setThreadName("openRefSpace"); - printf("c"); pSpace->rsetId = taosOpenRef(50, myfree); diff --git a/src/vnode/src/vnodeWorker.c b/src/vnode/src/vnodeWorker.c index e94c99cbea..7fcc393746 100644 --- a/src/vnode/src/vnodeWorker.c +++ b/src/vnode/src/vnodeWorker.c @@ -25,7 +25,7 @@ typedef enum { VNODE_WORKER_ACTION_CLEANUP, - VNODE_WORKER_ACTION_DESTROUY + VNODE_WORKER_ACTION_DESTROY } EVMWorkerAction; typedef struct { @@ -155,7 +155,7 @@ int32_t vnodeCleanupInMWorker(SVnodeObj *pVnode) { int32_t vnodeDestroyInMWorker(SVnodeObj *pVnode) { vTrace("vgId:%d, will destroy in vmworker", pVnode->vgId); - return vnodeWriteIntoMWorker(pVnode, VNODE_WORKER_ACTION_DESTROUY, NULL); + return vnodeWriteIntoMWorker(pVnode, VNODE_WORKER_ACTION_DESTROY, NULL); } static void vnodeFreeMWorkerMsg(SVMWorkerMsg *pMsg) { @@ -179,7 +179,7 @@ static void vnodeProcessMWorkerMsg(SVMWorkerMsg *pMsg) { case VNODE_WORKER_ACTION_CLEANUP: vnodeCleanUp(pMsg->pVnode); break; - case VNODE_WORKER_ACTION_DESTROUY: + case VNODE_WORKER_ACTION_DESTROY: vnodeDestroy(pMsg->pVnode); break; default: From 1073ac5160200136446f18fc9acf185b3107e629 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 24 Jul 2021 14:55:22 +0800 Subject: [PATCH 12/38] [td-5407]: add a local buffer for super table vgroup id list to improve the query performance. 
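
The idea is to keep the vgroup id list of each super table in a small client-side cache (tscVgroupListBuf) keyed by the table name, so repeated queries on the same super table can skip the round trip to the mnode. Below is a condensed sketch of the lookup path, using only the helper names that appear in the diff; the wrapper name getCachedVgroupIdList is purely illustrative (the real change is inlined in loadAllTableMeta), and sizes, lifetime handling and error paths are simplified:

    /* Try the local buffer first; return NULL on a miss so the caller
     * can fall back to fetching the vgroup list from the mnode. */
    static SArray* getCachedVgroupIdList(const char* stableName) {
      void* pv = taosCacheAcquireByKey(tscVgroupListBuf, stableName, strlen(stableName));
      if (pv == NULL) {
        return NULL;                           /* cache miss */
      }

      tFilePage* pdata = (tFilePage*)pv;       /* cached payload: flat array of vgroup ids */
      SArray* vgroupIdList = taosArrayInit(pdata->num, sizeof(int32_t));
      taosArrayAddBatch(vgroupIdList, pdata->data, pdata->num);

      taosCacheRelease(tscVgroupListBuf, &pv, false);
      return vgroupIdList;                     /* cache hit: no mnode request needed */
    }

On the response path, tscProcessMultiTableMetaRsp packs the received vgroup ids into a tFilePage and stores it with taosCachePut under a short lifetime, so stale entries simply age out and are refetched on the next query.
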
--- src/client/inc/tsclient.h | 6 +- src/client/src/tscLocal.c | 2 +- src/client/src/tscParseLineProtocol.c | 2 +- src/client/src/tscSQLParser.c | 83 +++++++++---- src/client/src/tscServer.c | 163 ++++++++++++++------------ src/client/src/tscStream.c | 2 +- src/client/src/tscSubquery.c | 2 +- src/client/src/tscSystem.c | 76 +++++++----- src/client/src/tscUtil.c | 10 +- src/query/src/qScript.c | 1 + src/util/inc/hash.h | 3 +- src/util/src/hash.c | 4 +- src/vnode/src/vnodeMgmt.c | 2 +- 13 files changed, 210 insertions(+), 146 deletions(-) diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index 83ec28898c..9a627d5cd6 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -151,7 +151,8 @@ typedef struct STableDataBlocks { typedef struct { STableMeta *pTableMeta; - SVgroupsInfo *pVgroupInfo; + SArray *vgroupIdList; +// SVgroupsInfo *pVgroupsInfo; } STableMetaVgroupInfo; typedef struct SInsertStatementParam { @@ -415,7 +416,8 @@ int32_t tscValidateSqlInfo(SSqlObj *pSql, struct SSqlInfo *pInfo); int32_t tsSetBlockInfo(SSubmitBlk *pBlocks, const STableMeta *pTableMeta, int32_t numOfRows); extern int32_t sentinel; extern SHashObj *tscVgroupMap; -extern SHashObj *tscTableMetaInfo; +extern SHashObj *tscTableMetaMap; +extern SCacheObj *tscVgroupListBuf; extern int tscObjRef; extern void *tscTmr; diff --git a/src/client/src/tscLocal.c b/src/client/src/tscLocal.c index d1a325be35..641f62f22b 100644 --- a/src/client/src/tscLocal.c +++ b/src/client/src/tscLocal.c @@ -920,7 +920,7 @@ int tscProcessLocalCmd(SSqlObj *pSql) { } else if (pCmd->command == TSDB_SQL_SHOW_CREATE_DATABASE) { pRes->code = tscProcessShowCreateDatabase(pSql); } else if (pCmd->command == TSDB_SQL_RESET_CACHE) { - taosHashClear(tscTableMetaInfo); + taosHashClear(tscTableMetaMap); pRes->code = TSDB_CODE_SUCCESS; } else if (pCmd->command == TSDB_SQL_SERV_VERSION) { pRes->code = tscProcessServerVer(pSql); diff --git a/src/client/src/tscParseLineProtocol.c b/src/client/src/tscParseLineProtocol.c index cccc81274d..0386850f63 100644 --- a/src/client/src/tscParseLineProtocol.c +++ b/src/client/src/tscParseLineProtocol.c @@ -444,7 +444,7 @@ int32_t loadTableMeta(TAOS* taos, char* tableName, SSmlSTableSchema* schema) { uint32_t size = tscGetTableMetaMaxSize(); STableMeta* tableMeta = calloc(1, size); - taosHashGetClone(tscTableMetaInfo, fullTableName, strlen(fullTableName), NULL, tableMeta, -1); + taosHashGetClone(tscTableMetaMap, fullTableName, strlen(fullTableName), NULL, tableMeta); tstrncpy(schema->sTableName, tableName, strlen(tableName)+1); schema->precision = tableMeta->tableInfo.precision; diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index c0627f4c31..36f252a9ba 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -80,8 +80,8 @@ static void getColumnName(tSqlExprItem* pItem, char* resultFieldName, char* rawN static int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t colIndex, tSqlExprItem* pItem, bool finalResult, SUdfInfo* pUdfInfo); -static int32_t insertResultField(SQueryInfo* pQueryInfo, int32_t outputIndex, SColumnList* pIdList, int16_t bytes, - int8_t type, char* fieldName, SExprInfo* pSqlExpr); +static int32_t insertResultField(SQueryInfo* pQueryInfo, int32_t outputIndex, SColumnList* pColList, int16_t bytes, + int8_t type, char* fieldName, SExprInfo* pSqlExpr); static uint8_t convertRelationalOperator(SStrToken *pToken); @@ -8113,6 +8113,7 @@ int32_t loadAllTableMeta(SSqlObj* pSql, struct SSqlInfo* pInfo) { 
return TSDB_CODE_TSC_OUT_OF_MEMORY; } } + pTableMeta = calloc(1, maxSize); plist = taosArrayInit(4, POINTER_BYTES); @@ -8128,9 +8129,16 @@ int32_t loadAllTableMeta(SSqlObj* pSql, struct SSqlInfo* pInfo) { size_t len = strlen(name); memset(pTableMeta, 0, maxSize); - taosHashGetClone(tscTableMetaInfo, name, len, NULL, pTableMeta, -1); + taosHashGetClone(tscTableMetaMap, name, len, NULL, pTableMeta); if (pTableMeta->id.uid > 0) { + tscDebug("0x%"PRIx64" retrieve table meta %s from local buf", pSql->self, name); + + // avoid mem leak, may should update pTableMeta + const char* pTableName = tNameGetTableName(pname); + size_t nameLen = strlen(pTableName); + + void* pVgroupIdList = NULL; if (pTableMeta->tableType == TSDB_CHILD_TABLE) { code = tscCreateTableMetaFromSTableMeta(pTableMeta, name, pSql->pBuf); @@ -8142,23 +8150,33 @@ int32_t loadAllTableMeta(SSqlObj* pSql, struct SSqlInfo* pInfo) { } } else if (pTableMeta->tableType == TSDB_SUPER_TABLE) { // the vgroup list of super table is not kept in local buffer, so here need retrieve it from the mnode each time - char* t = strdup(name); - taosArrayPush(pVgroupList, &t); + void* pv = taosCacheAcquireByKey(tscVgroupListBuf, pTableName, nameLen); + if (pv == NULL) { + char* t = strdup(name); + taosArrayPush(pVgroupList, &t); + tscDebug("0x%"PRIx64" failed to retrieve stable %s vgroup id list in cache, try fetch from mnode", pSql->self, pTableName); + } else { + tFilePage* pdata = (tFilePage*) pv; + pVgroupIdList = taosArrayInit(pdata->num, sizeof(int32_t)); + if (pVgroupIdList == NULL) { + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + + taosArrayAddBatch(pVgroupIdList, pdata->data, pdata->num); + taosCacheRelease(tscVgroupListBuf, &pv, false); + } } - //STableMeta* pMeta = tscTableMetaDup(pTableMeta); - //STableMetaVgroupInfo p = { .pTableMeta = pMeta }; - - //const char* px = tNameGetTableName(pname); - //taosHashPut(pCmd->pTableMetaMap, px, strlen(px), &p, sizeof(STableMetaVgroupInfo)); - // avoid mem leak, may should update pTableMeta - const char* px = tNameGetTableName(pname); - if (taosHashGet(pCmd->pTableMetaMap, px, strlen(px)) == NULL) { + if (taosHashGet(pCmd->pTableMetaMap, pTableName, nameLen) == NULL) { STableMeta* pMeta = tscTableMetaDup(pTableMeta); - STableMetaVgroupInfo p = { .pTableMeta = pMeta, .pVgroupInfo = NULL}; - taosHashPut(pCmd->pTableMetaMap, px, strlen(px), &p, sizeof(STableMetaVgroupInfo)); + STableMetaVgroupInfo tvi = { .pTableMeta = pMeta, .vgroupIdList = pVgroupIdList}; + taosHashPut(pCmd->pTableMetaMap, pTableName, nameLen, &tvi, sizeof(STableMetaVgroupInfo)); } - } else { // add to the retrieve table meta array list. + } else { + // Add to the retrieve table meta array list. + // If the tableMeta is missing, the cached vgroup list for the corresponding super table will be ignored. 
+ tscDebug("0x%"PRIx64" failed to retrieve table meta %s from local buf", pSql->self, name); + char* t = strdup(name); taosArrayPush(plist, &t); } @@ -8278,16 +8296,37 @@ static int32_t doLoadAllTableMeta(SSqlObj* pSql, SQueryInfo* pQueryInfo, SSqlNod pTableMetaInfo->pTableMeta = tscTableMetaDup(p->pTableMeta); assert(pTableMetaInfo->pTableMeta != NULL); - if (p->pVgroupInfo != NULL) { - pTableMetaInfo->vgroupList = tscVgroupsInfoDup(p->pVgroupInfo); - } + if (p->vgroupIdList != NULL) { + size_t s = taosArrayGetSize(p->vgroupIdList); - if (code != TSDB_CODE_SUCCESS) { - return code; + size_t vgroupsz = sizeof(SVgroupInfo) * s + sizeof(SVgroupsInfo); + pTableMetaInfo->vgroupList = calloc(1, vgroupsz); + if (pTableMetaInfo->vgroupList == NULL) { + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + + pTableMetaInfo->vgroupList->numOfVgroups = (int32_t) s; + for(int32_t j = 0; j < s; ++j) { + int32_t* id = taosArrayGet(p->vgroupIdList, j); + + // check if current buffer contains the vgroup info. If not, add it + SNewVgroupInfo existVgroupInfo = {.inUse = -1,}; + taosHashGetClone(tscVgroupMap, id, sizeof(*id), NULL, &existVgroupInfo); + + assert(existVgroupInfo.inUse >= 0); + SVgroupInfo *pVgroup = &pTableMetaInfo->vgroupList->vgroups[j]; + + pVgroup->numOfEps = existVgroupInfo.numOfEps; + pVgroup->vgId = existVgroupInfo.vgId; + for (int32_t k = 0; k < existVgroupInfo.numOfEps; ++k) { + pVgroup->epAddr[k].port = existVgroupInfo.ep[k].port; + pVgroup->epAddr[k].fqdn = strndup(existVgroupInfo.ep[k].fqdn, TSDB_FQDN_LEN); + } + } } } - return TSDB_CODE_SUCCESS; + return code; } static STableMeta* extractTempTableMetaFromSubquery(SQueryInfo* pUpstream) { diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index e975dd7b06..63bb9ee214 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -157,7 +157,7 @@ static void tscUpdateVgroupInfo(SSqlObj *pSql, SRpcEpSet *pEpSet) { assert(vgId > 0); SNewVgroupInfo vgroupInfo = {.vgId = -1}; - taosHashGetClone(tscVgroupMap, &vgId, sizeof(vgId), NULL, &vgroupInfo, sizeof(SNewVgroupInfo)); + taosHashGetClone(tscVgroupMap, &vgId, sizeof(vgId), NULL, &vgroupInfo); assert(vgroupInfo.numOfEps > 0 && vgroupInfo.vgId > 0); tscDebug("before: Endpoint in use:%d, numOfEps:%d", vgroupInfo.inUse, vgroupInfo.numOfEps); @@ -344,6 +344,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) { rpcFreeCont(rpcMsg->pCont); return; } + assert(pSql->self == handle); STscObj *pObj = pSql->pTscObj; @@ -614,7 +615,7 @@ int tscBuildSubmitMsg(SSqlObj *pSql, SSqlInfo *pInfo) { pSql->cmd.msgType = TSDB_MSG_TYPE_SUBMIT; SNewVgroupInfo vgroupInfo = {0}; - taosHashGetClone(tscVgroupMap, &pTableMeta->vgId, sizeof(pTableMeta->vgId), NULL, &vgroupInfo, sizeof(SNewVgroupInfo)); + taosHashGetClone(tscVgroupMap, &pTableMeta->vgId, sizeof(pTableMeta->vgId), NULL, &vgroupInfo); tscDumpEpSetFromVgroupInfo(&pSql->epSet, &vgroupInfo); tscDebug("0x%"PRIx64" submit msg built, numberOfEP:%d", pSql->self, pSql->epSet.numOfEps); @@ -687,7 +688,7 @@ static char *doSerializeTableInfo(SQueryTableMsg *pQueryMsg, SSqlObj *pSql, STab vgId = pTableMeta->vgId; SNewVgroupInfo vgroupInfo = {0}; - taosHashGetClone(tscVgroupMap, &pTableMeta->vgId, sizeof(pTableMeta->vgId), NULL, &vgroupInfo, sizeof(SNewVgroupInfo)); + taosHashGetClone(tscVgroupMap, &pTableMeta->vgId, sizeof(pTableMeta->vgId), NULL, &vgroupInfo); tscDumpEpSetFromVgroupInfo(&pSql->epSet, &vgroupInfo); } @@ -1582,7 +1583,7 @@ int tscBuildUpdateTagMsg(SSqlObj* pSql, SSqlInfo *pInfo) { STableMeta *pTableMeta = 
tscGetMetaInfo(pQueryInfo, 0)->pTableMeta; SNewVgroupInfo vgroupInfo = {.vgId = -1}; - taosHashGetClone(tscVgroupMap, &pTableMeta->vgId, sizeof(pTableMeta->vgId), NULL, &vgroupInfo, sizeof(SNewVgroupInfo)); + taosHashGetClone(tscVgroupMap, &pTableMeta->vgId, sizeof(pTableMeta->vgId), NULL, &vgroupInfo); assert(vgroupInfo.vgId > 0); tscDumpEpSetFromVgroupInfo(&pSql->epSet, &vgroupInfo); @@ -1809,34 +1810,6 @@ int tscBuildConnectMsg(SSqlObj *pSql, SSqlInfo *pInfo) { return TSDB_CODE_SUCCESS; } -int tscBuildTableMetaMsg(SSqlObj *pSql, SSqlInfo *pInfo) { -#if 0 - SSqlCmd *pCmd = &pSql->cmd; - SQueryInfo *pQueryInfo = tscGetQueryInfo(&pSql->cmd); - - STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); - STableInfoMsg *pInfoMsg = (STableInfoMsg *)pCmd->payload; - - int32_t code = tNameExtractFullName(&pTableMetaInfo->name, pInfoMsg->tableFname); - if (code != TSDB_CODE_SUCCESS) { - return TSDB_CODE_TSC_INVALID_OPERATION; - } - - pInfoMsg->createFlag = htons(pSql->cmd.autoCreated ? 1 : 0); - - char *pMsg = (char *)pInfoMsg + sizeof(STableInfoMsg); - - if (pCmd->autoCreated && pCmd->tagData.dataLen != 0) { - pMsg = serializeTagData(&pCmd->tagData, pMsg); - } - - pCmd->payloadLen = (int32_t)(pMsg - (char*)pInfoMsg); - pCmd->msgType = TSDB_MSG_TYPE_CM_TABLE_META; -#endif - - return TSDB_CODE_SUCCESS; -} - /** * multi table meta req pkg format: * |SMultiTableInfoMsg | tableId0 | tableId1 | tableId2 | ...... @@ -1996,20 +1969,17 @@ static int32_t tableMetaMsgConvert(STableMetaMsg* pMetaMsg) { } // update the vgroupInfo if needed -static void doUpdateVgroupInfo(STableMeta *pTableMeta, SVgroupMsg *pVgroupMsg) { - if (pTableMeta->vgId > 0) { - int32_t vgId = pTableMeta->vgId; - assert(pTableMeta->tableType != TSDB_SUPER_TABLE); +static void doUpdateVgroupInfo(int32_t vgId, SVgroupMsg *pVgroupMsg) { + assert(vgId > 0); - SNewVgroupInfo vgroupInfo = {.inUse = -1}; - taosHashGetClone(tscVgroupMap, &vgId, sizeof(vgId), NULL, &vgroupInfo, sizeof(SNewVgroupInfo)); + SNewVgroupInfo vgroupInfo = {.inUse = -1}; + taosHashGetClone(tscVgroupMap, &vgId, sizeof(vgId), NULL, &vgroupInfo); - // vgroup info exists, compare with it - if (((vgroupInfo.inUse >= 0) && !vgroupInfoIdentical(&vgroupInfo, pVgroupMsg)) || (vgroupInfo.inUse < 0)) { - vgroupInfo = createNewVgroupInfo(pVgroupMsg); - taosHashPut(tscVgroupMap, &vgId, sizeof(vgId), &vgroupInfo, sizeof(vgroupInfo)); - tscDebug("add new VgroupInfo, vgId:%d, total cached:%d", vgId, (int32_t) taosHashGetSize(tscVgroupMap)); - } + // vgroup info exists, compare with it + if (((vgroupInfo.inUse >= 0) && !vgroupInfoIdentical(&vgroupInfo, pVgroupMsg)) || (vgroupInfo.inUse < 0)) { + vgroupInfo = createNewVgroupInfo(pVgroupMsg); + taosHashPut(tscVgroupMap, &vgId, sizeof(vgId), &vgroupInfo, sizeof(vgroupInfo)); + tscDebug("add/update new VgroupInfo, vgId:%d, total cached:%d", vgId, (int32_t) taosHashGetSize(tscVgroupMap)); } } @@ -2022,18 +1992,18 @@ static void doAddTableMetaToLocalBuf(STableMeta* pTableMeta, STableMetaMsg* pMet if (updateSTable) { STableMeta* pSupTableMeta = createSuperTableMeta(pMetaMsg); uint32_t size = tscGetTableMetaSize(pSupTableMeta); - int32_t code = taosHashPut(tscTableMetaInfo, pTableMeta->sTableName, len, pSupTableMeta, size); + int32_t code = taosHashPut(tscTableMetaMap, pTableMeta->sTableName, len, pSupTableMeta, size); assert(code == TSDB_CODE_SUCCESS); tfree(pSupTableMeta); } CChildTableMeta* cMeta = tscCreateChildMeta(pTableMeta); - taosHashPut(tscTableMetaInfo, pMetaMsg->tableFname, strlen(pMetaMsg->tableFname), cMeta, 
sizeof(CChildTableMeta)); + taosHashPut(tscTableMetaMap, pMetaMsg->tableFname, strlen(pMetaMsg->tableFname), cMeta, sizeof(CChildTableMeta)); tfree(cMeta); } else { uint32_t s = tscGetTableMetaSize(pTableMeta); - taosHashPut(tscTableMetaInfo, pMetaMsg->tableFname, strlen(pMetaMsg->tableFname), pTableMeta, s); + taosHashPut(tscTableMetaMap, pMetaMsg->tableFname, strlen(pMetaMsg->tableFname), pTableMeta, s); } } @@ -2058,7 +2028,9 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) { assert(strncmp(pMetaMsg->tableFname, name, tListLen(pMetaMsg->tableFname)) == 0); doAddTableMetaToLocalBuf(pTableMeta, pMetaMsg, true); - doUpdateVgroupInfo(pTableMeta, &pMetaMsg->vgroup); + if (pTableMeta->tableType != TSDB_SUPER_TABLE) { + doUpdateVgroupInfo(pTableMeta->vgId, &pMetaMsg->vgroup); + } tscDebug("0x%"PRIx64" recv table meta, uid:%" PRIu64 ", tid:%d, name:%s, numOfCols:%d, numOfTags:%d", pSql->self, pTableMeta->id.uid, pTableMeta->id.tid, tNameGetTableName(&pTableMetaInfo->name), pTableMeta->tableInfo.numOfColumns, @@ -2068,6 +2040,37 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) { return TSDB_CODE_SUCCESS; } +static SArray* createVgroupIdListFromMsg(char* pMsg, SHashObj* pSet, char* name, int32_t* size, uint64_t id) { + SVgroupsMsg *pVgroupMsg = (SVgroupsMsg *)pMsg; + + pVgroupMsg->numOfVgroups = htonl(pVgroupMsg->numOfVgroups); + *size = (int32_t)(sizeof(SVgroupMsg) * pVgroupMsg->numOfVgroups + sizeof(SVgroupsMsg)); + + SArray* vgroupIdList = taosArrayInit(pVgroupMsg->numOfVgroups, sizeof(int32_t)); + + if (pVgroupMsg->numOfVgroups <= 0) { + tscDebug("0x%" PRIx64 " empty vgroup id list, no corresponding tables for stable:%s", id, name); + } else { + // just init, no need to lock + for (int32_t j = 0; j < pVgroupMsg->numOfVgroups; ++j) { + SVgroupMsg *vmsg = &pVgroupMsg->vgroups[j]; + vmsg->vgId = htonl(vmsg->vgId); + for (int32_t k = 0; k < vmsg->numOfEps; ++k) { + vmsg->epAddr[k].port = htons(vmsg->epAddr[k].port); + } + + taosArrayPush(vgroupIdList, &vmsg->vgId); + + if (taosHashGet(pSet, &vmsg->vgId, sizeof(vmsg->vgId)) == NULL) { + taosHashPut(pSet, &vmsg->vgId, sizeof(vmsg->vgId), "", 0); + doUpdateVgroupInfo(vmsg->vgId, vmsg); + } + } + } + + return vgroupIdList; +} + static SVgroupsInfo* createVgroupInfoFromMsg(char* pMsg, int32_t* size, uint64_t id) { SVgroupsMsg *pVgroupMsg = (SVgroupsMsg *)pMsg; pVgroupMsg->numOfVgroups = htonl(pVgroupMsg->numOfVgroups); @@ -2092,24 +2095,14 @@ static SVgroupsInfo* createVgroupInfoFromMsg(char* pMsg, int32_t* size, uint64_t vmsg->epAddr[k].port = htons(vmsg->epAddr[k].port); } - SNewVgroupInfo newVi = createNewVgroupInfo(vmsg); - pVgroup->numOfEps = newVi.numOfEps; - pVgroup->vgId = newVi.vgId; + pVgroup->numOfEps = vmsg->numOfEps; + pVgroup->vgId = vmsg->vgId; for (int32_t k = 0; k < vmsg->numOfEps; ++k) { - pVgroup->epAddr[k].port = newVi.ep[k].port; - pVgroup->epAddr[k].fqdn = strndup(newVi.ep[k].fqdn, TSDB_FQDN_LEN); + pVgroup->epAddr[k].port = vmsg->epAddr[k].port; + pVgroup->epAddr[k].fqdn = strndup(vmsg->epAddr[k].fqdn, TSDB_FQDN_LEN); } - // check if current buffer contains the vgroup info. 
- // If not, add it - SNewVgroupInfo existVgroupInfo = {.inUse = -1}; - taosHashGetClone(tscVgroupMap, &newVi.vgId, sizeof(newVi.vgId), NULL, &existVgroupInfo, sizeof(SNewVgroupInfo)); - - if (((existVgroupInfo.inUse >= 0) && !vgroupInfoIdentical(&existVgroupInfo, vmsg)) || - (existVgroupInfo.inUse < 0)) { // vgroup info exists, compare with it - taosHashPut(tscVgroupMap, &newVi.vgId, sizeof(newVi.vgId), &newVi, sizeof(newVi)); - tscDebug("0x%" PRIx64 " add new VgroupInfo, vgId:%d, total cached:%d", id, newVi.vgId, (int32_t)taosHashGetSize(tscVgroupMap)); - } + doUpdateVgroupInfo(pVgroup->vgId, vmsg); } } @@ -2187,6 +2180,8 @@ int tscProcessMultiTableMetaRsp(SSqlObj *pSql) { char* buf = NULL; char* pMsg = pMultiMeta->meta; + + // decompresss the message payload if (pMultiMeta->compressed) { buf = malloc(pMultiMeta->rawLen - sizeof(SMultiTableMeta)); int32_t len = tsDecompressString(pMultiMeta->meta, pMultiMeta->contLen - sizeof(SMultiTableMeta), 1, @@ -2245,7 +2240,7 @@ int tscProcessMultiTableMetaRsp(SSqlObj *pSql) { // for each vgroup, only update the information once. int64_t vgId = pMetaMsg->vgroup.vgId; if (pTableMeta->tableType != TSDB_SUPER_TABLE && taosHashGet(pSet, &vgId, sizeof(vgId)) == NULL) { - doUpdateVgroupInfo(pTableMeta, &pMetaMsg->vgroup); + doUpdateVgroupInfo(vgId, &pMetaMsg->vgroup); taosHashPut(pSet, &vgId, sizeof(vgId), "", 0); } @@ -2263,11 +2258,26 @@ int tscProcessMultiTableMetaRsp(SSqlObj *pSql) { assert(p != NULL); int32_t size = 0; - if (p->pVgroupInfo!= NULL) { - tscVgroupInfoClear(p->pVgroupInfo); - //tfree(p->pTableMeta); + if (p->vgroupIdList!= NULL) { + taosArrayDestroy(p->vgroupIdList); } - p->pVgroupInfo = createVgroupInfoFromMsg(pMsg, &size, pSql->self); + + char tableName[TSDB_TABLE_FNAME_LEN] = {0}; + tstrncpy(tableName, name, TSDB_TABLE_NAME_LEN); + p->vgroupIdList = createVgroupIdListFromMsg(pMsg, pSet, tableName, &size, pSql->self); + + int32_t numOfVgId = taosArrayGetSize(p->vgroupIdList); + int32_t s = sizeof(tFilePage) + numOfVgId * sizeof(int32_t); + + tFilePage* idList = calloc(1, s); + idList->num = numOfVgId; + memcpy(idList->data, TARRAY_GET_START(p->vgroupIdList), numOfVgId * sizeof(int32_t)); + + void* idListInst = taosCachePut(tscVgroupListBuf, tableName, strlen(tableName), idList, s, 5000); + taosCacheRelease(tscVgroupListBuf, (void*) &idListInst, false); + + tfree(idList); + pMsg += size; } @@ -2503,7 +2513,7 @@ int tscProcessDropDbRsp(SSqlObj *pSql) { //TODO LOCK DB WHEN MODIFY IT //pSql->pTscObj->db[0] = 0; - taosHashClear(tscTableMetaInfo); + taosHashClear(tscTableMetaMap); return 0; } @@ -2514,8 +2524,8 @@ int tscProcessDropTableRsp(SSqlObj *pSql) { char name[TSDB_TABLE_FNAME_LEN] = {0}; tNameExtractFullName(&pTableMetaInfo->name, name); - taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN)); - tscDebug("0x%"PRIx64" remove table meta after drop table:%s, numOfRemain:%d", pSql->self, name, (int32_t) taosHashGetSize(tscTableMetaInfo)); + taosHashRemove(tscTableMetaMap, name, strnlen(name, TSDB_TABLE_FNAME_LEN)); + tscDebug("0x%"PRIx64" remove table meta after drop table:%s, numOfRemain:%d", pSql->self, name, (int32_t) taosHashGetSize(tscTableMetaMap)); tfree(pTableMetaInfo->pTableMeta); return 0; @@ -2530,11 +2540,11 @@ int tscProcessAlterTableMsgRsp(SSqlObj *pSql) { tscDebug("0x%"PRIx64" remove tableMeta in hashMap after alter-table: %s", pSql->self, name); bool isSuperTable = UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo); - taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN)); + 
taosHashRemove(tscTableMetaMap, name, strnlen(name, TSDB_TABLE_FNAME_LEN)); tfree(pTableMetaInfo->pTableMeta); if (isSuperTable) { // if it is a super table, iterate the hashTable and remove all the childTableMeta - taosHashClear(tscTableMetaInfo); + taosHashClear(tscTableMetaMap); } return 0; @@ -2801,7 +2811,7 @@ int32_t tscGetTableMetaImpl(SSqlObj* pSql, STableMetaInfo *pTableMetaInfo, bool tNameExtractFullName(&pTableMetaInfo->name, name); size_t len = strlen(name); - taosHashGetClone(tscTableMetaInfo, name, len, NULL, pTableMetaInfo->pTableMeta, -1); + taosHashGetClone(tscTableMetaMap, name, len, NULL, pTableMetaInfo->pTableMeta); // TODO resize the tableMeta assert(size < 80 * TSDB_MAX_COLUMNS); @@ -2914,7 +2924,7 @@ int tscRenewTableMeta(SSqlObj *pSql, int32_t tableIndex) { // remove stored tableMeta info in hash table size_t len = strlen(name); - taosHashRemove(tscTableMetaInfo, name, len); + taosHashRemove(tscTableMetaMap, name, len); return getTableMetaFromMnode(pSql, pTableMetaInfo, false); } @@ -2966,8 +2976,6 @@ int tscGetSTableVgroupInfo(SSqlObj *pSql, SQueryInfo* pQueryInfo) { tscDebug("0x%"PRIx64" svgroupRid from %" PRId64 " to %" PRId64 , pSql->self, pSql->svgroupRid, pNew->self); pSql->svgroupRid = pNew->self; - - tscDebug("0x%"PRIx64" new sqlObj:%p to get vgroupInfo, numOfTables:%d", pSql->self, pNew, pNewQueryInfo->numOfTables); pNew->fp = tscTableMetaCallBack; @@ -3010,7 +3018,6 @@ void tscInitMsgsFp() { tscBuildMsg[TSDB_SQL_CONNECT] = tscBuildConnectMsg; tscBuildMsg[TSDB_SQL_USE_DB] = tscBuildUseDbMsg; -// tscBuildMsg[TSDB_SQL_META] = tscBuildTableMetaMsg; tscBuildMsg[TSDB_SQL_STABLEVGROUP] = tscBuildSTableVgroupMsg; tscBuildMsg[TSDB_SQL_RETRIEVE_FUNC] = tscBuildRetrieveFuncMsg; diff --git a/src/client/src/tscStream.c b/src/client/src/tscStream.c index da5bdf669f..502ef22d4b 100644 --- a/src/client/src/tscStream.c +++ b/src/client/src/tscStream.c @@ -206,7 +206,7 @@ static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOf char name[TSDB_TABLE_FNAME_LEN] = {0}; tNameExtractFullName(&pTableMetaInfo->name, name); - taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN)); + taosHashRemove(tscTableMetaMap, name, strnlen(name, TSDB_TABLE_FNAME_LEN)); tfree(pTableMetaInfo->pTableMeta); diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index b72bd78b1b..b98ffd7638 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -3125,7 +3125,7 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows) for(int32_t i = 0; i < pParentObj->cmd.insertParam.numOfTables; ++i) { char name[TSDB_TABLE_FNAME_LEN] = {0}; tNameExtractFullName(pParentObj->cmd.insertParam.pTableNameList[i], name); - taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN)); + taosHashRemove(tscTableMetaMap, name, strnlen(name, TSDB_TABLE_FNAME_LEN)); } pParentObj->res.code = TSDB_CODE_SUCCESS; diff --git a/src/client/src/tscSystem.c b/src/client/src/tscSystem.c index 7b8f24a093..c04765b065 100644 --- a/src/client/src/tscSystem.c +++ b/src/client/src/tscSystem.c @@ -19,15 +19,12 @@ #include "trpc.h" #include "tnote.h" #include "ttimer.h" -#include "tutil.h" #include "tsched.h" #include "tscLog.h" -#include "tscUtil.h" #include "tsclient.h" #include "tglobal.h" #include "tconfig.h" #include "ttimezone.h" -#include "tlocale.h" #include "qScript.h" // global, not configurable @@ -36,8 +33,10 @@ int32_t sentinel = TSC_VAR_NOT_RELEASE; -SHashObj *tscVgroupMap; // hash map to keep the global 
vgroup info -SHashObj *tscTableMetaInfo; // table meta info +SHashObj *tscVgroupMap; // hash map to keep the vgroup info from mnode +SHashObj *tscTableMetaMap; // table meta info buffer +SCacheObj *tscVgroupListBuf; // super table vgroup list information, only survives 5 seconds for each super table vgroup list + int32_t tscObjRef = -1; void *tscTmr; void *tscQhandle; @@ -45,17 +44,21 @@ int32_t tscRefId = -1; int32_t tscNumOfObj = 0; // number of sqlObj in current process. static void *tscCheckDiskUsageTmr; void *tscRpcCache; // cache to keep rpc obj -int32_t tscNumOfThreads = 1; // num of rpc threads -char tscLogFileName[12] = "taoslog"; -int tscLogFileNum = 10; -static pthread_mutex_t rpcObjMutex; // mutex to protect open the rpc obj concurrently -static pthread_once_t tscinit = PTHREAD_ONCE_INIT; +int32_t tscNumOfThreads = 1; // num of rpc threads +char tscLogFileName[12] = "taoslog"; +int tscLogFileNum = 10; + +static pthread_mutex_t rpcObjMutex; // mutex to protect open the rpc obj concurrently +static pthread_once_t tscinit = PTHREAD_ONCE_INIT; + +// pthread_once can not return result code, so result code is set to a global variable. static volatile int tscInitRes = 0; void tscCheckDiskUsage(void *UNUSED_PARAM(para), void *UNUSED_PARAM(param)) { taosGetDisk(); taosTmrReset(tscCheckDiskUsage, 20 * 1000, NULL, tscTmr, &tscCheckDiskUsageTmr); } + void tscFreeRpcObj(void *param) { assert(param); SRpcObj *pRpcObj = (SRpcObj *)(param); @@ -67,10 +70,9 @@ void tscReleaseRpc(void *param) { if (param == NULL) { return; } - pthread_mutex_lock(&rpcObjMutex); - taosCacheRelease(tscRpcCache, (void *)¶m, false); - pthread_mutex_unlock(&rpcObjMutex); -} + + taosCacheRelease(tscRpcCache, (void *)¶m, false); +} int32_t tscAcquireRpc(const char *key, const char *user, const char *secretEncrypt, void **ppRpcObj) { pthread_mutex_lock(&rpcObjMutex); @@ -80,7 +82,7 @@ int32_t tscAcquireRpc(const char *key, const char *user, const char *secretEncry *ppRpcObj = pRpcObj; pthread_mutex_unlock(&rpcObjMutex); return 0; - } + } SRpcInit rpcInit; memset(&rpcInit, 0, sizeof(rpcInit)); @@ -104,7 +106,8 @@ int32_t tscAcquireRpc(const char *key, const char *user, const char *secretEncry pthread_mutex_unlock(&rpcObjMutex); tscError("failed to init connection to TDengine"); return -1; - } + } + pRpcObj = taosCachePut(tscRpcCache, rpcObj.key, strlen(rpcObj.key), &rpcObj, sizeof(rpcObj), 1000*5); if (pRpcObj == NULL) { rpcClose(rpcObj.pDnodeConn); @@ -118,7 +121,7 @@ int32_t tscAcquireRpc(const char *key, const char *user, const char *secretEncry } void taos_init_imp(void) { - char temp[128] = {0}; + char temp[128] = {0}; errno = TSDB_CODE_SUCCESS; srand(taosGetTimestampSec()); @@ -151,36 +154,41 @@ void taos_init_imp(void) { rpcInit(); scriptEnvPoolInit(); + tscDebug("starting to initialize TAOS client ..."); tscDebug("Local End Point is:%s", tsLocalEp); } taosSetCoreDump(); tscInitMsgsFp(); - int queueSize = tsMaxConnections*2; double factor = (tscEmbedded == 0)? 
2.0:4.0; tscNumOfThreads = (int)(tsNumOfCores * tsNumOfThreadsPerCore / factor); if (tscNumOfThreads < 2) { tscNumOfThreads = 2; } + + int32_t queueSize = tsMaxConnections*2; tscQhandle = taosInitScheduler(queueSize, tscNumOfThreads, "tsc"); if (NULL == tscQhandle) { - tscError("failed to init scheduler"); + tscError("failed to init task queue"); tscInitRes = -1; return; } + tscDebug("client task queue is initialized, numOfWorkers: %d", tscNumOfThreads); + tscTmr = taosTmrInit(tsMaxConnections * 2, 200, 60000, "TSC"); if(0 == tscEmbedded){ taosTmrReset(tscCheckDiskUsage, 20 * 1000, NULL, tscTmr, &tscCheckDiskUsageTmr); } - if (tscTableMetaInfo == NULL) { - tscObjRef = taosOpenRef(40960, tscFreeRegisteredSqlObj); - tscVgroupMap = taosHashInit(256, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_ENTRY_LOCK); - tscTableMetaInfo = taosHashInit(1024, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_ENTRY_LOCK); - tscDebug("TableMeta:%p", tscTableMetaInfo); + if (tscTableMetaMap == NULL) { + tscObjRef = taosOpenRef(40960, tscFreeRegisteredSqlObj); + tscVgroupMap = taosHashInit(256, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_ENTRY_LOCK); + tscTableMetaMap = taosHashInit(1024, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_ENTRY_LOCK); + tscVgroupListBuf = taosCacheInit(TSDB_DATA_TYPE_BINARY, 5, false, NULL, "stable-vgroup-list"); + tscDebug("TableMeta:%p, vgroup:%p is initialized", tscTableMetaMap, tscVgroupMap); } int refreshTime = 5; @@ -189,14 +197,17 @@ void taos_init_imp(void) { tscRefId = taosOpenRef(200, tscCloseTscObj); - // in other language APIs, taos_cleanup is not available yet. - // So, to make sure taos_cleanup will be invoked to clean up the allocated - // resource to suppress the valgrind warning. + // In the APIs of other program language, taos_cleanup is not available yet. + // So, to make sure taos_cleanup will be invoked to clean up the allocated resource to suppress the valgrind warning. atexit(taos_cleanup); + tscDebug("client is initialized successfully"); } -int taos_init() { pthread_once(&tscinit, taos_init_imp); return tscInitRes;} +int taos_init() { + pthread_once(&tscinit, taos_init_imp); + return tscInitRes; +} // this function may be called by user or system, or by both simultaneously. 
void taos_cleanup(void) { @@ -205,11 +216,13 @@ void taos_cleanup(void) { if (atomic_val_compare_exchange_32(&sentinel, TSC_VAR_NOT_RELEASE, TSC_VAR_RELEASED) != TSC_VAR_NOT_RELEASE) { return; } + if (tscEmbedded == 0) { scriptEnvPoolCleanup(); } - taosHashCleanup(tscTableMetaInfo); - tscTableMetaInfo = NULL; + + taosHashCleanup(tscTableMetaMap); + tscTableMetaMap = NULL; taosHashCleanup(tscVgroupMap); tscVgroupMap = NULL; @@ -236,6 +249,9 @@ void taos_cleanup(void) { pthread_mutex_destroy(&rpcObjMutex); } + taosCacheCleanup(tscVgroupListBuf); + tscVgroupListBuf = NULL; + if (tscEmbedded == 0) { rpcCleanup(); taosCloseLog(); diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 0d69fe173f..4454844ea0 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -1388,7 +1388,7 @@ void tscResetSqlCmd(SSqlCmd* pCmd, bool clearCachedMeta) { if (pCmd->pTableMetaMap != NULL) { STableMetaVgroupInfo* p = taosHashIterate(pCmd->pTableMetaMap, NULL); while (p) { - tscVgroupInfoClear(p->pVgroupInfo); + taosArrayDestroy(p->vgroupIdList); tfree(p->pTableMeta); p = taosHashIterate(pCmd->pTableMetaMap, p); } @@ -1522,7 +1522,7 @@ void tscDestroyDataBlock(STableDataBlocks* pDataBlock, bool removeMeta) { char name[TSDB_TABLE_FNAME_LEN] = {0}; tNameExtractFullName(&pDataBlock->tableName, name); - taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN)); + taosHashRemove(tscTableMetaMap, name, strnlen(name, TSDB_TABLE_FNAME_LEN)); } if (!pDataBlock->cloned) { @@ -3365,7 +3365,7 @@ void clearAllTableMetaInfo(SQueryInfo* pQueryInfo, bool removeMeta) { if (removeMeta) { char name[TSDB_TABLE_FNAME_LEN] = {0}; tNameExtractFullName(&pTableMetaInfo->name, name); - taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN)); + taosHashRemove(tscTableMetaMap, name, strnlen(name, TSDB_TABLE_FNAME_LEN)); } tscFreeVgroupTableInfo(pTableMetaInfo->pVgroupTables); @@ -4360,7 +4360,7 @@ int32_t tscCreateTableMetaFromSTableMeta(STableMeta* pChild, const char* name, v assert(pChild != NULL && buf != NULL); STableMeta* p = buf; - taosHashGetClone(tscTableMetaInfo, pChild->sTableName, strnlen(pChild->sTableName, TSDB_TABLE_FNAME_LEN), NULL, p, -1); + taosHashGetClone(tscTableMetaMap, pChild->sTableName, strnlen(pChild->sTableName, TSDB_TABLE_FNAME_LEN), NULL, p); // tableMeta exists, build child table meta according to the super table meta // the uid need to be checked in addition to the general name of the super table. @@ -4374,7 +4374,7 @@ int32_t tscCreateTableMetaFromSTableMeta(STableMeta* pChild, const char* name, v memcpy(pChild->schema, p->schema, sizeof(SSchema) *total); return TSDB_CODE_SUCCESS; } else { // super table has been removed, current tableMeta is also expired. 
remove it here - taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN)); + taosHashRemove(tscTableMetaMap, name, strnlen(name, TSDB_TABLE_FNAME_LEN)); return -1; } } diff --git a/src/query/src/qScript.c b/src/query/src/qScript.c index 261164a84c..74ddf5f548 100644 --- a/src/query/src/qScript.c +++ b/src/query/src/qScript.c @@ -342,6 +342,7 @@ int32_t scriptEnvPoolInit() { env->lua_state = createLuaEnv(); tdListAppend(pool->scriptEnvs, (void *)(&env)); } + pool->mSize = size; pool->cSize = size; return 0; diff --git a/src/util/inc/hash.h b/src/util/inc/hash.h index 616b844c13..a53aa602c1 100644 --- a/src/util/inc/hash.h +++ b/src/util/inc/hash.h @@ -123,10 +123,9 @@ void *taosHashGet(SHashObj *pHashObj, const void *key, size_t keyLen); * @param keyLen * @param fp * @param d - * @param dsize * @return */ -void* taosHashGetClone(SHashObj *pHashObj, const void *key, size_t keyLen, void (*fp)(void *), void* d, size_t dsize); +void* taosHashGetClone(SHashObj *pHashObj, const void *key, size_t keyLen, void (*fp)(void *), void* d); /** * remove item with the specified key diff --git a/src/util/src/hash.c b/src/util/src/hash.c index d7bee9b67c..2e18f36a17 100644 --- a/src/util/src/hash.c +++ b/src/util/src/hash.c @@ -294,10 +294,10 @@ int32_t taosHashPut(SHashObj *pHashObj, const void *key, size_t keyLen, void *da } void *taosHashGet(SHashObj *pHashObj, const void *key, size_t keyLen) { - return taosHashGetClone(pHashObj, key, keyLen, NULL, NULL, 0); + return taosHashGetClone(pHashObj, key, keyLen, NULL, NULL); } -void* taosHashGetClone(SHashObj *pHashObj, const void *key, size_t keyLen, void (*fp)(void *), void* d, size_t dsize) { +void* taosHashGetClone(SHashObj *pHashObj, const void *key, size_t keyLen, void (*fp)(void *), void* d) { if (taosHashTableEmpty(pHashObj) || keyLen == 0 || key == NULL) { return NULL; } diff --git a/src/vnode/src/vnodeMgmt.c b/src/vnode/src/vnodeMgmt.c index 8b17d3a5f2..e14b5a385e 100644 --- a/src/vnode/src/vnodeMgmt.c +++ b/src/vnode/src/vnodeMgmt.c @@ -93,7 +93,7 @@ static void vnodeIncRef(void *ptNode) { void *vnodeAcquire(int32_t vgId) { SVnodeObj *pVnode = NULL; if (tsVnodesHash != NULL) { - taosHashGetClone(tsVnodesHash, &vgId, sizeof(int32_t), vnodeIncRef, &pVnode, sizeof(void *)); + taosHashGetClone(tsVnodesHash, &vgId, sizeof(int32_t), vnodeIncRef, &pVnode); } if (pVnode == NULL) { From 551f54fc409b11b0834fe5168dd61dd6d170b305 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 24 Jul 2021 15:36:26 +0800 Subject: [PATCH 13/38] [td-225] fix bug found by regression test. --- src/client/src/tscLocal.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/client/src/tscLocal.c b/src/client/src/tscLocal.c index 641f62f22b..ec7cb228dd 100644 --- a/src/client/src/tscLocal.c +++ b/src/client/src/tscLocal.c @@ -921,6 +921,7 @@ int tscProcessLocalCmd(SSqlObj *pSql) { pRes->code = tscProcessShowCreateDatabase(pSql); } else if (pCmd->command == TSDB_SQL_RESET_CACHE) { taosHashClear(tscTableMetaMap); + taosCacheEmpty(tscVgroupListBuf); pRes->code = TSDB_CODE_SUCCESS; } else if (pCmd->command == TSDB_SQL_SERV_VERSION) { pRes->code = tscProcessServerVer(pSql); From a54429b74c8a8541dbdf1819699fdbd86c207002 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 24 Jul 2021 22:32:44 +0800 Subject: [PATCH 14/38] [td-225]fix compiler error. 
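
Background (illustration only, not part of the diff below): the compiler error comes from implicitly narrowing the 64-bit tFilePage counter into the size_t / int32_t parameters of the array helpers, which a warnings-as-errors build rejects; the fix is an explicit cast at each call site. A minimal standalone sketch of the same pattern, using hypothetical stand-in names rather than the real TDengine types:

    #include <stdint.h>
    #include <stdio.h>

    /* stand-in for tFilePage: a 64-bit element counter plus payload */
    typedef struct { int64_t num; int32_t data[4]; } page_t;

    /* stand-in for taosArrayAddBatch: takes a 32-bit element count */
    static void add_batch(const int32_t *data, int32_t n) {
        for (int32_t i = 0; i < n; ++i) printf("%d ", (int) data[i]);
        printf("\n");
    }

    int main(void) {
        page_t page = { .num = 4, .data = {1, 2, 3, 4} };
        /* add_batch(page.data, page.num);  <- implicit int64_t -> int32_t, rejected by strict builds */
        add_batch(page.data, (int32_t) page.num);  /* explicit, intentional narrowing */
        return 0;
    }
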
--- src/client/src/tscSQLParser.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 36f252a9ba..d4bbeded3c 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -8157,12 +8157,12 @@ int32_t loadAllTableMeta(SSqlObj* pSql, struct SSqlInfo* pInfo) { tscDebug("0x%"PRIx64" failed to retrieve stable %s vgroup id list in cache, try fetch from mnode", pSql->self, pTableName); } else { tFilePage* pdata = (tFilePage*) pv; - pVgroupIdList = taosArrayInit(pdata->num, sizeof(int32_t)); + pVgroupIdList = taosArrayInit((size_t) pdata->num, sizeof(int32_t)); if (pVgroupIdList == NULL) { return TSDB_CODE_TSC_OUT_OF_MEMORY; } - taosArrayAddBatch(pVgroupIdList, pdata->data, pdata->num); + taosArrayAddBatch(pVgroupIdList, pdata->data, (int32_t) pdata->num); taosCacheRelease(tscVgroupListBuf, &pv, false); } } From eb44d43bba0bc43b6bff30ce8c60b4e81312f23c Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 24 Jul 2021 22:44:20 +0800 Subject: [PATCH 15/38] [td-225]fix compiler error. --- src/client/src/tscServer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 63bb9ee214..1c0e665276 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -2240,7 +2240,7 @@ int tscProcessMultiTableMetaRsp(SSqlObj *pSql) { // for each vgroup, only update the information once. int64_t vgId = pMetaMsg->vgroup.vgId; if (pTableMeta->tableType != TSDB_SUPER_TABLE && taosHashGet(pSet, &vgId, sizeof(vgId)) == NULL) { - doUpdateVgroupInfo(vgId, &pMetaMsg->vgroup); + doUpdateVgroupInfo((int32_t) vgId, &pMetaMsg->vgroup); taosHashPut(pSet, &vgId, sizeof(vgId), "", 0); } From 6e015f2cf691bfe91c7a780cce26ad494d556718 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 24 Jul 2021 22:53:32 +0800 Subject: [PATCH 16/38] [td-225] fix compiler error. --- src/client/src/tscServer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 1c0e665276..dd40314265 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -2266,7 +2266,7 @@ int tscProcessMultiTableMetaRsp(SSqlObj *pSql) { tstrncpy(tableName, name, TSDB_TABLE_NAME_LEN); p->vgroupIdList = createVgroupIdListFromMsg(pMsg, pSet, tableName, &size, pSql->self); - int32_t numOfVgId = taosArrayGetSize(p->vgroupIdList); + int32_t numOfVgId = (int32_t) taosArrayGetSize(p->vgroupIdList); int32_t s = sizeof(tFilePage) + numOfVgId * sizeof(int32_t); tFilePage* idList = calloc(1, s); From cbbb0593857c3c1d8dc6507562df6dec4d4336d6 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sun, 25 Jul 2021 16:04:20 +0800 Subject: [PATCH 17/38] [td-225]fix bug found by regression test. 
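
Background (illustration only, not part of the diff below): this patch keys the client-side table-meta map and the cached super-table vgroup-id list by the fully qualified table name (tNameExtractFullName / TSDB_TABLE_FNAME_LEN) instead of the bare table name, presumably so that same-named tables in different databases cannot land on the same cache key. A minimal standalone sketch of why the qualified key matters, using hypothetical stand-in names rather than the real TDengine APIs:

    #include <stdio.h>
    #include <string.h>

    #define FNAME_LEN 193   /* stand-in for TSDB_TABLE_FNAME_LEN */

    /* stand-in for tNameExtractFullName: builds "acct.db.table" */
    static void full_name(char *dst, const char *acct, const char *db, const char *tbl) {
        snprintf(dst, FNAME_LEN, "%s.%s.%s", acct, db, tbl);
    }

    int main(void) {
        char k1[FNAME_LEN], k2[FNAME_LEN];
        full_name(k1, "root", "nsdb",  "stb0");
        full_name(k2, "root", "nsdb2", "stb0");   /* same table name, different database */
        printf("bare keys would collide; qualified keys are %s\n",
               strcmp(k1, k2) ? "distinct" : "identical");
        return 0;
    }
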
--- src/client/src/tscSQLParser.c | 19 +++++++++---------- src/client/src/tscServer.c | 35 +++++++++++++++-------------------- src/common/inc/tcmdtype.h | 5 +---- src/mnode/src/mnodeTable.c | 8 ++------ 4 files changed, 27 insertions(+), 40 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index d4bbeded3c..fef9aafad0 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -7237,7 +7237,7 @@ void tscPrintSelNodeList(SSqlObj* pSql, int32_t subClauseIndex) { } tmpLen = - sprintf(tmpBuf, "%s(uid:%" PRId64 ", %d)", name, pExpr->base.uid, pExpr->base.colInfo.colId); + sprintf(tmpBuf, "%s(uid:%" PRIu64 ", %d)", name, pExpr->base.uid, pExpr->base.colInfo.colId); if (tmpLen + offset >= totalBufSize - 1) break; @@ -8135,9 +8135,6 @@ int32_t loadAllTableMeta(SSqlObj* pSql, struct SSqlInfo* pInfo) { tscDebug("0x%"PRIx64" retrieve table meta %s from local buf", pSql->self, name); // avoid mem leak, may should update pTableMeta - const char* pTableName = tNameGetTableName(pname); - size_t nameLen = strlen(pTableName); - void* pVgroupIdList = NULL; if (pTableMeta->tableType == TSDB_CHILD_TABLE) { code = tscCreateTableMetaFromSTableMeta(pTableMeta, name, pSql->pBuf); @@ -8150,11 +8147,12 @@ int32_t loadAllTableMeta(SSqlObj* pSql, struct SSqlInfo* pInfo) { } } else if (pTableMeta->tableType == TSDB_SUPER_TABLE) { // the vgroup list of super table is not kept in local buffer, so here need retrieve it from the mnode each time - void* pv = taosCacheAcquireByKey(tscVgroupListBuf, pTableName, nameLen); + tscDebug("0x%"PRIx64" try to acquire cached super table %s vgroup id list", pSql->self, name); + void* pv = taosCacheAcquireByKey(tscVgroupListBuf, name, len); if (pv == NULL) { char* t = strdup(name); taosArrayPush(pVgroupList, &t); - tscDebug("0x%"PRIx64" failed to retrieve stable %s vgroup id list in cache, try fetch from mnode", pSql->self, pTableName); + tscDebug("0x%"PRIx64" failed to retrieve stable %s vgroup id list in cache, try fetch from mnode", pSql->self, name); } else { tFilePage* pdata = (tFilePage*) pv; pVgroupIdList = taosArrayInit((size_t) pdata->num, sizeof(int32_t)); @@ -8167,10 +8165,10 @@ int32_t loadAllTableMeta(SSqlObj* pSql, struct SSqlInfo* pInfo) { } } - if (taosHashGet(pCmd->pTableMetaMap, pTableName, nameLen) == NULL) { + if (taosHashGet(pCmd->pTableMetaMap, name, len) == NULL) { STableMeta* pMeta = tscTableMetaDup(pTableMeta); STableMetaVgroupInfo tvi = { .pTableMeta = pMeta, .vgroupIdList = pVgroupIdList}; - taosHashPut(pCmd->pTableMetaMap, pTableName, nameLen, &tvi, sizeof(STableMetaVgroupInfo)); + taosHashPut(pCmd->pTableMetaMap, name, len, &tvi, sizeof(STableMetaVgroupInfo)); } } else { // Add to the retrieve table meta array list. 
@@ -8290,8 +8288,9 @@ static int32_t doLoadAllTableMeta(SSqlObj* pSql, SQueryInfo* pQueryInfo, SSqlNod strncpy(pTableMetaInfo->aliasName, tNameGetTableName(&pTableMetaInfo->name), tListLen(pTableMetaInfo->aliasName)); } - const char* name = tNameGetTableName(&pTableMetaInfo->name); - STableMetaVgroupInfo* p = taosHashGet(pCmd->pTableMetaMap, name, strlen(name)); + char fname[TSDB_TABLE_FNAME_LEN] = {0}; + tNameExtractFullName(&pTableMetaInfo->name, fname); + STableMetaVgroupInfo* p = taosHashGet(pCmd->pTableMetaMap, fname, strnlen(fname, TSDB_TABLE_FNAME_LEN)); pTableMetaInfo->pTableMeta = tscTableMetaDup(p->pTableMeta); assert(pTableMetaInfo->pTableMeta != NULL); diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index dd40314265..8a11cd6b93 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -391,9 +391,9 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) { } if ((cmd == TSDB_SQL_SELECT || cmd == TSDB_SQL_UPDATE_TAGS_VAL) && - (rpcMsg->code == TSDB_CODE_TDB_INVALID_TABLE_ID || - rpcMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID || - rpcMsg->code == TSDB_CODE_RPC_NETWORK_UNAVAIL || + (rpcMsg->code == TSDB_CODE_TDB_INVALID_TABLE_ID || // change the retry procedure + /*(*/rpcMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID || + rpcMsg->code == TSDB_CODE_RPC_NETWORK_UNAVAIL || // change the retry procedure rpcMsg->code == TSDB_CODE_APP_NOT_READY)) { pSql->retry++; @@ -404,7 +404,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) { tscError("0x%"PRIx64" max retry %d reached, give up", pSql->self, pSql->maxRetry); } else { // wait for a little bit moment and then retry - // todo do not sleep in rpc callback thread, add this process into queueu to process + // todo do not sleep in rpc callback thread, add this process into queue to process if (rpcMsg->code == TSDB_CODE_APP_NOT_READY || rpcMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID) { int32_t duration = getWaitingTimeInterval(pSql->retry); taosMsleep(duration); @@ -2214,15 +2214,10 @@ int tscProcessMultiTableMetaRsp(SSqlObj *pSql) { return TSDB_CODE_TSC_INVALID_VALUE; } - SName sn = {0}; - tNameFromString(&sn, pMetaMsg->tableFname, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE); - if (pMultiMeta->metaClone == 1 || pTableMeta->tableType == TSDB_SUPER_TABLE) { STableMetaVgroupInfo p = {.pTableMeta = pTableMeta,}; - - const char* tableName = tNameGetTableName(&sn); - size_t keyLen = strlen(tableName); - taosHashPut(pParentCmd->pTableMetaMap, tableName, keyLen, &p, sizeof(STableMetaVgroupInfo)); + size_t keyLen = strnlen(pMetaMsg->tableFname, TSDB_TABLE_FNAME_LEN); + taosHashPut(pParentCmd->pTableMetaMap, pMetaMsg->tableFname, keyLen, &p, sizeof(STableMetaVgroupInfo)); } else { freeMeta = true; } @@ -2251,10 +2246,13 @@ int tscProcessMultiTableMetaRsp(SSqlObj *pSql) { } for(int32_t i = 0; i < pMultiMeta->numOfVgroup; ++i) { - char* name = pMsg; - pMsg += TSDB_TABLE_NAME_LEN; + char fname[TSDB_TABLE_FNAME_LEN] = {0}; + tstrncpy(fname, pMsg, TSDB_TABLE_FNAME_LEN); + size_t len = strnlen(fname, TSDB_TABLE_FNAME_LEN); - STableMetaVgroupInfo* p = taosHashGet(pParentCmd->pTableMetaMap, name, strnlen(name, TSDB_TABLE_NAME_LEN)); + pMsg += TSDB_TABLE_FNAME_LEN; + + STableMetaVgroupInfo* p = taosHashGet(pParentCmd->pTableMetaMap, fname, len); assert(p != NULL); int32_t size = 0; @@ -2262,9 +2260,7 @@ int tscProcessMultiTableMetaRsp(SSqlObj *pSql) { taosArrayDestroy(p->vgroupIdList); } - char tableName[TSDB_TABLE_FNAME_LEN] = {0}; - tstrncpy(tableName, name, TSDB_TABLE_NAME_LEN); - 
p->vgroupIdList = createVgroupIdListFromMsg(pMsg, pSet, tableName, &size, pSql->self); + p->vgroupIdList = createVgroupIdListFromMsg(pMsg, pSet, fname, &size, pSql->self); int32_t numOfVgId = (int32_t) taosArrayGetSize(p->vgroupIdList); int32_t s = sizeof(tFilePage) + numOfVgId * sizeof(int32_t); @@ -2273,11 +2269,10 @@ int tscProcessMultiTableMetaRsp(SSqlObj *pSql) { idList->num = numOfVgId; memcpy(idList->data, TARRAY_GET_START(p->vgroupIdList), numOfVgId * sizeof(int32_t)); - void* idListInst = taosCachePut(tscVgroupListBuf, tableName, strlen(tableName), idList, s, 5000); + void* idListInst = taosCachePut(tscVgroupListBuf, fname, len, idList, s, 5000); taosCacheRelease(tscVgroupListBuf, (void*) &idListInst, false); tfree(idList); - pMsg += size; } @@ -2918,7 +2913,7 @@ int tscRenewTableMeta(SSqlObj *pSql, int32_t tableIndex) { STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; if (pTableMeta) { - tscDebug("0x%"PRIx64" update table meta:%s, old meta numOfTags:%d, numOfCols:%d, uid:%" PRId64, pSql->self, name, + tscDebug("0x%"PRIx64" update table meta:%s, old meta numOfTags:%d, numOfCols:%d, uid:%" PRIu64, pSql->self, name, tscGetNumOfTags(pTableMeta), tscGetNumOfColumns(pTableMeta), pTableMeta->id.uid); } diff --git a/src/common/inc/tcmdtype.h b/src/common/inc/tcmdtype.h index bd2c2e46f8..918763ebb4 100644 --- a/src/common/inc/tcmdtype.h +++ b/src/common/inc/tcmdtype.h @@ -88,10 +88,7 @@ enum { TSDB_DEFINE_SQL_TYPE( TSDB_SQL_SHOW_CREATE_STABLE, "show-create-stable") TSDB_DEFINE_SQL_TYPE( TSDB_SQL_SHOW_CREATE_DATABASE, "show-create-database") - /* - * build empty result instead of accessing dnode to fetch result - * reset the client cache - */ + // build empty result instead of accessing dnode to fetch result reset the client cache TSDB_DEFINE_SQL_TYPE( TSDB_SQL_RETRIEVE_EMPTY_RESULT, "retrieve-empty-result" ) TSDB_DEFINE_SQL_TYPE( TSDB_SQL_RESET_CACHE, "reset-cache" ) diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c index 6e5cf14b96..0bc114ffdf 100644 --- a/src/mnode/src/mnodeTable.c +++ b/src/mnode/src/mnodeTable.c @@ -1812,12 +1812,8 @@ static int32_t getVgroupInfoLength(SSTableVgroupMsg* pInfo, int32_t numOfTable) } static char* serializeVgroupInfo(SSTableObj *pTable, char* name, char* msg, SMnodeMsg* pMsgBody, void* handle) { - SName sn = {0}; - tNameFromString(&sn, name, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE); - const char* tableName = tNameGetTableName(&sn); - - strncpy(msg, tableName, TSDB_TABLE_NAME_LEN); - msg += TSDB_TABLE_NAME_LEN; + strncpy(msg, name, TSDB_TABLE_FNAME_LEN); + msg += TSDB_TABLE_FNAME_LEN; if (pTable->vgHash == NULL) { mDebug("msg:%p, app:%p stable:%s, no vgroup exist while get stable vgroup info", pMsgBody, handle, name); From 4eeea0d8d6971556b81c3a8cb0526ed27ff1b27c Mon Sep 17 00:00:00 2001 From: tomchon Date: Mon, 26 Jul 2021 10:14:47 +0800 Subject: [PATCH 18/38] [TD-5523]: arm32 4byte long error. 
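
Background (illustration only, not part of the diff below): on an ILP32 target such as arm32, `long` is 4 bytes, so `mktime(&tm) * 1000L` is multiplied in 32-bit arithmetic and overflows before the result reaches the 64-bit timestamp; casting the mktime() result to int64_t first forces a 64-bit multiply. A minimal standalone sketch of the same widening pattern, using only standard C:

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    int main(void) {
        time_t sec = time(NULL);

        /* With a 32-bit long, `sec * 1000L` would overflow; widening one
         * operand first keeps the whole product in 64-bit arithmetic. */
        int64_t ms = (int64_t) sec * 1000;

        printf("seconds: %lld  milliseconds: %lld\n", (long long) sec, (long long) ms);
        return 0;
    }
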
--- src/query/src/qExecutor.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 982c45c441..d479e90415 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -149,12 +149,12 @@ static void getNextTimeWindow(SQueryAttr* pQueryAttr, STimeWindow* tw) { int mon = (int)(tm.tm_year * 12 + tm.tm_mon + interval * factor); tm.tm_year = mon / 12; tm.tm_mon = mon % 12; - tw->skey = convertTimePrecision(mktime(&tm) * 1000L, TSDB_TIME_PRECISION_MILLI, pQueryAttr->precision); + tw->skey = convertTimePrecision((int64_t)mktime(&tm) * 1000L, TSDB_TIME_PRECISION_MILLI, pQueryAttr->precision); mon = (int)(mon + interval); tm.tm_year = mon / 12; tm.tm_mon = mon % 12; - tw->ekey = convertTimePrecision(mktime(&tm) * 1000L, TSDB_TIME_PRECISION_MILLI, pQueryAttr->precision); + tw->ekey = convertTimePrecision((int64_t)mktime(&tm) * 1000L, TSDB_TIME_PRECISION_MILLI, pQueryAttr->precision); tw->ekey -= 1; } From 2b99aa162ff52fcf2baec8056369b3bf5f0214cf Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 26 Jul 2021 13:39:39 +0800 Subject: [PATCH 19/38] [td-225]fix bug found by regression test. --- src/client/src/tscServer.c | 33 ++++++++++++++++++++++++++++++--- src/inc/taosmsg.h | 2 +- tests/examples/c/apitest.c | 2 +- 3 files changed, 32 insertions(+), 5 deletions(-) diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 8a11cd6b93..8276ec4359 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -2337,14 +2337,18 @@ int tscProcessSTableVgroupRsp(SSqlObj *pSql) { SSqlCmd* pCmd = &parent->cmd; SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd); + char fName[TSDB_TABLE_FNAME_LEN] = {0}; for(int32_t i = 0; i < pStableVgroup->numOfTables; ++i) { char* name = pMsg; - pMsg += TSDB_TABLE_NAME_LEN; + pMsg += TSDB_TABLE_FNAME_LEN; STableMetaInfo *pInfo = NULL; for(int32_t j = 0; j < pQueryInfo->numOfTables; ++j) { STableMetaInfo *pInfo1 = tscGetTableMetaInfoFromCmd(pCmd, j); - if (strcmp(name, tNameGetTableName(&pInfo1->name)) != 0) { + memset(fName, 0, tListLen(fName)); + + tNameExtractFullName(&pInfo1->name, fName); + if (strcmp(name, fName) != 0) { continue; } @@ -2504,11 +2508,14 @@ int tscProcessUseDbRsp(SSqlObj *pSql) { return ret; } +//todo only invalid the buffered data that belongs to dropped databases int tscProcessDropDbRsp(SSqlObj *pSql) { //TODO LOCK DB WHEN MODIFY IT //pSql->pTscObj->db[0] = 0; taosHashClear(tscTableMetaMap); + taosHashClear(tscVgroupMap); + taosCacheEmpty(tscVgroupListBuf); return 0; } @@ -2892,6 +2899,10 @@ int32_t tscGetUdfFromNode(SSqlObj *pSql, SQueryInfo* pQueryInfo) { return code; } +static void freeElem(void* p) { + tfree(*(char**)p); +} + /** * retrieve table meta from mnode, and then update the local table meta hashmap. 
* @param pSql sql object @@ -2921,7 +2932,23 @@ int tscRenewTableMeta(SSqlObj *pSql, int32_t tableIndex) { size_t len = strlen(name); taosHashRemove(tscTableMetaMap, name, len); - return getTableMetaFromMnode(pSql, pTableMetaInfo, false); + if (pTableMeta->tableType == TSDB_SUPER_TABLE) { + void* pv = taosCacheAcquireByKey(tscVgroupListBuf, name, len); + if (pv != NULL) { + taosCacheRelease(tscVgroupListBuf, &pv, true); + } + } + + SArray* pNameList = taosArrayInit(1, POINTER_BYTES); + SArray* vgroupList = taosArrayInit(1, POINTER_BYTES); + + char* n = strdup(name); + taosArrayPush(pNameList, &n); + code = getMultiTableMetaFromMnode(pSql, pNameList, vgroupList, NULL, tscTableMetaCallBack, true); + taosArrayDestroyEx(pNameList, freeElem); + taosArrayDestroyEx(vgroupList, freeElem); + + return code; } static bool allVgroupInfoRetrieved(SQueryInfo* pQueryInfo) { diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h index 9ee241efc1..2370b909ef 100644 --- a/src/inc/taosmsg.h +++ b/src/inc/taosmsg.h @@ -809,7 +809,7 @@ typedef struct SMultiTableMeta { int32_t contLen; uint8_t compressed; // denote if compressed or not uint32_t rawLen; // size before compress - uint8_t metaClone; // make meta clone after retrieve meta from mnode + uint8_t metaClone; // make meta clone after retrieve meta from mnode char meta[]; } SMultiTableMeta; diff --git a/tests/examples/c/apitest.c b/tests/examples/c/apitest.c index ac522d6151..2b0d36a40f 100644 --- a/tests/examples/c/apitest.c +++ b/tests/examples/c/apitest.c @@ -1009,7 +1009,7 @@ int main(int argc, char *argv[]) { info = taos_get_client_info(taos); printf("client info: %s\n", info); - printf("************ verify shemaless *************\n"); + printf("************ verify schema-less *************\n"); verify_schema_less(taos); From c63d6f4c94344f7ec4564b137791edd1829ac0fb Mon Sep 17 00:00:00 2001 From: wenzhouwww Date: Mon, 26 Jul 2021 18:00:02 +0800 Subject: [PATCH 20/38] [TD-5369] fulltest.sh --- tests/pytest/fulltest.sh | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index ab7dedc959..923934ac69 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -161,7 +161,11 @@ python3 test.py -f tools/taosdemoTestTblAlt.py python3 test.py -f tools/taosdemoTestSampleData.py python3 test.py -f tools/taosdemoTestInterlace.py python3 test.py -f tools/taosdemoTestQuery.py - +# nano support +python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py +python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.py +python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py +python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertTime_step.py # update @@ -363,15 +367,15 @@ python3 test.py -f alter/alter_keep.py python3 test.py -f alter/alter_cacheLastRow.py python3 ./test.py -f query/querySession.py python3 test.py -f alter/alter_create_exception.py - -# nano support -python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py -python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.py -python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py -python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertTime_step.py python3 ./test.py -f insert/flushwhiledrop.py #======================p4-end=============== python3 test.py -f tools/taosdemoAllTest/pytest.py + + + + + + From 74fa1114f314738539819e519d5e56daab909bf4 Mon Sep 17 00:00:00 2001 From: wenzhouwww Date: Tue, 27 Jul 2021 
11:06:04 +0800 Subject: [PATCH 21/38] [TD-5369] change the nums of tables and insert rows! --- .../taosdemoTestNanoDatabase.json | 8 ++--- .../taosdemoTestNanoDatabaseNow.json | 2 +- .../taosdemoTestSupportNanoInsert.py | 36 +++++++++---------- .../taosdemoTestSupportNanoQuery.py | 22 ++++++------ .../taosdemoTestSupportNanoQuerycsv.json | 2 +- .../taosdemoTestSupportNanoSubscribe.json | 2 +- .../taosdemoTestSupportNanosubscribe.py | 6 ++-- 7 files changed, 38 insertions(+), 40 deletions(-) diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabase.json b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabase.json index 9010415fe6..246f1c35f2 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabase.json +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabase.json @@ -35,13 +35,13 @@ "super_tables": [{ "name": "stb0", "child_table_exists":"no", - "childtable_count": 1000, + "childtable_count": 100, "childtable_prefix": "tb0_", "auto_create_table": "no", "batch_create_tbl_num": 20, "data_source": "rand", "insert_mode": "taosc", - "insert_rows": 10000, + "insert_rows": 100, "childtable_offset":0, "multi_thread_write_one_tbl": "no", "insert_interval":0, @@ -61,13 +61,13 @@ { "name": "stb1", "child_table_exists":"no", - "childtable_count": 1000, + "childtable_count": 100, "childtable_prefix": "tb1_", "auto_create_table": "no", "batch_create_tbl_num": 20, "data_source": "rand", "insert_mode": "taosc", - "insert_rows": 10000, + "insert_rows": 100, "childtable_offset":0, "multi_thread_write_one_tbl": "no", "insert_interval":0, diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseNow.json b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseNow.json index d2542a0eba..f36b1f9b4c 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseNow.json +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseNow.json @@ -35,7 +35,7 @@ "super_tables": [{ "name": "stb0", "child_table_exists":"no", - "childtable_count": 1000, + "childtable_count": 100, "childtable_prefix": "tb0_", "auto_create_table": "no", "batch_create_tbl_num": 20, diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py index 88a917da85..266a8fa712 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py @@ -56,46 +56,47 @@ class TDTestCase: os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoTestNanoDatabase.json -y " % binPath) tdSql.execute("use nsdb") tdSql.query("show stables") - tdSql.checkData(0, 4, 1000) + tdSql.checkData(0, 4, 100) tdSql.query("select count (tbname) from stb0") - tdSql.checkData(0, 0, 1000) + tdSql.checkData(0, 0, 100) tdSql.query("select count(*) from tb0_0") - tdSql.checkData(0, 0, 10000) + tdSql.checkData(0, 0, 100) tdSql.query("select count(*) from stb0") - tdSql.checkData(0, 0, 10000000) + tdSql.checkData(0, 0, 10000) tdSql.query("describe stb0") tdSql.checkDataType(9, 1,"TIMESTAMP") tdSql.query("select last(ts) from stb0") - tdSql.getData(0, 0) + tdSql.checkData(0, 0,"2021-07-01 00:00:00.990000000") # check stable stb1 which is insert with disord tdSql.query("select count (tbname) from stb1") - tdSql.checkData(0, 0, 1000) + tdSql.checkData(0, 0, 100) tdSql.query("select count(*) from tb1_0") - tdSql.checkData(0, 0, 10000) + tdSql.checkData(0, 0, 100) tdSql.query("select count(*) from stb1") - tdSql.checkData(0, 0, 10000000) 
+ tdSql.checkData(0, 0, 10000) # check c8 is an nano timestamp tdSql.query("describe stb1") tdSql.checkDataType(9, 1,"TIMESTAMP") # check insert timestamp_step is nano_second tdSql.query("select last(ts) from stb1") - tdSql.checkData(0, 0,"2021-07-01 00:01:39.990000000") - + tdSql.checkData(0, 0,"2021-07-01 00:00:00.990000000") + # insert data from now time # check stable stb0 os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoTestNanoDatabaseNow.json -y " % binPath) + tdSql.execute("use nsdb2") tdSql.query("show stables") - tdSql.checkData(0, 4, 1000) + tdSql.checkData(0, 4, 100) tdSql.query("select count (tbname) from stb0") - tdSql.checkData(0, 0, 1000) + tdSql.checkData(0, 0, 100) tdSql.query("select count(*) from tb0_0") tdSql.checkData(0, 0, 100) tdSql.query("select count(*) from stb0") - tdSql.checkData(0, 0, 100000) + tdSql.checkData(0, 0, 10000) # check c8 is an nano timestamp tdSql.query("describe stb0") tdSql.checkDataType(9,1,"TIMESTAMP") @@ -117,16 +118,11 @@ class TDTestCase: os.system("rm -rf ./insert_res.txt") os.system("rm -rf tools/taosdemoAllTest/taosdemoTestSupportNano*.py.sql") + # taosdemo test insert with command and parameter , detals show taosdemo --help - - os.system("%staosdemo -u root -P taosdata -p 6030 -h vm84 -a 1 -m pre -n 10 -T 20 -t 60 -o res.txt -y " % binPath) + os.system("%staosdemo -u root -P taosdata -p 6030 -a 1 -m pre -n 10 -T 20 -t 60 -o res.txt -y " % binPath) tdSql.query("select count(*) from test.meters") tdSql.checkData(0, 0, 600) - - os.system("%staosdemo -u root -P taosdata -p 6030 -h vm84 -a 1 -m pre -n 10 -T 20 -t 60 -o res.txt -y " % binPath) - tdSql.query("select count(*) from test.meters") - tdSql.checkData(0, 0, 600) - # check taosdemo -s sqls_ls = ['drop database if exists nsdbsql;','create database nsdbsql precision "ns" keep 36 days 6 update 1;', diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.py index 21a7037ce6..5a37cf9c7c 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.py +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.py @@ -55,15 +55,15 @@ class TDTestCase: # use where to filter - tdSql.query("select count(*) from stb0 where ts>\"2021-07-01 00:01:00.000000000 \" ;") - tdSql.checkData(0, 0, 3999000) - tdSql.query("select count(*) from stb0 where ts>\"2021-07-01 00:01:00.000000000\" and ts <=\"2021-07-01 00:01:10.000000000\" ;") - tdSql.checkData(0, 0, 1000000) + tdSql.query("select count(*) from stb0 where ts>\"2021-07-01 00:00:00.590000000 \" ") + tdSql.checkData(0, 0, 4000) + tdSql.query("select count(*) from stb0 where ts>\"2021-07-01 00:00:00.000000000\" and ts <=\"2021-07-01 00:00:00.590000000\" ") + tdSql.checkData(0, 0, 5900) - tdSql.query("select count(*) from tb0_0 where ts>\"2021-07-01 00:01:00.000000000 \" ;") - tdSql.checkData(0, 0, 3999) - tdSql.query("select count(*) from tb0_0 where ts>\"2021-07-01 00:01:00.000000000\" and ts <=\"2021-07-01 00:01:10.000000000\" ;") - tdSql.checkData(0, 0, 1000) + tdSql.query("select count(*) from tb0_0 where ts>\"2021-07-01 00:00:00.590000000 \" ;") + tdSql.checkData(0, 0, 40) + tdSql.query("select count(*) from tb0_0 where ts>\"2021-07-01 00:00:00.000000000\" and ts <=\"2021-07-01 00:00:00.590000000\" ") + tdSql.checkData(0, 0, 59) # select max min avg from special col @@ -87,8 +87,8 @@ class TDTestCase: print( "select avg(c1) from tb0_0 : " , tdSql.getData(0, 0)) tdSql.query("select count(*) from stb0 group by tbname;") - 
tdSql.checkData(0, 0, 10000) - tdSql.checkData(100, 0, 10000) + tdSql.checkData(0, 0, 100) + tdSql.checkData(10, 0, 100) # query : query above sqls by taosdemo and continuously @@ -105,7 +105,7 @@ class TDTestCase: tdSql.checkDataType(3, 1, "TIMESTAMP") tdSql.query("select count(*) from stb0 where ts >\"2021-07-01 00:00:00.490000000\"") tdSql.checkData(0, 0, 5000) - tdSql.query("select count(*) from stb0 where ts >now -22d-1h-3s") + tdSql.query("select count(*) from stb0 where ts now -22d-1h-3s ;", + "sql": "select count(*) from stb0 where ts < now -22d-1h-3s ;", "result": "./query_res1.txt" }, { diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoSubscribe.json b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoSubscribe.json index 26d405b65b..1cc834164e 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoSubscribe.json +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoSubscribe.json @@ -20,7 +20,7 @@ "result": "./subscribe_res0.txt" }, { - "sql": "select * from stb0 where ts > now -20d-1h-3s ;", + "sql": "select * from stb0 where ts < now -2d-1h-3s ;", "result": "./subscribe_res1.txt" }, { diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py index f6324577c1..6dcea6e7e0 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py @@ -79,7 +79,7 @@ class TDTestCase: # merge result files - sleep(20) + sleep(10) os.system("cat subscribe_res0.txt* > all_subscribe_res0.txt") os.system("cat subscribe_res1.txt* > all_subscribe_res1.txt") os.system("cat subscribe_res2.txt* > all_subscribe_res2.txt") @@ -90,7 +90,7 @@ class TDTestCase: self.assertCheck("all_subscribe_res0.txt",subTimes0 ,200) subTimes1 = self.subTimes("all_subscribe_res1.txt") - self.assertCheck("all_subscribe_res1.txt",subTimes1 ,0) + self.assertCheck("all_subscribe_res1.txt",subTimes1 ,200) subTimes2 = self.subTimes("all_subscribe_res2.txt") self.assertCheck("all_subscribe_res2.txt",subTimes2 ,200) @@ -103,6 +103,7 @@ class TDTestCase: os.system("cat subscribe_res0.txt* > all_subscribe_res0.txt") subTimes0 = self.subTimes("all_subscribe_res0.txt") + print("pass") self.assertCheck("all_subscribe_res0.txt",subTimes0 ,202) @@ -112,6 +113,7 @@ class TDTestCase: sleep(3) os.system("rm -rf ./subscribe_res*") os.system("rm -rf ./all_subscribe*") + os.system("rm -rf ./*.py.sql") From 8aa0a37392c978a1f93448d839bc732e93c3d5c1 Mon Sep 17 00:00:00 2001 From: tomchon Date: Tue, 27 Jul 2021 11:40:47 +0800 Subject: [PATCH 22/38] [TD-4432]: add taodemo-testcase that using stmt interface --- .../taosdemoTestInsertWithJsonStmt.py | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJsonStmt.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJsonStmt.py index cce6c83a07..0aade43183 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJsonStmt.py +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJsonStmt.py @@ -244,17 +244,17 @@ class TDTestCase: tdSql.query("select count(*) from stb1") tdSql.checkData(0, 0, 10) - # # insert: sample json - # os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insert-sample-stmt.json -y " % binPath) - # tdSql.execute("use dbtest123") - # tdSql.query("select c2 from stb0") - # tdSql.checkData(0, 0, 2147483647) - # 
tdSql.query("select * from stb1 where t1=-127") - # tdSql.checkRows(20) - # tdSql.query("select * from stb1 where t2=127") - # tdSql.checkRows(10) - # tdSql.query("select * from stb1 where t2=126") - # tdSql.checkRows(10) + # insert: sample json + os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insert-sample-stmt.json -y " % binPath) + tdSql.execute("use dbtest123") + tdSql.query("select c2 from stb0") + tdSql.checkData(0, 0, 2147483647) + tdSql.query("select * from stb1 where t1=-127") + tdSql.checkRows(20) + tdSql.query("select * from stb1 where t2=127") + tdSql.checkRows(10) + tdSql.query("select * from stb1 where t2=126") + tdSql.checkRows(10) # insert: test interlace parament os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insert-interlace-row-stmt.json -y " % binPath) From 6faf1d5bb3e98af1b587299b9a14bd95ec30ebca Mon Sep 17 00:00:00 2001 From: haoranchen Date: Tue, 27 Jul 2021 18:26:10 +0800 Subject: [PATCH 23/38] Update sdbCompClusterReplica2.py --- tests/pytest/wal/sdbCompClusterReplica2.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/pytest/wal/sdbCompClusterReplica2.py b/tests/pytest/wal/sdbCompClusterReplica2.py index dd5a375151..ba80e3864a 100644 --- a/tests/pytest/wal/sdbCompClusterReplica2.py +++ b/tests/pytest/wal/sdbCompClusterReplica2.py @@ -86,7 +86,7 @@ class TwoClients: tdSql.execute("alter table stb2_0 add column col2 binary(4)") tdSql.execute("alter table stb2_0 drop column col1") tdSql.execute("insert into stb2_0 values(1614218422000,8638,'R')") - tdSql.execute("drop dnode 2") + tdSql.execute("drop dnode chenhaoran02") sleep(10) os.system("rm -rf /var/lib/taos/*") print("clear dnode chenhaoran02'data files") @@ -142,4 +142,4 @@ class TwoClients: clients = TwoClients() clients.initConnection() # clients.getBuildPath() -clients.run() \ No newline at end of file +clients.run() From e0046dbb25f032b71a1745254309480ca07a50da Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 27 Jul 2021 22:46:03 +0800 Subject: [PATCH 24/38] [td-5175]: refactor the retry procedure while the error caused by tablemeta cache occurs. --- src/client/inc/tscUtil.h | 2 + src/client/src/tscAsync.c | 102 +++++------------------------------ src/client/src/tscServer.c | 74 +++++++++++-------------- src/client/src/tscSubquery.c | 39 +++++++++++--- src/client/src/tscUtil.c | 16 ++++++ 5 files changed, 94 insertions(+), 139 deletions(-) diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h index a4df124fa2..d0aecfe1a2 100644 --- a/src/client/inc/tscUtil.h +++ b/src/client/inc/tscUtil.h @@ -354,6 +354,8 @@ char* strdup_throw(const char* str); bool vgroupInfoIdentical(SNewVgroupInfo *pExisted, SVgroupMsg* src); SNewVgroupInfo createNewVgroupInfo(SVgroupMsg *pVgroupMsg); +void tscRemoveTableMetaBuf(STableMetaInfo* pTableMetaInfo, uint64_t id); + #ifdef __cplusplus } #endif diff --git a/src/client/src/tscAsync.c b/src/client/src/tscAsync.c index d857d00e15..910a80d6af 100644 --- a/src/client/src/tscAsync.c +++ b/src/client/src/tscAsync.c @@ -325,61 +325,6 @@ void tscAsyncResultOnError(SSqlObj* pSql) { int tscSendMsgToServer(SSqlObj *pSql); -static int32_t updateMetaBeforeRetryQuery(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo, SQueryInfo* pQueryInfo) { - // handle the invalid table error code for super table. - // update the pExpr info, colList info, number of table columns - // TODO Re-parse this sql and issue the corresponding subquery as an alternative for this case. 
- if (pSql->retryReason == TSDB_CODE_TDB_INVALID_TABLE_ID) { - int32_t numOfExprs = (int32_t) tscNumOfExprs(pQueryInfo); - int32_t numOfCols = tscGetNumOfColumns(pTableMetaInfo->pTableMeta); - int32_t numOfTags = tscGetNumOfTags(pTableMetaInfo->pTableMeta); - - SSchema *pSchema = tscGetTableSchema(pTableMetaInfo->pTableMeta); - SSchema *pTagSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta); - - for (int32_t i = 0; i < numOfExprs; ++i) { - SSqlExpr *pExpr = &(tscExprGet(pQueryInfo, i)->base); - - // update the table uid - pExpr->uid = pTableMetaInfo->pTableMeta->id.uid; - - if (pExpr->colInfo.colIndex >= 0) { - int32_t index = pExpr->colInfo.colIndex; - - if ((TSDB_COL_IS_NORMAL_COL(pExpr->colInfo.flag) && index >= numOfCols) || - (TSDB_COL_IS_TAG(pExpr->colInfo.flag) && (index < 0 || index >= numOfTags))) { - return pSql->retryReason; - } - - if (TSDB_COL_IS_TAG(pExpr->colInfo.flag)) { - if ((pTagSchema[pExpr->colInfo.colIndex].colId != pExpr->colInfo.colId) && - strcasecmp(pExpr->colInfo.name, pTagSchema[pExpr->colInfo.colIndex].name) != 0) { - return pSql->retryReason; - } - } else if (TSDB_COL_IS_NORMAL_COL(pExpr->colInfo.flag)) { - if ((pSchema[pExpr->colInfo.colIndex].colId != pExpr->colInfo.colId) && - strcasecmp(pExpr->colInfo.name, pSchema[pExpr->colInfo.colIndex].name) != 0) { - return pSql->retryReason; - } - } else { // do nothing for udc - } - } - } - - // validate the table columns information - for (int32_t i = 0; i < taosArrayGetSize(pQueryInfo->colList); ++i) { - SColumn *pCol = taosArrayGetP(pQueryInfo->colList, i); - if (pCol->columnIndex >= numOfCols) { - return pSql->retryReason; - } - } - } else { - // do nothing - } - - return TSDB_CODE_SUCCESS; -} - void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { SSqlObj* pSql = (SSqlObj*)taosAcquireRef(tscObjRef, (int64_t)param); if (pSql == NULL) return; @@ -391,7 +336,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { pRes->code = code; SSqlObj *sub = (SSqlObj*) res; - const char* msg = (sub->cmd.command == TSDB_SQL_STABLEVGROUP)? "vgroup-list":"table-meta"; + const char* msg = (sub->cmd.command == TSDB_SQL_STABLEVGROUP)? 
"vgroup-list":"[multi-]tableMeta"; if (code != TSDB_CODE_SUCCESS) { tscError("0x%"PRIx64" get %s failed, code:%s", pSql->self, msg, tstrerror(code)); goto _error; @@ -401,31 +346,11 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { if (pSql->pStream == NULL) { SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd); - // check if it is a sub-query of super table query first, if true, enter another routine - if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, (TSDB_QUERY_TYPE_STABLE_SUBQUERY | TSDB_QUERY_TYPE_SUBQUERY | - TSDB_QUERY_TYPE_TAG_FILTER_QUERY))) { - tscDebug("0x%" PRIx64 " update cached table-meta, continue to process sql and send the corresponding query", pSql->self); - STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); +// assert(TSDB_QUERY_HAS_TYPE(pQueryInfo->type, (TSDB_QUERY_TYPE_STABLE_SUBQUERY | TSDB_QUERY_TYPE_SUBQUERY | TSDB_QUERY_TYPE_TAG_FILTER_QUERY)) == 0); - code = tscGetTableMeta(pSql, pTableMetaInfo); - assert(code == TSDB_CODE_TSC_ACTION_IN_PROGRESS || code == TSDB_CODE_SUCCESS); - - if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { - taosReleaseRef(tscObjRef, pSql->self); - return; - } - - assert((tscGetNumOfTags(pTableMetaInfo->pTableMeta) != 0)); - code = updateMetaBeforeRetryQuery(pSql, pTableMetaInfo, pQueryInfo); - if (code != TSDB_CODE_SUCCESS) { - goto _error; - } - - // tscBuildAndSendRequest can add error into async res - tscBuildAndSendRequest(pSql, NULL); - taosReleaseRef(tscObjRef, pSql->self); - return; - } else { // continue to process normal async query + // super table subquery failure will be ignored +// if (!TSDB_QUERY_HAS_TYPE(pQueryInfo->type, (TSDB_QUERY_TYPE_STABLE_SUBQUERY | TSDB_QUERY_TYPE_SUBQUERY | +// TSDB_QUERY_TYPE_TAG_FILTER_QUERY))) { if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_INSERT)) { tscDebug("0x%" PRIx64 " continue parse sql after get table-meta", pSql->self); @@ -437,7 +362,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { goto _error; } - if (TSDB_QUERY_HAS_TYPE(pCmd->insertParam.insertType, TSDB_QUERY_TYPE_STMT_INSERT)) { + if (TSDB_QUERY_HAS_TYPE(pCmd->insertParam.insertType, TSDB_QUERY_TYPE_STMT_INSERT)) { // stmt insert STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); code = tscGetTableMeta(pSql, pTableMetaInfo); if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { @@ -448,17 +373,14 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { } (*pSql->fp)(pSql->param, pSql, code); - } else { - if (TSDB_QUERY_HAS_TYPE(pCmd->insertParam.insertType, TSDB_QUERY_TYPE_FILE_INSERT)) { - tscImportDataFromFile(pSql); - } else { - tscHandleMultivnodeInsert(pSql); - } + } else if (TSDB_QUERY_HAS_TYPE(pCmd->insertParam.insertType, TSDB_QUERY_TYPE_FILE_INSERT)) { // file insert + tscImportDataFromFile(pSql); + } else { // sql string insert + tscHandleMultivnodeInsert(pSql); } } else { if (pSql->retryReason != TSDB_CODE_SUCCESS) { - tscDebug("0x%" PRIx64 " update cached table-meta, re-validate sql statement and send query again", - pSql->self); + tscDebug("0x%" PRIx64 " update cached table-meta, re-validate sql statement and send query again", pSql->self); tscResetSqlCmd(pCmd, false); pSql->retryReason = TSDB_CODE_SUCCESS; } else { @@ -479,7 +401,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { taosReleaseRef(tscObjRef, pSql->self); return; - } +// } } else { // stream computing tscDebug("0x%"PRIx64" stream:%p meta is updated, start new query, command:%d", pSql->self, pSql->pStream, pCmd->command); diff --git a/src/client/src/tscServer.c 
b/src/client/src/tscServer.c index b60a28b9fd..dd17e25f58 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -390,33 +390,40 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) { pSql->cmd.insertParam.schemaAttached = 1; } + // single table query error need to be handled here. if ((cmd == TSDB_SQL_SELECT || cmd == TSDB_SQL_UPDATE_TAGS_VAL) && - (rpcMsg->code == TSDB_CODE_TDB_INVALID_TABLE_ID || // change the retry procedure - /*(*/rpcMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID || + (((rpcMsg->code == TSDB_CODE_TDB_INVALID_TABLE_ID || // change the retry procedure + rpcMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID)) || rpcMsg->code == TSDB_CODE_RPC_NETWORK_UNAVAIL || // change the retry procedure rpcMsg->code == TSDB_CODE_APP_NOT_READY)) { - - pSql->retry++; - tscWarn("0x%"PRIx64" it shall renew table meta, code:%s, retry:%d", pSql->self, tstrerror(rpcMsg->code), pSql->retry); - pSql->res.code = rpcMsg->code; // keep the previous error code - if (pSql->retry > pSql->maxRetry) { - tscError("0x%"PRIx64" max retry %d reached, give up", pSql->self, pSql->maxRetry); - } else { - // wait for a little bit moment and then retry - // todo do not sleep in rpc callback thread, add this process into queue to process - if (rpcMsg->code == TSDB_CODE_APP_NOT_READY || rpcMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID) { - int32_t duration = getWaitingTimeInterval(pSql->retry); - taosMsleep(duration); - } + if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, (TSDB_QUERY_TYPE_STABLE_SUBQUERY | TSDB_QUERY_TYPE_SUBQUERY | + TSDB_QUERY_TYPE_TAG_FILTER_QUERY)) && + !TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_PROJECTION_QUERY)) { + // do nothing in case of super table subquery + } else { + pSql->retry += 1; + tscWarn("0x%" PRIx64 " it shall renew table meta, code:%s, retry:%d", pSql->self, tstrerror(rpcMsg->code), pSql->retry); - pSql->retryReason = rpcMsg->code; - rpcMsg->code = tscRenewTableMeta(pSql, 0); - // if there is an error occurring, proceed to the following error handling procedure. - if (rpcMsg->code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { - taosReleaseRef(tscObjRef, handle); - rpcFreeCont(rpcMsg->pCont); - return; + pSql->res.code = rpcMsg->code; // keep the previous error code + if (pSql->retry > pSql->maxRetry) { + tscError("0x%" PRIx64 " max retry %d reached, give up", pSql->self, pSql->maxRetry); + } else { + // wait for a little bit moment and then retry + // todo do not sleep in rpc callback thread, add this process into queue to process + if (rpcMsg->code == TSDB_CODE_APP_NOT_READY || rpcMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID) { + int32_t duration = getWaitingTimeInterval(pSql->retry); + taosMsleep(duration); + } + + pSql->retryReason = rpcMsg->code; + rpcMsg->code = tscRenewTableMeta(pSql, 0); + // if there is an error occurring, proceed to the following error handling procedure. 
+ if (rpcMsg->code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { + taosReleaseRef(tscObjRef, handle); + rpcFreeCont(rpcMsg->pCont); + return; + } } } } @@ -2521,14 +2528,7 @@ int tscProcessDropDbRsp(SSqlObj *pSql) { int tscProcessDropTableRsp(SSqlObj *pSql) { STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0); - - //The cached tableMeta is expired in this case, so clean it in hash table - char name[TSDB_TABLE_FNAME_LEN] = {0}; - tNameExtractFullName(&pTableMetaInfo->name, name); - - taosHashRemove(tscTableMetaMap, name, strnlen(name, TSDB_TABLE_FNAME_LEN)); - tscDebug("0x%"PRIx64" remove table meta after drop table:%s, numOfRemain:%d", pSql->self, name, (int32_t) taosHashGetSize(tscTableMetaMap)); - + tscRemoveTableMetaBuf(pTableMetaInfo, pSql->self); tfree(pTableMetaInfo->pTableMeta); return 0; } @@ -2915,9 +2915,7 @@ static void freeElem(void* p) { * @return status code */ int tscRenewTableMeta(SSqlObj *pSql, int32_t tableIndex) { - SSqlCmd *pCmd = &pSql->cmd; - - SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd); + SQueryInfo *pQueryInfo = tscGetQueryInfo(&pSql->cmd); STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, tableIndex); char name[TSDB_TABLE_FNAME_LEN] = {0}; @@ -2934,15 +2932,7 @@ int tscRenewTableMeta(SSqlObj *pSql, int32_t tableIndex) { } // remove stored tableMeta info in hash table - size_t len = strlen(name); - taosHashRemove(tscTableMetaMap, name, len); - - if (pTableMeta->tableType == TSDB_SUPER_TABLE) { - void* pv = taosCacheAcquireByKey(tscVgroupListBuf, name, len); - if (pv != NULL) { - taosCacheRelease(tscVgroupListBuf, &pv, true); - } - } + tscRemoveTableMetaBuf(pTableMetaInfo, pSql->self); SArray* pNameList = taosArrayInit(1, POINTER_BYTES); SArray* vgroupList = taosArrayInit(1, POINTER_BYTES); diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index b98ffd7638..7c10114cfd 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2704,8 +2704,7 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO tstrerror(pParentSql->res.code)); // release allocated resource - tscDestroyGlobalMergerEnv(trsupport->pExtMemBuffer, trsupport->pOrderDescriptor, - pState->numOfSub); + tscDestroyGlobalMergerEnv(trsupport->pExtMemBuffer, trsupport->pOrderDescriptor, pState->numOfSub); tscFreeRetrieveSup(pSql); @@ -2713,7 +2712,35 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO SQueryInfo *pQueryInfo = tscGetQueryInfo(&pParentSql->cmd); if (!TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_SEC_STAGE)) { - (*pParentSql->fp)(pParentSql->param, pParentSql, pParentSql->res.code); + + int32_t code = pParentSql->res.code; + if ((code == TSDB_CODE_TDB_INVALID_TABLE_ID || code == TSDB_CODE_VND_INVALID_VGROUP_ID) && pParentSql->retry < pParentSql->maxRetry) { + // remove the cached tableMeta and vgroup id list, and then parse the sql again + STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pParentSql->cmd, 0); + tscRemoveTableMetaBuf(pTableMetaInfo, pParentSql->self); + + tscResetSqlCmd(&pParentSql->cmd, true); + pParentSql->res.code = TSDB_CODE_SUCCESS; + pParentSql->retry++; + + tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", pParentSql->self, + tstrerror(code), pParentSql->retry); + + code = tsParseSql(pParentSql, true); + if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { + return; + } + + if (code != TSDB_CODE_SUCCESS) { + pParentSql->res.code = code; + tscAsyncResultOnError(pParentSql); + return; + } + + 
executeQuery(pParentSql, pQueryInfo); + } else { + (*pParentSql->fp)(pParentSql->param, pParentSql, pParentSql->res.code); + } } else { // regular super table query if (pParentSql->res.code != TSDB_CODE_SUCCESS) { tscAsyncResultOnError(pParentSql); @@ -2996,7 +3023,7 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) { if (taos_errno(pSql) != TSDB_CODE_SUCCESS) { assert(code == taos_errno(pSql)); - if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY) { + if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY && (code != TSDB_CODE_TDB_INVALID_TABLE_ID)) { tscError("0x%"PRIx64" sub:0x%"PRIx64" failed code:%s, retry:%d", pParentSql->self, pSql->self, tstrerror(code), trsupport->numOfRetry); int32_t sent = 0; @@ -3005,7 +3032,7 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) { return; } } else { - tscError("0x%"PRIx64" sub:0x%"PRIx64" reach the max retry times, set global code:%s", pParentSql->self, pSql->self, tstrerror(code)); + tscError("0x%"PRIx64" sub:0x%"PRIx64" reach the max retry times or no need to retry, set global code:%s", pParentSql->self, pSql->self, tstrerror(code)); atomic_val_compare_exchange_32(&pParentSql->res.code, TSDB_CODE_SUCCESS, code); // set global code and abort } @@ -3129,8 +3156,6 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows) } pParentObj->res.code = TSDB_CODE_SUCCESS; -// pParentObj->cmd.parseFinished = false; - tscResetSqlCmd(&pParentObj->cmd, false); // in case of insert, redo parsing the sql string and build new submit data block for two reasons: diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 4454844ea0..ae5a7a69a8 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -4857,3 +4857,19 @@ SNewVgroupInfo createNewVgroupInfo(SVgroupMsg *pVgroupMsg) { return info; } + +void tscRemoveTableMetaBuf(STableMetaInfo* pTableMetaInfo, uint64_t id) { + char fname[TSDB_TABLE_FNAME_LEN] = {0}; + tNameExtractFullName(&pTableMetaInfo->name, fname); + + int32_t len = (int32_t) strnlen(fname, TSDB_TABLE_FNAME_LEN); + if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { + void* pv = taosCacheAcquireByKey(tscVgroupListBuf, fname, len); + if (pv != NULL) { + taosCacheRelease(tscVgroupListBuf, &pv, true); + } + } + + taosHashRemove(tscTableMetaMap, fname, len); + tscDebug("0x%"PRIx64" remove table meta %s, numOfRemain:%d", id, fname, (int32_t) taosHashGetSize(tscTableMetaMap)); +} \ No newline at end of file From 7845fc96821ec8f29fd5ca813e67cbaa38e17f9c Mon Sep 17 00:00:00 2001 From: happyguoxy Date: Wed, 28 Jul 2021 14:04:59 +0800 Subject: [PATCH 25/38] [TD-5074]:test operator cost --- tests/pytest/query/operator_cost.py | 539 ++++++++++++++++++++++++++++ 1 file changed, 539 insertions(+) create mode 100644 tests/pytest/query/operator_cost.py diff --git a/tests/pytest/query/operator_cost.py b/tests/pytest/query/operator_cost.py new file mode 100644 index 0000000000..b94d5fa3b3 --- /dev/null +++ b/tests/pytest/query/operator_cost.py @@ -0,0 +1,539 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql +import random +import time + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + self.ts = 1600000000000 + self.num = 10 + + def run(self): + tdSql.prepare() + # test case for https://jira.taosdata.com:18080/browse/TD-5074 + + startTime = time.time() + + tdSql.execute('''create stable stable_1 + (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint, + q_bool bool , q_binary binary(20) , q_nchar nchar(20) , + q_float float , q_double double , q_ts timestamp) + tags(loc nchar(20) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, + t_bool bool , t_binary binary(20) , t_nchar nchar(20) , + t_float float , t_double double , t_ts timestamp);''') + tdSql.execute('''create stable stable_2 + (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint, + q_bool bool , q_binary binary(20) , q_nchar nchar(20) , + q_float float , q_double double , q_ts timestamp) + tags(loc nchar(20) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, + t_bool bool , t_binary binary(20) , t_nchar nchar(20) , + t_float float , t_double double , t_ts timestamp);''') + tdSql.execute('''create table table_0 using stable_1 + tags('table_0' , '0' , '0' , '0' , '0' , 0 , '0' , '0' , '0' , '0' ,'0')''') + tdSql.execute('''create table table_1 using stable_1 + tags('table_1' , '2147483647' , '9223372036854775807' , '32767' , '127' , 1 , + 'binary1' , 'nchar1' , '1' , '11' , \'1999-09-09 09:09:09.090\')''') + tdSql.execute('''create table table_2 using stable_1 + tags('table_2' , '-2147483647' , '-9223372036854775807' , '-32767' , '-127' , false , + 'binary2' , 'nchar2nchar2' , '-2.2' , '-22.22' , \'2099-09-09 09:09:09.090\')''') + tdSql.execute('''create table table_3 using stable_1 + tags('table_3' , '3' , '3' , '3' , '3' , true , 'binary3' , 'nchar3' , '33.33' , '3333.3333' , '0')''') + tdSql.execute('''create table table_4 using stable_1 + tags('table_4' , '4' , '4' , '4' , '4' , false , 'binary4' , 'nchar4' , '-444.444' , '-444444.444444' , '0')''') + tdSql.execute('''create table table_5 using stable_1 + tags('table_5' , '5' , '5' , '5' , '5' , true , 'binary5' , 'nchar5' , '5555.5555' , '55555555.55555555' , '0')''') + tdSql.execute('''create table table_21 using stable_2 + tags('table_5' , '5' , '5' , '5' , '5' , true , 'binary5' , 'nchar5' , '5555.5555' , '55555555.55555555' , '0')''') + #regular table + tdSql.execute('''create table regular_table_1 + (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint, + q_bool bool , q_binary binary(20) , q_nchar nchar(20) , + q_float float , q_double double , q_ts timestamp) ;''') + + for i in range(self.num): + tdSql.execute('''insert into table_0 values(%d, %d, %d, %d, %d, 0, 'binary.%s', 'nchar.%s', %f, %f, %d)''' + % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) + tdSql.execute('''insert into table_1 values(%d, %d, %d, %d, %d, 1, 'binary1.%s', 'nchar1.%s', %f, %f, %d)''' + % (self.ts + i, 2147483647-i, 9223372036854775807-i, 32767-i, 127-i, + i, i, 
random.random(), random.random(), 1262304000001 + i)) + tdSql.execute('''insert into table_2 values(%d, %d, %d, %d, %d, true, 'binary2.%s', 'nchar2nchar2.%s', %f, %f, %d)''' + % (self.ts + i, -2147483647+i, -9223372036854775807+i, -32767+i, -127+i, + i, i, random.uniform(-1,0), random.uniform(-1,0), 1577836800001 + i)) + tdSql.execute('''insert into table_3 values(%d, %d, %d, %d, %d, false, 'binary3.%s', 'nchar3.%s', %f, %f, %d)''' + % (self.ts + i, random.randint(-2147483647, 2147483647), + random.randint(-9223372036854775807, 9223372036854775807), random.randint(-32767, 32767), + random.randint(-127, 127), random.randint(-100, 100), random.randint(-10000, 10000), + random.uniform(-100000,100000), random.uniform(-1000000000,1000000000), self.ts + i)) + tdSql.execute('''insert into table_4 values(%d, %d, %d, %d, %d, true, 'binary4.%s', 'nchar4.%s', %f, %f, %d)''' + % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) + tdSql.execute('''insert into table_5 values(%d, %d, %d, %d, %d, false, 'binary5.%s', 'nchar5.%s', %f, %f, %d)''' + % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) + tdSql.execute('''insert into table_21 values(%d, %d, %d, %d, %d, false, 'binary5.%s', 'nchar5.%s', %f, %f, %d)''' + % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) + + tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, 0, 'binary.%s', 'nchar.%s', %f, %f, %d)''' + % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) + tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, 1, 'binary1.%s', 'nchar1.%s', %f, %f, %d)''' + % (self.ts + 100 + i, 2147483647-i, 9223372036854775807-i, 32767-i, 127-i, + i, i, random.random(), random.random(), 1262304000001 + i)) + tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, true, 'binary2.%s', 'nchar2nchar2.%s', %f, %f, %d)''' + % (self.ts + 200 + i, -2147483647+i, -9223372036854775807+i, -32767+i, -127+i, + i, i, random.uniform(-1,0), random.uniform(-1,0), 1577836800001 + i)) + tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, false, 'binary3.%s', 'nchar3.%s', %f, %f, %d)''' + % (self.ts + 300 + i, random.randint(-2147483647, 2147483647), + random.randint(-9223372036854775807, 9223372036854775807), random.randint(-32767, 32767), + random.randint(-127, 127), random.randint(-100, 100), random.randint(-10000, 10000), + random.uniform(-100000,100000), random.uniform(-1000000000,1000000000), self.ts + i)) + tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, true, 'binary4.%s', 'nchar4.%s', %f, %f, %d)''' + % (self.ts + 400 + i, i, i, i, i, i, i, i, i, self.ts + i)) + tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, false, 'binary5.%s', 'nchar5.%s', %f, %f, %d)''' + % (self.ts + 500 + i, i, i, i, i, i, i, i, i, self.ts + i)) + + tdLog.info("========== operator=1(OP_TableScan) ==========") + tdLog.info("========== operator=7(OP_Project) ==========") + sql = '''select * from stable_1''' + tdSql.query(sql) + tdSql.checkRows(6*self.num) + sql = '''select * from regular_table_1''' + tdSql.query(sql) + tdSql.checkRows(6*self.num) + + tdLog.info("========== operator=14(OP_MultiTableAggregate ) ==========") + sql = '''select last_row(*) from stable_1;''' + tdSql.query(sql) + tdSql.checkData(0,1,self.num-1) + + tdLog.info("========== operator=6(OP_Aggregate) ==========") + sql = '''select last_row(*) from regular_table_1;''' + tdSql.query(sql) + tdSql.checkData(0,1,self.num-1) + + tdLog.info("========== operator=9(OP_Limit) ==========") + sql = '''select * from 
stable_1 where loc = 'table_0' limit 5;''' + tdSql.query(sql) + tdSql.checkRows(5) + sql = '''select last_row(*) from (select * from stable_1 where loc = 'table_0');''' + tdSql.query(sql) + tdSql.checkRows(1) + + sql = '''select * from regular_table_1 ;''' + tdSql.query(sql) + tdSql.checkRows(6*self.num) + sql = '''select last_row(*) from (select * from regular_table_1);''' + tdSql.query(sql) + tdSql.checkRows(1) + tdSql.checkData(0,1,self.num-1) + + + sql = '''select last_row(*) from + ((select * from table_0) union all + (select * from table_1) union all + (select * from table_2));''' + tdSql.error(sql) + + tdLog.info("========== operator=16(OP_DummyInput) ==========") + sql = '''select last_row(*) from + ((select last_row(*) from table_0) union all + (select last_row(*) from table_1) union all + (select last_row(*) from table_2));''' + tdSql.error(sql) + + sql = '''select last_row(*) from + ((select * from table_0 limit 5 offset 5) union all + (select * from table_1 limit 5 offset 5) union all + (select * from regular_table_1 limit 5 offset 5));''' + tdSql.error(sql) + + tdLog.info("========== operator=10(OP_SLimit) ==========") + sql = '''select count(*) from stable_1 group by loc slimit 3 soffset 2 ;''' + tdSql.query(sql) + tdSql.checkRows(3) + + sql = '''select last_row(*) from + ((select * from table_0) union all + (select * from table_1) union all + (select * from table_2));''' + tdSql.error(sql) + + tdLog.info("========== operator=20(OP_Distinct) ==========") + tdLog.info("========== operator=4(OP_TagScan) ==========") + sql = '''select distinct(t_bool) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(2) + sql = '''select distinct(loc) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_int) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_bigint) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_smallint) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_tinyint) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_binary) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_nchar) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_float) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_double) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_ts) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(3) + sql = '''select distinct(tbname) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + + tdLog.info("========== operator=2(OP_DataBlocksOptScan) ==========") + sql = '''select last(q_int),first(q_int) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_bigint),first(q_bigint) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_smallint),first(q_smallint) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_tinyint),first(q_tinyint) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_bool),first(q_bool) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_binary),first(q_binary) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_nchar),first(q_nchar) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_float),first(q_float) from 
stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_double),first(q_double) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_ts),first(q_ts) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint),last(q_bool),last(q_binary),last(q_nchar), + last(q_float),last(q_double),last(q_ts),first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint), + first(q_bool),first(q_binary),first(q_nchar),first(q_float),first(q_float),first(q_double),first(q_ts) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint),last(q_bool),last(q_binary),last(q_nchar), + last(q_float),last(q_double),last(q_ts),first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint),first(q_bool), + first(q_binary),first(q_nchar),first(q_float),first(q_float),first(q_double),first(q_ts) from regular_table_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + + tdLog.info("========== operator=8(OP_Groupby) ==========") + sql = '''select stddev(q_int) from table_0 group by q_int;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select stddev(q_int),stddev(q_bigint),stddev(q_smallint),stddev(q_tinyint),stddev(q_float),stddev(q_double) from stable_1 group by q_int;''' + tdSql.query(sql) + sql = '''select stddev(q_int),stddev(q_bigint),stddev(q_smallint),stddev(q_tinyint),stddev(q_float),stddev(q_double) from table_1 group by q_bigint;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select stddev(q_int),stddev(q_bigint),stddev(q_smallint),stddev(q_tinyint),stddev(q_float),stddev(q_double) from regular_table_1 group by q_smallint;''' + tdSql.query(sql) + + tdLog.info("========== operator=11(OP_TimeWindow) ==========") + sql = '''select last(q_int) from table_0 interval(1m);''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint), + first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint) from table_1 interval(1m);''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint), + first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint) from stable_1 interval(1m);''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint), + first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint) from regular_table_1 interval(1m);''' + tdSql.query(sql) + tdSql.checkRows(1) + + tdLog.info("========== operator=12(OP_SessionWindow) ==========") + sql = '''select count(*) from table_1 session(ts,1s);''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select count(*) from regular_table_1 session(ts,1s);''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select count(*),sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from table_1 session(ts,1s);''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = 
'''select count(*),sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from regular_table_1 session(ts,1s);''' + tdSql.query(sql) + tdSql.checkRows(1) + + tdLog.info("========== operator=13(OP_Fill) ==========") + sql = '''select sum(q_int) from table_0 + where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' + tdSql.query(sql) + tdSql.checkData(0,1,'None') + sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from stable_1 where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' + tdSql.query(sql) + tdSql.checkData(0,1,'None') + sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from regular_table_1 where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' + tdSql.query(sql) + tdSql.checkData(0,1,'None') + sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from table_0 where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' + tdSql.query(sql) + tdSql.checkData(0,1,'None') + #TD-5190 + sql = '''select sum(q_tinyint),stddev(q_float) from stable_1 + where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' + tdSql.query(sql) + tdSql.checkData(0,1,'None') + + tdLog.info("========== operator=15(OP_MultiTableTimeInterval) ==========") + sql = '''select avg(q_int) from stable_1 where ts=0;''' + tdSql.query(sql) + tdSql.checkData(0,0,'table_0') + sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), 
avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from stable_1 group by loc having sum(q_int)>=0;''' + tdSql.query(sql) + tdSql.checkData(0,0,'table_0') + sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from stable_1 group by loc having avg(q_int)>=0;''' + tdSql.query(sql) + tdSql.checkData(0,0,'table_0') + sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from stable_1 group by loc having min(q_int)>=0;''' + tdSql.query(sql) + tdSql.checkData(0,0,'table_0') + sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from stable_1 group by loc having max(q_int)>=0;''' + tdSql.query(sql) + tdSql.checkData(0,0,'table_0') + sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from stable_1 group by loc having first(q_int)>=0;''' + tdSql.query(sql) + tdSql.checkData(0,0,'table_0') + sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), 
avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from stable_1 group by loc having last(q_int)>=0;''' + tdSql.query(sql) + tdSql.checkData(0,0,'table_0') + + tdLog.info("========== operator=21(OP_Join) ==========") + sql = '''select t1.q_int,t2.q_int from + (select ts,q_int from table_1) t1 , (select ts,q_int from table_2) t2 + where t2.ts = t1.ts;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select t1.*,t2.* from + (select * from table_1) t1 , (select * from table_2) t2 + where t2.ts = t1.ts;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select t1.*,t2.* from + (select * from regular_table_1) t1 , (select * from table_0) t2 + where t2.ts = t1.ts;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select t1.*,t2.* from + (select * from stable_1) t1 , (select * from table_2) t2 + where t2.ts = t1.ts;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select t1.*,t2.* from + (select * from regular_table_1) t1 , (select * from stable_1) t2 + where t2.ts = t1.ts;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select t1.*,t2.*,t3.* from + (select * from regular_table_1) t1 , (select * from stable_1) t2, (select * from table_0) t3 + where t2.ts = t1.ts and t3.ts = t1.ts and t2.ts = t3.ts;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + + tdLog.info("========== operator=22(OP_StateWindow) ==========") + sql = '''select avg(q_int),sum(q_smallint) from table_1 state_window(q_int);''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from table_1 state_window(q_bigint);''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from regular_table_1 state_window(q_smallint);''' + tdSql.query(sql) + tdSql.checkRows(6*self.num) + + endTime = time.time() + print("total time %ds" % (endTime - startTime)) + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file From 8b6d976e86db51c9106c054747bc48aacc243b05 Mon Sep 17 00:00:00 2001 From: 
happyguoxy Date: Wed, 28 Jul 2021 14:05:16 +0800 Subject: [PATCH 26/38] [TD-5074]:test operator cost --- tests/pytest/query/operator.py | 539 --------------------------------- 1 file changed, 539 deletions(-) delete mode 100644 tests/pytest/query/operator.py diff --git a/tests/pytest/query/operator.py b/tests/pytest/query/operator.py deleted file mode 100644 index b94d5fa3b3..0000000000 --- a/tests/pytest/query/operator.py +++ /dev/null @@ -1,539 +0,0 @@ -################################################################### -# Copyright (c) 2016 by TAOS Technologies, Inc. -# All rights reserved. -# -# This file is proprietary and confidential to TAOS Technologies. -# No part of this file may be reproduced, stored, transmitted, -# disclosed or used in any form or by any means other than as -# expressly provided by the written permission from Jianhui Tao -# -################################################################### - -# -*- coding: utf-8 -*- - -import sys -import taos -from util.log import tdLog -from util.cases import tdCases -from util.sql import tdSql -import random -import time - - -class TDTestCase: - def init(self, conn, logSql): - tdLog.debug("start to execute %s" % __file__) - tdSql.init(conn.cursor(), logSql) - - self.ts = 1600000000000 - self.num = 10 - - def run(self): - tdSql.prepare() - # test case for https://jira.taosdata.com:18080/browse/TD-5074 - - startTime = time.time() - - tdSql.execute('''create stable stable_1 - (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint, - q_bool bool , q_binary binary(20) , q_nchar nchar(20) , - q_float float , q_double double , q_ts timestamp) - tags(loc nchar(20) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, - t_bool bool , t_binary binary(20) , t_nchar nchar(20) , - t_float float , t_double double , t_ts timestamp);''') - tdSql.execute('''create stable stable_2 - (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint, - q_bool bool , q_binary binary(20) , q_nchar nchar(20) , - q_float float , q_double double , q_ts timestamp) - tags(loc nchar(20) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, - t_bool bool , t_binary binary(20) , t_nchar nchar(20) , - t_float float , t_double double , t_ts timestamp);''') - tdSql.execute('''create table table_0 using stable_1 - tags('table_0' , '0' , '0' , '0' , '0' , 0 , '0' , '0' , '0' , '0' ,'0')''') - tdSql.execute('''create table table_1 using stable_1 - tags('table_1' , '2147483647' , '9223372036854775807' , '32767' , '127' , 1 , - 'binary1' , 'nchar1' , '1' , '11' , \'1999-09-09 09:09:09.090\')''') - tdSql.execute('''create table table_2 using stable_1 - tags('table_2' , '-2147483647' , '-9223372036854775807' , '-32767' , '-127' , false , - 'binary2' , 'nchar2nchar2' , '-2.2' , '-22.22' , \'2099-09-09 09:09:09.090\')''') - tdSql.execute('''create table table_3 using stable_1 - tags('table_3' , '3' , '3' , '3' , '3' , true , 'binary3' , 'nchar3' , '33.33' , '3333.3333' , '0')''') - tdSql.execute('''create table table_4 using stable_1 - tags('table_4' , '4' , '4' , '4' , '4' , false , 'binary4' , 'nchar4' , '-444.444' , '-444444.444444' , '0')''') - tdSql.execute('''create table table_5 using stable_1 - tags('table_5' , '5' , '5' , '5' , '5' , true , 'binary5' , 'nchar5' , '5555.5555' , '55555555.55555555' , '0')''') - tdSql.execute('''create table table_21 using stable_2 - tags('table_5' , '5' , '5' , '5' , '5' , true , 'binary5' , 'nchar5' , '5555.5555' , '55555555.55555555' 
, '0')''') - #regular table - tdSql.execute('''create table regular_table_1 - (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint, - q_bool bool , q_binary binary(20) , q_nchar nchar(20) , - q_float float , q_double double , q_ts timestamp) ;''') - - for i in range(self.num): - tdSql.execute('''insert into table_0 values(%d, %d, %d, %d, %d, 0, 'binary.%s', 'nchar.%s', %f, %f, %d)''' - % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) - tdSql.execute('''insert into table_1 values(%d, %d, %d, %d, %d, 1, 'binary1.%s', 'nchar1.%s', %f, %f, %d)''' - % (self.ts + i, 2147483647-i, 9223372036854775807-i, 32767-i, 127-i, - i, i, random.random(), random.random(), 1262304000001 + i)) - tdSql.execute('''insert into table_2 values(%d, %d, %d, %d, %d, true, 'binary2.%s', 'nchar2nchar2.%s', %f, %f, %d)''' - % (self.ts + i, -2147483647+i, -9223372036854775807+i, -32767+i, -127+i, - i, i, random.uniform(-1,0), random.uniform(-1,0), 1577836800001 + i)) - tdSql.execute('''insert into table_3 values(%d, %d, %d, %d, %d, false, 'binary3.%s', 'nchar3.%s', %f, %f, %d)''' - % (self.ts + i, random.randint(-2147483647, 2147483647), - random.randint(-9223372036854775807, 9223372036854775807), random.randint(-32767, 32767), - random.randint(-127, 127), random.randint(-100, 100), random.randint(-10000, 10000), - random.uniform(-100000,100000), random.uniform(-1000000000,1000000000), self.ts + i)) - tdSql.execute('''insert into table_4 values(%d, %d, %d, %d, %d, true, 'binary4.%s', 'nchar4.%s', %f, %f, %d)''' - % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) - tdSql.execute('''insert into table_5 values(%d, %d, %d, %d, %d, false, 'binary5.%s', 'nchar5.%s', %f, %f, %d)''' - % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) - tdSql.execute('''insert into table_21 values(%d, %d, %d, %d, %d, false, 'binary5.%s', 'nchar5.%s', %f, %f, %d)''' - % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) - - tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, 0, 'binary.%s', 'nchar.%s', %f, %f, %d)''' - % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) - tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, 1, 'binary1.%s', 'nchar1.%s', %f, %f, %d)''' - % (self.ts + 100 + i, 2147483647-i, 9223372036854775807-i, 32767-i, 127-i, - i, i, random.random(), random.random(), 1262304000001 + i)) - tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, true, 'binary2.%s', 'nchar2nchar2.%s', %f, %f, %d)''' - % (self.ts + 200 + i, -2147483647+i, -9223372036854775807+i, -32767+i, -127+i, - i, i, random.uniform(-1,0), random.uniform(-1,0), 1577836800001 + i)) - tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, false, 'binary3.%s', 'nchar3.%s', %f, %f, %d)''' - % (self.ts + 300 + i, random.randint(-2147483647, 2147483647), - random.randint(-9223372036854775807, 9223372036854775807), random.randint(-32767, 32767), - random.randint(-127, 127), random.randint(-100, 100), random.randint(-10000, 10000), - random.uniform(-100000,100000), random.uniform(-1000000000,1000000000), self.ts + i)) - tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, true, 'binary4.%s', 'nchar4.%s', %f, %f, %d)''' - % (self.ts + 400 + i, i, i, i, i, i, i, i, i, self.ts + i)) - tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, false, 'binary5.%s', 'nchar5.%s', %f, %f, %d)''' - % (self.ts + 500 + i, i, i, i, i, i, i, i, i, self.ts + i)) - - tdLog.info("========== operator=1(OP_TableScan) ==========") - 
tdLog.info("========== operator=7(OP_Project) ==========") - sql = '''select * from stable_1''' - tdSql.query(sql) - tdSql.checkRows(6*self.num) - sql = '''select * from regular_table_1''' - tdSql.query(sql) - tdSql.checkRows(6*self.num) - - tdLog.info("========== operator=14(OP_MultiTableAggregate ) ==========") - sql = '''select last_row(*) from stable_1;''' - tdSql.query(sql) - tdSql.checkData(0,1,self.num-1) - - tdLog.info("========== operator=6(OP_Aggregate) ==========") - sql = '''select last_row(*) from regular_table_1;''' - tdSql.query(sql) - tdSql.checkData(0,1,self.num-1) - - tdLog.info("========== operator=9(OP_Limit) ==========") - sql = '''select * from stable_1 where loc = 'table_0' limit 5;''' - tdSql.query(sql) - tdSql.checkRows(5) - sql = '''select last_row(*) from (select * from stable_1 where loc = 'table_0');''' - tdSql.query(sql) - tdSql.checkRows(1) - - sql = '''select * from regular_table_1 ;''' - tdSql.query(sql) - tdSql.checkRows(6*self.num) - sql = '''select last_row(*) from (select * from regular_table_1);''' - tdSql.query(sql) - tdSql.checkRows(1) - tdSql.checkData(0,1,self.num-1) - - - sql = '''select last_row(*) from - ((select * from table_0) union all - (select * from table_1) union all - (select * from table_2));''' - tdSql.error(sql) - - tdLog.info("========== operator=16(OP_DummyInput) ==========") - sql = '''select last_row(*) from - ((select last_row(*) from table_0) union all - (select last_row(*) from table_1) union all - (select last_row(*) from table_2));''' - tdSql.error(sql) - - sql = '''select last_row(*) from - ((select * from table_0 limit 5 offset 5) union all - (select * from table_1 limit 5 offset 5) union all - (select * from regular_table_1 limit 5 offset 5));''' - tdSql.error(sql) - - tdLog.info("========== operator=10(OP_SLimit) ==========") - sql = '''select count(*) from stable_1 group by loc slimit 3 soffset 2 ;''' - tdSql.query(sql) - tdSql.checkRows(3) - - sql = '''select last_row(*) from - ((select * from table_0) union all - (select * from table_1) union all - (select * from table_2));''' - tdSql.error(sql) - - tdLog.info("========== operator=20(OP_Distinct) ==========") - tdLog.info("========== operator=4(OP_TagScan) ==========") - sql = '''select distinct(t_bool) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(2) - sql = '''select distinct(loc) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(6) - sql = '''select distinct(t_int) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(6) - sql = '''select distinct(t_bigint) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(6) - sql = '''select distinct(t_smallint) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(6) - sql = '''select distinct(t_tinyint) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(6) - sql = '''select distinct(t_binary) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(6) - sql = '''select distinct(t_nchar) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(6) - sql = '''select distinct(t_float) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(6) - sql = '''select distinct(t_double) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(6) - sql = '''select distinct(t_ts) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(3) - sql = '''select distinct(tbname) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(6) - - tdLog.info("========== operator=2(OP_DataBlocksOptScan) ==========") - sql = '''select last(q_int),first(q_int) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select 
last(q_bigint),first(q_bigint) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select last(q_smallint),first(q_smallint) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select last(q_tinyint),first(q_tinyint) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select last(q_bool),first(q_bool) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select last(q_binary),first(q_binary) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select last(q_nchar),first(q_nchar) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select last(q_float),first(q_float) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select last(q_double),first(q_double) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select last(q_ts),first(q_ts) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint),last(q_bool),last(q_binary),last(q_nchar), - last(q_float),last(q_double),last(q_ts),first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint), - first(q_bool),first(q_binary),first(q_nchar),first(q_float),first(q_float),first(q_double),first(q_ts) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint),last(q_bool),last(q_binary),last(q_nchar), - last(q_float),last(q_double),last(q_ts),first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint),first(q_bool), - first(q_binary),first(q_nchar),first(q_float),first(q_float),first(q_double),first(q_ts) from regular_table_1;''' - tdSql.query(sql) - tdSql.checkRows(1) - - tdLog.info("========== operator=8(OP_Groupby) ==========") - sql = '''select stddev(q_int) from table_0 group by q_int;''' - tdSql.query(sql) - tdSql.checkRows(self.num) - sql = '''select stddev(q_int),stddev(q_bigint),stddev(q_smallint),stddev(q_tinyint),stddev(q_float),stddev(q_double) from stable_1 group by q_int;''' - tdSql.query(sql) - sql = '''select stddev(q_int),stddev(q_bigint),stddev(q_smallint),stddev(q_tinyint),stddev(q_float),stddev(q_double) from table_1 group by q_bigint;''' - tdSql.query(sql) - tdSql.checkRows(self.num) - sql = '''select stddev(q_int),stddev(q_bigint),stddev(q_smallint),stddev(q_tinyint),stddev(q_float),stddev(q_double) from regular_table_1 group by q_smallint;''' - tdSql.query(sql) - - tdLog.info("========== operator=11(OP_TimeWindow) ==========") - sql = '''select last(q_int) from table_0 interval(1m);''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint), - first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint) from table_1 interval(1m);''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint), - first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint) from stable_1 interval(1m);''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint), - first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint) from regular_table_1 interval(1m);''' - tdSql.query(sql) - tdSql.checkRows(1) - - tdLog.info("========== operator=12(OP_SessionWindow) ==========") - sql = '''select count(*) from table_1 session(ts,1s);''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select count(*) from regular_table_1 session(ts,1s);''' - tdSql.query(sql) - tdSql.checkRows(1) 
- sql = '''select count(*),sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), - sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), - sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), - sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), - sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), - sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) - from table_1 session(ts,1s);''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select count(*),sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), - sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), - sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), - sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), - sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), - sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) - from regular_table_1 session(ts,1s);''' - tdSql.query(sql) - tdSql.checkRows(1) - - tdLog.info("========== operator=13(OP_Fill) ==========") - sql = '''select sum(q_int) from table_0 - where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' - tdSql.query(sql) - tdSql.checkData(0,1,'None') - sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), - sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), - sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), - sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), - sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), - sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) - from stable_1 where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' - tdSql.query(sql) - tdSql.checkData(0,1,'None') - sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), - sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), - sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), - sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), - sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), - sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) - from regular_table_1 where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' - tdSql.query(sql) - tdSql.checkData(0,1,'None') - sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), - sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), - sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), - sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), - sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), - sum(q_double), avg(q_double), min(q_double), max(q_double), 
first(q_double), last(q_double) - from table_0 where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' - tdSql.query(sql) - tdSql.checkData(0,1,'None') - #TD-5190 - sql = '''select sum(q_tinyint),stddev(q_float) from stable_1 - where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' - tdSql.query(sql) - tdSql.checkData(0,1,'None') - - tdLog.info("========== operator=15(OP_MultiTableTimeInterval) ==========") - sql = '''select avg(q_int) from stable_1 where ts=0;''' - tdSql.query(sql) - tdSql.checkData(0,0,'table_0') - sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), - sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), - sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), - sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), - sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), - sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) - from stable_1 group by loc having sum(q_int)>=0;''' - tdSql.query(sql) - tdSql.checkData(0,0,'table_0') - sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), - sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), - sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), - sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), - sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), - sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) - from stable_1 group by loc having avg(q_int)>=0;''' - tdSql.query(sql) - tdSql.checkData(0,0,'table_0') - sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), - sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), - sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), - sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), - sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), - sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) - from stable_1 group by loc having min(q_int)>=0;''' - tdSql.query(sql) - tdSql.checkData(0,0,'table_0') - sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), - sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), - sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), - sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), - sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), - sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) - from stable_1 group by loc having max(q_int)>=0;''' - tdSql.query(sql) - tdSql.checkData(0,0,'table_0') - sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), - sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), - sum(q_smallint), avg(q_smallint), 
min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), - sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), - sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), - sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) - from stable_1 group by loc having first(q_int)>=0;''' - tdSql.query(sql) - tdSql.checkData(0,0,'table_0') - sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), - sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), - sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), - sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), - sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), - sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) - from stable_1 group by loc having last(q_int)>=0;''' - tdSql.query(sql) - tdSql.checkData(0,0,'table_0') - - tdLog.info("========== operator=21(OP_Join) ==========") - sql = '''select t1.q_int,t2.q_int from - (select ts,q_int from table_1) t1 , (select ts,q_int from table_2) t2 - where t2.ts = t1.ts;''' - tdSql.query(sql) - tdSql.checkRows(self.num) - sql = '''select t1.*,t2.* from - (select * from table_1) t1 , (select * from table_2) t2 - where t2.ts = t1.ts;''' - tdSql.query(sql) - tdSql.checkRows(self.num) - sql = '''select t1.*,t2.* from - (select * from regular_table_1) t1 , (select * from table_0) t2 - where t2.ts = t1.ts;''' - tdSql.query(sql) - tdSql.checkRows(self.num) - sql = '''select t1.*,t2.* from - (select * from stable_1) t1 , (select * from table_2) t2 - where t2.ts = t1.ts;''' - tdSql.query(sql) - tdSql.checkRows(self.num) - sql = '''select t1.*,t2.* from - (select * from regular_table_1) t1 , (select * from stable_1) t2 - where t2.ts = t1.ts;''' - tdSql.query(sql) - tdSql.checkRows(self.num) - sql = '''select t1.*,t2.*,t3.* from - (select * from regular_table_1) t1 , (select * from stable_1) t2, (select * from table_0) t3 - where t2.ts = t1.ts and t3.ts = t1.ts and t2.ts = t3.ts;''' - tdSql.query(sql) - tdSql.checkRows(self.num) - - tdLog.info("========== operator=22(OP_StateWindow) ==========") - sql = '''select avg(q_int),sum(q_smallint) from table_1 state_window(q_int);''' - tdSql.query(sql) - tdSql.checkRows(self.num) - sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), - sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), - sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), - sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), - sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), - sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) - from table_1 state_window(q_bigint);''' - tdSql.query(sql) - tdSql.checkRows(self.num) - sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), - sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), - sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), - sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), 
first(q_tinyint), last(q_tinyint), - sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), - sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) - from regular_table_1 state_window(q_smallint);''' - tdSql.query(sql) - tdSql.checkRows(6*self.num) - - endTime = time.time() - print("total time %ds" % (endTime - startTime)) - - - def stop(self): - tdSql.close() - tdLog.success("%s successfully executed" % __file__) - - -tdCases.addWindows(__file__, TDTestCase()) -tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file From 3647f7defcd52e1ce374e6af3c504e50383cc441 Mon Sep 17 00:00:00 2001 From: happyguoxy Date: Wed, 28 Jul 2021 14:05:42 +0800 Subject: [PATCH 27/38] [TD-5074]:test operator cost --- tests/pytest/fulltest.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index eb068b6585..d54c413fba 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -241,7 +241,7 @@ python3 ./test.py -f query/queryStateWindow.py python3 ./test.py -f query/nestedQuery/queryWithOrderLimit.py python3 ./test.py -f query/nestquery_last_row.py python3 ./test.py -f query/queryCnameDisplay.py -python3 ./test.py -f query/operator.py +python3 ./test.py -f query/operator_cost.py python3 test.py -f query/nestedQuery/queryWithSpread.py #stream From e4a67f8046445013895402e47352f55a308a3044 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Wed, 28 Jul 2021 14:22:18 +0800 Subject: [PATCH 28/38] update local CI --- tests/Jenkinsfile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/Jenkinsfile b/tests/Jenkinsfile index c75427b5f4..fb96b669ff 100644 --- a/tests/Jenkinsfile +++ b/tests/Jenkinsfile @@ -48,7 +48,7 @@ pipeline { } } stage('test_b1') { - agent{label 'master'} + agent{label 'slad2'} steps { pre_test() @@ -62,7 +62,7 @@ pipeline { } stage('test_crash_gen') { - agent{label "slad2"} + agent{label "slad3"} steps { pre_test() sh ''' @@ -141,7 +141,7 @@ pipeline { } stage('test_valgrind') { - agent{label "slad3"} + agent{label "slad4"} steps { pre_test() From 1d3df4a9661b4c44660b364796dfdd20222db80c Mon Sep 17 00:00:00 2001 From: Zhiyu Yang <69311263+zyyang-taosdata@users.noreply.github.com> Date: Wed, 28 Jul 2021 14:31:16 +0800 Subject: [PATCH 29/38] [TD-5576]: fix localtime is unsafe in multi threads processing (#7045) * [TD-5576]: fix localtime is unsafe in multi threads processing * remove unecessary debug line --- src/plugins/http/src/httpJson.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/plugins/http/src/httpJson.c b/src/plugins/http/src/httpJson.c index 10300e9367..4bd66a17a3 100644 --- a/src/plugins/http/src/httpJson.c +++ b/src/plugins/http/src/httpJson.c @@ -264,8 +264,7 @@ void httpJsonUInt64(JsonBuf* buf, uint64_t num) { void httpJsonTimestamp(JsonBuf* buf, int64_t t, int32_t timePrecision) { char ts[35] = {0}; - struct tm* ptm; - int32_t fractionLen; char* format = NULL; time_t quot = 0; @@ -301,8 +300,9 @@ void httpJsonTimestamp(JsonBuf* buf, int64_t t, int32_t timePrecision) { assert(false); } - ptm = localtime(&quot); - int32_t length = (int32_t)strftime(ts, 35, "%Y-%m-%d %H:%M:%S", ptm); + struct tm ptm = {0}; + localtime_r(&quot, &ptm); + int32_t length = (int32_t)strftime(ts, 35, "%Y-%m-%d %H:%M:%S", &ptm); length += snprintf(ts + length, fractionLen, format, mod); httpJsonString(buf, ts, length); From 6e743d53e6f8b2e0e9ba32a91ba7b9f384953b09 Mon Sep 17 00:00:00 2001 From: Haojun
Liao Date: Wed, 28 Jul 2021 14:58:25 +0800 Subject: [PATCH 30/38] [td-225]fix the bug found by regression test. --- src/client/inc/tsclient.h | 2 + src/client/src/tscAsync.c | 102 ++++++++++++++--------------- src/client/src/tscServer.c | 10 ++- src/client/src/tscUtil.c | 22 +++++-- tests/pytest/insert/line_insert.py | 2 + 5 files changed, 79 insertions(+), 59 deletions(-) diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index 9a627d5cd6..904f5d4503 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -376,6 +376,8 @@ void tscResetSqlCmd(SSqlCmd *pCmd, bool removeMeta); */ void tscFreeSqlResult(SSqlObj *pSql); +void* tscCleanupTableMetaMap(SHashObj* pTableMetaMap); + /** * free sql object, release allocated resource * @param pObj diff --git a/src/client/src/tscAsync.c b/src/client/src/tscAsync.c index 910a80d6af..c8c9fe85e3 100644 --- a/src/client/src/tscAsync.c +++ b/src/client/src/tscAsync.c @@ -336,7 +336,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { pRes->code = code; SSqlObj *sub = (SSqlObj*) res; - const char* msg = (sub->cmd.command == TSDB_SQL_STABLEVGROUP)? "vgroup-list":"[multi-]tableMeta"; + const char* msg = (sub->cmd.command == TSDB_SQL_STABLEVGROUP)? "vgroup-list":"multi-tableMeta"; if (code != TSDB_CODE_SUCCESS) { tscError("0x%"PRIx64" get %s failed, code:%s", pSql->self, msg, tstrerror(code)); goto _error; @@ -346,62 +346,56 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { if (pSql->pStream == NULL) { SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd); -// assert(TSDB_QUERY_HAS_TYPE(pQueryInfo->type, (TSDB_QUERY_TYPE_STABLE_SUBQUERY | TSDB_QUERY_TYPE_SUBQUERY | TSDB_QUERY_TYPE_TAG_FILTER_QUERY)) == 0); + if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_INSERT)) { + tscDebug("0x%" PRIx64 " continue parse sql after get table-meta", pSql->self); - // super table subquery failure will be ignored -// if (!TSDB_QUERY_HAS_TYPE(pQueryInfo->type, (TSDB_QUERY_TYPE_STABLE_SUBQUERY | TSDB_QUERY_TYPE_SUBQUERY | -// TSDB_QUERY_TYPE_TAG_FILTER_QUERY))) { - if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_INSERT)) { - tscDebug("0x%" PRIx64 " continue parse sql after get table-meta", pSql->self); - - code = tsParseSql(pSql, false); - if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { - taosReleaseRef(tscObjRef, pSql->self); - return; - } else if (code != TSDB_CODE_SUCCESS) { - goto _error; - } - - if (TSDB_QUERY_HAS_TYPE(pCmd->insertParam.insertType, TSDB_QUERY_TYPE_STMT_INSERT)) { // stmt insert - STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); - code = tscGetTableMeta(pSql, pTableMetaInfo); - if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { - taosReleaseRef(tscObjRef, pSql->self); - return; - } else { - assert(code == TSDB_CODE_SUCCESS); - } - - (*pSql->fp)(pSql->param, pSql, code); - } else if (TSDB_QUERY_HAS_TYPE(pCmd->insertParam.insertType, TSDB_QUERY_TYPE_FILE_INSERT)) { // file insert - tscImportDataFromFile(pSql); - } else { // sql string insert - tscHandleMultivnodeInsert(pSql); - } - } else { - if (pSql->retryReason != TSDB_CODE_SUCCESS) { - tscDebug("0x%" PRIx64 " update cached table-meta, re-validate sql statement and send query again", pSql->self); - tscResetSqlCmd(pCmd, false); - pSql->retryReason = TSDB_CODE_SUCCESS; - } else { - tscDebug("0x%" PRIx64 " cached table-meta, continue validate sql statement and send query", pSql->self); - } - - code = tsParseSql(pSql, true); - if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { - taosReleaseRef(tscObjRef, pSql->self); - 
return; - } else if (code != TSDB_CODE_SUCCESS) { - goto _error; - } - - SQueryInfo *pQueryInfo1 = tscGetQueryInfo(pCmd); - executeQuery(pSql, pQueryInfo1); + code = tsParseSql(pSql, false); + if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { + taosReleaseRef(tscObjRef, pSql->self); + return; + } else if (code != TSDB_CODE_SUCCESS) { + goto _error; } - taosReleaseRef(tscObjRef, pSql->self); - return; -// } + if (TSDB_QUERY_HAS_TYPE(pCmd->insertParam.insertType, TSDB_QUERY_TYPE_STMT_INSERT)) { // stmt insert + STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); + code = tscGetTableMeta(pSql, pTableMetaInfo); + if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { + taosReleaseRef(tscObjRef, pSql->self); + return; + } else { + assert(code == TSDB_CODE_SUCCESS); + } + + (*pSql->fp)(pSql->param, pSql, code); + } else if (TSDB_QUERY_HAS_TYPE(pCmd->insertParam.insertType, TSDB_QUERY_TYPE_FILE_INSERT)) { // file insert + tscImportDataFromFile(pSql); + } else { // sql string insert + tscHandleMultivnodeInsert(pSql); + } + } else { + if (pSql->retryReason != TSDB_CODE_SUCCESS) { + tscDebug("0x%" PRIx64 " update cached table-meta, re-validate sql statement and send query again", pSql->self); + tscResetSqlCmd(pCmd, false); + pSql->retryReason = TSDB_CODE_SUCCESS; + } else { + tscDebug("0x%" PRIx64 " cached table-meta, continue validate sql statement and send query", pSql->self); + } + + code = tsParseSql(pSql, true); + if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { + taosReleaseRef(tscObjRef, pSql->self); + return; + } else if (code != TSDB_CODE_SUCCESS) { + goto _error; + } + + SQueryInfo *pQueryInfo1 = tscGetQueryInfo(pCmd); + executeQuery(pSql, pQueryInfo1); + } + + taosReleaseRef(tscObjRef, pSql->self); + return; } else { // stream computing tscDebug("0x%"PRIx64" stream:%p meta is updated, start new query, command:%d", pSql->self, pSql->pStream, pCmd->command); diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index dd17e25f58..d631dfad4d 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -2224,6 +2224,9 @@ int tscProcessMultiTableMetaRsp(SSqlObj *pSql) { if (pMultiMeta->metaClone == 1 || pTableMeta->tableType == TSDB_SUPER_TABLE) { STableMetaVgroupInfo p = {.pTableMeta = pTableMeta,}; size_t keyLen = strnlen(pMetaMsg->tableFname, TSDB_TABLE_FNAME_LEN); + void* t = taosHashGet(pParentCmd->pTableMetaMap, pMetaMsg->tableFname, keyLen); + assert(t == NULL); + taosHashPut(pParentCmd->pTableMetaMap, pMetaMsg->tableFname, keyLen, &p, sizeof(STableMetaVgroupInfo)); } else { freeMeta = true; @@ -2915,7 +2918,9 @@ static void freeElem(void* p) { * @return status code */ int tscRenewTableMeta(SSqlObj *pSql, int32_t tableIndex) { - SQueryInfo *pQueryInfo = tscGetQueryInfo(&pSql->cmd); + SSqlCmd* pCmd = &pSql->cmd; + + SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd); STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, tableIndex); char name[TSDB_TABLE_FNAME_LEN] = {0}; @@ -2934,6 +2939,9 @@ int tscRenewTableMeta(SSqlObj *pSql, int32_t tableIndex) { // remove stored tableMeta info in hash table tscRemoveTableMetaBuf(pTableMetaInfo, pSql->self); + pCmd->pTableMetaMap = tscCleanupTableMetaMap(pCmd->pTableMetaMap); + pCmd->pTableMetaMap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); + SArray* pNameList = taosArrayInit(1, POINTER_BYTES); SArray* vgroupList = taosArrayInit(1, POINTER_BYTES); diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index ae5a7a69a8..299f60a50f 100644 --- 
a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -1398,6 +1398,22 @@ void tscResetSqlCmd(SSqlCmd* pCmd, bool clearCachedMeta) { } } +void* tscCleanupTableMetaMap(SHashObj* pTableMetaMap) { + if (pTableMetaMap == NULL) { + return NULL; + } + + STableMetaVgroupInfo* p = taosHashIterate(pTableMetaMap, NULL); + while (p) { + taosArrayDestroy(p->vgroupIdList); + tfree(p->pTableMeta); + p = taosHashIterate(pTableMetaMap, p); + } + + taosHashCleanup(pTableMetaMap); + return NULL; +} + void tscFreeSqlResult(SSqlObj* pSql) { SSqlRes* pRes = &pSql->res; @@ -3481,11 +3497,9 @@ SSqlObj* createSimpleSubObj(SSqlObj* pSql, __async_cb_func_t fp, void* param, in SSqlCmd* pCmd = &pNew->cmd; pCmd->command = cmd; + tsem_init(&pNew->rspSem, 0 ,0); + if (tscAddQueryInfo(pCmd) != TSDB_CODE_SUCCESS) { -#ifdef __APPLE__ - // to satisfy later tsem_destroy in taos_free_result - tsem_init(&pNew->rspSem, 0, 0); -#endif // __APPLE__ tscFreeSqlObj(pNew); return NULL; } diff --git a/tests/pytest/insert/line_insert.py b/tests/pytest/insert/line_insert.py index ff3a32b0f7..53eaa55aa5 100644 --- a/tests/pytest/insert/line_insert.py +++ b/tests/pytest/insert/line_insert.py @@ -77,6 +77,8 @@ class TDTestCase: "sth,t1=4i64,t2=5f64,t4=5f64,ID=\"childtable\" c1=3i64,c3=L\"passitagin_stf\",c2=false,c5=5f64,c6=7u64 1626006933641ms", "sth,t1=4i64,t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin_stf\",c2=false,c5=5f64,c6=7u64 1626006933654ms" ]) + tdSql.execute('reset query cache') + tdSql.query('select tbname, * from sth') tdSql.checkRows(2) From 8f0be342a8599ac2eb0fdc349d6f0d9d1078f142 Mon Sep 17 00:00:00 2001 From: happyguoxy Date: Wed, 28 Jul 2021 15:06:30 +0800 Subject: [PATCH 31/38] [TD-5074]:test operator cost --- tests/pytest/query/operator_cost.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/tests/pytest/query/operator_cost.py b/tests/pytest/query/operator_cost.py index b94d5fa3b3..774a1e5f42 100644 --- a/tests/pytest/query/operator_cost.py +++ b/tests/pytest/query/operator_cost.py @@ -195,9 +195,6 @@ class TDTestCase: sql = '''select distinct(t_tinyint) from stable_1;''' tdSql.query(sql) tdSql.checkRows(6) - sql = '''select distinct(t_binary) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(6) sql = '''select distinct(t_nchar) from stable_1;''' tdSql.query(sql) tdSql.checkRows(6) From 0b1c661fff52c9ba301edbab66009b50c226edca Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Wed, 28 Jul 2021 16:03:18 +0800 Subject: [PATCH 32/38] [TD-5592]: fix change KV row value coredump --- src/common/inc/tdataformat.h | 3 +- src/common/src/tdataformat.c | 83 +++++++++++++----------------------- 2 files changed, 32 insertions(+), 54 deletions(-) diff --git a/src/common/inc/tdataformat.h b/src/common/inc/tdataformat.h index 829e771c70..f8394dc271 100644 --- a/src/common/inc/tdataformat.h +++ b/src/common/inc/tdataformat.h @@ -55,7 +55,7 @@ extern "C" { typedef struct { int8_t type; // Column type int16_t colId; // column ID - uint16_t bytes; // column bytes + int16_t bytes; // column bytes (restore to int16_t in case of misuse) uint16_t offset; // point offset in SDataRow after the header part. 
} STColumn; @@ -366,6 +366,7 @@ typedef struct { #define kvRowColIdxAt(r, i) (kvRowColIdx(r) + (i)) #define kvRowFree(r) tfree(r) #define kvRowEnd(r) POINTER_SHIFT(r, kvRowLen(r)) +#define kvRowValLen(r) (kvRowLen(r) - TD_KV_ROW_HEAD_SIZE - sizeof(SColIdx) * kvRowNCols(r)) #define kvRowTKey(r) (*(TKEY *)(kvRowValues(r))) #define kvRowKey(r) tdGetKey(kvRowTKey(r)) #define kvRowDeleted(r) TKEY_IS_DELETED(kvRowTKey(r)) diff --git a/src/common/src/tdataformat.c b/src/common/src/tdataformat.c index 9f7432c90d..5a63588611 100644 --- a/src/common/src/tdataformat.c +++ b/src/common/src/tdataformat.c @@ -634,42 +634,28 @@ int tdSetKVRowDataOfCol(SKVRow *orow, int16_t colId, int8_t type, void *value) { SKVRow nrow = NULL; void * ptr = taosbsearch(&colId, kvRowColIdx(row), kvRowNCols(row), sizeof(SColIdx), comparTagId, TD_GE); - if (ptr == NULL || ((SColIdx *)ptr)->colId > colId) { // need to add a column value to the row + if (ptr == NULL || ((SColIdx *)ptr)->colId > colId) { // need to add a column value to the row int diff = IS_VAR_DATA_TYPE(type) ? varDataTLen(value) : TYPE_BYTES[type]; - nrow = malloc(kvRowLen(row) + sizeof(SColIdx) + diff); + int nRowLen = kvRowLen(row) + sizeof(SColIdx) + diff; + int oRowCols = kvRowNCols(row); + + ASSERT(diff > 0); + nrow = malloc(nRowLen); if (nrow == NULL) return -1; - kvRowSetLen(nrow, kvRowLen(row) + (uint16_t)sizeof(SColIdx) + diff); - kvRowSetNCols(nrow, kvRowNCols(row) + 1); + kvRowSetLen(nrow, nRowLen); + kvRowSetNCols(nrow, oRowCols + 1); - if (ptr == NULL) { - memcpy(kvRowColIdx(nrow), kvRowColIdx(row), sizeof(SColIdx) * kvRowNCols(row)); - memcpy(kvRowValues(nrow), kvRowValues(row), POINTER_DISTANCE(kvRowEnd(row), kvRowValues(row))); - int colIdx = kvRowNCols(nrow) - 1; - kvRowColIdxAt(nrow, colIdx)->colId = colId; - kvRowColIdxAt(nrow, colIdx)->offset = (int16_t)(POINTER_DISTANCE(kvRowEnd(row), kvRowValues(row))); - memcpy(kvRowColVal(nrow, kvRowColIdxAt(nrow, colIdx)), value, diff); - } else { - int16_t tlen = (int16_t)(POINTER_DISTANCE(ptr, kvRowColIdx(row))); - if (tlen > 0) { - memcpy(kvRowColIdx(nrow), kvRowColIdx(row), tlen); - memcpy(kvRowValues(nrow), kvRowValues(row), ((SColIdx *)ptr)->offset); - } + memcpy(kvRowColIdx(nrow), kvRowColIdx(row), sizeof(SColIdx) * oRowCols); + memcpy(kvRowValues(nrow), kvRowValues(row), kvRowValLen(row)); - int colIdx = tlen / sizeof(SColIdx); - kvRowColIdxAt(nrow, colIdx)->colId = colId; - kvRowColIdxAt(nrow, colIdx)->offset = ((SColIdx *)ptr)->offset; - memcpy(kvRowColVal(nrow, kvRowColIdxAt(nrow, colIdx)), value, diff); + pColIdx = kvRowColIdxAt(nrow, oRowCols); + pColIdx->colId = colId; + pColIdx->offset = kvRowValLen(row); - for (int i = colIdx; i < kvRowNCols(row); i++) { - kvRowColIdxAt(nrow, i + 1)->colId = kvRowColIdxAt(row, i)->colId; - kvRowColIdxAt(nrow, i + 1)->offset = kvRowColIdxAt(row, i)->offset + diff; - } - memcpy(kvRowColVal(nrow, kvRowColIdxAt(nrow, colIdx + 1)), kvRowColVal(row, kvRowColIdxAt(row, colIdx)), - POINTER_DISTANCE(kvRowEnd(row), kvRowColVal(row, kvRowColIdxAt(row, colIdx))) + memcpy(kvRowColVal(nrow, pColIdx), value, diff); // copy new value - ); - } + tdSortKVRowByColIdx(nrow); *orow = nrow; free(row); @@ -680,9 +666,8 @@ int tdSetKVRowDataOfCol(SKVRow *orow, int16_t colId, int8_t type, void *value) { if (varDataTLen(value) == varDataTLen(pOldVal)) { // just update the column value in place memcpy(pOldVal, value, varDataTLen(value)); - } else { // need to reallocate the memory - uint16_t diff = varDataTLen(value) - varDataTLen(pOldVal); - uint16_t nlen = kvRowLen(row) + 
diff; + } else { // need to reallocate the memory + int16_t nlen = kvRowLen(row) + (varDataTLen(value) - varDataTLen(pOldVal)); ASSERT(nlen > 0); nrow = malloc(nlen); if (nrow == NULL) return -1; @@ -690,30 +675,22 @@ int tdSetKVRowDataOfCol(SKVRow *orow, int16_t colId, int8_t type, void *value) { kvRowSetLen(nrow, nlen); kvRowSetNCols(nrow, kvRowNCols(row)); - // Copy part ahead - nlen = (int16_t)(POINTER_DISTANCE(ptr, kvRowColIdx(row))); - ASSERT(nlen % sizeof(SColIdx) == 0); - if (nlen > 0) { - ASSERT(((SColIdx *)ptr)->offset > 0); - memcpy(kvRowColIdx(nrow), kvRowColIdx(row), nlen); - memcpy(kvRowValues(nrow), kvRowValues(row), ((SColIdx *)ptr)->offset); + int zsize = sizeof(SColIdx) * kvRowNCols(row) + ((SColIdx *)ptr)->offset; + memcpy(kvRowColIdx(nrow), kvRowColIdx(row), zsize); + memcpy(kvRowColVal(nrow, ((SColIdx *)ptr)), value, varDataTLen(value)); + // Copy left value part + int lsize = kvRowLen(row) - TD_KV_ROW_HEAD_SIZE - zsize - varDataTLen(pOldVal); + if (lsize > 0) { + memcpy(POINTER_SHIFT(nrow, TD_KV_ROW_HEAD_SIZE + zsize + varDataTLen(value)), + POINTER_SHIFT(row, TD_KV_ROW_HEAD_SIZE + zsize + varDataTLen(pOldVal)), lsize); } - // Construct current column value - int colIdx = nlen / sizeof(SColIdx); - pColIdx = kvRowColIdxAt(nrow, colIdx); - pColIdx->colId = ((SColIdx *)ptr)->colId; - pColIdx->offset = ((SColIdx *)ptr)->offset; - memcpy(kvRowColVal(nrow, pColIdx), value, varDataTLen(value)); - - // Construct columns after - if (kvRowNCols(nrow) - colIdx - 1 > 0) { - for (int i = colIdx + 1; i < kvRowNCols(nrow); i++) { - kvRowColIdxAt(nrow, i)->colId = kvRowColIdxAt(row, i)->colId; - kvRowColIdxAt(nrow, i)->offset = kvRowColIdxAt(row, i)->offset + diff; + for (int i = 0; i < kvRowNCols(nrow); i++) { + pColIdx = kvRowColIdxAt(nrow, i); + + if (pColIdx->offset > ((SColIdx *)ptr)->offset) { + pColIdx->offset = pColIdx->offset - varDataTLen(pOldVal) + varDataTLen(value); } - memcpy(kvRowColVal(nrow, kvRowColIdxAt(nrow, colIdx + 1)), kvRowColVal(row, kvRowColIdxAt(row, colIdx + 1)), - POINTER_DISTANCE(kvRowEnd(row), kvRowColVal(row, kvRowColIdxAt(row, colIdx + 1)))); } *orow = nrow; From aac0aeddfdf81725e9deb21b79301c3816cbf4bb Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Wed, 28 Jul 2021 16:10:06 +0800 Subject: [PATCH 33/38] add SANITIZER parameter --- tests/Jenkinsfile | 29 ++++++++++++++++++++++++++++- tests/mas/Jenkinsfile | 29 ++++++++++++++++++++++++++++- 2 files changed, 56 insertions(+), 2 deletions(-) diff --git a/tests/Jenkinsfile b/tests/Jenkinsfile index fb96b669ff..ed54515ba8 100644 --- a/tests/Jenkinsfile +++ b/tests/Jenkinsfile @@ -1,5 +1,32 @@ def pre_test(){ + sh ''' + sudo rmtaos||echo 'no taosd installed' + ''' + sh ''' + cd ${WKC} + git reset --hard + git checkout $BRANCH_NAME + git pull + git submodule update + cd ${WK} + git reset --hard + git checkout $BRANCH_NAME + git pull + export TZ=Asia/Harbin + date + rm -rf ${WK}/debug + mkdir debug + cd debug + cmake -DMEMORY_SANITIZER=true .. 
> /dev/null + make > /dev/null + make install > /dev/null + pip3 install ${WKC}/src/connector/python + ''' + return 1 +} +def pre_test_p(){ + sh ''' sudo rmtaos||echo 'no taosd installed' ''' @@ -39,7 +66,7 @@ pipeline { stage('pytest') { agent{label 'slad1'} steps { - pre_test() + pre_test_p() sh ''' cd ${WKC}/tests find pytest -name '*'sql|xargs rm -rf diff --git a/tests/mas/Jenkinsfile b/tests/mas/Jenkinsfile index ae2286298f..4dcc1d9c18 100644 --- a/tests/mas/Jenkinsfile +++ b/tests/mas/Jenkinsfile @@ -1,5 +1,32 @@ def pre_test(){ + sh ''' + sudo rmtaos||echo 'no taosd installed' + ''' + sh ''' + cd ${WKC} + git reset --hard + git checkout $BRANCH_NAME + git pull + git submodule update + cd ${WK} + git reset --hard + git checkout $BRANCH_NAME + git pull + export TZ=Asia/Harbin + date + rm -rf ${WK}/debug + mkdir debug + cd debug + cmake -DMEMORY_SANITIZER=true .. > /dev/null + make > /dev/null + make install > /dev/null + pip3 install ${WKC}/src/connector/python/ || echo 0 + ''' + return 1 +} +def pre_test_p(){ + sh ''' sudo rmtaos||echo 'no taosd installed' ''' @@ -39,7 +66,7 @@ pipeline { stage('pytest') { agent{label 'slam1'} steps { - pre_test() + pre_test_p() sh ''' cd ${WKC}/tests find pytest -name '*'sql|xargs rm -rf From 2892d2624c953b0a2c2fa6865016a791c611ce49 Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Wed, 28 Jul 2021 17:38:38 +0800 Subject: [PATCH 34/38] [TD-2639] : fix typo. --- documentation20/cn/03.architecture/02.replica/docs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation20/cn/03.architecture/02.replica/docs.md b/documentation20/cn/03.architecture/02.replica/docs.md index 183306eed8..27ac7f123c 100644 --- a/documentation20/cn/03.architecture/02.replica/docs.md +++ b/documentation20/cn/03.architecture/02.replica/docs.md @@ -111,7 +111,7 @@ TDengine采取的是Master-Slave模式进行同步,与流行的RAFT一致性 2. 应用将打上版本号的写请求封装一个WAL Head, 写入WAL(Write Ahead Log) 3. 应用调用API syncForwardToPeer,如果vnode B是slave状态,sync模块将包含WAL Head的数据包通过Forward消息发送给vnode B,否则就不转发。 4. vnode B收到Forward消息后,调用回调函数writeToCache, 交给应用处理 -5. vnode B应用在写入成功后,都需要调用syncAckForward通知sync模块已经写入成功。 +5. vnode B应用在写入成功后,都需要调用syncConfirmForward通知sync模块已经写入成功。 6. 如果quorum大于1,vnode B需要等待应用的回复确认,收到确认后,vnode B发送Forward Response消息给node A。 7. 如果quorum大于1,vnode A需要等待vnode B或其他副本对Forward消息的确认。 8. 如果quorum大于1,vnode A收到quorum-1条确认消息后,调用回调函数confirmForward,通知应用写入成功。 From 2e7e28781848510446e33f5e122bcaeb28c90f89 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Wed, 28 Jul 2021 17:53:59 +0800 Subject: [PATCH 35/38] [TD-5591]: taosdemo coredump when query 4096 columns. 
(#7054) --- src/kit/taosdemo/taosdemo.c | 31 +++++++++++++++---------------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index c4dec6a231..32bad230cf 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -92,7 +92,6 @@ extern char configDir[]; #define MAX_SUPER_TABLE_COUNT 200 #define MAX_QUERY_SQL_COUNT 100 -#define MAX_QUERY_SQL_LENGTH BUFFER_SIZE #define MAX_DATABASE_COUNT 256 #define INPUT_BUF_LEN 256 @@ -383,7 +382,7 @@ typedef struct SpecifiedQueryInfo_S { uint64_t queryTimes; bool subscribeRestart; int subscribeKeepProgress; - char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1]; + char sql[MAX_QUERY_SQL_COUNT][BUFFER_SIZE+1]; char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN]; int resubAfterConsume[MAX_QUERY_SQL_COUNT]; int endAfterConsume[MAX_QUERY_SQL_COUNT]; @@ -406,7 +405,7 @@ typedef struct SuperQueryInfo_S { int64_t childTblCount; char childTblPrefix[TBNAME_PREFIX_LEN]; // 20 characters reserved for seq int sqlCount; - char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1]; + char sql[MAX_QUERY_SQL_COUNT][BUFFER_SIZE+1]; char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN]; int resubAfterConsume; int endAfterConsume; @@ -1253,14 +1252,14 @@ static void fetchResult(TAOS_RES *res, threadInfo* pThreadInfo) { // fetch the records row by row while((row = taos_fetch_row(res))) { - if (totalLen >= 100*1024*1024 - 32000) { + if (totalLen >= (100*1024*1024 - HEAD_BUFF_LEN*2)) { if (strlen(pThreadInfo->filePath) > 0) appendResultBufToFile(databuf, pThreadInfo); totalLen = 0; memset(databuf, 0, 100*1024*1024); } num_rows++; - char temp[16000] = {0}; + char temp[HEAD_BUFF_LEN] = {0}; int len = taos_print_row(temp, row, fields, num_fields); len += sprintf(temp + len, "\n"); //printf("query result:%s\n", temp); @@ -2165,15 +2164,15 @@ static void printfDbInfoForQueryToFile( } static void printfQuerySystemInfo(TAOS * taos) { - char filename[MAX_QUERY_SQL_LENGTH+1] = {0}; - char buffer[MAX_QUERY_SQL_LENGTH+1] = {0}; + char filename[BUFFER_SIZE+1] = {0}; + char buffer[BUFFER_SIZE+1] = {0}; TAOS_RES* res; time_t t; struct tm* lt; time(&t); lt = localtime(&t); - snprintf(filename, MAX_QUERY_SQL_LENGTH, "querySystemInfo-%d-%d-%d %d:%d:%d", + snprintf(filename, BUFFER_SIZE, "querySystemInfo-%d-%d-%d %d:%d:%d", lt->tm_year+1900, lt->tm_mon, lt->tm_mday, lt->tm_hour, lt->tm_min, lt->tm_sec); @@ -2205,12 +2204,12 @@ static void printfQuerySystemInfo(TAOS * taos) { printfDbInfoForQueryToFile(filename, dbInfos[i], i); // show db.vgroups - snprintf(buffer, MAX_QUERY_SQL_LENGTH, "show %s.vgroups;", dbInfos[i]->name); + snprintf(buffer, BUFFER_SIZE, "show %s.vgroups;", dbInfos[i]->name); res = taos_query(taos, buffer); xDumpResultToFile(filename, res); // show db.stables - snprintf(buffer, MAX_QUERY_SQL_LENGTH, "show %s.stables;", dbInfos[i]->name); + snprintf(buffer, BUFFER_SIZE, "show %s.stables;", dbInfos[i]->name); res = taos_query(taos, buffer); xDumpResultToFile(filename, res); free(dbInfos[i]); @@ -4549,7 +4548,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { goto PARSE_OVER; } tstrncpy(g_queryInfo.specifiedQueryInfo.sql[j], - sqlStr->valuestring, MAX_QUERY_SQL_LENGTH); + sqlStr->valuestring, BUFFER_SIZE); // default value is -1, which mean infinite loop g_queryInfo.specifiedQueryInfo.endAfterConsume[j] = -1; @@ -4771,7 +4770,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { goto PARSE_OVER; } tstrncpy(g_queryInfo.superQueryInfo.sql[j], sqlStr->valuestring, - MAX_QUERY_SQL_LENGTH); + 
BUFFER_SIZE); cJSON *result = cJSON_GetObjectItem(sql, "result"); if (result != NULL && result->type == cJSON_String @@ -7425,14 +7424,14 @@ static void replaceChildTblName(char* inSql, char* outSql, int tblIndex) { tstrncpy(outSql, inSql, pos - inSql + 1); //printf("1: %s\n", outSql); - strncat(outSql, subTblName, MAX_QUERY_SQL_LENGTH - 1); + strncat(outSql, subTblName, BUFFER_SIZE - 1); //printf("2: %s\n", outSql); - strncat(outSql, pos+strlen(sourceString), MAX_QUERY_SQL_LENGTH - 1); + strncat(outSql, pos+strlen(sourceString), BUFFER_SIZE - 1); //printf("3: %s\n", outSql); } static void *superTableQuery(void *sarg) { - char sqlstr[MAX_QUERY_SQL_LENGTH]; + char sqlstr[BUFFER_SIZE]; threadInfo *pThreadInfo = (threadInfo *)sarg; setThreadName("superTableQuery"); @@ -7735,7 +7734,7 @@ static TAOS_SUB* subscribeImpl( static void *superSubscribe(void *sarg) { threadInfo *pThreadInfo = (threadInfo *)sarg; - char subSqlstr[MAX_QUERY_SQL_LENGTH]; + char subSqlstr[BUFFER_SIZE]; TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT] = {0}; uint64_t tsubSeq; From a9c51e251a3f3c78a67168e63065a6e12283cb7b Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Wed, 28 Jul 2021 18:28:35 +0800 Subject: [PATCH 36/38] remove systemctl --- tests/Jenkinsfile | 4 ++-- tests/mas/Jenkinsfile | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/Jenkinsfile b/tests/Jenkinsfile index ed54515ba8..b5b068381d 100644 --- a/tests/Jenkinsfile +++ b/tests/Jenkinsfile @@ -119,7 +119,7 @@ pipeline { } sh''' - systemctl start taosd + nohup taosd >/dev/null & sleep 10 ''' catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { @@ -155,7 +155,7 @@ pipeline { ''' } sh ''' - systemctl stop taosd + pkill -9 taosd cd ${WKC}/tests ./test-all.sh b2 date diff --git a/tests/mas/Jenkinsfile b/tests/mas/Jenkinsfile index 4dcc1d9c18..52b29da92e 100644 --- a/tests/mas/Jenkinsfile +++ b/tests/mas/Jenkinsfile @@ -119,7 +119,7 @@ pipeline { } sh''' - systemctl start taosd + nohup taosd >/dev/null & sleep 10 ''' catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { @@ -163,7 +163,7 @@ pipeline { ''' } sh ''' - systemctl stop taosd + pkill -9 taosd cd ${WKC}/tests ./test-all.sh b2 date From 175fd1c93d7b4073a0ffe93e4ea236f1cdb32235 Mon Sep 17 00:00:00 2001 From: happyguoxy Date: Wed, 28 Jul 2021 18:32:34 +0800 Subject: [PATCH 37/38] [TD-5074]:test operator cost --- tests/pytest/query/operator_cost.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/pytest/query/operator_cost.py b/tests/pytest/query/operator_cost.py index 774a1e5f42..27de3531eb 100644 --- a/tests/pytest/query/operator_cost.py +++ b/tests/pytest/query/operator_cost.py @@ -207,9 +207,9 @@ class TDTestCase: sql = '''select distinct(t_ts) from stable_1;''' tdSql.query(sql) tdSql.checkRows(3) - sql = '''select distinct(tbname) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(6) + # sql = '''select distinct(tbname) from stable_1;''' + # tdSql.query(sql) + # tdSql.checkRows(6) tdLog.info("========== operator=2(OP_DataBlocksOptScan) ==========") sql = '''select last(q_int),first(q_int) from stable_1;''' From 21bdbb9e507a496cd8e7b68b8f2a67ed35fd6be6 Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Thu, 29 Jul 2021 10:32:20 +0800 Subject: [PATCH 38/38] [TD-3666] : fix description about binary storage length. 
--- documentation20/cn/12.taos-sql/docs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md index 72f4876dcf..6d39c25565 100644 --- a/documentation20/cn/12.taos-sql/docs.md +++ b/documentation20/cn/12.taos-sql/docs.md @@ -48,7 +48,7 @@ TDengine 缺省的时间戳是毫秒精度,但通过在 CREATE DATABASE 时传 | 3 | BIGINT | 8 | 长整型,范围 [-2^63+1, 2^63-1], -2^63 用于 NULL | | 4 | FLOAT | 4 | 浮点型,有效位数 6-7,范围 [-3.4E38, 3.4E38] | | 5 | DOUBLE | 8 | 双精度浮点型,有效位数 15-16,范围 [-1.7E308, 1.7E308] | -| 6 | BINARY | 自定义 | 记录单字节字符串,建议只用于处理 ASCII 可见字符,中文等多字节字符需使用 nchar。理论上,最长可以有 16374 字节,但由于每行数据最多 16K 字节,实际上限一般小于理论值。binary 仅支持字符串输入,字符串两端需使用单引号引用。使用时须指定大小,如 binary(20) 定义了最长为 20 个单字节字符的字符串,每个字符占 1 byte 的存储空间,此时如果用户字符串超出 20 字节将会报错。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示,即 `\’`。 | +| 6 | BINARY | 自定义 | 记录单字节字符串,建议只用于处理 ASCII 可见字符,中文等多字节字符需使用 nchar。理论上,最长可以有 16374 字节,但由于每行数据最多 16K 字节,实际上限一般小于理论值。binary 仅支持字符串输入,字符串两端需使用单引号引用。使用时须指定大小,如 binary(20) 定义了最长为 20 个单字节字符的字符串,每个字符占 1 byte 的存储空间,总共固定占用 20 bytes 的空间,此时如果用户字符串超出 20 字节将会报错。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示,即 `\’`。 | | 7 | SMALLINT | 2 | 短整型, 范围 [-32767, 32767], -32768 用于 NULL | | 8 | TINYINT | 1 | 单字节整型,范围 [-127, 127], -128 用于 NULL | | 9 | BOOL | 1 | 布尔型,{true, false} |
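
As a rough illustration of the binary storage rule described in the hunk above, the following taos SQL sketch assumes an existing database is already in use; the table name demo_binary and column note are hypothetical and appear nowhere in this patch series. A binary(20) column always occupies a fixed 20 bytes per row, holds at most 20 single-byte characters, rejects longer values with an error, and an embedded single quote is escaped as \'.

  create table demo_binary (ts timestamp, note binary(20));
  -- 15 single-byte characters (embedded quote escaped as \'), fits within the fixed 20-byte slot
  insert into demo_binary values (now, 'it\'s an example');
  -- 39 single-byte characters exceed binary(20), so this insert is expected to be rejected
  insert into demo_binary values (now, 'this string is longer than twenty bytes');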