From 9c4c8493ad66330c64fce694ec12ac352e8303e5 Mon Sep 17 00:00:00 2001 From: SunShine Chan Date: Sat, 17 Jul 2021 12:11:32 +0800 Subject: [PATCH 01/46] Update docs.md MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit REMOVE "vnode之间的同步复制仅仅企业版支持" --- documentation20/cn/03.architecture/docs.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/documentation20/cn/03.architecture/docs.md b/documentation20/cn/03.architecture/docs.md index d22855198a..b481bea9f8 100644 --- a/documentation20/cn/03.architecture/docs.md +++ b/documentation20/cn/03.architecture/docs.md @@ -323,8 +323,6 @@ Vnode会保持一个数据版本号(Version),对内存数据进行持久化存 采用同步复制,系统的性能会有所下降,而且latency会增加。因为元数据要强一致,mnode之间的数据同步缺省就是采用的同步复制。 -注:vnode之间的同步复制仅仅企业版支持 - ## 缓存与持久化 ### 缓存 From 5cdbcdafc98aab69b595ee8c100f42fad0428479 Mon Sep 17 00:00:00 2001 From: SunShine Chan Date: Sat, 17 Jul 2021 12:13:08 +0800 Subject: [PATCH 02/46] Update docs.md REMOVE "Note: synchronous replication between vnodes is only supported in Enterprise Edition" --- documentation20/en/03.architecture/docs.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/documentation20/en/03.architecture/docs.md b/documentation20/en/03.architecture/docs.md index 2e5fc7bd18..92813da2c4 100644 --- a/documentation20/en/03.architecture/docs.md +++ b/documentation20/en/03.architecture/docs.md @@ -322,8 +322,6 @@ For scenarios with higher data consistency requirements, asynchronous data repli With synchronous replication, performance of system will decrease and latency will increase. Because metadata needs strong consistent, the default for data synchronization between mnodes is synchronous replication. -Note: synchronous replication between vnodes is only supported in Enterprise Edition - ## Caching and Persistence ### Caching From 1bdd6838724d0fa00f4948bd3c702c59b97fe44a Mon Sep 17 00:00:00 2001 From: wenzhouwww Date: Thu, 22 Jul 2021 19:59:36 +0800 Subject: [PATCH 03/46] [TD-5369] taosdemo test case for nano support , the sql files is must be here! 
--- tests/pytest/fulltest.sh | 8 + .../tools/taosdemoAllTest/nano_samples.csv | 100 +++++++++++ .../tools/taosdemoAllTest/nano_sampletags.csv | 100 +++++++++++ .../taosdemoTestNanoCreateDB.sql | 7 + .../taosdemoTestNanoDatabase.json | 88 ++++++++++ .../taosdemoTestNanoDatabaseInsertForSub.json | 84 +++++++++ .../taosdemoTestNanoDatabaseNow.json | 62 +++++++ .../taosdemoTestNanoDatabasecsv.json | 84 +++++++++ .../taosdemoTestSupportNanoInsert.py | 162 ++++++++++++++++++ .../taosdemoTestSupportNanoQuery.json | 92 ++++++++++ .../taosdemoTestSupportNanoQuery.py | 157 +++++++++++++++++ .../taosdemoTestSupportNanoQuerycsv.json | 110 ++++++++++++ .../taosdemoTestSupportNanoSubscribe.json | 32 ++++ .../taosdemoTestSupportNanosubscribe.py | 123 +++++++++++++ 14 files changed, 1209 insertions(+) create mode 100644 tests/pytest/tools/taosdemoAllTest/nano_samples.csv create mode 100644 tests/pytest/tools/taosdemoAllTest/nano_sampletags.csv create mode 100644 tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoCreateDB.sql create mode 100644 tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabase.json create mode 100644 tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseInsertForSub.json create mode 100644 tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseNow.json create mode 100644 tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabasecsv.json create mode 100644 tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py create mode 100644 tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.json create mode 100644 tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.py create mode 100644 tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuerycsv.json create mode 100644 tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoSubscribe.json create mode 100644 tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index 80d45fbc31..99a15e2e71 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -361,4 +361,12 @@ python3 test.py -f alter/alter_keep.py python3 test.py -f alter/alter_cacheLastRow.py python3 ./test.py -f query/querySession.py python3 test.py -f alter/alter_create_exception.py + +# nano support +python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py +python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.py +python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py + + #======================p4-end=============== +python3 test.py -f tools/taosdemoAllTest/pytest.py \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/nano_samples.csv b/tests/pytest/tools/taosdemoAllTest/nano_samples.csv new file mode 100644 index 0000000000..5fc779b41b --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/nano_samples.csv @@ -0,0 +1,100 @@ +8.855,"binary_str0" ,1626870128248246976 +8.75,"binary_str1" ,1626870128249060032 +5.44,"binary_str2" ,1626870128249067968 +8.45,"binary_str3" ,1626870128249072064 +4.07,"binary_str4" ,1626870128249075904 +6.97,"binary_str5" ,1626870128249078976 +6.86,"binary_str6" ,1626870128249082048 +1.585,"binary_str7" ,1626870128249085120 +1.4,"binary_str8" ,1626870128249087936 +5.135,"binary_str9" ,1626870128249092032 +3.15,"binary_str10" ,1626870128249095104 +1.765,"binary_str11" ,1626870128249097920 +7.71,"binary_str12" ,1626870128249100992 +3.91,"binary_str13" ,1626870128249104064 +5.615,"binary_str14" ,1626870128249106880 
+9.495,"binary_str15" ,1626870128249109952 +3.825,"binary_str16" ,1626870128249113024 +1.94,"binary_str17" ,1626870128249117120 +5.385,"binary_str18" ,1626870128249119936 +7.075,"binary_str19" ,1626870128249123008 +5.715,"binary_str20" ,1626870128249126080 +1.83,"binary_str21" ,1626870128249128896 +6.365,"binary_str22" ,1626870128249131968 +6.55,"binary_str23" ,1626870128249135040 +6.315,"binary_str24" ,1626870128249138112 +3.82,"binary_str25" ,1626870128249140928 +2.455,"binary_str26" ,1626870128249145024 +7.795,"binary_str27" ,1626870128249148096 +2.47,"binary_str28" ,1626870128249150912 +1.37,"binary_str29" ,1626870128249155008 +5.39,"binary_str30" ,1626870128249158080 +5.13,"binary_str31" ,1626870128249160896 +4.09,"binary_str32" ,1626870128249163968 +5.855,"binary_str33" ,1626870128249167040 +0.17,"binary_str34" ,1626870128249170112 +1.955,"binary_str35" ,1626870128249173952 +0.585,"binary_str36" ,1626870128249178048 +0.33,"binary_str37" ,1626870128249181120 +7.925,"binary_str38" ,1626870128249183936 +9.685,"binary_str39" ,1626870128249187008 +2.6,"binary_str40" ,1626870128249191104 +5.705,"binary_str41" ,1626870128249193920 +3.965,"binary_str42" ,1626870128249196992 +4.43,"binary_str43" ,1626870128249200064 +8.73,"binary_str44" ,1626870128249202880 +3.105,"binary_str45" ,1626870128249205952 +9.39,"binary_str46" ,1626870128249209024 +2.825,"binary_str47" ,1626870128249212096 +9.675,"binary_str48" ,1626870128249214912 +9.99,"binary_str49" ,1626870128249217984 +4.51,"binary_str50" ,1626870128249221056 +4.94,"binary_str51" ,1626870128249223872 +7.72,"binary_str52" ,1626870128249226944 +4.135,"binary_str53" ,1626870128249231040 +2.325,"binary_str54" ,1626870128249234112 +4.585,"binary_str55" ,1626870128249236928 +8.76,"binary_str56" ,1626870128249240000 +4.715,"binary_str57" ,1626870128249243072 +0.56,"binary_str58" ,1626870128249245888 +5.35,"binary_str59" ,1626870128249249984 +5.075,"binary_str60" ,1626870128249253056 +6.665,"binary_str61" ,1626870128249256128 +7.13,"binary_str62" ,1626870128249258944 +2.775,"binary_str63" ,1626870128249262016 +5.775,"binary_str64" ,1626870128249265088 +1.62,"binary_str65" ,1626870128249267904 +1.625,"binary_str66" ,1626870128249270976 +8.15,"binary_str67" ,1626870128249274048 +0.75,"binary_str68" ,1626870128249277120 +3.265,"binary_str69" ,1626870128249280960 +8.585,"binary_str70" ,1626870128249284032 +1.88,"binary_str71" ,1626870128249287104 +8.44,"binary_str72" ,1626870128249289920 +5.12,"binary_str73" ,1626870128249295040 +2.58,"binary_str74" ,1626870128249298112 +9.42,"binary_str75" ,1626870128249300928 +1.765,"binary_str76" ,1626870128249304000 +2.66,"binary_str77" ,1626870128249308096 +1.405,"binary_str78" ,1626870128249310912 +5.595,"binary_str79" ,1626870128249315008 +2.28,"binary_str80" ,1626870128249318080 +9.24,"binary_str81" ,1626870128249320896 +9.03,"binary_str82" ,1626870128249323968 +6.055,"binary_str83" ,1626870128249327040 +1.74,"binary_str84" ,1626870128249330112 +5.77,"binary_str85" ,1626870128249332928 +1.97,"binary_str86" ,1626870128249336000 +0.3,"binary_str87" ,1626870128249339072 +7.145,"binary_str88" ,1626870128249342912 +0.88,"binary_str89" ,1626870128249345984 +8.025,"binary_str90" ,1626870128249349056 +4.81,"binary_str91" ,1626870128249351872 +0.725,"binary_str92" ,1626870128249355968 +3.85,"binary_str93" ,1626870128249359040 +9.455,"binary_str94" ,1626870128249362112 +2.265,"binary_str95" ,1626870128249364928 +3.985,"binary_str96" ,1626870128249368000 +9.375,"binary_str97" ,1626870128249371072 +0.2,"binary_str98" 
,1626870128249373888 +6.95,"binary_str99" ,1626870128249377984 diff --git a/tests/pytest/tools/taosdemoAllTest/nano_sampletags.csv b/tests/pytest/tools/taosdemoAllTest/nano_sampletags.csv new file mode 100644 index 0000000000..18fb855d6d --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/nano_sampletags.csv @@ -0,0 +1,100 @@ +"string0",7,8.615 +"string1",4,9.895 +"string2",3,2.92 +"string3",3,5.62 +"string4",7,1.615 +"string5",6,1.45 +"string6",5,7.48 +"string7",7,3.01 +"string8",5,4.76 +"string9",10,7.09 +"string10",2,8.38 +"string11",7,8.65 +"string12",5,5.025 +"string13",10,5.765 +"string14",2,4.57 +"string15",2,1.03 +"string16",7,6.98 +"string17",10,0.23 +"string18",7,5.815 +"string19",1,2.37 +"string20",10,8.865 +"string21",3,1.235 +"string22",2,8.62 +"string23",9,1.045 +"string24",8,4.34 +"string25",1,5.455 +"string26",2,4.475 +"string27",1,6.95 +"string28",2,3.39 +"string29",3,6.79 +"string30",7,9.735 +"string31",1,9.79 +"string32",10,9.955 +"string33",1,5.095 +"string34",3,3.86 +"string35",9,5.105 +"string36",10,4.22 +"string37",1,2.78 +"string38",9,6.345 +"string39",1,0.975 +"string40",5,6.16 +"string41",4,7.735 +"string42",5,6.6 +"string43",8,2.845 +"string44",1,0.655 +"string45",3,2.995 +"string46",9,3.6 +"string47",8,3.47 +"string48",3,7.98 +"string49",6,2.225 +"string50",9,5.44 +"string51",4,6.335 +"string52",3,2.955 +"string53",1,0.565 +"string54",6,5.575 +"string55",6,9.905 +"string56",9,6.025 +"string57",8,0.94 +"string58",10,0.15 +"string59",8,1.555 +"string60",4,2.28 +"string61",2,8.29 +"string62",9,6.22 +"string63",6,3.35 +"string64",10,6.7 +"string65",3,9.345 +"string66",7,9.815 +"string67",1,5.365 +"string68",10,3.81 +"string69",1,6.405 +"string70",8,2.715 +"string71",3,8.58 +"string72",8,6.34 +"string73",2,7.49 +"string74",4,8.64 +"string75",3,8.995 +"string76",7,3.465 +"string77",1,7.64 +"string78",6,3.65 +"string79",6,1.4 +"string80",6,5.875 +"string81",2,1.22 +"string82",5,7.87 +"string83",9,8.41 +"string84",9,8.9 +"string85",9,3.89 +"string86",2,5.0 +"string87",2,4.495 +"string88",4,2.835 +"string89",3,5.895 +"string90",7,8.41 +"string91",5,5.125 +"string92",7,9.165 +"string93",5,8.315 +"string94",10,7.485 +"string95",7,4.635 +"string96",2,6.015 +"string97",8,0.595 +"string98",3,8.79 +"string99",4,1.72 diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoCreateDB.sql b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoCreateDB.sql new file mode 100644 index 0000000000..e79e09592c --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoCreateDB.sql @@ -0,0 +1,7 @@ +drop database if exists nsdbsql; +create database nsdbsql precision "ns" keep 36 days 6 update 1; +use nsdbsql; +CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupdId int); +CREATE TABLE d1001 USING meters TAGS ("Beijing.Chaoyang", 2); +INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 10.2, 219, 0.32); +INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 85, 32, 0.76); diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabase.json b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabase.json new file mode 100644 index 0000000000..9010415fe6 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabase.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 10, + "thread_count_create_tbl": 10, + "result_file": 
"./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "nsdb", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ns", + "keep": 36, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 1000, + "childtable_prefix": "tb0_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 10000, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10000000, + "start_timestamp": "2021-07-01 00:00:00.000", + "sample_format": "", + "sample_file": "", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2}, + {"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1}, + {"type": "BOOL"},{"type": "NCHAR","len":16}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 1000, + "childtable_prefix": "tb1_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 10000, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 10, + "disorder_range": 1000, + "timestamp_step": 10000000, + "start_timestamp": "2021-07-01 00:00:00.000", + "sample_format": "", + "sample_file": "", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2}, + {"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1}, + {"type": "BOOL"},{"type": "NCHAR","len":16}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseInsertForSub.json b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseInsertForSub.json new file mode 100644 index 0000000000..0726f3905d --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseInsertForSub.json @@ -0,0 +1,84 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 10, + "thread_count_create_tbl": 10, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "subnsdb", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ns", + "keep": 36, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": 
"stb0", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "tb0_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "samples", + "insert_mode": "taosc", + "insert_rows": 10, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10000000, + "start_timestamp": "2021-07-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./tools/taosdemoAllTest/nano_samples.csv", + "tags_file": "./tools/taosdemoAllTest/nano_sampletags.csv", + "columns": [{"type": "DOUBLE"}, {"type": "BINARY", "len": 64, "count":1}, {"type": "TIMESTAMP", "count":1}], + "tags": [{"type": "BINARY", "len": 16, "count":1},{"type": "INT"},{"type": "DOUBLE"}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "tb1_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "samples", + "insert_mode": "taosc", + "insert_rows": 10, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 10, + "disorder_range": 1000, + "timestamp_step": 10000000, + "start_timestamp": "2021-07-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./tools/taosdemoAllTest/nano_samples.csv", + "tags_file": "./tools/taosdemoAllTest/nano_sampletags.csv", + "columns": [{"type": "DOUBLE"}, {"type": "BINARY", "len": 64, "count":1}, {"type": "TIMESTAMP", "count":1}], + "tags": [{"type": "BINARY", "len": 16, "count":1},{"type": "INT"},{"type": "DOUBLE"}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseNow.json b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseNow.json new file mode 100644 index 0000000000..d2542a0eba --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseNow.json @@ -0,0 +1,62 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 10, + "thread_count_create_tbl": 10, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "nsdb2", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ns", + "keep": 36, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 1000, + "childtable_prefix": "tb0_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 100, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "now", + "sample_format": "", + "sample_file": "", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2}, + {"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1}, + {"type": "BOOL"},{"type": "NCHAR","len":16}], + "tags": [{"type": "TINYINT", "count":2}, 
{"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabasecsv.json b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabasecsv.json new file mode 100644 index 0000000000..867619ed8c --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabasecsv.json @@ -0,0 +1,84 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 10, + "thread_count_create_tbl": 10, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "nsdbcsv", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ns", + "keep": 36, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "tb0_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "samples", + "insert_mode": "taosc", + "insert_rows": 100, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10000000, + "start_timestamp": "2021-07-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./tools/taosdemoAllTest/nano_samples.csv", + "tags_file": "./tools/taosdemoAllTest/nano_sampletags.csv", + "columns": [{"type": "DOUBLE"}, {"type": "BINARY", "len": 64, "count":1}, {"type": "TIMESTAMP", "count":1}], + "tags": [{"type": "BINARY", "len": 16, "count":1},{"type": "INT"},{"type": "DOUBLE"}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "tb1_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "samples", + "insert_mode": "taosc", + "insert_rows": 100, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 10, + "disorder_range": 1000, + "timestamp_step": 10000000, + "start_timestamp": "2021-07-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./tools/taosdemoAllTest/nano_samples.csv", + "tags_file": "./tools/taosdemoAllTest/nano_sampletags.csv", + "columns": [{"type": "DOUBLE"}, {"type": "BINARY", "len": 64, "count":1}, {"type": "TIMESTAMP", "count":1}], + "tags": [{"type": "BINARY", "len": 16, "count":1},{"type": "INT"},{"type": "DOUBLE"}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py new file mode 100644 index 0000000000..010308d037 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py @@ -0,0 +1,162 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + def run(self): + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + binPath = buildPath+ "/build/bin/" + + + # insert: create one or mutiple tables per sql and insert multiple rows per sql + # insert data from a special timestamp + # check stable stb0 + + os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoTestNanoDatabase.json -y " % binPath) + tdSql.execute("use nsdb") + tdSql.query("show stables") + tdSql.checkData(0, 4, 1000) + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0, 1000) + tdSql.query("select count(*) from tb0_0") + tdSql.checkData(0, 0, 10000) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 10000000) + tdSql.query("describe stb0") + tdSql.getData(8, 1) + tdSql.checkDataType(8, 1,"TIMESTAMP") + tdSql.query("select last(ts) from stb0") + tdSql.getData(0, 0) + + # check stable stb1 which is insert with disord + + tdSql.query("select count (tbname) from stb1") + tdSql.checkData(0, 0, 1000) + tdSql.query("select count(*) from tb1_0") + tdSql.checkData(0, 0, 10000) + tdSql.query("select count(*) from stb1") + tdSql.checkData(0, 0, 10000000) + # check c8 is an nano timestamp + tdSql.query("describe stb1") + tdSql.checkDataType(8, 1,"TIMESTAMP") + # check insert timestamp_step is nano_second + tdSql.query("select last(ts) from stb1") + tdSql.checkData(0, 0,"2021-07-01 00:01:39.990000000") + + # insert data from now time + + # check stable stb0 + os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoTestNanoDatabaseNow.json -y " % binPath) + tdSql.execute("use nsdb2") + tdSql.query("show stables") + tdSql.checkData(0, 4, 1000) + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0, 1000) + tdSql.query("select count(*) from tb0_0") + tdSql.checkData(0, 0, 100) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 100000) + # check c8 is an nano timestamp + tdSql.query("describe stb0") + tdSql.checkDataType(8,1,"TIMESTAMP") + + # insert by csv files and timetamp is long int , strings in ts and cols + + os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoTestNanoDatabasecsv.json -y " % binPath) + tdSql.execute("use nsdbcsv") + tdSql.query("show stables") + tdSql.checkData(0, 4, 100) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 10000) + tdSql.query("describe stb0") + tdSql.checkDataType(3, 1, "TIMESTAMP") + tdSql.query("select count(*) from stb0 where ts > \"2021-07-01 
00:00:00.490000000\"") + tdSql.checkData(0, 0, 5000) + tdSql.query("select count(*) from stb0 where ts >now -22d-1h-3s") + tdSql.checkData(0, 0, 10000) + tdSql.query("select count(*) from stb0 where ts >now -22d-1h-3s") + tdSql.checkData(0, 0, 10000) + tdSql.query("select count(*) from stb0 where ts < 1626918583000000000") + tdSql.checkData(0, 0, 10000) + + os.system("rm -rf ./insert_res.txt") + os.system("rm -rf tools/taosdemoAllTest/taosdemoTestSupportNano*.py.sql") + + + + + + + + + + + # taosdemo test insert with command and parameter , detals show taosdemo --help + + os.system("%staosdemo -u root -P taosdata -p 6030 -h vm84 -a 1 -m pre -n 10 -T 20 -t 60 -o res.txt -y " % binPath) + tdSql.query("select count(*) from test.meters") + tdSql.checkData(0, 0, 600) + + os.system("%staosdemo -u root -P taosdata -p 6030 -h vm84 -a 1 -m pre -n 10 -T 20 -t 60 -o res.txt -y " % binPath) + tdSql.query("select count(*) from test.meters") + tdSql.checkData(0, 0, 600) + + # check taosdemo -s + + os.system("%staosdemo -s tools/taosdemoAllTest/taosdemoTestNanoCreateDB.sql -y " % binPath) + tdSql.query("select count(*) from nsdbsql.meters") + tdSql.checkData(0, 0, 2) + + + + os.system("rm -rf ./res.txt") + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.json b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.json new file mode 100644 index 0000000000..fff1017588 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.json @@ -0,0 +1,92 @@ +{ + "filetype": "query", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "confirm_parameter_prompt": "no", + "databases": "nsdb", + "query_times": 10, + "query_mode": "taosc", + "specified_table_query": { + "query_interval": 1, + "concurrent": 2, + "sqls": [ + { + "sql": "select count(*) from stb0 where ts>\"2021-07-01 00:01:00.000000000 \" ;", + "result": "./query_res0.txt" + }, + { + "sql": "select count(*) from stb0 where ts>\"2021-07-01 00:01:00.000000000\" and ts <=\"2021-07-01 00:01:10.000000000\" ;", + "result": "./query_res1.txt" + }, + { + "sql": "select count(*) from stb0 where ts>now-20d ;", + "result": "./query_res2.txt" + }, + { + "sql": "select max(c10) from stb0;", + "result": "./query_res3.txt" + }, + { + "sql": "select min(c1) from stb0;", + "result": "./query_res4.txt" + }, + { + "sql": "select avg(c1) from stb0;", + "result": "./query_res5.txt" + }, + { + "sql":"select count(*) from stb0 group by tbname;", + "result":"./query_res6.txt" + } + + ] + }, + "super_table_query": { + "stblname": "stb0", + "query_interval": 0, + "threads": 4, + "sqls": [ + { + "sql": "select count(*) from xxxx where ts>\"2021-07-01 00:01:00.000000000 \" ;", + "result": "./query_res_tb0.txt" + }, + { + "sql":"select count(*) from xxxx where ts>\"2021-07-01 00:01:00.000000000\" and ts <=\"2021-07-01 00:01:10.000000000\" ;", + "result": "./query_res_tb1.txt" + }, + { + "sql":"select first(*) from xxxx ;", + "result": "./query_res_tb2.txt" + }, + { + "sql":"select last(*) from xxxx;", + "result": "./query_res_tb3.txt" + + }, + { + "sql":"select last_row(*) from xxxx ;", + "result": "./query_res_tb4.txt" + + }, + { + "sql":"select max(c10) from xxxx ;", + "result": "./query_res_tb5.txt" + + }, + { + "sql":"select min(c1) from xxxx ;", + "result": "./query_res_tb6.txt" + 
+ }, + { + "sql":"select avg(c10) from xxxx ;", + "result": "./query_res_tb7.txt" + + } + + ] + } + } \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.py new file mode 100644 index 0000000000..21a7037ce6 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.py @@ -0,0 +1,157 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + def run(self): + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + binPath = buildPath+ "/build/bin/" + + # query: query test for nanoSecond with where and max min groupby order + os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoTestNanoDatabase.json -y " % binPath) + + tdSql.execute("use nsdb") + + # use where to filter + + tdSql.query("select count(*) from stb0 where ts>\"2021-07-01 00:01:00.000000000 \" ;") + tdSql.checkData(0, 0, 3999000) + tdSql.query("select count(*) from stb0 where ts>\"2021-07-01 00:01:00.000000000\" and ts <=\"2021-07-01 00:01:10.000000000\" ;") + tdSql.checkData(0, 0, 1000000) + + tdSql.query("select count(*) from tb0_0 where ts>\"2021-07-01 00:01:00.000000000 \" ;") + tdSql.checkData(0, 0, 3999) + tdSql.query("select count(*) from tb0_0 where ts>\"2021-07-01 00:01:00.000000000\" and ts <=\"2021-07-01 00:01:10.000000000\" ;") + tdSql.checkData(0, 0, 1000) + + + # select max min avg from special col + tdSql.query("select max(c10) from stb0;") + print("select max(c10) from stb0 : " , tdSql.getData(0, 0)) + + tdSql.query("select max(c10) from tb0_0;") + print("select max(c10) from tb0_0 : " , tdSql.getData(0, 0)) + + + tdSql.query("select min(c1) from stb0;") + print( "select min(c1) from stb0 : " , tdSql.getData(0, 0)) + + tdSql.query("select min(c1) from tb0_0;") + print( "select min(c1) from tb0_0 : " , tdSql.getData(0, 0)) + + tdSql.query("select avg(c1) from stb0;") + print( "select avg(c1) from stb0 : " , tdSql.getData(0, 0)) + + tdSql.query("select avg(c1) from tb0_0;") + print( "select avg(c1) from tb0_0 : " , tdSql.getData(0, 0)) + + tdSql.query("select count(*) from stb0 group by tbname;") + tdSql.checkData(0, 0, 10000) + tdSql.checkData(100, 0, 10000) + + # query : query above sqls by taosdemo and continuously + 
+ os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.json -y " % binPath) + + + os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoTestNanoDatabasecsv.json -y " % binPath) + tdSql.execute("use nsdbcsv") + tdSql.query("show stables") + tdSql.checkData(0, 4, 100) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 10000) + tdSql.query("describe stb0") + tdSql.checkDataType(3, 1, "TIMESTAMP") + tdSql.query("select count(*) from stb0 where ts >\"2021-07-01 00:00:00.490000000\"") + tdSql.checkData(0, 0, 5000) + tdSql.query("select count(*) from stb0 where ts >now -22d-1h-3s") + tdSql.checkData(0, 0, 10000) + tdSql.query("select count(*) from stb0 where ts < 1626918583000000000") + tdSql.checkData(0, 0, 10000) + tdSql.execute('select count(*) from stb0 where c2 > 162687012800000000') + tdSql.execute('select count(*) from stb0 where c2 < 162687012800000000') + tdSql.execute('select count(*) from stb0 where c2 = 162687012800000000') + tdSql.execute('select count(*) from stb0 where c2 != 162687012800000000') + tdSql.execute('select count(*) from stb0 where c2 <> 162687012800000000') + tdSql.execute('select count(*) from stb0 where c2 > "2021-07-21 20:22:08.248246976"') + tdSql.execute('select count(*) from stb0 where c2 < "2021-07-21 20:22:08.248246976"') + tdSql.execute('select count(*) from stb0 where c2 = "2021-07-21 20:22:08.248246976"') + tdSql.execute('select count(*) from stb0 where c2 != "2021-07-21 20:22:08.248246976"') + tdSql.execute('select count(*) from stb0 where c2 <> "2021-07-21 20:22:08.248246976"') + tdSql.execute('select count(*) from stb0 where ts between "2021-07-01 00:00:00.000000000" and "2021-07-01 00:00:00.990000000"') + tdSql.execute('select count(*) from stb0 where ts between 1625068800000000000 and 1625068801000000000') + tdSql.query('select avg(c0) from stb0 interval(5000000000b)') + tdSql.checkRows(1) + + tdSql.query('select avg(c0) from stb0 interval(100000000b)') + tdSql.checkRows(10) + + tdSql.error('select avg(c0) from stb0 interval(1b)') + tdSql.error('select avg(c0) from stb0 interval(999b)') + + tdSql.query('select avg(c0) from stb0 interval(1000b)') + tdSql.checkRows(100) + + tdSql.query('select avg(c0) from stb0 interval(1u)') + tdSql.checkRows(100) + + tdSql.query('select avg(c0) from stb0 interval(100000000b) sliding (100000000b)') + tdSql.checkRows(10) + + # query : query above sqls by taosdemo and continuously + os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoTestSupportNanoQuerycsv.json -y " % binPath) + + os.system("rm -rf ./query_res*.txt*") + os.system("rm -rf tools/taosdemoAllTest/*.py.sql") + + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuerycsv.json b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuerycsv.json new file mode 100644 index 0000000000..aa95837a33 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuerycsv.json @@ -0,0 +1,110 @@ +{ + "filetype": "query", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "confirm_parameter_prompt": "no", + "databases": "nsdbcsv", + "query_times": 10, + "query_mode": "taosc", + "specified_table_query": { + "query_interval": 1, + "concurrent": 2, + "sqls": [ + { + "sql": "select count(*) from stb0 where ts> \"2021-07-01 00:00:00.490000000\" ;", + 
"result": "./query_res0.txt" + }, + { + "sql": "select count(*) from stb0 where ts > now -22d-1h-3s ;", + "result": "./query_res1.txt" + }, + { + "sql": "select count(*) from stb0 where ts < 1626918583000000000 ;", + "result": "./query_res2.txt" + }, + { + "sql": "select count(*) from stb0 where c2 <> 162687012800000000';", + "result": "./query_res3.txt" + }, + { + "sql": "select count(*) from stb0 where c2 != \"2021-07-21 20:22:08.248246976\";", + "result": "./query_res4.txt" + }, + { + "sql": "select count(*) from stb0 where ts between \"2021-07-01 00:00:00.000000000\" and \"2021-07-01 00:00:00.990000000\";", + "result": "./query_res5.txt" + }, + { + "sql":"select count(*) from stb0 group by tbname;", + "result":"./query_res6.txt" + }, + { + "sql":"select count(*) from stb0 where ts between 1625068800000000000 and 1625068801000000000;", + "result":"./query_res7.txt" + }, + { + "sql":"select avg(c0) from stb0 interval(5000000000b);", + "result":"./query_res8.txt" + }, + { + "sql":"select avg(c0) from stb0 interval(100000000b) sliding (100000000b);", + "result":"./query_res9.txt" + } + + ] + }, + "super_table_query": { + "stblname": "stb0", + "query_interval": 0, + "threads": 4, + "sqls": [ + { + "sql": "select count(*) from xxxx where ts > \"2021-07-01 00:00:00.490000000\" ;", + "result": "./query_res_tb0.txt" + }, + { + "sql":"select count(*) from xxxx where ts between \"2021-07-01 00:00:00.000000000\" and \"2021-07-01 00:00:00.990000000\" ;", + "result": "./query_res_tb1.txt" + }, + { + "sql":"select first(*) from xxxx ;", + "result": "./query_res_tb2.txt" + }, + { + "sql":"select last(*) from xxxx;", + "result": "./query_res_tb3.txt" + + }, + { + "sql":"select last_row(*) from xxxx ;", + "result": "./query_res_tb4.txt" + + }, + { + "sql":"select max(c0) from xxxx ;", + "result": "./query_res_tb5.txt" + + }, + { + "sql":"select min(c0) from xxxx ;", + "result": "./query_res_tb6.txt" + + }, + { + "sql":"select avg(c0) from xxxx ;", + "result": "./query_res_tb7.txt" + + }, + { + "sql":"select avg(c0) from xxxx interval(100000000b) sliding (100000000b) ;", + "result": "./query_res_tb8.txt" + + } + + + ] + } + } \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoSubscribe.json b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoSubscribe.json new file mode 100644 index 0000000000..26d405b65b --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoSubscribe.json @@ -0,0 +1,32 @@ +{ + "filetype":"subscribe", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "databases": "subnsdb", + "confirm_parameter_prompt": "no", + "specified_table_query": + { + "concurrent":2, + "mode":"sync", + "interval":10000, + "restart":"yes", + "keepProgress":"yes", + "sqls": [ + { + "sql": "select * from stb0 where ts>= \"2021-07-01 00:00:00.000000000\" ;", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb0 where ts > now -20d-1h-3s ;", + "result": "./subscribe_res1.txt" + }, + { + "sql": "select * from stb0 where ts < 1626918583000000000 ;", + "result": "./subscribe_res2.txt" + }] + + } +} diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py new file mode 100644 index 0000000000..f6324577c1 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py @@ -0,0 +1,123 @@ 
+################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * +import time +from datetime import datetime +import subprocess + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + # get the number of subscriptions + def subTimes(self,filename): + self.filename = filename + command = 'cat %s |wc -l'% filename + times = int(subprocess.getstatusoutput(command)[1]) + return times + + # assert results + def assertCheck(self,filename,subResult,expectResult): + self.filename = filename + self.subResult = subResult + self.expectResult = expectResult + args0 = (filename, subResult, expectResult) + assert subResult == expectResult , "Queryfile:%s ,result is %s != expect: %s" % args0 + + def run(self): + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + binPath = buildPath+ "/build/bin/" + + # clear env + os.system("ps -ef |grep 'taosdemoAllTest/taosdemoTestSupportNanoSubscribe.json' |grep -v 'grep' |awk '{print $2}'|xargs kill -9") + os.system("rm -rf ./subscribe_res*") + os.system("rm -rf ./all_subscribe_res*") + + + # insert data + os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoTestNanoDatabaseInsertForSub.json" % binPath) + os.system("nohup %staosdemo -f tools/taosdemoAllTest/taosdemoTestSupportNanoSubscribe.json &" % binPath) + query_pid = int(subprocess.getstatusoutput('ps aux|grep "taosdemoAllTest/taosdemoTestSupportNanoSubscribe.json" |grep -v "grep"|awk \'{print $2}\'')[1]) + + + # merge result files + sleep(20) + os.system("cat subscribe_res0.txt* > all_subscribe_res0.txt") + os.system("cat subscribe_res1.txt* > all_subscribe_res1.txt") + os.system("cat subscribe_res2.txt* > all_subscribe_res2.txt") + + + # correct subscribeTimes testcase + subTimes0 = self.subTimes("all_subscribe_res0.txt") + self.assertCheck("all_subscribe_res0.txt",subTimes0 ,200) + + subTimes1 = self.subTimes("all_subscribe_res1.txt") + self.assertCheck("all_subscribe_res1.txt",subTimes1 ,0) + + subTimes2 = self.subTimes("all_subscribe_res2.txt") + self.assertCheck("all_subscribe_res2.txt",subTimes2 ,200) + + + # insert extral data + tdSql.execute("use subnsdb") + tdSql.execute("insert into tb0_0 values(now,100.1000,'subtest1',now-1s)") + sleep(1) + + os.system("cat subscribe_res0.txt* > all_subscribe_res0.txt") + subTimes0 = self.subTimes("all_subscribe_res0.txt") + 
self.assertCheck("all_subscribe_res0.txt",subTimes0 ,202) + + + + # correct data testcase + os.system("kill -9 %d" % query_pid) + sleep(3) + os.system("rm -rf ./subscribe_res*") + os.system("rm -rf ./all_subscribe*") + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) From c133eb1cfea4803c8b851eb4540de82f2bf5daf8 Mon Sep 17 00:00:00 2001 From: wenzhouwww Date: Fri, 23 Jul 2021 10:07:02 +0800 Subject: [PATCH 04/46] [TD-5369] modify an error about insert --- .../tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py index 010308d037..76b5871de6 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py @@ -113,10 +113,6 @@ class TDTestCase: tdSql.checkDataType(3, 1, "TIMESTAMP") tdSql.query("select count(*) from stb0 where ts > \"2021-07-01 00:00:00.490000000\"") tdSql.checkData(0, 0, 5000) - tdSql.query("select count(*) from stb0 where ts >now -22d-1h-3s") - tdSql.checkData(0, 0, 10000) - tdSql.query("select count(*) from stb0 where ts >now -22d-1h-3s") - tdSql.checkData(0, 0, 10000) tdSql.query("select count(*) from stb0 where ts < 1626918583000000000") tdSql.checkData(0, 0, 10000) From ea0cc35af4d2db6f92f0a0b74e511deb01d2633c Mon Sep 17 00:00:00 2001 From: happyguoxy Date: Fri, 23 Jul 2021 14:49:15 +0800 Subject: [PATCH 05/46] [TD-5074]test operator --- tests/pytest/fulltest.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index e452ed3de4..eb068b6585 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -241,6 +241,7 @@ python3 ./test.py -f query/queryStateWindow.py python3 ./test.py -f query/nestedQuery/queryWithOrderLimit.py python3 ./test.py -f query/nestquery_last_row.py python3 ./test.py -f query/queryCnameDisplay.py +python3 ./test.py -f query/operator.py python3 test.py -f query/nestedQuery/queryWithSpread.py #stream From 11d5b3231d91d78609a0141ef3f3200579f1813d Mon Sep 17 00:00:00 2001 From: happyguoxy Date: Fri, 23 Jul 2021 14:49:27 +0800 Subject: [PATCH 06/46] [TD-5074]test operator --- tests/pytest/query/operator.py | 539 +++++++++++++++++++++++++++++++++ 1 file changed, 539 insertions(+) create mode 100644 tests/pytest/query/operator.py diff --git a/tests/pytest/query/operator.py b/tests/pytest/query/operator.py new file mode 100644 index 0000000000..b94d5fa3b3 --- /dev/null +++ b/tests/pytest/query/operator.py @@ -0,0 +1,539 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql +import random +import time + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + self.ts = 1600000000000 + self.num = 10 + + def run(self): + tdSql.prepare() + # test case for https://jira.taosdata.com:18080/browse/TD-5074 + + startTime = time.time() + + tdSql.execute('''create stable stable_1 + (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint, + q_bool bool , q_binary binary(20) , q_nchar nchar(20) , + q_float float , q_double double , q_ts timestamp) + tags(loc nchar(20) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, + t_bool bool , t_binary binary(20) , t_nchar nchar(20) , + t_float float , t_double double , t_ts timestamp);''') + tdSql.execute('''create stable stable_2 + (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint, + q_bool bool , q_binary binary(20) , q_nchar nchar(20) , + q_float float , q_double double , q_ts timestamp) + tags(loc nchar(20) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, + t_bool bool , t_binary binary(20) , t_nchar nchar(20) , + t_float float , t_double double , t_ts timestamp);''') + tdSql.execute('''create table table_0 using stable_1 + tags('table_0' , '0' , '0' , '0' , '0' , 0 , '0' , '0' , '0' , '0' ,'0')''') + tdSql.execute('''create table table_1 using stable_1 + tags('table_1' , '2147483647' , '9223372036854775807' , '32767' , '127' , 1 , + 'binary1' , 'nchar1' , '1' , '11' , \'1999-09-09 09:09:09.090\')''') + tdSql.execute('''create table table_2 using stable_1 + tags('table_2' , '-2147483647' , '-9223372036854775807' , '-32767' , '-127' , false , + 'binary2' , 'nchar2nchar2' , '-2.2' , '-22.22' , \'2099-09-09 09:09:09.090\')''') + tdSql.execute('''create table table_3 using stable_1 + tags('table_3' , '3' , '3' , '3' , '3' , true , 'binary3' , 'nchar3' , '33.33' , '3333.3333' , '0')''') + tdSql.execute('''create table table_4 using stable_1 + tags('table_4' , '4' , '4' , '4' , '4' , false , 'binary4' , 'nchar4' , '-444.444' , '-444444.444444' , '0')''') + tdSql.execute('''create table table_5 using stable_1 + tags('table_5' , '5' , '5' , '5' , '5' , true , 'binary5' , 'nchar5' , '5555.5555' , '55555555.55555555' , '0')''') + tdSql.execute('''create table table_21 using stable_2 + tags('table_5' , '5' , '5' , '5' , '5' , true , 'binary5' , 'nchar5' , '5555.5555' , '55555555.55555555' , '0')''') + #regular table + tdSql.execute('''create table regular_table_1 + (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint, + q_bool bool , q_binary binary(20) , q_nchar nchar(20) , + q_float float , q_double double , q_ts timestamp) ;''') + + for i in range(self.num): + tdSql.execute('''insert into table_0 values(%d, %d, %d, %d, %d, 0, 'binary.%s', 'nchar.%s', %f, %f, %d)''' + % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) + tdSql.execute('''insert into table_1 values(%d, %d, %d, %d, %d, 1, 'binary1.%s', 'nchar1.%s', %f, %f, %d)''' + % (self.ts + i, 2147483647-i, 9223372036854775807-i, 32767-i, 127-i, + i, i, 
random.random(), random.random(), 1262304000001 + i)) + tdSql.execute('''insert into table_2 values(%d, %d, %d, %d, %d, true, 'binary2.%s', 'nchar2nchar2.%s', %f, %f, %d)''' + % (self.ts + i, -2147483647+i, -9223372036854775807+i, -32767+i, -127+i, + i, i, random.uniform(-1,0), random.uniform(-1,0), 1577836800001 + i)) + tdSql.execute('''insert into table_3 values(%d, %d, %d, %d, %d, false, 'binary3.%s', 'nchar3.%s', %f, %f, %d)''' + % (self.ts + i, random.randint(-2147483647, 2147483647), + random.randint(-9223372036854775807, 9223372036854775807), random.randint(-32767, 32767), + random.randint(-127, 127), random.randint(-100, 100), random.randint(-10000, 10000), + random.uniform(-100000,100000), random.uniform(-1000000000,1000000000), self.ts + i)) + tdSql.execute('''insert into table_4 values(%d, %d, %d, %d, %d, true, 'binary4.%s', 'nchar4.%s', %f, %f, %d)''' + % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) + tdSql.execute('''insert into table_5 values(%d, %d, %d, %d, %d, false, 'binary5.%s', 'nchar5.%s', %f, %f, %d)''' + % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) + tdSql.execute('''insert into table_21 values(%d, %d, %d, %d, %d, false, 'binary5.%s', 'nchar5.%s', %f, %f, %d)''' + % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) + + tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, 0, 'binary.%s', 'nchar.%s', %f, %f, %d)''' + % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) + tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, 1, 'binary1.%s', 'nchar1.%s', %f, %f, %d)''' + % (self.ts + 100 + i, 2147483647-i, 9223372036854775807-i, 32767-i, 127-i, + i, i, random.random(), random.random(), 1262304000001 + i)) + tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, true, 'binary2.%s', 'nchar2nchar2.%s', %f, %f, %d)''' + % (self.ts + 200 + i, -2147483647+i, -9223372036854775807+i, -32767+i, -127+i, + i, i, random.uniform(-1,0), random.uniform(-1,0), 1577836800001 + i)) + tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, false, 'binary3.%s', 'nchar3.%s', %f, %f, %d)''' + % (self.ts + 300 + i, random.randint(-2147483647, 2147483647), + random.randint(-9223372036854775807, 9223372036854775807), random.randint(-32767, 32767), + random.randint(-127, 127), random.randint(-100, 100), random.randint(-10000, 10000), + random.uniform(-100000,100000), random.uniform(-1000000000,1000000000), self.ts + i)) + tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, true, 'binary4.%s', 'nchar4.%s', %f, %f, %d)''' + % (self.ts + 400 + i, i, i, i, i, i, i, i, i, self.ts + i)) + tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, false, 'binary5.%s', 'nchar5.%s', %f, %f, %d)''' + % (self.ts + 500 + i, i, i, i, i, i, i, i, i, self.ts + i)) + + tdLog.info("========== operator=1(OP_TableScan) ==========") + tdLog.info("========== operator=7(OP_Project) ==========") + sql = '''select * from stable_1''' + tdSql.query(sql) + tdSql.checkRows(6*self.num) + sql = '''select * from regular_table_1''' + tdSql.query(sql) + tdSql.checkRows(6*self.num) + + tdLog.info("========== operator=14(OP_MultiTableAggregate ) ==========") + sql = '''select last_row(*) from stable_1;''' + tdSql.query(sql) + tdSql.checkData(0,1,self.num-1) + + tdLog.info("========== operator=6(OP_Aggregate) ==========") + sql = '''select last_row(*) from regular_table_1;''' + tdSql.query(sql) + tdSql.checkData(0,1,self.num-1) + + tdLog.info("========== operator=9(OP_Limit) ==========") + sql = '''select * from 
stable_1 where loc = 'table_0' limit 5;''' + tdSql.query(sql) + tdSql.checkRows(5) + sql = '''select last_row(*) from (select * from stable_1 where loc = 'table_0');''' + tdSql.query(sql) + tdSql.checkRows(1) + + sql = '''select * from regular_table_1 ;''' + tdSql.query(sql) + tdSql.checkRows(6*self.num) + sql = '''select last_row(*) from (select * from regular_table_1);''' + tdSql.query(sql) + tdSql.checkRows(1) + tdSql.checkData(0,1,self.num-1) + + + sql = '''select last_row(*) from + ((select * from table_0) union all + (select * from table_1) union all + (select * from table_2));''' + tdSql.error(sql) + + tdLog.info("========== operator=16(OP_DummyInput) ==========") + sql = '''select last_row(*) from + ((select last_row(*) from table_0) union all + (select last_row(*) from table_1) union all + (select last_row(*) from table_2));''' + tdSql.error(sql) + + sql = '''select last_row(*) from + ((select * from table_0 limit 5 offset 5) union all + (select * from table_1 limit 5 offset 5) union all + (select * from regular_table_1 limit 5 offset 5));''' + tdSql.error(sql) + + tdLog.info("========== operator=10(OP_SLimit) ==========") + sql = '''select count(*) from stable_1 group by loc slimit 3 soffset 2 ;''' + tdSql.query(sql) + tdSql.checkRows(3) + + sql = '''select last_row(*) from + ((select * from table_0) union all + (select * from table_1) union all + (select * from table_2));''' + tdSql.error(sql) + + tdLog.info("========== operator=20(OP_Distinct) ==========") + tdLog.info("========== operator=4(OP_TagScan) ==========") + sql = '''select distinct(t_bool) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(2) + sql = '''select distinct(loc) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_int) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_bigint) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_smallint) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_tinyint) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_binary) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_nchar) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_float) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_double) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_ts) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(3) + sql = '''select distinct(tbname) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + + tdLog.info("========== operator=2(OP_DataBlocksOptScan) ==========") + sql = '''select last(q_int),first(q_int) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_bigint),first(q_bigint) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_smallint),first(q_smallint) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_tinyint),first(q_tinyint) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_bool),first(q_bool) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_binary),first(q_binary) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_nchar),first(q_nchar) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_float),first(q_float) from 
stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_double),first(q_double) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_ts),first(q_ts) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint),last(q_bool),last(q_binary),last(q_nchar), + last(q_float),last(q_double),last(q_ts),first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint), + first(q_bool),first(q_binary),first(q_nchar),first(q_float),first(q_float),first(q_double),first(q_ts) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint),last(q_bool),last(q_binary),last(q_nchar), + last(q_float),last(q_double),last(q_ts),first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint),first(q_bool), + first(q_binary),first(q_nchar),first(q_float),first(q_float),first(q_double),first(q_ts) from regular_table_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + + tdLog.info("========== operator=8(OP_Groupby) ==========") + sql = '''select stddev(q_int) from table_0 group by q_int;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select stddev(q_int),stddev(q_bigint),stddev(q_smallint),stddev(q_tinyint),stddev(q_float),stddev(q_double) from stable_1 group by q_int;''' + tdSql.query(sql) + sql = '''select stddev(q_int),stddev(q_bigint),stddev(q_smallint),stddev(q_tinyint),stddev(q_float),stddev(q_double) from table_1 group by q_bigint;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select stddev(q_int),stddev(q_bigint),stddev(q_smallint),stddev(q_tinyint),stddev(q_float),stddev(q_double) from regular_table_1 group by q_smallint;''' + tdSql.query(sql) + + tdLog.info("========== operator=11(OP_TimeWindow) ==========") + sql = '''select last(q_int) from table_0 interval(1m);''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint), + first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint) from table_1 interval(1m);''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint), + first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint) from stable_1 interval(1m);''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint), + first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint) from regular_table_1 interval(1m);''' + tdSql.query(sql) + tdSql.checkRows(1) + + tdLog.info("========== operator=12(OP_SessionWindow) ==========") + sql = '''select count(*) from table_1 session(ts,1s);''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select count(*) from regular_table_1 session(ts,1s);''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select count(*),sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from table_1 session(ts,1s);''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = 
'''select count(*),sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from regular_table_1 session(ts,1s);''' + tdSql.query(sql) + tdSql.checkRows(1) + + tdLog.info("========== operator=13(OP_Fill) ==========") + sql = '''select sum(q_int) from table_0 + where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' + tdSql.query(sql) + tdSql.checkData(0,1,'None') + sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from stable_1 where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' + tdSql.query(sql) + tdSql.checkData(0,1,'None') + sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from regular_table_1 where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' + tdSql.query(sql) + tdSql.checkData(0,1,'None') + sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from table_0 where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' + tdSql.query(sql) + tdSql.checkData(0,1,'None') + #TD-5190 + sql = '''select sum(q_tinyint),stddev(q_float) from stable_1 + where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' + tdSql.query(sql) + tdSql.checkData(0,1,'None') + + tdLog.info("========== operator=15(OP_MultiTableTimeInterval) ==========") + sql = '''select avg(q_int) from stable_1 where ts=0;''' + tdSql.query(sql) + tdSql.checkData(0,0,'table_0') + sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), 
avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from stable_1 group by loc having sum(q_int)>=0;''' + tdSql.query(sql) + tdSql.checkData(0,0,'table_0') + sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from stable_1 group by loc having avg(q_int)>=0;''' + tdSql.query(sql) + tdSql.checkData(0,0,'table_0') + sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from stable_1 group by loc having min(q_int)>=0;''' + tdSql.query(sql) + tdSql.checkData(0,0,'table_0') + sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from stable_1 group by loc having max(q_int)>=0;''' + tdSql.query(sql) + tdSql.checkData(0,0,'table_0') + sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from stable_1 group by loc having first(q_int)>=0;''' + tdSql.query(sql) + tdSql.checkData(0,0,'table_0') + sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), 
avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from stable_1 group by loc having last(q_int)>=0;''' + tdSql.query(sql) + tdSql.checkData(0,0,'table_0') + + tdLog.info("========== operator=21(OP_Join) ==========") + sql = '''select t1.q_int,t2.q_int from + (select ts,q_int from table_1) t1 , (select ts,q_int from table_2) t2 + where t2.ts = t1.ts;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select t1.*,t2.* from + (select * from table_1) t1 , (select * from table_2) t2 + where t2.ts = t1.ts;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select t1.*,t2.* from + (select * from regular_table_1) t1 , (select * from table_0) t2 + where t2.ts = t1.ts;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select t1.*,t2.* from + (select * from stable_1) t1 , (select * from table_2) t2 + where t2.ts = t1.ts;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select t1.*,t2.* from + (select * from regular_table_1) t1 , (select * from stable_1) t2 + where t2.ts = t1.ts;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select t1.*,t2.*,t3.* from + (select * from regular_table_1) t1 , (select * from stable_1) t2, (select * from table_0) t3 + where t2.ts = t1.ts and t3.ts = t1.ts and t2.ts = t3.ts;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + + tdLog.info("========== operator=22(OP_StateWindow) ==========") + sql = '''select avg(q_int),sum(q_smallint) from table_1 state_window(q_int);''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from table_1 state_window(q_bigint);''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from regular_table_1 state_window(q_smallint);''' + tdSql.query(sql) + tdSql.checkRows(6*self.num) + + endTime = time.time() + print("total time %ds" % (endTime - startTime)) + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file From 8c62d84a3d5f1d52191bcf4a99a9b4d75b21eca9 Mon Sep 17 00:00:00 2001 From: 
wenzhouwww Date: Fri, 23 Jul 2021 20:05:08 +0800 Subject: [PATCH 07/46] [TD-5369] add test case about taosdemo params 'time_step' --- tests/pytest/fulltest.sh | 1 + .../taosdemoAllTest/taosdemoInsertMSDB.json | 63 ++++++++++ .../taosdemoAllTest/taosdemoInsertNanoDB.json | 63 ++++++++++ .../taosdemoAllTest/taosdemoInsertUSDB.json | 63 ++++++++++ .../taosdemoTestInsertTime_step.py | 115 ++++++++++++++++++ .../taosdemoTestNanoCreateDB.sql | 7 -- .../taosdemoTestSupportNanoInsert.py | 36 +++--- 7 files changed, 324 insertions(+), 24 deletions(-) create mode 100644 tests/pytest/tools/taosdemoAllTest/taosdemoInsertMSDB.json create mode 100644 tests/pytest/tools/taosdemoAllTest/taosdemoInsertNanoDB.json create mode 100644 tests/pytest/tools/taosdemoAllTest/taosdemoInsertUSDB.json create mode 100644 tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertTime_step.py delete mode 100644 tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoCreateDB.sql diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index c9dd6d8e79..ab7dedc959 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -368,6 +368,7 @@ python3 test.py -f alter/alter_create_exception.py python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.py python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py +python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertTime_step.py python3 ./test.py -f insert/flushwhiledrop.py #======================p4-end=============== diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoInsertMSDB.json b/tests/pytest/tools/taosdemoAllTest/taosdemoInsertMSDB.json new file mode 100644 index 0000000000..49ab6f3a43 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoInsertMSDB.json @@ -0,0 +1,63 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 10, + "thread_count_create_tbl": 10, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "testdb3", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 36, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "tb0_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 100, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1000, + "start_timestamp": "2021-07-01 00:00:00.000", + "sample_format": "", + "sample_file": "", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2}, + {"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1}, + {"type": "BOOL"},{"type": "NCHAR","len":16}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}] + }] + }] +} 
+ diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoInsertNanoDB.json b/tests/pytest/tools/taosdemoAllTest/taosdemoInsertNanoDB.json new file mode 100644 index 0000000000..9a35df917d --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoInsertNanoDB.json @@ -0,0 +1,63 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 10, + "thread_count_create_tbl": 10, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "testdb1", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ns", + "keep": 36, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "tb0_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 100, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1000, + "start_timestamp": "2021-07-01 00:00:00.000", + "sample_format": "", + "sample_file": "", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2}, + {"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1}, + {"type": "BOOL"},{"type": "NCHAR","len":16}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}] + }] + }] +} + diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoInsertUSDB.json b/tests/pytest/tools/taosdemoAllTest/taosdemoInsertUSDB.json new file mode 100644 index 0000000000..631179dbae --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoInsertUSDB.json @@ -0,0 +1,63 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 10, + "thread_count_create_tbl": 10, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "testdb2", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "us", + "keep": 36, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "tb0_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 100, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1000, + "start_timestamp": "2021-07-01 00:00:00.000", + "sample_format": "", + "sample_file": "", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", 
"count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2}, + {"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1}, + {"type": "BOOL"},{"type": "NCHAR","len":16}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}] + }] + }] +} + diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertTime_step.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertTime_step.py new file mode 100644 index 0000000000..7b3b865df9 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertTime_step.py @@ -0,0 +1,115 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + def run(self): + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + binPath = buildPath+ "/build/bin/" + + # insert: create one or mutiple tables per sql and insert multiple rows per sql + + # check the params of taosdemo about time_step is nano + os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoInsertNanoDB.json -y " % binPath) + tdSql.execute("use testdb1") + tdSql.query("show stables") + tdSql.checkData(0, 4, 100) + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0, 100) + tdSql.query("select count(*) from tb0_0") + tdSql.checkData(0, 0, 100) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 10000) + tdSql.query("describe stb0") + tdSql.getData(9, 1) + tdSql.checkDataType(9, 1,"TIMESTAMP") + tdSql.query("select last(ts) from stb0") + tdSql.checkData(0, 0,"2021-07-01 00:00:00.000099000") + + # check the params of taosdemo about time_step is us + os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoInsertUSDB.json -y " % binPath) + tdSql.execute("use testdb2") + tdSql.query("show stables") + tdSql.checkData(0, 4, 100) + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0, 100) + tdSql.query("select count(*) from tb0_0") + tdSql.checkData(0, 0, 100) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 10000) + tdSql.query("describe stb0") + tdSql.getData(9, 1) + tdSql.checkDataType(9, 1,"TIMESTAMP") + tdSql.query("select last(ts) from stb0") + tdSql.checkData(0, 0,"2021-07-01 
00:00:00.099000") + + # check the params of taosdemo about time_step is ms + os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoInsertMSDB.json -y " % binPath) + tdSql.execute("use testdb3") + tdSql.query("show stables") + tdSql.checkData(0, 4, 100) + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0, 100) + tdSql.query("select count(*) from tb0_0") + tdSql.checkData(0, 0, 100) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 10000) + tdSql.query("describe stb0") + tdSql.checkDataType(9, 1,"TIMESTAMP") + tdSql.query("select last(ts) from stb0") + tdSql.checkData(0, 0,"2021-07-01 00:01:39.000") + + + os.system("rm -rf ./res.txt") + os.system("rm -rf ./*.py.sql") + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoCreateDB.sql b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoCreateDB.sql deleted file mode 100644 index e79e09592c..0000000000 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoCreateDB.sql +++ /dev/null @@ -1,7 +0,0 @@ -drop database if exists nsdbsql; -create database nsdbsql precision "ns" keep 36 days 6 update 1; -use nsdbsql; -CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupdId int); -CREATE TABLE d1001 USING meters TAGS ("Beijing.Chaoyang", 2); -INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 10.2, 219, 0.32); -INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 85, 32, 0.76); diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py index 76b5871de6..88a917da85 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py @@ -64,8 +64,7 @@ class TDTestCase: tdSql.query("select count(*) from stb0") tdSql.checkData(0, 0, 10000000) tdSql.query("describe stb0") - tdSql.getData(8, 1) - tdSql.checkDataType(8, 1,"TIMESTAMP") + tdSql.checkDataType(9, 1,"TIMESTAMP") tdSql.query("select last(ts) from stb0") tdSql.getData(0, 0) @@ -79,7 +78,7 @@ class TDTestCase: tdSql.checkData(0, 0, 10000000) # check c8 is an nano timestamp tdSql.query("describe stb1") - tdSql.checkDataType(8, 1,"TIMESTAMP") + tdSql.checkDataType(9, 1,"TIMESTAMP") # check insert timestamp_step is nano_second tdSql.query("select last(ts) from stb1") tdSql.checkData(0, 0,"2021-07-01 00:01:39.990000000") @@ -99,7 +98,7 @@ class TDTestCase: tdSql.checkData(0, 0, 100000) # check c8 is an nano timestamp tdSql.query("describe stb0") - tdSql.checkDataType(8,1,"TIMESTAMP") + tdSql.checkDataType(9,1,"TIMESTAMP") # insert by csv files and timetamp is long int , strings in ts and cols @@ -118,16 +117,6 @@ class TDTestCase: os.system("rm -rf ./insert_res.txt") os.system("rm -rf tools/taosdemoAllTest/taosdemoTestSupportNano*.py.sql") - - - - - - - - - - # taosdemo test insert with command and parameter , detals show taosdemo --help os.system("%staosdemo -u root -P taosdata -p 6030 -h vm84 -a 1 -m pre -n 10 -T 20 -t 60 -o res.txt -y " % binPath) @@ -140,13 +129,26 @@ class TDTestCase: # check taosdemo -s - os.system("%staosdemo -s tools/taosdemoAllTest/taosdemoTestNanoCreateDB.sql -y " % binPath) + sqls_ls = ['drop database if exists nsdbsql;','create database nsdbsql precision "ns" keep 36 days 6 
update 1;', + 'use nsdbsql;','CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupdId int);', + 'CREATE TABLE d1001 USING meters TAGS ("Beijing.Chaoyang", 2);', + 'INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 10.2, 219, 0.32);', + 'INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 85, 32, 0.76);'] + + with open("./taosdemoTestNanoCreateDB.sql",mode ="a" ) as sql_files: + for sql in sqls_ls: + sql_files.write(sql+"\n") + sql_files.close() + + sleep(10) + + os.system("%staosdemo -s taosdemoTestNanoCreateDB.sql -y " % binPath) tdSql.query("select count(*) from nsdbsql.meters") tdSql.checkData(0, 0, 2) - - os.system("rm -rf ./res.txt") + os.system("rm -rf ./*.py.sql") + os.system("rm -rf ./taosdemoTestNanoCreateDB.sql") def stop(self): From 4eeea0d8d6971556b81c3a8cb0526ed27ff1b27c Mon Sep 17 00:00:00 2001 From: tomchon Date: Mon, 26 Jul 2021 10:14:47 +0800 Subject: [PATCH 08/46] [TD-5523]: arm32 4byte long error. --- src/query/src/qExecutor.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 982c45c441..d479e90415 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -149,12 +149,12 @@ static void getNextTimeWindow(SQueryAttr* pQueryAttr, STimeWindow* tw) { int mon = (int)(tm.tm_year * 12 + tm.tm_mon + interval * factor); tm.tm_year = mon / 12; tm.tm_mon = mon % 12; - tw->skey = convertTimePrecision(mktime(&tm) * 1000L, TSDB_TIME_PRECISION_MILLI, pQueryAttr->precision); + tw->skey = convertTimePrecision((int64_t)mktime(&tm) * 1000L, TSDB_TIME_PRECISION_MILLI, pQueryAttr->precision); mon = (int)(mon + interval); tm.tm_year = mon / 12; tm.tm_mon = mon % 12; - tw->ekey = convertTimePrecision(mktime(&tm) * 1000L, TSDB_TIME_PRECISION_MILLI, pQueryAttr->precision); + tw->ekey = convertTimePrecision((int64_t)mktime(&tm) * 1000L, TSDB_TIME_PRECISION_MILLI, pQueryAttr->precision); tw->ekey -= 1; } From c63d6f4c94344f7ec4564b137791edd1829ac0fb Mon Sep 17 00:00:00 2001 From: wenzhouwww Date: Mon, 26 Jul 2021 18:00:02 +0800 Subject: [PATCH 09/46] [TD-5369] fulltest.sh --- tests/pytest/fulltest.sh | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index ab7dedc959..923934ac69 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -161,7 +161,11 @@ python3 test.py -f tools/taosdemoTestTblAlt.py python3 test.py -f tools/taosdemoTestSampleData.py python3 test.py -f tools/taosdemoTestInterlace.py python3 test.py -f tools/taosdemoTestQuery.py - +# nano support +python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py +python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.py +python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py +python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertTime_step.py # update @@ -363,15 +367,15 @@ python3 test.py -f alter/alter_keep.py python3 test.py -f alter/alter_cacheLastRow.py python3 ./test.py -f query/querySession.py python3 test.py -f alter/alter_create_exception.py - -# nano support -python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py -python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.py -python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py -python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertTime_step.py python3 ./test.py -f 
insert/flushwhiledrop.py #======================p4-end=============== python3 test.py -f tools/taosdemoAllTest/pytest.py + + + + + + From a960f0ff790a3fc868171cd927753c34e8625d1c Mon Sep 17 00:00:00 2001 From: markswang <792637585@qq.com> Date: Mon, 26 Jul 2021 18:38:37 +0800 Subject: [PATCH 10/46] [TD-5534]:fix the coverity high risk of client --- src/client/src/TSDBJNIConnector.c | 15 +++++++++-- src/client/src/tscParseLineProtocol.c | 39 ++++++++++++++++++++++++--- src/client/src/tscPrepare.c | 4 +-- src/client/src/tscSQLParser.c | 13 ++++++--- src/client/src/tscServer.c | 6 ++++- src/common/src/tname.c | 2 +- 6 files changed, 67 insertions(+), 12 deletions(-) diff --git a/src/client/src/TSDBJNIConnector.c b/src/client/src/TSDBJNIConnector.c index c9b00800e6..667a689979 100644 --- a/src/client/src/TSDBJNIConnector.c +++ b/src/client/src/TSDBJNIConnector.c @@ -728,6 +728,7 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_prepareStmtImp(J int32_t code = taos_stmt_prepare(pStmt, str, len); if (code != TSDB_CODE_SUCCESS) { jniError("jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code)); + free(str); return JNI_TDENGINE_ERROR; } @@ -919,6 +920,10 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setTableNameTagsI char* curTags = tagsData; TAOS_BIND *tagsBind = calloc(numOfTags, sizeof(TAOS_BIND)); + if (tagsBind == NULL) { + jniError("numOfTags:%d, alloc memory failed", numOfTags); + return JNI_OUT_OF_MEMORY; + } for(int32_t i = 0; i < numOfTags; ++i) { tagsBind[i].buffer_type = typeArray[i]; tagsBind[i].buffer = curTags; @@ -941,9 +946,10 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setTableNameTagsI if (code != TSDB_CODE_SUCCESS) { jniError("jobj:%p, conn:%p, code:%s", jobj, tsconn, tstrerror(code)); + free(tagsBind); return JNI_TDENGINE_ERROR; } - + free(tagsBind); return JNI_SUCCESS; } @@ -957,7 +963,10 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_insertLinesImp(J int numLines = (*env)->GetArrayLength(env, lines); char** c_lines = calloc(numLines, sizeof(char*)); - + if (c_lines == NULL) { + jniError("c_lines:%d, alloc memory failed", c_lines); + return JNI_OUT_OF_MEMORY; + } for (int i = 0; i < numLines; ++i) { jstring line = (jstring) ((*env)->GetObjectArrayElement(env, lines, i)); c_lines[i] = (char*)(*env)->GetStringUTFChars(env, line, 0); @@ -972,8 +981,10 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_insertLinesImp(J if (code != TSDB_CODE_SUCCESS) { jniError("jobj:%p, conn:%p, code:%s", jobj, taos, tstrerror(code)); + free(c_lines); return JNI_TDENGINE_ERROR; } + free(c_lines); return code; } \ No newline at end of file diff --git a/src/client/src/tscParseLineProtocol.c b/src/client/src/tscParseLineProtocol.c index d5883af7f6..f0e5017355 100644 --- a/src/client/src/tscParseLineProtocol.c +++ b/src/client/src/tscParseLineProtocol.c @@ -411,6 +411,11 @@ int32_t loadTableMeta(TAOS* taos, char* tableName, SSmlSTableSchema* schema) { taos_free_result(res); SSqlObj* pSql = calloc(1, sizeof(SSqlObj)); + if (pSql == NULL){ + tscError("failed to allocate memory, reason:%s", strerror(errno)); + code = TSDB_CODE_TSC_OUT_OF_MEMORY; + return code; + } pSql->pTscObj = taos; pSql->signature = pSql; pSql->fp = NULL; @@ -421,11 +426,13 @@ int32_t loadTableMeta(TAOS* taos, char* tableName, SSmlSTableSchema* schema) { if (tscValidateName(&tableToken) != TSDB_CODE_SUCCESS) { code = TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH; sprintf(pSql->cmd.payload, "table name is invalid"); + tscFreeSqlObj(pSql); return 
code; } SName sname = {0}; if ((code = tscSetTableFullName(&sname, &tableToken, pSql)) != TSDB_CODE_SUCCESS) { + tscFreeSqlObj(pSql); return code; } char fullTableName[TSDB_TABLE_FNAME_LEN] = {0}; @@ -607,6 +614,10 @@ static int32_t changeChildTableTagValue(TAOS* taos, const char* cTableName, cons static int32_t creatChildTableIfNotExists(TAOS* taos, const char* cTableName, const char* sTableName, SArray* tagsSchema, SArray* tagsBind) { size_t numTags = taosArrayGetSize(tagsSchema); char* sql = malloc(tsMaxSQLStringLen+1); + if (sql == NULL) { + tscError("malloc sql memory error"); + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } int freeBytes = tsMaxSQLStringLen + 1; sprintf(sql, "create table if not exists %s using %s", cTableName, sTableName); @@ -628,24 +639,31 @@ static int32_t creatChildTableIfNotExists(TAOS* taos, const char* cTableName, co tscDebug("create table : %s", sql); TAOS_STMT* stmt = taos_stmt_init(taos); + if (stmt == NULL) { + free(sql); + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } int32_t code; code = taos_stmt_prepare(stmt, sql, (unsigned long)strlen(sql)); free(sql); if (code != 0) { tscError("%s", taos_stmt_errstr(stmt)); + free(stmt); return code; } code = taos_stmt_bind_param(stmt, TARRAY_GET_START(tagsBind)); if (code != 0) { tscError("%s", taos_stmt_errstr(stmt)); + free(stmt); return code; } code = taos_stmt_execute(stmt); if (code != 0) { tscError("%s", taos_stmt_errstr(stmt)); + free(stmt); return code; } @@ -660,6 +678,11 @@ static int32_t creatChildTableIfNotExists(TAOS* taos, const char* cTableName, co static int32_t insertChildTableBatch(TAOS* taos, char* cTableName, SArray* colsSchema, SArray* rowsBind) { size_t numCols = taosArrayGetSize(colsSchema); char* sql = malloc(tsMaxSQLStringLen+1); + if (sql == NULL) { + tscError("malloc sql memory error"); + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + int32_t freeBytes = tsMaxSQLStringLen + 1 ; sprintf(sql, "insert into ? 
("); @@ -681,11 +704,15 @@ static int32_t insertChildTableBatch(TAOS* taos, char* cTableName, SArray* cols int32_t try = 0; TAOS_STMT* stmt = taos_stmt_init(taos); - + if (stmt == NULL) { + free(sql); + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } code = taos_stmt_prepare(stmt, sql, (unsigned long)strlen(sql)); free(sql); if (code != 0) { + free(stmt); tscError("%s", taos_stmt_errstr(stmt)); return code; } @@ -694,6 +721,7 @@ static int32_t insertChildTableBatch(TAOS* taos, char* cTableName, SArray* cols code = taos_stmt_set_tbname(stmt, cTableName); if (code != 0) { tscError("%s", taos_stmt_errstr(stmt)); + free(stmt); return code; } @@ -703,11 +731,13 @@ static int32_t insertChildTableBatch(TAOS* taos, char* cTableName, SArray* cols code = taos_stmt_bind_param(stmt, colsBinds); if (code != 0) { tscError("%s", taos_stmt_errstr(stmt)); + free(stmt); return code; } code = taos_stmt_add_batch(stmt); if (code != 0) { tscError("%s", taos_stmt_errstr(stmt)); + free(stmt); return code; } } @@ -1627,7 +1657,7 @@ static int32_t parseSmlTimeStamp(TAOS_SML_KV **pTS, const char **index) { static int32_t parseSmlKey(TAOS_SML_KV *pKV, const char **index) { const char *cur = *index; - char key[TSDB_COL_NAME_LEN]; + char key[TSDB_COL_NAME_LEN + 1]; // +1 to avoid 1685 line over write uint16_t len = 0; //key field cannot start with digit @@ -1704,7 +1734,10 @@ static int32_t parseSmlMeasurement(TAOS_SML_DATA_POINT *pSml, const char **index const char *cur = *index; uint16_t len = 0; - pSml->stableName = calloc(TSDB_TABLE_NAME_LEN, 1); + pSml->stableName = calloc(TSDB_TABLE_NAME_LEN + 1, 1); // +1 to avoid 1772 line over write + if (pSml->stableName == NULL){ + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } if (isdigit(*cur)) { tscError("Measurement field cannnot start with digit"); free(pSml->stableName); diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c index 7306523660..efdc24c899 100644 --- a/src/client/src/tscPrepare.c +++ b/src/client/src/tscPrepare.c @@ -1628,8 +1628,8 @@ int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags if (pStmt->mtb.subSet && taosHashGetSize(pStmt->mtb.pTableHash) > 0) { STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0); STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; - char sTableName[TSDB_TABLE_FNAME_LEN]; - strncpy(sTableName, pTableMeta->sTableName, sizeof(sTableName)); + char sTableName[TSDB_TABLE_FNAME_LEN + 1] = {0}; + strncpy(sTableName, pTableMeta->sTableName, sizeof(sTableName) - 1); SStrToken tname = {0}; tname.type = TK_STRING; diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 9a3b36895d..2d5a3d524e 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -421,7 +421,8 @@ int32_t readFromFile(char *name, uint32_t *len, void **buf) { tfree(*buf); return TSDB_CODE_TSC_APP_ERROR; } - + close(fd); + tfree(*buf); return TSDB_CODE_SUCCESS; } @@ -8110,7 +8111,8 @@ int32_t loadAllTableMeta(SSqlObj* pSql, struct SSqlInfo* pInfo) { assert(maxSize < 80 * TSDB_MAX_COLUMNS); if (!pSql->pBuf) { if (NULL == (pSql->pBuf = tcalloc(1, 80 * TSDB_MAX_COLUMNS))) { - return TSDB_CODE_TSC_OUT_OF_MEMORY; + code = TSDB_CODE_TSC_OUT_OF_MEMORY; + goto _end; } } pTableMeta = calloc(1, maxSize); @@ -8351,14 +8353,18 @@ static int32_t doValidateSubquery(SSqlNode* pSqlNode, int32_t index, SSqlObj* pS // create dummy table meta info STableMetaInfo* pTableMetaInfo1 = calloc(1, sizeof(STableMetaInfo)); + if (pTableMetaInfo1 == NULL) { + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } 
pTableMetaInfo1->pTableMeta = extractTempTableMetaFromSubquery(pSub); if (subInfo->aliasName.n > 0) { if (subInfo->aliasName.n >= TSDB_TABLE_FNAME_LEN) { + free(pTableMetaInfo1); return invalidOperationMsg(msgBuf, "subquery alias name too long"); } - strncpy(pTableMetaInfo1->aliasName, subInfo->aliasName.z, subInfo->aliasName.n); + strncpy(pTableMetaInfo1->aliasName, subInfo->aliasName.z, MIN(subInfo->aliasName.n, sizeof(pTableMetaInfo1->aliasName) - 1)); } taosArrayPush(pQueryInfo->pUpstream, &pSub); @@ -8368,6 +8374,7 @@ static int32_t doValidateSubquery(SSqlNode* pSqlNode, int32_t index, SSqlObj* pS STableMetaInfo** tmp = realloc(pQueryInfo->pTableMetaInfo, (pQueryInfo->numOfTables + 1) * POINTER_BYTES); if (tmp == NULL) { + free(pTableMetaInfo1); return TSDB_CODE_TSC_OUT_OF_MEMORY; } diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index f5d6765a5d..401e65efd4 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -164,7 +164,7 @@ static void tscUpdateVgroupInfo(SSqlObj *pSql, SRpcEpSet *pEpSet) { vgroupInfo.inUse = pEpSet->inUse; vgroupInfo.numOfEps = pEpSet->numOfEps; for (int32_t i = 0; i < vgroupInfo.numOfEps; i++) { - strncpy(vgroupInfo.ep[i].fqdn, pEpSet->fqdn[i], TSDB_FQDN_LEN); + strncpy(vgroupInfo.ep[i].fqdn, pEpSet->fqdn[i], TSDB_FQDN_LEN); // buffer not null terminated risk vgroupInfo.ep[i].port = pEpSet->port[i]; } @@ -2048,8 +2048,12 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) { assert(pTableMetaInfo->pTableMeta == NULL); STableMeta* pTableMeta = tscCreateTableMetaFromMsg(pMetaMsg); + if (pTableMeta == NULL){ + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } if (!tIsValidSchema(pTableMeta->schema, pTableMeta->tableInfo.numOfColumns, pTableMeta->tableInfo.numOfTags)) { tscError("0x%"PRIx64" invalid table meta from mnode, name:%s", pSql->self, tNameGetTableName(&pTableMetaInfo->name)); + free(pTableMeta); return TSDB_CODE_TSC_INVALID_VALUE; } diff --git a/src/common/src/tname.c b/src/common/src/tname.c index 26502c5d9c..5da48b2e9a 100644 --- a/src/common/src/tname.c +++ b/src/common/src/tname.c @@ -319,7 +319,7 @@ int32_t tNameGetDbName(const SName* name, char* dst) { int32_t tNameGetFullDbName(const SName* name, char* dst) { assert(name != NULL && dst != NULL); - snprintf(dst, TSDB_ACCT_ID_LEN + TS_PATH_DELIMITER_LEN + TSDB_DB_NAME_LEN, + snprintf(dst, TSDB_ACCT_ID_LEN + TS_PATH_DELIMITER_LEN + TSDB_DB_NAME_LEN, // there is a over write risk "%s.%s", name->acctId, name->dbname); return 0; } From 4833f5eaa3a5d5368dc86ef86be0667be7dc7ffe Mon Sep 17 00:00:00 2001 From: tickduan <417921451@qq.com> Date: Tue, 27 Jul 2021 10:22:29 +0800 Subject: [PATCH 11/46] metrics_del.sim 23:50 insert two days bug fixed --- tests/script/general/stream/metrics_del.sim | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/script/general/stream/metrics_del.sim b/tests/script/general/stream/metrics_del.sim index 321658cd8d..6cc3da71e9 100644 --- a/tests/script/general/stream/metrics_del.sim +++ b/tests/script/general/stream/metrics_del.sim @@ -34,11 +34,11 @@ while $i < $tbNum $tb = $tbPrefix . $i sql create table $tb using $mt tags( $i ) - $x = -1440 + $x = 0 $y = 0 while $y < $rowNum - $ms = $x . 
m - sql insert into $tb values (now $ms , $y , $y ) + $ts = 1626710400000 + $x + sql insert into $tb values ($ts , $y , $y ) $x = $x + 1 $y = $y + 1 endw From 74fa1114f314738539819e519d5e56daab909bf4 Mon Sep 17 00:00:00 2001 From: wenzhouwww Date: Tue, 27 Jul 2021 11:06:04 +0800 Subject: [PATCH 12/46] [TD-5369] change the nums of tables and insert rows! --- .../taosdemoTestNanoDatabase.json | 8 ++--- .../taosdemoTestNanoDatabaseNow.json | 2 +- .../taosdemoTestSupportNanoInsert.py | 36 +++++++++---------- .../taosdemoTestSupportNanoQuery.py | 22 ++++++------ .../taosdemoTestSupportNanoQuerycsv.json | 2 +- .../taosdemoTestSupportNanoSubscribe.json | 2 +- .../taosdemoTestSupportNanosubscribe.py | 6 ++-- 7 files changed, 38 insertions(+), 40 deletions(-) diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabase.json b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabase.json index 9010415fe6..246f1c35f2 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabase.json +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabase.json @@ -35,13 +35,13 @@ "super_tables": [{ "name": "stb0", "child_table_exists":"no", - "childtable_count": 1000, + "childtable_count": 100, "childtable_prefix": "tb0_", "auto_create_table": "no", "batch_create_tbl_num": 20, "data_source": "rand", "insert_mode": "taosc", - "insert_rows": 10000, + "insert_rows": 100, "childtable_offset":0, "multi_thread_write_one_tbl": "no", "insert_interval":0, @@ -61,13 +61,13 @@ { "name": "stb1", "child_table_exists":"no", - "childtable_count": 1000, + "childtable_count": 100, "childtable_prefix": "tb1_", "auto_create_table": "no", "batch_create_tbl_num": 20, "data_source": "rand", "insert_mode": "taosc", - "insert_rows": 10000, + "insert_rows": 100, "childtable_offset":0, "multi_thread_write_one_tbl": "no", "insert_interval":0, diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseNow.json b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseNow.json index d2542a0eba..f36b1f9b4c 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseNow.json +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseNow.json @@ -35,7 +35,7 @@ "super_tables": [{ "name": "stb0", "child_table_exists":"no", - "childtable_count": 1000, + "childtable_count": 100, "childtable_prefix": "tb0_", "auto_create_table": "no", "batch_create_tbl_num": 20, diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py index 88a917da85..266a8fa712 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py @@ -56,46 +56,47 @@ class TDTestCase: os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoTestNanoDatabase.json -y " % binPath) tdSql.execute("use nsdb") tdSql.query("show stables") - tdSql.checkData(0, 4, 1000) + tdSql.checkData(0, 4, 100) tdSql.query("select count (tbname) from stb0") - tdSql.checkData(0, 0, 1000) + tdSql.checkData(0, 0, 100) tdSql.query("select count(*) from tb0_0") - tdSql.checkData(0, 0, 10000) + tdSql.checkData(0, 0, 100) tdSql.query("select count(*) from stb0") - tdSql.checkData(0, 0, 10000000) + tdSql.checkData(0, 0, 10000) tdSql.query("describe stb0") tdSql.checkDataType(9, 1,"TIMESTAMP") tdSql.query("select last(ts) from stb0") - tdSql.getData(0, 0) + tdSql.checkData(0, 0,"2021-07-01 00:00:00.990000000") # check stable stb1 which is insert with disord 
tdSql.query("select count (tbname) from stb1") - tdSql.checkData(0, 0, 1000) + tdSql.checkData(0, 0, 100) tdSql.query("select count(*) from tb1_0") - tdSql.checkData(0, 0, 10000) + tdSql.checkData(0, 0, 100) tdSql.query("select count(*) from stb1") - tdSql.checkData(0, 0, 10000000) + tdSql.checkData(0, 0, 10000) # check c8 is an nano timestamp tdSql.query("describe stb1") tdSql.checkDataType(9, 1,"TIMESTAMP") # check insert timestamp_step is nano_second tdSql.query("select last(ts) from stb1") - tdSql.checkData(0, 0,"2021-07-01 00:01:39.990000000") - + tdSql.checkData(0, 0,"2021-07-01 00:00:00.990000000") + # insert data from now time # check stable stb0 os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoTestNanoDatabaseNow.json -y " % binPath) + tdSql.execute("use nsdb2") tdSql.query("show stables") - tdSql.checkData(0, 4, 1000) + tdSql.checkData(0, 4, 100) tdSql.query("select count (tbname) from stb0") - tdSql.checkData(0, 0, 1000) + tdSql.checkData(0, 0, 100) tdSql.query("select count(*) from tb0_0") tdSql.checkData(0, 0, 100) tdSql.query("select count(*) from stb0") - tdSql.checkData(0, 0, 100000) + tdSql.checkData(0, 0, 10000) # check c8 is an nano timestamp tdSql.query("describe stb0") tdSql.checkDataType(9,1,"TIMESTAMP") @@ -117,16 +118,11 @@ class TDTestCase: os.system("rm -rf ./insert_res.txt") os.system("rm -rf tools/taosdemoAllTest/taosdemoTestSupportNano*.py.sql") + # taosdemo test insert with command and parameter , detals show taosdemo --help - - os.system("%staosdemo -u root -P taosdata -p 6030 -h vm84 -a 1 -m pre -n 10 -T 20 -t 60 -o res.txt -y " % binPath) + os.system("%staosdemo -u root -P taosdata -p 6030 -a 1 -m pre -n 10 -T 20 -t 60 -o res.txt -y " % binPath) tdSql.query("select count(*) from test.meters") tdSql.checkData(0, 0, 600) - - os.system("%staosdemo -u root -P taosdata -p 6030 -h vm84 -a 1 -m pre -n 10 -T 20 -t 60 -o res.txt -y " % binPath) - tdSql.query("select count(*) from test.meters") - tdSql.checkData(0, 0, 600) - # check taosdemo -s sqls_ls = ['drop database if exists nsdbsql;','create database nsdbsql precision "ns" keep 36 days 6 update 1;', diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.py index 21a7037ce6..5a37cf9c7c 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.py +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.py @@ -55,15 +55,15 @@ class TDTestCase: # use where to filter - tdSql.query("select count(*) from stb0 where ts>\"2021-07-01 00:01:00.000000000 \" ;") - tdSql.checkData(0, 0, 3999000) - tdSql.query("select count(*) from stb0 where ts>\"2021-07-01 00:01:00.000000000\" and ts <=\"2021-07-01 00:01:10.000000000\" ;") - tdSql.checkData(0, 0, 1000000) + tdSql.query("select count(*) from stb0 where ts>\"2021-07-01 00:00:00.590000000 \" ") + tdSql.checkData(0, 0, 4000) + tdSql.query("select count(*) from stb0 where ts>\"2021-07-01 00:00:00.000000000\" and ts <=\"2021-07-01 00:00:00.590000000\" ") + tdSql.checkData(0, 0, 5900) - tdSql.query("select count(*) from tb0_0 where ts>\"2021-07-01 00:01:00.000000000 \" ;") - tdSql.checkData(0, 0, 3999) - tdSql.query("select count(*) from tb0_0 where ts>\"2021-07-01 00:01:00.000000000\" and ts <=\"2021-07-01 00:01:10.000000000\" ;") - tdSql.checkData(0, 0, 1000) + tdSql.query("select count(*) from tb0_0 where ts>\"2021-07-01 00:00:00.590000000 \" ;") + tdSql.checkData(0, 0, 40) + tdSql.query("select count(*) from tb0_0 where ts>\"2021-07-01 
00:00:00.000000000\" and ts <=\"2021-07-01 00:00:00.590000000\" ") + tdSql.checkData(0, 0, 59) # select max min avg from special col @@ -87,8 +87,8 @@ class TDTestCase: print( "select avg(c1) from tb0_0 : " , tdSql.getData(0, 0)) tdSql.query("select count(*) from stb0 group by tbname;") - tdSql.checkData(0, 0, 10000) - tdSql.checkData(100, 0, 10000) + tdSql.checkData(0, 0, 100) + tdSql.checkData(10, 0, 100) # query : query above sqls by taosdemo and continuously @@ -105,7 +105,7 @@ class TDTestCase: tdSql.checkDataType(3, 1, "TIMESTAMP") tdSql.query("select count(*) from stb0 where ts >\"2021-07-01 00:00:00.490000000\"") tdSql.checkData(0, 0, 5000) - tdSql.query("select count(*) from stb0 where ts >now -22d-1h-3s") + tdSql.query("select count(*) from stb0 where ts now -22d-1h-3s ;", + "sql": "select count(*) from stb0 where ts < now -22d-1h-3s ;", "result": "./query_res1.txt" }, { diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoSubscribe.json b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoSubscribe.json index 26d405b65b..1cc834164e 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoSubscribe.json +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoSubscribe.json @@ -20,7 +20,7 @@ "result": "./subscribe_res0.txt" }, { - "sql": "select * from stb0 where ts > now -20d-1h-3s ;", + "sql": "select * from stb0 where ts < now -2d-1h-3s ;", "result": "./subscribe_res1.txt" }, { diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py index f6324577c1..6dcea6e7e0 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py @@ -79,7 +79,7 @@ class TDTestCase: # merge result files - sleep(20) + sleep(10) os.system("cat subscribe_res0.txt* > all_subscribe_res0.txt") os.system("cat subscribe_res1.txt* > all_subscribe_res1.txt") os.system("cat subscribe_res2.txt* > all_subscribe_res2.txt") @@ -90,7 +90,7 @@ class TDTestCase: self.assertCheck("all_subscribe_res0.txt",subTimes0 ,200) subTimes1 = self.subTimes("all_subscribe_res1.txt") - self.assertCheck("all_subscribe_res1.txt",subTimes1 ,0) + self.assertCheck("all_subscribe_res1.txt",subTimes1 ,200) subTimes2 = self.subTimes("all_subscribe_res2.txt") self.assertCheck("all_subscribe_res2.txt",subTimes2 ,200) @@ -103,6 +103,7 @@ class TDTestCase: os.system("cat subscribe_res0.txt* > all_subscribe_res0.txt") subTimes0 = self.subTimes("all_subscribe_res0.txt") + print("pass") self.assertCheck("all_subscribe_res0.txt",subTimes0 ,202) @@ -112,6 +113,7 @@ class TDTestCase: sleep(3) os.system("rm -rf ./subscribe_res*") os.system("rm -rf ./all_subscribe*") + os.system("rm -rf ./*.py.sql") From 8aa0a37392c978a1f93448d839bc732e93c3d5c1 Mon Sep 17 00:00:00 2001 From: tomchon Date: Tue, 27 Jul 2021 11:40:47 +0800 Subject: [PATCH 13/46] [TD-4432]: add taodemo-testcase that using stmt interface --- .../taosdemoTestInsertWithJsonStmt.py | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJsonStmt.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJsonStmt.py index cce6c83a07..0aade43183 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJsonStmt.py +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJsonStmt.py @@ -244,17 +244,17 @@ class TDTestCase: tdSql.query("select 
count(*) from stb1") tdSql.checkData(0, 0, 10) - # # insert: sample json - # os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insert-sample-stmt.json -y " % binPath) - # tdSql.execute("use dbtest123") - # tdSql.query("select c2 from stb0") - # tdSql.checkData(0, 0, 2147483647) - # tdSql.query("select * from stb1 where t1=-127") - # tdSql.checkRows(20) - # tdSql.query("select * from stb1 where t2=127") - # tdSql.checkRows(10) - # tdSql.query("select * from stb1 where t2=126") - # tdSql.checkRows(10) + # insert: sample json + os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insert-sample-stmt.json -y " % binPath) + tdSql.execute("use dbtest123") + tdSql.query("select c2 from stb0") + tdSql.checkData(0, 0, 2147483647) + tdSql.query("select * from stb1 where t1=-127") + tdSql.checkRows(20) + tdSql.query("select * from stb1 where t2=127") + tdSql.checkRows(10) + tdSql.query("select * from stb1 where t2=126") + tdSql.checkRows(10) # insert: test interlace parament os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insert-interlace-row-stmt.json -y " % binPath) From eaed6da8cede10b8960d69791950a9731735ab78 Mon Sep 17 00:00:00 2001 From: markswang <792637585@qq.com> Date: Tue, 27 Jul 2021 13:57:22 +0800 Subject: [PATCH 14/46] [TD-5534]:fix the coverity high risk of client --- src/client/src/tscGlobalmerge.c | 7 ++++++- src/client/src/tscSql.c | 10 +++++++++- src/client/src/tscSubquery.c | 3 ++- src/client/src/tscUtil.c | 8 ++++++-- src/query/inc/qExecutor.h | 1 + src/query/src/qExecutor.c | 23 ++++++++++++----------- src/query/src/qPlan.c | 4 ++++ 7 files changed, 40 insertions(+), 16 deletions(-) diff --git a/src/client/src/tscGlobalmerge.c b/src/client/src/tscGlobalmerge.c index 8ca8c688f0..9c9bac5951 100644 --- a/src/client/src/tscGlobalmerge.c +++ b/src/client/src/tscGlobalmerge.c @@ -135,7 +135,7 @@ int32_t tscCreateGlobalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, t SLocalDataSource *ds = (SLocalDataSource *)malloc(sizeof(SLocalDataSource) + pMemBuffer[0]->pageSize); if (ds == NULL) { tscError("0x%"PRIx64" failed to create merge structure", id); - tfree(pMerger); + tfree(*pMerger); return TSDB_CODE_TSC_OUT_OF_MEMORY; } @@ -443,6 +443,10 @@ int32_t tscCreateGlobalMergerEnv(SQueryInfo *pQueryInfo, tExtMemBuffer ***pMemBu } pModel = createColumnModel(pSchema, (int32_t)size, capacity); + if (pModel == NULL){ + tfree(pSchema); + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } tfree(pSchema); int32_t pg = DEFAULT_PAGE_SIZE; @@ -458,6 +462,7 @@ int32_t tscCreateGlobalMergerEnv(SQueryInfo *pQueryInfo, tExtMemBuffer ***pMemBu } if (createOrderDescriptor(pOrderDesc, pQueryInfo, pModel) != TSDB_CODE_SUCCESS) { + tfree(pModel); return TSDB_CODE_TSC_OUT_OF_MEMORY; } diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c index 7675bded65..b6e2604bd2 100644 --- a/src/client/src/tscSql.c +++ b/src/client/src/tscSql.c @@ -963,8 +963,14 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) { strtolower(str, tableNameList); SArray* plist = taosArrayInit(4, POINTER_BYTES); + if (plist == NULL) { + tfree(str); + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + SArray* vgroupList = taosArrayInit(4, POINTER_BYTES); - if (plist == NULL || vgroupList == NULL) { + if (vgroupList == NULL) { + taosArrayDestroy(plist); tfree(str); return TSDB_CODE_TSC_OUT_OF_MEMORY; } @@ -980,6 +986,8 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) { if (code != TSDB_CODE_SUCCESS) { tscFreeSqlObj(pSql); + taosArrayDestroyEx(plist); + taosArrayDestroyEx(vgroupList); return code; } diff --git 
a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index b72bd78b1b..73794a3aa4 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2476,8 +2476,9 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) { pState->states = calloc(pState->numOfSub, sizeof(*pState->states)); if (pState->states == NULL) { pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY; + tscDestroyGlobalMergerEnv(pMemoryBuf, pDesc,pState->numOfSub); + tscAsyncResultOnError(pSql); - tfree(pMemoryBuf); return ret; } diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 0d69fe173f..08950b93e3 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -1246,6 +1246,10 @@ void handleDownstreamOperator(SSqlObj** pSqlObjList, int32_t numOfUpstream, SQue } pSourceOperator = createJoinOperatorInfo(p, px->numOfTables, schema, num); + + for(int32_t i = 0; i < px->numOfTables; ++i) { + destroyOperatorInfo(p[i]); + } tfree(p); } else { size_t num = taosArrayGetSize(px->colList); @@ -4368,7 +4372,7 @@ int32_t tscCreateTableMetaFromSTableMeta(STableMeta* pChild, const char* name, v pChild->sversion = p->sversion; pChild->tversion = p->tversion; - memcpy(&pChild->tableInfo, &p->tableInfo, sizeof(STableInfo)); + memcpy(&pChild->tableInfo, &p->tableInfo, sizeof(STableComInfo)); int32_t total = pChild->tableInfo.numOfColumns + pChild->tableInfo.numOfTags; memcpy(pChild->schema, p->schema, sizeof(SSchema) *total); @@ -4719,7 +4723,7 @@ static int32_t doAddTableName(char* nextStr, char** str, SArray* pNameArray, SSq int32_t len = 0; if (nextStr == NULL) { - strncpy(tablename, *str, TSDB_TABLE_FNAME_LEN); + strncpy(tablename, *str, TSDB_TABLE_FNAME_LEN - 1); len = (int32_t) strlen(tablename); } else { len = (int32_t)(nextStr - (*str)); diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h index 4581ba258d..71e9792bd8 100644 --- a/src/query/inc/qExecutor.h +++ b/src/query/inc/qExecutor.h @@ -578,6 +578,7 @@ void doCompactSDataBlock(SSDataBlock* pBlock, int32_t numOfRows, int8_t* p); SSDataBlock* createOutputBuf(SExprInfo* pExpr, int32_t numOfOutput, int32_t numOfRows); void* destroyOutputBuf(SSDataBlock* pBlock); void* doDestroyFilterInfo(SSingleColumnFilterInfo* pFilterInfo, int32_t numOfFilterCols); +void destroyOperatorInfo(SOperatorInfo* pOperator); void setInputDataBlock(SOperatorInfo* pOperator, SQLFunctionCtx* pCtx, SSDataBlock* pBlock, int32_t order); int32_t getNumOfResult(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx* pCtx, int32_t numOfOutput); diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 982c45c441..7e3e542f5e 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -191,8 +191,6 @@ static void destroyTagScanOperatorInfo(void* param, int32_t numOfOutput); static void destroySWindowOperatorInfo(void* param, int32_t numOfOutput); static void destroyStateWindowOperatorInfo(void* param, int32_t numOfOutput); static void destroyAggOperatorInfo(void* param, int32_t numOfOutput); -static void destroyOperatorInfo(SOperatorInfo* pOperator); - static int32_t doCopyToSDataBlock(SQueryRuntimeEnv* pRuntimeEnv, SGroupResInfo* pGroupResInfo, int32_t orderType, SSDataBlock* pBlock); @@ -3563,6 +3561,7 @@ STableQueryInfo* createTmpTableQueryInfo(STimeWindow win) { int32_t initialSize = 16; int32_t code = initResultRowInfo(&pTableQueryInfo->resInfo, initialSize, TSDB_DATA_TYPE_INT); if (code != TSDB_CODE_SUCCESS) { + tfree(pTableQueryInfo); return NULL; } @@ -5945,7 +5944,7 @@ static int32_t getNumOfScanTimes(SQueryAttr* pQueryAttr) { 
return 1; } -static void destroyOperatorInfo(SOperatorInfo* pOperator) { +void destroyOperatorInfo(SOperatorInfo* pOperator) { if (pOperator == NULL) { return; } @@ -7241,9 +7240,7 @@ void destroyUdfInfo(SUdfInfo* pUdfInfo) { tfree(pUdfInfo); } -static char* getUdfFuncName(char* name, int type) { - char* funcname = calloc(1, TSDB_FUNCTIONS_NAME_MAX_LENGTH + 10); - +static char* getUdfFuncName(char* funcname, char* name, int type) { switch (type) { case TSDB_UDF_FUNC_NORMAL: strcpy(funcname, name); @@ -7314,19 +7311,20 @@ int32_t initUdfInfo(SUdfInfo* pUdfInfo) { return TSDB_CODE_QRY_SYS_ERROR; } - pUdfInfo->funcs[TSDB_UDF_FUNC_NORMAL] = taosLoadSym(pUdfInfo->handle, getUdfFuncName(pUdfInfo->name, TSDB_UDF_FUNC_NORMAL)); + char funcname[TSDB_FUNCTIONS_NAME_MAX_LENGTH + 10] = {0}; + pUdfInfo->funcs[TSDB_UDF_FUNC_NORMAL] = taosLoadSym(pUdfInfo->handle, getUdfFuncName(funcname, pUdfInfo->name, TSDB_UDF_FUNC_NORMAL)); if (NULL == pUdfInfo->funcs[TSDB_UDF_FUNC_NORMAL]) { return TSDB_CODE_QRY_SYS_ERROR; } - pUdfInfo->funcs[TSDB_UDF_FUNC_INIT] = taosLoadSym(pUdfInfo->handle, getUdfFuncName(pUdfInfo->name, TSDB_UDF_FUNC_INIT)); + pUdfInfo->funcs[TSDB_UDF_FUNC_INIT] = taosLoadSym(pUdfInfo->handle, getUdfFuncName(funcname, pUdfInfo->name, TSDB_UDF_FUNC_INIT)); if (pUdfInfo->funcType == TSDB_UDF_TYPE_AGGREGATE) { - pUdfInfo->funcs[TSDB_UDF_FUNC_FINALIZE] = taosLoadSym(pUdfInfo->handle, getUdfFuncName(pUdfInfo->name, TSDB_UDF_FUNC_FINALIZE)); - pUdfInfo->funcs[TSDB_UDF_FUNC_MERGE] = taosLoadSym(pUdfInfo->handle, getUdfFuncName(pUdfInfo->name, TSDB_UDF_FUNC_MERGE)); + pUdfInfo->funcs[TSDB_UDF_FUNC_FINALIZE] = taosLoadSym(pUdfInfo->handle, getUdfFuncName(funcname, pUdfInfo->name, TSDB_UDF_FUNC_FINALIZE)); + pUdfInfo->funcs[TSDB_UDF_FUNC_MERGE] = taosLoadSym(pUdfInfo->handle, getUdfFuncName(funcname, pUdfInfo->name, TSDB_UDF_FUNC_MERGE)); } - pUdfInfo->funcs[TSDB_UDF_FUNC_DESTROY] = taosLoadSym(pUdfInfo->handle, getUdfFuncName(pUdfInfo->name, TSDB_UDF_FUNC_DESTROY)); + pUdfInfo->funcs[TSDB_UDF_FUNC_DESTROY] = taosLoadSym(pUdfInfo->handle, getUdfFuncName(funcname, pUdfInfo->name, TSDB_UDF_FUNC_DESTROY)); if (pUdfInfo->funcs[TSDB_UDF_FUNC_INIT]) { return (*(udfInitFunc)pUdfInfo->funcs[TSDB_UDF_FUNC_INIT])(&pUdfInfo->init); @@ -7398,10 +7396,12 @@ int32_t createQueryFunc(SQueriedTableInfo* pTableInfo, int32_t numOfOutput, SExp int32_t j = getColumnIndexInSource(pTableInfo, &pExprs[i].base, pTagCols); if (TSDB_COL_IS_TAG(pExprs[i].base.colInfo.flag)) { if (j < TSDB_TBNAME_COLUMN_INDEX || j >= pTableInfo->numOfTags) { + tfree(pExprs); return TSDB_CODE_QRY_INVALID_MSG; } } else { if (j < PRIMARYKEY_TIMESTAMP_COL_INDEX || j >= pTableInfo->numOfCols) { + tfree(pExprs); return TSDB_CODE_QRY_INVALID_MSG; } } @@ -7421,6 +7421,7 @@ int32_t createQueryFunc(SQueriedTableInfo* pTableInfo, int32_t numOfOutput, SExp int32_t ret = cloneExprFilterInfo(&pExprs[i].base.flist.filterInfo, pExprMsg[i]->flist.filterInfo, pExprMsg[i]->flist.numOfFilters); if (ret) { + tfree(pExprs); return ret; } } diff --git a/src/query/src/qPlan.c b/src/query/src/qPlan.c index e724b0418c..2772e76abe 100644 --- a/src/query/src/qPlan.c +++ b/src/query/src/qPlan.c @@ -222,6 +222,7 @@ SArray* createQueryPlanImpl(SQueryInfo* pQueryInfo) { if (pQueryInfo->numOfTables > 1) { // it is a join query // 1. 
separate the select clause according to table + taosArrayDestroy(upstream); upstream = taosArrayInit(5, POINTER_BYTES); for(int32_t i = 0; i < pQueryInfo->numOfTables; ++i) { @@ -231,6 +232,7 @@ SArray* createQueryPlanImpl(SQueryInfo* pQueryInfo) { SArray* exprList = taosArrayInit(4, POINTER_BYTES); if (tscExprCopy(exprList, pQueryInfo->exprList, uid, true) != 0) { terrno = TSDB_CODE_TSC_OUT_OF_MEMORY; + tscExprDestroy(exprList); exit(-1); } @@ -245,6 +247,8 @@ SArray* createQueryPlanImpl(SQueryInfo* pQueryInfo) { // 4. add the projection query node SQueryNode* pNode = doAddTableColumnNode(pQueryInfo, pTableMetaInfo, &info, exprList, tableColumnList); + tscColumnListDestroy(tableColumnList); + tscExprDestroy(exprList); taosArrayPush(upstream, &pNode); } From 3e96e6452179255d88443f205a2c6e1dd352bea3 Mon Sep 17 00:00:00 2001 From: markswang <792637585@qq.com> Date: Tue, 27 Jul 2021 17:05:37 +0800 Subject: [PATCH 15/46] [TD-5534]:fix the coverity medium risk of client --- src/client/src/tscAsync.c | 3 --- src/client/src/tscLocal.c | 8 ++++++-- src/client/src/tscParseInsert.c | 2 +- src/client/src/tscParseLineProtocol.c | 4 ---- src/client/src/tscPrepare.c | 4 +++- src/client/src/tscSQLParser.c | 16 +++++++++------- src/client/src/tscServer.c | 9 +++++++-- src/client/src/tscStream.c | 2 +- src/client/src/tscSubquery.c | 3 +++ src/client/src/tscUtil.c | 17 ++++------------- src/query/inc/qTableMeta.h | 2 +- src/query/src/qExecutor.c | 11 ++++++++--- src/query/src/qPlan.c | 2 +- src/query/src/qResultbuf.c | 5 +++-- src/query/src/qScript.c | 8 +++++--- src/query/src/qSqlParser.c | 24 +++++++++++++++--------- src/query/src/qTableMeta.c | 2 +- src/query/src/queryMain.c | 2 +- src/util/inc/tlist.h | 2 +- 19 files changed, 70 insertions(+), 56 deletions(-) diff --git a/src/client/src/tscAsync.c b/src/client/src/tscAsync.c index d857d00e15..3c5036cc1a 100644 --- a/src/client/src/tscAsync.c +++ b/src/client/src/tscAsync.c @@ -493,9 +493,6 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { return; } - taosReleaseRef(tscObjRef, pSql->self); - return; - _error: pRes->code = code; tscAsyncResultOnError(pSql); diff --git a/src/client/src/tscLocal.c b/src/client/src/tscLocal.c index d1a325be35..e49f4e33ae 100644 --- a/src/client/src/tscLocal.c +++ b/src/client/src/tscLocal.c @@ -851,15 +851,19 @@ static int32_t tscProcessServStatus(SSqlObj *pSql) { SSqlObj* pHb = (SSqlObj*)taosAcquireRef(tscObjRef, pObj->hbrid); if (pHb != NULL) { pSql->res.code = pHb->res.code; - taosReleaseRef(tscObjRef, pObj->hbrid); } if (pSql->res.code == TSDB_CODE_RPC_NETWORK_UNAVAIL) { + taosReleaseRef(tscObjRef, pObj->hbrid); return pSql->res.code; } - pSql->res.code = checkForOnlineNode(pHb); + if (pHb != NULL) { + pSql->res.code = checkForOnlineNode(pHb); + } + if (pSql->res.code == TSDB_CODE_RPC_NETWORK_UNAVAIL) { + taosReleaseRef(tscObjRef, pObj->hbrid); return pSql->res.code; } diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c index f24f7a7ecb..8ecc58eb55 100644 --- a/src/client/src/tscParseInsert.c +++ b/src/client/src/tscParseInsert.c @@ -2105,7 +2105,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow pParentSql->fp = pParentSql->fetchFp; // all data has been sent to vnode, call user function - int32_t v = (code != TSDB_CODE_SUCCESS) ? 
code : (int32_t)pParentSql->res.numOfRows; + int32_t v = (int32_t)pParentSql->res.numOfRows; (*pParentSql->fp)(pParentSql->param, pParentSql, v); return; } diff --git a/src/client/src/tscParseLineProtocol.c b/src/client/src/tscParseLineProtocol.c index f0e5017355..4b5416c6b9 100644 --- a/src/client/src/tscParseLineProtocol.c +++ b/src/client/src/tscParseLineProtocol.c @@ -438,10 +438,6 @@ int32_t loadTableMeta(TAOS* taos, char* tableName, SSmlSTableSchema* schema) { char fullTableName[TSDB_TABLE_FNAME_LEN] = {0}; memset(fullTableName, 0, tListLen(fullTableName)); tNameExtractFullName(&sname, fullTableName); - if (code != TSDB_CODE_SUCCESS) { - tscFreeSqlObj(pSql); - return code; - } tscFreeSqlObj(pSql); schema->tags = taosArrayInit(8, sizeof(SSchema)); diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c index efdc24c899..9ff3a0e2dd 100644 --- a/src/client/src/tscPrepare.c +++ b/src/client/src/tscPrepare.c @@ -1773,7 +1773,9 @@ int taos_stmt_close(TAOS_STMT* stmt) { } tscDestroyDataBlock(pStmt->mtb.lastBlock, rmMeta); pStmt->mtb.pTableBlockHashList = tscDestroyBlockHashTable(pStmt->mtb.pTableBlockHashList, rmMeta); - taosHashCleanup(pStmt->pSql->cmd.insertParam.pTableBlockHashList); + if (pStmt->pSql){ + taosHashCleanup(pStmt->pSql->cmd.insertParam.pTableBlockHashList); + } pStmt->pSql->cmd.insertParam.pTableBlockHashList = NULL; taosArrayDestroy(pStmt->mtb.tags); tfree(pStmt->mtb.sqlstr); diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 2d5a3d524e..1fac9c9a40 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -6860,7 +6860,8 @@ static int32_t doAddGroupbyColumnsOnDemand(SSqlCmd* pCmd, SQueryInfo* pQueryInfo tagSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta); } - SSchema* s = NULL; + SSchema tmp = {.type = 0, .name = "", .colId = 0, .bytes = 0}; + SSchema* s = &tmp; for (int32_t i = 0; i < pQueryInfo->groupbyExpr.numOfGroupCols; ++i) { SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, i); @@ -6870,7 +6871,9 @@ static int32_t doAddGroupbyColumnsOnDemand(SSqlCmd* pCmd, SQueryInfo* pQueryInfo s = tGetTbnameColumnSchema(); } else { if (TSDB_COL_IS_TAG(pColIndex->flag)) { - s = &tagSchema[colIndex]; + if(tagSchema){ + s = &tagSchema[colIndex]; + } } else { s = &pSchema[colIndex]; } @@ -8276,17 +8279,16 @@ static int32_t doLoadAllTableMeta(SSqlObj* pSql, SQueryInfo* pQueryInfo, SSqlNod const char* name = tNameGetTableName(&pTableMetaInfo->name); STableMetaVgroupInfo* p = taosHashGet(pCmd->pTableMetaMap, name, strlen(name)); - + if(!p){ + tscError("taosHashGet meta null. 
name:%s", name); + return TSDB_CODE_TSC_APP_ERROR; + } pTableMetaInfo->pTableMeta = tscTableMetaDup(p->pTableMeta); assert(pTableMetaInfo->pTableMeta != NULL); if (p->pVgroupInfo != NULL) { pTableMetaInfo->vgroupList = tscVgroupsInfoDup(p->pVgroupInfo); } - - if (code != TSDB_CODE_SUCCESS) { - return code; - } } return TSDB_CODE_SUCCESS; diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 401e65efd4..0748780e1a 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -691,7 +691,9 @@ static char *doSerializeTableInfo(SQueryTableMsg *pQueryMsg, SSqlObj *pSql, STab tscDumpEpSetFromVgroupInfo(&pSql->epSet, &vgroupInfo); } - pSql->epSet.inUse = rand()%pSql->epSet.numOfEps; + if (pSql->epSet.numOfEps > 0){ + pSql->epSet.inUse = rand()%pSql->epSet.numOfEps; + } pQueryMsg->head.vgId = htonl(vgId); STableIdInfo *pTableIdInfo = (STableIdInfo *)pMsg; @@ -968,7 +970,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { } } - if (query.numOfTags > 0) { + if (query.numOfTags > 0 && query.tagColList != NULL) { for (int32_t i = 0; i < query.numOfTags; ++i) { SColumnInfo* pTag = &query.tagColList[i]; @@ -2351,6 +2353,9 @@ int tscProcessSTableVgroupRsp(SSqlObj *pSql) { break; } + if (!pInfo){ + continue; + } int32_t size = 0; pInfo->vgroupList = createVgroupInfoFromMsg(pMsg, &size, pSql->self); pMsg += size; diff --git a/src/client/src/tscStream.c b/src/client/src/tscStream.c index da5bdf669f..a4d314cb42 100644 --- a/src/client/src/tscStream.c +++ b/src/client/src/tscStream.c @@ -271,7 +271,7 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf if (pSql == NULL || numOfRows < 0) { int64_t retryDelayTime = tscGetRetryDelayTime(pStream, pStream->interval.sliding, pStream->precision); - tscError("0x%"PRIx64" stream:%p, retrieve data failed, code:0x%08x, retry in %" PRId64 " ms", pSql->self, pStream, numOfRows, retryDelayTime); + tscError("stream:%p, retrieve data failed, code:0x%08x, retry in %" PRId64 " ms", pStream, numOfRows, retryDelayTime); tscSetRetryTimer(pStream, pStream->pSql, retryDelayTime); return; diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 73794a3aa4..bef5992d53 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2723,6 +2723,9 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO } static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* pSql) { + if (trsupport->pExtMemBuffer == NULL){ + return; + } int32_t idx = trsupport->subqueryIndex; SSqlObj * pParentSql = trsupport->pParentSql; tOrderDescriptor *pDesc = trsupport->pOrderDescriptor; diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 08950b93e3..7e36cce50e 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -275,16 +275,6 @@ bool tscIsProjectionQuery(SQueryInfo* pQueryInfo) { f != TSDB_FUNC_DERIVATIVE) { return false; } - - if (f < 0) { - SUdfInfo* pUdfInfo = taosArrayGet(pQueryInfo->pUdfInfo, -1 * f - 1); - if (pUdfInfo->funcType == TSDB_UDF_TYPE_AGGREGATE) { - return false; - } - - continue; - } - } return true; @@ -3420,7 +3410,7 @@ STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, SName* name, STableM return NULL; } - if (pTagCols != NULL) { + if (pTagCols != NULL && pTableMetaInfo->pTableMeta != NULL) { tscColumnListCopy(pTableMetaInfo->tagColList, pTagCols, pTableMetaInfo->pTableMeta->id.uid); } @@ -3851,7 +3841,8 @@ void executeQuery(SSqlObj* pSql, SQueryInfo* pQueryInfo) { SSqlObj* pNew = 
(SSqlObj*)calloc(1, sizeof(SSqlObj)); if (pNew == NULL) { terrno = TSDB_CODE_TSC_OUT_OF_MEMORY; - // return NULL; + tscError("pNew == NULL, out of memory"); + return; } pNew->pTscObj = pSql->pTscObj; @@ -4339,7 +4330,7 @@ uint32_t tscGetTableMetaSize(STableMeta* pTableMeta) { assert(pTableMeta != NULL); int32_t totalCols = 0; - if (pTableMeta->tableInfo.numOfColumns >= 0 && pTableMeta->tableInfo.numOfTags >= 0) { + if (pTableMeta->tableInfo.numOfColumns >= 0) { totalCols = pTableMeta->tableInfo.numOfColumns + pTableMeta->tableInfo.numOfTags; } diff --git a/src/query/inc/qTableMeta.h b/src/query/inc/qTableMeta.h index 56eea6429f..daef726ca6 100644 --- a/src/query/inc/qTableMeta.h +++ b/src/query/inc/qTableMeta.h @@ -60,7 +60,7 @@ typedef struct STableComInfo { typedef struct STableMeta { int32_t vgId; STableId id; - uint8_t tableType; + int8_t tableType; char sTableName[TSDB_TABLE_FNAME_LEN]; // super table name uint64_t suid; // super table id int16_t sversion; diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 7e3e542f5e..11624ddffc 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -30,6 +30,7 @@ #include "tcompare.h" #include "tscompression.h" #include "qScript.h" +#include "tscLog.h" #define IS_MASTER_SCAN(runtime) ((runtime)->scanFlag == MASTER_SCAN) #define IS_REVERSE_SCAN(runtime) ((runtime)->scanFlag == REVERSE_SCAN) @@ -750,7 +751,7 @@ static int32_t getNumOfRowsInTimeWindow(SQueryRuntimeEnv* pRuntimeEnv, SDataBloc int32_t step = GET_FORWARD_DIRECTION_FACTOR(order); if (QUERY_IS_ASC_QUERY(pQueryAttr)) { - if (ekey < pDataBlockInfo->window.ekey) { + if (ekey < pDataBlockInfo->window.ekey && pPrimaryColumn) { num = getForwardStepsInBlock(pDataBlockInfo->rows, searchFn, ekey, startPos, order, pPrimaryColumn); if (updateLastKey) { // update the last key item->lastKey = pPrimaryColumn[startPos + (num - 1)] + step; @@ -762,7 +763,7 @@ static int32_t getNumOfRowsInTimeWindow(SQueryRuntimeEnv* pRuntimeEnv, SDataBloc } } } else { // desc - if (ekey > pDataBlockInfo->window.skey) { + if (ekey > pDataBlockInfo->window.skey && pPrimaryColumn) { num = getForwardStepsInBlock(pDataBlockInfo->rows, searchFn, ekey, startPos, order, pPrimaryColumn); if (updateLastKey) { // update the last key item->lastKey = pPrimaryColumn[startPos - (num - 1)] + step; @@ -1299,6 +1300,10 @@ static void doWindowBorderInterpolation(SOperatorInfo* pOperatorInfo, SSDataBloc assert(pBlock != NULL); int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQueryAttr->order.order); + if (pBlock->pDataBlock == NULL){ + tscError("pBlock->pDataBlock == NULL"); + return; + } SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, 0); TSKEY *tsCols = (TSKEY *)(pColInfo->pData); @@ -7540,7 +7545,7 @@ SGroupbyExpr *createGroupbyExprFromMsg(SQueryTableMsg *pQueryMsg, SColIndex *pCo int32_t doCreateFilterInfo(SColumnInfo* pCols, int32_t numOfCols, int32_t numOfFilterCols, SSingleColumnFilterInfo** pFilterInfo, uint64_t qId) { *pFilterInfo = calloc(1, sizeof(SSingleColumnFilterInfo) * numOfFilterCols); - if (pFilterInfo == NULL) { + if (*pFilterInfo == NULL) { return TSDB_CODE_QRY_OUT_OF_MEMORY; } diff --git a/src/query/src/qPlan.c b/src/query/src/qPlan.c index 2772e76abe..f7fddecaf8 100644 --- a/src/query/src/qPlan.c +++ b/src/query/src/qPlan.c @@ -40,7 +40,7 @@ static SQueryNode* createQueryNode(int32_t type, const char* name, SQueryNode** pNode->info.type = type; pNode->info.name = strdup(name); - if (pTableInfo->id.uid != 0) { // it is a true table + if (pTableInfo->id.uid != 0 && 
pTableInfo->tableName) { // it is a true table pNode->tableInfo.id = pTableInfo->id; pNode->tableInfo.tableName = strdup(pTableInfo->tableName); } diff --git a/src/query/src/qResultbuf.c b/src/query/src/qResultbuf.c index 05ecf2e9b1..63eba51d6b 100644 --- a/src/query/src/qResultbuf.c +++ b/src/query/src/qResultbuf.c @@ -78,8 +78,9 @@ static char* doDecompressData(void* data, int32_t srcSize, int32_t *dst, SDiskba } *dst = tsDecompressString(data, srcSize, 1, pResultBuf->assistBuf, pResultBuf->pageSize, ONE_STAGE_COMP, NULL, 0); - - memcpy(data, pResultBuf->assistBuf, *dst); + if (*dst > 0) { + memcpy(data, pResultBuf->assistBuf, *dst); + } return data; } diff --git a/src/query/src/qScript.c b/src/query/src/qScript.c index 261164a84c..ed56526782 100644 --- a/src/query/src/qScript.c +++ b/src/query/src/qScript.c @@ -377,9 +377,11 @@ ScriptEnv* getScriptEnvFromPool() { return NULL; } SListNode *pNode = tdListPopHead(pool->scriptEnvs); - tdListNodeGetData(pool->scriptEnvs, pNode, (void *)(&pEnv)); - listNodeFree(pNode); - + if (pNode){ + tdListNodeGetData(pool->scriptEnvs, pNode, (void *)(&pEnv)); + listNodeFree(pNode); + } + pool->cSize--; pthread_mutex_unlock(&pool->mutex); return pEnv; diff --git a/src/query/src/qSqlParser.c b/src/query/src/qSqlParser.c index eb920b3e17..08c55d5d7e 100644 --- a/src/query/src/qSqlParser.c +++ b/src/query/src/qSqlParser.c @@ -142,14 +142,17 @@ tSqlExpr *tSqlExprCreateIdValue(SStrToken *pToken, int32_t optrType) { } if (optrType == TK_NULL) { - pToken->type = TSDB_DATA_TYPE_NULL; - tVariantCreate(&pSqlExpr->value, pToken); + if (pToken){ + pToken->type = TSDB_DATA_TYPE_NULL; + tVariantCreate(&pSqlExpr->value, pToken); + } pSqlExpr->tokenId = optrType; pSqlExpr->type = SQL_NODE_VALUE; } else if (optrType == TK_INTEGER || optrType == TK_STRING || optrType == TK_FLOAT || optrType == TK_BOOL) { - toTSDBType(pToken->type); - - tVariantCreate(&pSqlExpr->value, pToken); + if (pToken) { + toTSDBType(pToken->type); + tVariantCreate(&pSqlExpr->value, pToken); + } pSqlExpr->tokenId = optrType; pSqlExpr->type = SQL_NODE_VALUE; } else if (optrType == TK_NOW) { @@ -162,9 +165,11 @@ tSqlExpr *tSqlExprCreateIdValue(SStrToken *pToken, int32_t optrType) { } else if (optrType == TK_VARIABLE) { // use nanosecond by default // TODO set value after getting database precision - int32_t ret = parseAbsoluteDuration(pToken->z, pToken->n, &pSqlExpr->value.i64, TSDB_TIME_PRECISION_NANO); - if (ret != TSDB_CODE_SUCCESS) { - terrno = TSDB_CODE_TSC_SQL_SYNTAX_ERROR; + if (pToken) { + int32_t ret = parseAbsoluteDuration(pToken->z, pToken->n, &pSqlExpr->value.i64, TSDB_TIME_PRECISION_NANO); + if (ret != TSDB_CODE_SUCCESS) { + terrno = TSDB_CODE_TSC_SQL_SYNTAX_ERROR; + } } pSqlExpr->flags |= 1 << EXPR_FLAG_NS_TIMESTAMP; @@ -340,8 +345,9 @@ static FORCE_INLINE int32_t tStrTokenCompare(SStrToken* left, SStrToken* right) return (left->type == right->type && left->n == right->n && strncasecmp(left->z, right->z, left->n) == 0) ? 
0 : 1; } +// this function is not used for temporary int32_t tSqlExprCompare(tSqlExpr *left, tSqlExpr *right) { - if ((left == NULL && right) || (left && right == NULL)) { + if ((left == NULL && right) || (left && right == NULL) || (left == NULL && right == NULL)) { return 1; } diff --git a/src/query/src/qTableMeta.c b/src/query/src/qTableMeta.c index d25d6b7004..f687b8aa1f 100644 --- a/src/query/src/qTableMeta.c +++ b/src/query/src/qTableMeta.c @@ -72,7 +72,7 @@ SSchema* tscGetColumnSchemaById(STableMeta* pTableMeta, int16_t colId) { } STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg) { - assert(pTableMetaMsg != NULL && pTableMetaMsg->numOfColumns >= 2 && pTableMetaMsg->numOfTags >= 0); + assert(pTableMetaMsg != NULL && pTableMetaMsg->numOfColumns >= 2); int32_t schemaSize = (pTableMetaMsg->numOfColumns + pTableMetaMsg->numOfTags) * sizeof(SSchema); STableMeta* pTableMeta = calloc(1, sizeof(STableMeta) + schemaSize); diff --git a/src/query/src/queryMain.c b/src/query/src/queryMain.c index 0d140d5ffb..7d30f7c668 100644 --- a/src/query/src/queryMain.c +++ b/src/query/src/queryMain.c @@ -261,7 +261,7 @@ int32_t qRetrieveQueryResultInfo(qinfo_t qinfo, bool* buildRes, void* pRspContex SQInfo *pQInfo = (SQInfo *)qinfo; if (pQInfo == NULL || !isValidQInfo(pQInfo)) { - qError("QInfo:0x%"PRIx64" invalid qhandle", pQInfo->qId); + qError("QInfo invalid qhandle"); return TSDB_CODE_QRY_INVALID_QHANDLE; } diff --git a/src/util/inc/tlist.h b/src/util/inc/tlist.h index 6c96ec0b13..7581904540 100644 --- a/src/util/inc/tlist.h +++ b/src/util/inc/tlist.h @@ -44,7 +44,7 @@ typedef struct { #define listNEles(l) (l)->numOfEles #define listEleSize(l) (l)->eleSize #define isListEmpty(l) ((l)->numOfEles == 0) -#define listNodeFree(n) free(n); +#define listNodeFree(n) free(n) SList * tdListNew(int eleSize); void * tdListFree(SList *list); From b2c0fbe0ce15ba13d79a4e262dee1a1d106bdb02 Mon Sep 17 00:00:00 2001 From: markswang <792637585@qq.com> Date: Tue, 27 Jul 2021 17:08:32 +0800 Subject: [PATCH 16/46] [TD-5534]:fix the coverity high risk of client --- src/client/src/TSDBJNIConnector.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client/src/TSDBJNIConnector.c b/src/client/src/TSDBJNIConnector.c index 667a689979..b373e360a1 100644 --- a/src/client/src/TSDBJNIConnector.c +++ b/src/client/src/TSDBJNIConnector.c @@ -964,7 +964,7 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_insertLinesImp(J int numLines = (*env)->GetArrayLength(env, lines); char** c_lines = calloc(numLines, sizeof(char*)); if (c_lines == NULL) { - jniError("c_lines:%d, alloc memory failed", c_lines); + jniError("c_lines:%p, alloc memory failed", c_lines); return JNI_OUT_OF_MEMORY; } for (int i = 0; i < numLines; ++i) { From f4efe18aa35404c2353031394177a1db79ee1ebe Mon Sep 17 00:00:00 2001 From: markswang <792637585@qq.com> Date: Tue, 27 Jul 2021 17:10:51 +0800 Subject: [PATCH 17/46] [TD-5534]:fix the coverity high risk of client --- src/client/src/tscSql.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c index b6e2604bd2..6f3d5c3a63 100644 --- a/src/client/src/tscSql.c +++ b/src/client/src/tscSql.c @@ -986,8 +986,8 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) { if (code != TSDB_CODE_SUCCESS) { tscFreeSqlObj(pSql); - taosArrayDestroyEx(plist); - taosArrayDestroyEx(vgroupList); + taosArrayDestroyEx(plist, freeElem); + taosArrayDestroyEx(vgroupList, freeElem); return code; } From 
ff9971ffa2838f08dbeaf761213d563e6ec15f22 Mon Sep 17 00:00:00 2001 From: markswang <792637585@qq.com> Date: Tue, 27 Jul 2021 17:43:03 +0800 Subject: [PATCH 18/46] [TD-5534]:fix the coverity high risk of client --- src/client/src/tscLocal.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client/src/tscLocal.c b/src/client/src/tscLocal.c index e49f4e33ae..9e65094e16 100644 --- a/src/client/src/tscLocal.c +++ b/src/client/src/tscLocal.c @@ -860,10 +860,10 @@ static int32_t tscProcessServStatus(SSqlObj *pSql) { if (pHb != NULL) { pSql->res.code = checkForOnlineNode(pHb); + taosReleaseRef(tscObjRef, pObj->hbrid); } if (pSql->res.code == TSDB_CODE_RPC_NETWORK_UNAVAIL) { - taosReleaseRef(tscObjRef, pObj->hbrid); return pSql->res.code; } From 6faf1d5bb3e98af1b587299b9a14bd95ec30ebca Mon Sep 17 00:00:00 2001 From: haoranchen Date: Tue, 27 Jul 2021 18:26:10 +0800 Subject: [PATCH 19/46] Update sdbCompClusterReplica2.py --- tests/pytest/wal/sdbCompClusterReplica2.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/pytest/wal/sdbCompClusterReplica2.py b/tests/pytest/wal/sdbCompClusterReplica2.py index dd5a375151..ba80e3864a 100644 --- a/tests/pytest/wal/sdbCompClusterReplica2.py +++ b/tests/pytest/wal/sdbCompClusterReplica2.py @@ -86,7 +86,7 @@ class TwoClients: tdSql.execute("alter table stb2_0 add column col2 binary(4)") tdSql.execute("alter table stb2_0 drop column col1") tdSql.execute("insert into stb2_0 values(1614218422000,8638,'R')") - tdSql.execute("drop dnode 2") + tdSql.execute("drop dnode chenhaoran02") sleep(10) os.system("rm -rf /var/lib/taos/*") print("clear dnode chenhaoran02'data files") @@ -142,4 +142,4 @@ class TwoClients: clients = TwoClients() clients.initConnection() # clients.getBuildPath() -clients.run() \ No newline at end of file +clients.run() From 928473f0cbf77e3910539d6b8357c43c9caa9006 Mon Sep 17 00:00:00 2001 From: markswang <792637585@qq.com> Date: Tue, 27 Jul 2021 18:27:35 +0800 Subject: [PATCH 20/46] [TD-5534]:fix the coverity high risk of client --- src/client/src/tscParseLineProtocol.c | 2 +- src/client/src/tscPrepare.c | 4 ++-- src/client/src/tscSQLParser.c | 2 +- src/client/src/tscServer.c | 2 +- src/client/src/tscUtil.c | 6 +----- src/query/inc/qExecutor.h | 1 - src/query/src/qExecutor.c | 2 +- 7 files changed, 7 insertions(+), 12 deletions(-) diff --git a/src/client/src/tscParseLineProtocol.c b/src/client/src/tscParseLineProtocol.c index 4b5416c6b9..1790403a5e 100644 --- a/src/client/src/tscParseLineProtocol.c +++ b/src/client/src/tscParseLineProtocol.c @@ -1653,7 +1653,7 @@ static int32_t parseSmlTimeStamp(TAOS_SML_KV **pTS, const char **index) { static int32_t parseSmlKey(TAOS_SML_KV *pKV, const char **index) { const char *cur = *index; - char key[TSDB_COL_NAME_LEN + 1]; // +1 to avoid 1685 line over write + char key[TSDB_COL_NAME_LEN + 1]; // +1 to avoid key[len] over write uint16_t len = 0; //key field cannot start with digit diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c index 9ff3a0e2dd..2c2a299549 100644 --- a/src/client/src/tscPrepare.c +++ b/src/client/src/tscPrepare.c @@ -1628,8 +1628,8 @@ int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags if (pStmt->mtb.subSet && taosHashGetSize(pStmt->mtb.pTableHash) > 0) { STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0); STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; - char sTableName[TSDB_TABLE_FNAME_LEN + 1] = {0}; - strncpy(sTableName, pTableMeta->sTableName, sizeof(sTableName) - 1); + 
char sTableName[TSDB_TABLE_FNAME_LEN] = {0}; + tstrncpy(sTableName, pTableMeta->sTableName, sizeof(sTableName)); SStrToken tname = {0}; tname.type = TK_STRING; diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 1fac9c9a40..e899cc3a07 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -8366,7 +8366,7 @@ static int32_t doValidateSubquery(SSqlNode* pSqlNode, int32_t index, SSqlObj* pS return invalidOperationMsg(msgBuf, "subquery alias name too long"); } - strncpy(pTableMetaInfo1->aliasName, subInfo->aliasName.z, MIN(subInfo->aliasName.n, sizeof(pTableMetaInfo1->aliasName) - 1)); + tstrncpy(pTableMetaInfo1->aliasName, subInfo->aliasName.z, MIN(subInfo->aliasName.n, sizeof(pTableMetaInfo1->aliasName))); } taosArrayPush(pQueryInfo->pUpstream, &pSub); diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 0748780e1a..f3c24eb407 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -164,7 +164,7 @@ static void tscUpdateVgroupInfo(SSqlObj *pSql, SRpcEpSet *pEpSet) { vgroupInfo.inUse = pEpSet->inUse; vgroupInfo.numOfEps = pEpSet->numOfEps; for (int32_t i = 0; i < vgroupInfo.numOfEps; i++) { - strncpy(vgroupInfo.ep[i].fqdn, pEpSet->fqdn[i], TSDB_FQDN_LEN); // buffer not null terminated risk + tstrncpy(vgroupInfo.ep[i].fqdn, pEpSet->fqdn[i], TSDB_FQDN_LEN); // buffer not null terminated risk vgroupInfo.ep[i].port = pEpSet->port[i]; } diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 7e36cce50e..27fc19b205 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -1236,10 +1236,6 @@ void handleDownstreamOperator(SSqlObj** pSqlObjList, int32_t numOfUpstream, SQue } pSourceOperator = createJoinOperatorInfo(p, px->numOfTables, schema, num); - - for(int32_t i = 0; i < px->numOfTables; ++i) { - destroyOperatorInfo(p[i]); - } tfree(p); } else { size_t num = taosArrayGetSize(px->colList); @@ -4714,7 +4710,7 @@ static int32_t doAddTableName(char* nextStr, char** str, SArray* pNameArray, SSq int32_t len = 0; if (nextStr == NULL) { - strncpy(tablename, *str, TSDB_TABLE_FNAME_LEN - 1); + tstrncpy(tablename, *str, TSDB_TABLE_FNAME_LEN); len = (int32_t) strlen(tablename); } else { len = (int32_t)(nextStr - (*str)); diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h index 71e9792bd8..4581ba258d 100644 --- a/src/query/inc/qExecutor.h +++ b/src/query/inc/qExecutor.h @@ -578,7 +578,6 @@ void doCompactSDataBlock(SSDataBlock* pBlock, int32_t numOfRows, int8_t* p); SSDataBlock* createOutputBuf(SExprInfo* pExpr, int32_t numOfOutput, int32_t numOfRows); void* destroyOutputBuf(SSDataBlock* pBlock); void* doDestroyFilterInfo(SSingleColumnFilterInfo* pFilterInfo, int32_t numOfFilterCols); -void destroyOperatorInfo(SOperatorInfo* pOperator); void setInputDataBlock(SOperatorInfo* pOperator, SQLFunctionCtx* pCtx, SSDataBlock* pBlock, int32_t order); int32_t getNumOfResult(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx* pCtx, int32_t numOfOutput); diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 11624ddffc..6c18419619 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -5949,7 +5949,7 @@ static int32_t getNumOfScanTimes(SQueryAttr* pQueryAttr) { return 1; } -void destroyOperatorInfo(SOperatorInfo* pOperator) { +static void destroyOperatorInfo(SOperatorInfo* pOperator) { if (pOperator == NULL) { return; } From 4216a400a3e22d00cd308a80fc0f1f382eb9dd40 Mon Sep 17 00:00:00 2001 From: markswang <792637585@qq.com> Date: Tue, 27 Jul 2021 
18:29:49 +0800 Subject: [PATCH 21/46] [TD-5534]:fix the coverity medium risk of client --- src/query/src/qExecutor.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 6c18419619..cc51cafe38 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -192,6 +192,8 @@ static void destroyTagScanOperatorInfo(void* param, int32_t numOfOutput); static void destroySWindowOperatorInfo(void* param, int32_t numOfOutput); static void destroyStateWindowOperatorInfo(void* param, int32_t numOfOutput); static void destroyAggOperatorInfo(void* param, int32_t numOfOutput); +static void destroyOperatorInfo(SOperatorInfo* pOperator); + static int32_t doCopyToSDataBlock(SQueryRuntimeEnv* pRuntimeEnv, SGroupResInfo* pGroupResInfo, int32_t orderType, SSDataBlock* pBlock); From 211c079b074696a645bca76b2bbc22f28dacadb5 Mon Sep 17 00:00:00 2001 From: markswang <792637585@qq.com> Date: Wed, 28 Jul 2021 12:11:17 +0800 Subject: [PATCH 22/46] [TD-5534]:fix the coverity high risk of client --- src/client/src/TSDBJNIConnector.c | 15 ++++----------- src/client/src/tscGlobalmerge.c | 3 +-- src/client/src/tscParseLineProtocol.c | 12 ++++++------ src/client/src/tscSQLParser.c | 4 ++-- src/client/src/tscServer.c | 2 +- src/client/src/tscSubquery.c | 2 +- 6 files changed, 15 insertions(+), 23 deletions(-) diff --git a/src/client/src/TSDBJNIConnector.c b/src/client/src/TSDBJNIConnector.c index b373e360a1..7ba613de88 100644 --- a/src/client/src/TSDBJNIConnector.c +++ b/src/client/src/TSDBJNIConnector.c @@ -726,13 +726,12 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_prepareStmtImp(J TAOS_STMT* pStmt = taos_stmt_init(tscon); int32_t code = taos_stmt_prepare(pStmt, str, len); + tfree(str); if (code != TSDB_CODE_SUCCESS) { jniError("jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code)); - free(str); return JNI_TDENGINE_ERROR; } - free(str); return (jlong) pStmt; } @@ -920,10 +919,6 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setTableNameTagsI char* curTags = tagsData; TAOS_BIND *tagsBind = calloc(numOfTags, sizeof(TAOS_BIND)); - if (tagsBind == NULL) { - jniError("numOfTags:%d, alloc memory failed", numOfTags); - return JNI_OUT_OF_MEMORY; - } for(int32_t i = 0; i < numOfTags; ++i) { tagsBind[i].buffer_type = typeArray[i]; tagsBind[i].buffer = curTags; @@ -942,14 +937,13 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setTableNameTagsI tfree(lengthArray); tfree(typeArray); tfree(nullArray); + tfree(tagsBind); (*env)->ReleaseStringUTFChars(env, tableName, name); if (code != TSDB_CODE_SUCCESS) { jniError("jobj:%p, conn:%p, code:%s", jobj, tsconn, tstrerror(code)); - free(tagsBind); return JNI_TDENGINE_ERROR; } - free(tagsBind); return JNI_SUCCESS; } @@ -979,12 +973,11 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_insertLinesImp(J (*env)->ReleaseStringUTFChars(env, line, c_lines[i]); } + tfree(c_lines); if (code != TSDB_CODE_SUCCESS) { jniError("jobj:%p, conn:%p, code:%s", jobj, taos, tstrerror(code)); - free(c_lines); + return JNI_TDENGINE_ERROR; } - - free(c_lines); return code; } \ No newline at end of file diff --git a/src/client/src/tscGlobalmerge.c b/src/client/src/tscGlobalmerge.c index 9c9bac5951..e696d54abd 100644 --- a/src/client/src/tscGlobalmerge.c +++ b/src/client/src/tscGlobalmerge.c @@ -443,11 +443,10 @@ int32_t tscCreateGlobalMergerEnv(SQueryInfo *pQueryInfo, tExtMemBuffer ***pMemBu } pModel = createColumnModel(pSchema, (int32_t)size, capacity); + 
tfree(pSchema); if (pModel == NULL){ - tfree(pSchema); return TSDB_CODE_TSC_OUT_OF_MEMORY; } - tfree(pSchema); int32_t pg = DEFAULT_PAGE_SIZE; int32_t overhead = sizeof(tFilePage); diff --git a/src/client/src/tscParseLineProtocol.c b/src/client/src/tscParseLineProtocol.c index 1790403a5e..d12ad04aa8 100644 --- a/src/client/src/tscParseLineProtocol.c +++ b/src/client/src/tscParseLineProtocol.c @@ -701,14 +701,14 @@ static int32_t insertChildTableBatch(TAOS* taos, char* cTableName, SArray* cols TAOS_STMT* stmt = taos_stmt_init(taos); if (stmt == NULL) { - free(sql); + tfree(sql); return TSDB_CODE_TSC_OUT_OF_MEMORY; } code = taos_stmt_prepare(stmt, sql, (unsigned long)strlen(sql)); - free(sql); + tfree(sql); if (code != 0) { - free(stmt); + tfree(stmt); tscError("%s", taos_stmt_errstr(stmt)); return code; } @@ -717,7 +717,7 @@ static int32_t insertChildTableBatch(TAOS* taos, char* cTableName, SArray* cols code = taos_stmt_set_tbname(stmt, cTableName); if (code != 0) { tscError("%s", taos_stmt_errstr(stmt)); - free(stmt); + tfree(stmt); return code; } @@ -727,13 +727,13 @@ static int32_t insertChildTableBatch(TAOS* taos, char* cTableName, SArray* cols code = taos_stmt_bind_param(stmt, colsBinds); if (code != 0) { tscError("%s", taos_stmt_errstr(stmt)); - free(stmt); + tfree(stmt); return code; } code = taos_stmt_add_batch(stmt); if (code != 0) { tscError("%s", taos_stmt_errstr(stmt)); - free(stmt); + tfree(stmt); return code; } } diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index e899cc3a07..fb1eb56422 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -8362,7 +8362,7 @@ static int32_t doValidateSubquery(SSqlNode* pSqlNode, int32_t index, SSqlObj* pS if (subInfo->aliasName.n > 0) { if (subInfo->aliasName.n >= TSDB_TABLE_FNAME_LEN) { - free(pTableMetaInfo1); + tfree(pTableMetaInfo1); return invalidOperationMsg(msgBuf, "subquery alias name too long"); } @@ -8376,7 +8376,7 @@ static int32_t doValidateSubquery(SSqlNode* pSqlNode, int32_t index, SSqlObj* pS STableMetaInfo** tmp = realloc(pQueryInfo->pTableMetaInfo, (pQueryInfo->numOfTables + 1) * POINTER_BYTES); if (tmp == NULL) { - free(pTableMetaInfo1); + tfree(pTableMetaInfo1); return TSDB_CODE_TSC_OUT_OF_MEMORY; } diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index f3c24eb407..76382d9afd 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -2055,7 +2055,7 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) { } if (!tIsValidSchema(pTableMeta->schema, pTableMeta->tableInfo.numOfColumns, pTableMeta->tableInfo.numOfTags)) { tscError("0x%"PRIx64" invalid table meta from mnode, name:%s", pSql->self, tNameGetTableName(&pTableMetaInfo->name)); - free(pTableMeta); + tfree(pTableMeta); return TSDB_CODE_TSC_INVALID_VALUE; } diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index bef5992d53..2d00253562 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2476,7 +2476,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) { pState->states = calloc(pState->numOfSub, sizeof(*pState->states)); if (pState->states == NULL) { pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY; - tscDestroyGlobalMergerEnv(pMemoryBuf, pDesc,pState->numOfSub); + tscDestroyGlob alMergerEnv(pMemoryBuf, pDesc,pState->numOfSub); tscAsyncResultOnError(pSql); return ret; From a7dc581f23db01655ff247762dc9767bea90cab7 Mon Sep 17 00:00:00 2001 From: markswang <792637585@qq.com> Date: Wed, 28 Jul 2021 12:12:33 +0800 Subject: [PATCH 23/46] [TD-5534]:fix 
the coverity high risk of client --- src/client/src/tscSubquery.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 2d00253562..bef5992d53 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2476,7 +2476,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) { pState->states = calloc(pState->numOfSub, sizeof(*pState->states)); if (pState->states == NULL) { pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY; - tscDestroyGlob alMergerEnv(pMemoryBuf, pDesc,pState->numOfSub); + tscDestroyGlobalMergerEnv(pMemoryBuf, pDesc,pState->numOfSub); tscAsyncResultOnError(pSql); return ret; From 7845fc96821ec8f29fd5ca813e67cbaa38e17f9c Mon Sep 17 00:00:00 2001 From: happyguoxy Date: Wed, 28 Jul 2021 14:04:59 +0800 Subject: [PATCH 24/46] [TD-5074]:test operator cost --- tests/pytest/query/operator_cost.py | 539 ++++++++++++++++++++++++++++ 1 file changed, 539 insertions(+) create mode 100644 tests/pytest/query/operator_cost.py diff --git a/tests/pytest/query/operator_cost.py b/tests/pytest/query/operator_cost.py new file mode 100644 index 0000000000..b94d5fa3b3 --- /dev/null +++ b/tests/pytest/query/operator_cost.py @@ -0,0 +1,539 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql +import random +import time + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + self.ts = 1600000000000 + self.num = 10 + + def run(self): + tdSql.prepare() + # test case for https://jira.taosdata.com:18080/browse/TD-5074 + + startTime = time.time() + + tdSql.execute('''create stable stable_1 + (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint, + q_bool bool , q_binary binary(20) , q_nchar nchar(20) , + q_float float , q_double double , q_ts timestamp) + tags(loc nchar(20) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, + t_bool bool , t_binary binary(20) , t_nchar nchar(20) , + t_float float , t_double double , t_ts timestamp);''') + tdSql.execute('''create stable stable_2 + (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint, + q_bool bool , q_binary binary(20) , q_nchar nchar(20) , + q_float float , q_double double , q_ts timestamp) + tags(loc nchar(20) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, + t_bool bool , t_binary binary(20) , t_nchar nchar(20) , + t_float float , t_double double , t_ts timestamp);''') + tdSql.execute('''create table table_0 using stable_1 + tags('table_0' , '0' , '0' , '0' , '0' , 0 , '0' , '0' , '0' , '0' ,'0')''') + tdSql.execute('''create table table_1 using stable_1 + tags('table_1' , '2147483647' , '9223372036854775807' , '32767' , '127' , 1 , + 'binary1' , 'nchar1' , '1' , '11' , \'1999-09-09 09:09:09.090\')''') + tdSql.execute('''create table table_2 using stable_1 + tags('table_2' , '-2147483647' , '-9223372036854775807' , 
'-32767' , '-127' , false , + 'binary2' , 'nchar2nchar2' , '-2.2' , '-22.22' , \'2099-09-09 09:09:09.090\')''') + tdSql.execute('''create table table_3 using stable_1 + tags('table_3' , '3' , '3' , '3' , '3' , true , 'binary3' , 'nchar3' , '33.33' , '3333.3333' , '0')''') + tdSql.execute('''create table table_4 using stable_1 + tags('table_4' , '4' , '4' , '4' , '4' , false , 'binary4' , 'nchar4' , '-444.444' , '-444444.444444' , '0')''') + tdSql.execute('''create table table_5 using stable_1 + tags('table_5' , '5' , '5' , '5' , '5' , true , 'binary5' , 'nchar5' , '5555.5555' , '55555555.55555555' , '0')''') + tdSql.execute('''create table table_21 using stable_2 + tags('table_5' , '5' , '5' , '5' , '5' , true , 'binary5' , 'nchar5' , '5555.5555' , '55555555.55555555' , '0')''') + #regular table + tdSql.execute('''create table regular_table_1 + (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint, + q_bool bool , q_binary binary(20) , q_nchar nchar(20) , + q_float float , q_double double , q_ts timestamp) ;''') + + for i in range(self.num): + tdSql.execute('''insert into table_0 values(%d, %d, %d, %d, %d, 0, 'binary.%s', 'nchar.%s', %f, %f, %d)''' + % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) + tdSql.execute('''insert into table_1 values(%d, %d, %d, %d, %d, 1, 'binary1.%s', 'nchar1.%s', %f, %f, %d)''' + % (self.ts + i, 2147483647-i, 9223372036854775807-i, 32767-i, 127-i, + i, i, random.random(), random.random(), 1262304000001 + i)) + tdSql.execute('''insert into table_2 values(%d, %d, %d, %d, %d, true, 'binary2.%s', 'nchar2nchar2.%s', %f, %f, %d)''' + % (self.ts + i, -2147483647+i, -9223372036854775807+i, -32767+i, -127+i, + i, i, random.uniform(-1,0), random.uniform(-1,0), 1577836800001 + i)) + tdSql.execute('''insert into table_3 values(%d, %d, %d, %d, %d, false, 'binary3.%s', 'nchar3.%s', %f, %f, %d)''' + % (self.ts + i, random.randint(-2147483647, 2147483647), + random.randint(-9223372036854775807, 9223372036854775807), random.randint(-32767, 32767), + random.randint(-127, 127), random.randint(-100, 100), random.randint(-10000, 10000), + random.uniform(-100000,100000), random.uniform(-1000000000,1000000000), self.ts + i)) + tdSql.execute('''insert into table_4 values(%d, %d, %d, %d, %d, true, 'binary4.%s', 'nchar4.%s', %f, %f, %d)''' + % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) + tdSql.execute('''insert into table_5 values(%d, %d, %d, %d, %d, false, 'binary5.%s', 'nchar5.%s', %f, %f, %d)''' + % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) + tdSql.execute('''insert into table_21 values(%d, %d, %d, %d, %d, false, 'binary5.%s', 'nchar5.%s', %f, %f, %d)''' + % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) + + tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, 0, 'binary.%s', 'nchar.%s', %f, %f, %d)''' + % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) + tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, 1, 'binary1.%s', 'nchar1.%s', %f, %f, %d)''' + % (self.ts + 100 + i, 2147483647-i, 9223372036854775807-i, 32767-i, 127-i, + i, i, random.random(), random.random(), 1262304000001 + i)) + tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, true, 'binary2.%s', 'nchar2nchar2.%s', %f, %f, %d)''' + % (self.ts + 200 + i, -2147483647+i, -9223372036854775807+i, -32767+i, -127+i, + i, i, random.uniform(-1,0), random.uniform(-1,0), 1577836800001 + i)) + tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, false, 'binary3.%s', 'nchar3.%s', %f, %f, 
%d)''' + % (self.ts + 300 + i, random.randint(-2147483647, 2147483647), + random.randint(-9223372036854775807, 9223372036854775807), random.randint(-32767, 32767), + random.randint(-127, 127), random.randint(-100, 100), random.randint(-10000, 10000), + random.uniform(-100000,100000), random.uniform(-1000000000,1000000000), self.ts + i)) + tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, true, 'binary4.%s', 'nchar4.%s', %f, %f, %d)''' + % (self.ts + 400 + i, i, i, i, i, i, i, i, i, self.ts + i)) + tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, false, 'binary5.%s', 'nchar5.%s', %f, %f, %d)''' + % (self.ts + 500 + i, i, i, i, i, i, i, i, i, self.ts + i)) + + tdLog.info("========== operator=1(OP_TableScan) ==========") + tdLog.info("========== operator=7(OP_Project) ==========") + sql = '''select * from stable_1''' + tdSql.query(sql) + tdSql.checkRows(6*self.num) + sql = '''select * from regular_table_1''' + tdSql.query(sql) + tdSql.checkRows(6*self.num) + + tdLog.info("========== operator=14(OP_MultiTableAggregate ) ==========") + sql = '''select last_row(*) from stable_1;''' + tdSql.query(sql) + tdSql.checkData(0,1,self.num-1) + + tdLog.info("========== operator=6(OP_Aggregate) ==========") + sql = '''select last_row(*) from regular_table_1;''' + tdSql.query(sql) + tdSql.checkData(0,1,self.num-1) + + tdLog.info("========== operator=9(OP_Limit) ==========") + sql = '''select * from stable_1 where loc = 'table_0' limit 5;''' + tdSql.query(sql) + tdSql.checkRows(5) + sql = '''select last_row(*) from (select * from stable_1 where loc = 'table_0');''' + tdSql.query(sql) + tdSql.checkRows(1) + + sql = '''select * from regular_table_1 ;''' + tdSql.query(sql) + tdSql.checkRows(6*self.num) + sql = '''select last_row(*) from (select * from regular_table_1);''' + tdSql.query(sql) + tdSql.checkRows(1) + tdSql.checkData(0,1,self.num-1) + + + sql = '''select last_row(*) from + ((select * from table_0) union all + (select * from table_1) union all + (select * from table_2));''' + tdSql.error(sql) + + tdLog.info("========== operator=16(OP_DummyInput) ==========") + sql = '''select last_row(*) from + ((select last_row(*) from table_0) union all + (select last_row(*) from table_1) union all + (select last_row(*) from table_2));''' + tdSql.error(sql) + + sql = '''select last_row(*) from + ((select * from table_0 limit 5 offset 5) union all + (select * from table_1 limit 5 offset 5) union all + (select * from regular_table_1 limit 5 offset 5));''' + tdSql.error(sql) + + tdLog.info("========== operator=10(OP_SLimit) ==========") + sql = '''select count(*) from stable_1 group by loc slimit 3 soffset 2 ;''' + tdSql.query(sql) + tdSql.checkRows(3) + + sql = '''select last_row(*) from + ((select * from table_0) union all + (select * from table_1) union all + (select * from table_2));''' + tdSql.error(sql) + + tdLog.info("========== operator=20(OP_Distinct) ==========") + tdLog.info("========== operator=4(OP_TagScan) ==========") + sql = '''select distinct(t_bool) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(2) + sql = '''select distinct(loc) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_int) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_bigint) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_smallint) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_tinyint) from stable_1;''' + tdSql.query(sql) + 
tdSql.checkRows(6) + sql = '''select distinct(t_binary) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_nchar) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_float) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_double) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_ts) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(3) + sql = '''select distinct(tbname) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + + tdLog.info("========== operator=2(OP_DataBlocksOptScan) ==========") + sql = '''select last(q_int),first(q_int) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_bigint),first(q_bigint) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_smallint),first(q_smallint) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_tinyint),first(q_tinyint) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_bool),first(q_bool) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_binary),first(q_binary) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_nchar),first(q_nchar) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_float),first(q_float) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_double),first(q_double) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_ts),first(q_ts) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint),last(q_bool),last(q_binary),last(q_nchar), + last(q_float),last(q_double),last(q_ts),first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint), + first(q_bool),first(q_binary),first(q_nchar),first(q_float),first(q_float),first(q_double),first(q_ts) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint),last(q_bool),last(q_binary),last(q_nchar), + last(q_float),last(q_double),last(q_ts),first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint),first(q_bool), + first(q_binary),first(q_nchar),first(q_float),first(q_float),first(q_double),first(q_ts) from regular_table_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + + tdLog.info("========== operator=8(OP_Groupby) ==========") + sql = '''select stddev(q_int) from table_0 group by q_int;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select stddev(q_int),stddev(q_bigint),stddev(q_smallint),stddev(q_tinyint),stddev(q_float),stddev(q_double) from stable_1 group by q_int;''' + tdSql.query(sql) + sql = '''select stddev(q_int),stddev(q_bigint),stddev(q_smallint),stddev(q_tinyint),stddev(q_float),stddev(q_double) from table_1 group by q_bigint;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select stddev(q_int),stddev(q_bigint),stddev(q_smallint),stddev(q_tinyint),stddev(q_float),stddev(q_double) from regular_table_1 group by q_smallint;''' + tdSql.query(sql) + + tdLog.info("========== operator=11(OP_TimeWindow) ==========") + sql = '''select last(q_int) from table_0 interval(1m);''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint), + first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint) from table_1 
interval(1m);''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint), + first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint) from stable_1 interval(1m);''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint), + first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint) from regular_table_1 interval(1m);''' + tdSql.query(sql) + tdSql.checkRows(1) + + tdLog.info("========== operator=12(OP_SessionWindow) ==========") + sql = '''select count(*) from table_1 session(ts,1s);''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select count(*) from regular_table_1 session(ts,1s);''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select count(*),sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from table_1 session(ts,1s);''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select count(*),sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from regular_table_1 session(ts,1s);''' + tdSql.query(sql) + tdSql.checkRows(1) + + tdLog.info("========== operator=13(OP_Fill) ==========") + sql = '''select sum(q_int) from table_0 + where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' + tdSql.query(sql) + tdSql.checkData(0,1,'None') + sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from stable_1 where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' + tdSql.query(sql) + tdSql.checkData(0,1,'None') + sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), 
avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from regular_table_1 where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' + tdSql.query(sql) + tdSql.checkData(0,1,'None') + sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from table_0 where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' + tdSql.query(sql) + tdSql.checkData(0,1,'None') + #TD-5190 + sql = '''select sum(q_tinyint),stddev(q_float) from stable_1 + where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' + tdSql.query(sql) + tdSql.checkData(0,1,'None') + + tdLog.info("========== operator=15(OP_MultiTableTimeInterval) ==========") + sql = '''select avg(q_int) from stable_1 where ts=0;''' + tdSql.query(sql) + tdSql.checkData(0,0,'table_0') + sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from stable_1 group by loc having sum(q_int)>=0;''' + tdSql.query(sql) + tdSql.checkData(0,0,'table_0') + sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from stable_1 group by loc having avg(q_int)>=0;''' + tdSql.query(sql) + tdSql.checkData(0,0,'table_0') + sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from stable_1 group by loc having min(q_int)>=0;''' + tdSql.query(sql) + tdSql.checkData(0,0,'table_0') + sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), 
max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from stable_1 group by loc having max(q_int)>=0;''' + tdSql.query(sql) + tdSql.checkData(0,0,'table_0') + sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from stable_1 group by loc having first(q_int)>=0;''' + tdSql.query(sql) + tdSql.checkData(0,0,'table_0') + sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from stable_1 group by loc having last(q_int)>=0;''' + tdSql.query(sql) + tdSql.checkData(0,0,'table_0') + + tdLog.info("========== operator=21(OP_Join) ==========") + sql = '''select t1.q_int,t2.q_int from + (select ts,q_int from table_1) t1 , (select ts,q_int from table_2) t2 + where t2.ts = t1.ts;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select t1.*,t2.* from + (select * from table_1) t1 , (select * from table_2) t2 + where t2.ts = t1.ts;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select t1.*,t2.* from + (select * from regular_table_1) t1 , (select * from table_0) t2 + where t2.ts = t1.ts;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select t1.*,t2.* from + (select * from stable_1) t1 , (select * from table_2) t2 + where t2.ts = t1.ts;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select t1.*,t2.* from + (select * from regular_table_1) t1 , (select * from stable_1) t2 + where t2.ts = t1.ts;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select t1.*,t2.*,t3.* from + (select * from regular_table_1) t1 , (select * from stable_1) t2, (select * from table_0) t3 + where t2.ts = t1.ts and t3.ts = t1.ts and t2.ts = t3.ts;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + + tdLog.info("========== operator=22(OP_StateWindow) ==========") + sql = '''select avg(q_int),sum(q_smallint) from table_1 state_window(q_int);''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), 
first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from table_1 state_window(q_bigint);''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from regular_table_1 state_window(q_smallint);''' + tdSql.query(sql) + tdSql.checkRows(6*self.num) + + endTime = time.time() + print("total time %ds" % (endTime - startTime)) + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file From 8b6d976e86db51c9106c054747bc48aacc243b05 Mon Sep 17 00:00:00 2001 From: happyguoxy Date: Wed, 28 Jul 2021 14:05:16 +0800 Subject: [PATCH 25/46] [TD-5074]:test operator cost --- tests/pytest/query/operator.py | 539 --------------------------------- 1 file changed, 539 deletions(-) delete mode 100644 tests/pytest/query/operator.py diff --git a/tests/pytest/query/operator.py b/tests/pytest/query/operator.py deleted file mode 100644 index b94d5fa3b3..0000000000 --- a/tests/pytest/query/operator.py +++ /dev/null @@ -1,539 +0,0 @@ -################################################################### -# Copyright (c) 2016 by TAOS Technologies, Inc. -# All rights reserved. -# -# This file is proprietary and confidential to TAOS Technologies. 
-# No part of this file may be reproduced, stored, transmitted, -# disclosed or used in any form or by any means other than as -# expressly provided by the written permission from Jianhui Tao -# -################################################################### - -# -*- coding: utf-8 -*- - -import sys -import taos -from util.log import tdLog -from util.cases import tdCases -from util.sql import tdSql -import random -import time - - -class TDTestCase: - def init(self, conn, logSql): - tdLog.debug("start to execute %s" % __file__) - tdSql.init(conn.cursor(), logSql) - - self.ts = 1600000000000 - self.num = 10 - - def run(self): - tdSql.prepare() - # test case for https://jira.taosdata.com:18080/browse/TD-5074 - - startTime = time.time() - - tdSql.execute('''create stable stable_1 - (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint, - q_bool bool , q_binary binary(20) , q_nchar nchar(20) , - q_float float , q_double double , q_ts timestamp) - tags(loc nchar(20) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, - t_bool bool , t_binary binary(20) , t_nchar nchar(20) , - t_float float , t_double double , t_ts timestamp);''') - tdSql.execute('''create stable stable_2 - (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint, - q_bool bool , q_binary binary(20) , q_nchar nchar(20) , - q_float float , q_double double , q_ts timestamp) - tags(loc nchar(20) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, - t_bool bool , t_binary binary(20) , t_nchar nchar(20) , - t_float float , t_double double , t_ts timestamp);''') - tdSql.execute('''create table table_0 using stable_1 - tags('table_0' , '0' , '0' , '0' , '0' , 0 , '0' , '0' , '0' , '0' ,'0')''') - tdSql.execute('''create table table_1 using stable_1 - tags('table_1' , '2147483647' , '9223372036854775807' , '32767' , '127' , 1 , - 'binary1' , 'nchar1' , '1' , '11' , \'1999-09-09 09:09:09.090\')''') - tdSql.execute('''create table table_2 using stable_1 - tags('table_2' , '-2147483647' , '-9223372036854775807' , '-32767' , '-127' , false , - 'binary2' , 'nchar2nchar2' , '-2.2' , '-22.22' , \'2099-09-09 09:09:09.090\')''') - tdSql.execute('''create table table_3 using stable_1 - tags('table_3' , '3' , '3' , '3' , '3' , true , 'binary3' , 'nchar3' , '33.33' , '3333.3333' , '0')''') - tdSql.execute('''create table table_4 using stable_1 - tags('table_4' , '4' , '4' , '4' , '4' , false , 'binary4' , 'nchar4' , '-444.444' , '-444444.444444' , '0')''') - tdSql.execute('''create table table_5 using stable_1 - tags('table_5' , '5' , '5' , '5' , '5' , true , 'binary5' , 'nchar5' , '5555.5555' , '55555555.55555555' , '0')''') - tdSql.execute('''create table table_21 using stable_2 - tags('table_5' , '5' , '5' , '5' , '5' , true , 'binary5' , 'nchar5' , '5555.5555' , '55555555.55555555' , '0')''') - #regular table - tdSql.execute('''create table regular_table_1 - (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint, - q_bool bool , q_binary binary(20) , q_nchar nchar(20) , - q_float float , q_double double , q_ts timestamp) ;''') - - for i in range(self.num): - tdSql.execute('''insert into table_0 values(%d, %d, %d, %d, %d, 0, 'binary.%s', 'nchar.%s', %f, %f, %d)''' - % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) - tdSql.execute('''insert into table_1 values(%d, %d, %d, %d, %d, 1, 'binary1.%s', 'nchar1.%s', %f, %f, %d)''' - % (self.ts + i, 2147483647-i, 9223372036854775807-i, 32767-i, 127-i, - i, i, 
random.random(), random.random(), 1262304000001 + i)) - tdSql.execute('''insert into table_2 values(%d, %d, %d, %d, %d, true, 'binary2.%s', 'nchar2nchar2.%s', %f, %f, %d)''' - % (self.ts + i, -2147483647+i, -9223372036854775807+i, -32767+i, -127+i, - i, i, random.uniform(-1,0), random.uniform(-1,0), 1577836800001 + i)) - tdSql.execute('''insert into table_3 values(%d, %d, %d, %d, %d, false, 'binary3.%s', 'nchar3.%s', %f, %f, %d)''' - % (self.ts + i, random.randint(-2147483647, 2147483647), - random.randint(-9223372036854775807, 9223372036854775807), random.randint(-32767, 32767), - random.randint(-127, 127), random.randint(-100, 100), random.randint(-10000, 10000), - random.uniform(-100000,100000), random.uniform(-1000000000,1000000000), self.ts + i)) - tdSql.execute('''insert into table_4 values(%d, %d, %d, %d, %d, true, 'binary4.%s', 'nchar4.%s', %f, %f, %d)''' - % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) - tdSql.execute('''insert into table_5 values(%d, %d, %d, %d, %d, false, 'binary5.%s', 'nchar5.%s', %f, %f, %d)''' - % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) - tdSql.execute('''insert into table_21 values(%d, %d, %d, %d, %d, false, 'binary5.%s', 'nchar5.%s', %f, %f, %d)''' - % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) - - tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, 0, 'binary.%s', 'nchar.%s', %f, %f, %d)''' - % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) - tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, 1, 'binary1.%s', 'nchar1.%s', %f, %f, %d)''' - % (self.ts + 100 + i, 2147483647-i, 9223372036854775807-i, 32767-i, 127-i, - i, i, random.random(), random.random(), 1262304000001 + i)) - tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, true, 'binary2.%s', 'nchar2nchar2.%s', %f, %f, %d)''' - % (self.ts + 200 + i, -2147483647+i, -9223372036854775807+i, -32767+i, -127+i, - i, i, random.uniform(-1,0), random.uniform(-1,0), 1577836800001 + i)) - tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, false, 'binary3.%s', 'nchar3.%s', %f, %f, %d)''' - % (self.ts + 300 + i, random.randint(-2147483647, 2147483647), - random.randint(-9223372036854775807, 9223372036854775807), random.randint(-32767, 32767), - random.randint(-127, 127), random.randint(-100, 100), random.randint(-10000, 10000), - random.uniform(-100000,100000), random.uniform(-1000000000,1000000000), self.ts + i)) - tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, true, 'binary4.%s', 'nchar4.%s', %f, %f, %d)''' - % (self.ts + 400 + i, i, i, i, i, i, i, i, i, self.ts + i)) - tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, false, 'binary5.%s', 'nchar5.%s', %f, %f, %d)''' - % (self.ts + 500 + i, i, i, i, i, i, i, i, i, self.ts + i)) - - tdLog.info("========== operator=1(OP_TableScan) ==========") - tdLog.info("========== operator=7(OP_Project) ==========") - sql = '''select * from stable_1''' - tdSql.query(sql) - tdSql.checkRows(6*self.num) - sql = '''select * from regular_table_1''' - tdSql.query(sql) - tdSql.checkRows(6*self.num) - - tdLog.info("========== operator=14(OP_MultiTableAggregate ) ==========") - sql = '''select last_row(*) from stable_1;''' - tdSql.query(sql) - tdSql.checkData(0,1,self.num-1) - - tdLog.info("========== operator=6(OP_Aggregate) ==========") - sql = '''select last_row(*) from regular_table_1;''' - tdSql.query(sql) - tdSql.checkData(0,1,self.num-1) - - tdLog.info("========== operator=9(OP_Limit) ==========") - sql = '''select * from 
stable_1 where loc = 'table_0' limit 5;''' - tdSql.query(sql) - tdSql.checkRows(5) - sql = '''select last_row(*) from (select * from stable_1 where loc = 'table_0');''' - tdSql.query(sql) - tdSql.checkRows(1) - - sql = '''select * from regular_table_1 ;''' - tdSql.query(sql) - tdSql.checkRows(6*self.num) - sql = '''select last_row(*) from (select * from regular_table_1);''' - tdSql.query(sql) - tdSql.checkRows(1) - tdSql.checkData(0,1,self.num-1) - - - sql = '''select last_row(*) from - ((select * from table_0) union all - (select * from table_1) union all - (select * from table_2));''' - tdSql.error(sql) - - tdLog.info("========== operator=16(OP_DummyInput) ==========") - sql = '''select last_row(*) from - ((select last_row(*) from table_0) union all - (select last_row(*) from table_1) union all - (select last_row(*) from table_2));''' - tdSql.error(sql) - - sql = '''select last_row(*) from - ((select * from table_0 limit 5 offset 5) union all - (select * from table_1 limit 5 offset 5) union all - (select * from regular_table_1 limit 5 offset 5));''' - tdSql.error(sql) - - tdLog.info("========== operator=10(OP_SLimit) ==========") - sql = '''select count(*) from stable_1 group by loc slimit 3 soffset 2 ;''' - tdSql.query(sql) - tdSql.checkRows(3) - - sql = '''select last_row(*) from - ((select * from table_0) union all - (select * from table_1) union all - (select * from table_2));''' - tdSql.error(sql) - - tdLog.info("========== operator=20(OP_Distinct) ==========") - tdLog.info("========== operator=4(OP_TagScan) ==========") - sql = '''select distinct(t_bool) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(2) - sql = '''select distinct(loc) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(6) - sql = '''select distinct(t_int) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(6) - sql = '''select distinct(t_bigint) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(6) - sql = '''select distinct(t_smallint) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(6) - sql = '''select distinct(t_tinyint) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(6) - sql = '''select distinct(t_binary) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(6) - sql = '''select distinct(t_nchar) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(6) - sql = '''select distinct(t_float) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(6) - sql = '''select distinct(t_double) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(6) - sql = '''select distinct(t_ts) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(3) - sql = '''select distinct(tbname) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(6) - - tdLog.info("========== operator=2(OP_DataBlocksOptScan) ==========") - sql = '''select last(q_int),first(q_int) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select last(q_bigint),first(q_bigint) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select last(q_smallint),first(q_smallint) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select last(q_tinyint),first(q_tinyint) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select last(q_bool),first(q_bool) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select last(q_binary),first(q_binary) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select last(q_nchar),first(q_nchar) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select last(q_float),first(q_float) from 
stable_1;''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select last(q_double),first(q_double) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select last(q_ts),first(q_ts) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint),last(q_bool),last(q_binary),last(q_nchar), - last(q_float),last(q_double),last(q_ts),first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint), - first(q_bool),first(q_binary),first(q_nchar),first(q_float),first(q_float),first(q_double),first(q_ts) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint),last(q_bool),last(q_binary),last(q_nchar), - last(q_float),last(q_double),last(q_ts),first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint),first(q_bool), - first(q_binary),first(q_nchar),first(q_float),first(q_float),first(q_double),first(q_ts) from regular_table_1;''' - tdSql.query(sql) - tdSql.checkRows(1) - - tdLog.info("========== operator=8(OP_Groupby) ==========") - sql = '''select stddev(q_int) from table_0 group by q_int;''' - tdSql.query(sql) - tdSql.checkRows(self.num) - sql = '''select stddev(q_int),stddev(q_bigint),stddev(q_smallint),stddev(q_tinyint),stddev(q_float),stddev(q_double) from stable_1 group by q_int;''' - tdSql.query(sql) - sql = '''select stddev(q_int),stddev(q_bigint),stddev(q_smallint),stddev(q_tinyint),stddev(q_float),stddev(q_double) from table_1 group by q_bigint;''' - tdSql.query(sql) - tdSql.checkRows(self.num) - sql = '''select stddev(q_int),stddev(q_bigint),stddev(q_smallint),stddev(q_tinyint),stddev(q_float),stddev(q_double) from regular_table_1 group by q_smallint;''' - tdSql.query(sql) - - tdLog.info("========== operator=11(OP_TimeWindow) ==========") - sql = '''select last(q_int) from table_0 interval(1m);''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint), - first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint) from table_1 interval(1m);''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint), - first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint) from stable_1 interval(1m);''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint), - first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint) from regular_table_1 interval(1m);''' - tdSql.query(sql) - tdSql.checkRows(1) - - tdLog.info("========== operator=12(OP_SessionWindow) ==========") - sql = '''select count(*) from table_1 session(ts,1s);''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select count(*) from regular_table_1 session(ts,1s);''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select count(*),sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), - sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), - sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), - sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), - sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), - sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) - from table_1 session(ts,1s);''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = 
'''select count(*),sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), - sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), - sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), - sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), - sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), - sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) - from regular_table_1 session(ts,1s);''' - tdSql.query(sql) - tdSql.checkRows(1) - - tdLog.info("========== operator=13(OP_Fill) ==========") - sql = '''select sum(q_int) from table_0 - where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' - tdSql.query(sql) - tdSql.checkData(0,1,'None') - sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), - sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), - sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), - sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), - sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), - sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) - from stable_1 where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' - tdSql.query(sql) - tdSql.checkData(0,1,'None') - sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), - sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), - sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), - sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), - sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), - sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) - from regular_table_1 where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' - tdSql.query(sql) - tdSql.checkData(0,1,'None') - sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), - sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), - sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), - sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), - sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), - sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) - from table_0 where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' - tdSql.query(sql) - tdSql.checkData(0,1,'None') - #TD-5190 - sql = '''select sum(q_tinyint),stddev(q_float) from stable_1 - where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' - tdSql.query(sql) - tdSql.checkData(0,1,'None') - - tdLog.info("========== operator=15(OP_MultiTableTimeInterval) ==========") - sql = '''select avg(q_int) from stable_1 where ts=0;''' - tdSql.query(sql) - tdSql.checkData(0,0,'table_0') - sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), - sum(q_bigint), 
avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), - sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), - sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), - sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), - sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) - from stable_1 group by loc having sum(q_int)>=0;''' - tdSql.query(sql) - tdSql.checkData(0,0,'table_0') - sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), - sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), - sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), - sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), - sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), - sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) - from stable_1 group by loc having avg(q_int)>=0;''' - tdSql.query(sql) - tdSql.checkData(0,0,'table_0') - sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), - sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), - sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), - sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), - sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), - sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) - from stable_1 group by loc having min(q_int)>=0;''' - tdSql.query(sql) - tdSql.checkData(0,0,'table_0') - sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), - sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), - sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), - sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), - sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), - sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) - from stable_1 group by loc having max(q_int)>=0;''' - tdSql.query(sql) - tdSql.checkData(0,0,'table_0') - sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), - sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), - sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), - sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), - sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), - sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) - from stable_1 group by loc having first(q_int)>=0;''' - tdSql.query(sql) - tdSql.checkData(0,0,'table_0') - sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), - sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), - sum(q_smallint), 
avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), - sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), - sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), - sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) - from stable_1 group by loc having last(q_int)>=0;''' - tdSql.query(sql) - tdSql.checkData(0,0,'table_0') - - tdLog.info("========== operator=21(OP_Join) ==========") - sql = '''select t1.q_int,t2.q_int from - (select ts,q_int from table_1) t1 , (select ts,q_int from table_2) t2 - where t2.ts = t1.ts;''' - tdSql.query(sql) - tdSql.checkRows(self.num) - sql = '''select t1.*,t2.* from - (select * from table_1) t1 , (select * from table_2) t2 - where t2.ts = t1.ts;''' - tdSql.query(sql) - tdSql.checkRows(self.num) - sql = '''select t1.*,t2.* from - (select * from regular_table_1) t1 , (select * from table_0) t2 - where t2.ts = t1.ts;''' - tdSql.query(sql) - tdSql.checkRows(self.num) - sql = '''select t1.*,t2.* from - (select * from stable_1) t1 , (select * from table_2) t2 - where t2.ts = t1.ts;''' - tdSql.query(sql) - tdSql.checkRows(self.num) - sql = '''select t1.*,t2.* from - (select * from regular_table_1) t1 , (select * from stable_1) t2 - where t2.ts = t1.ts;''' - tdSql.query(sql) - tdSql.checkRows(self.num) - sql = '''select t1.*,t2.*,t3.* from - (select * from regular_table_1) t1 , (select * from stable_1) t2, (select * from table_0) t3 - where t2.ts = t1.ts and t3.ts = t1.ts and t2.ts = t3.ts;''' - tdSql.query(sql) - tdSql.checkRows(self.num) - - tdLog.info("========== operator=22(OP_StateWindow) ==========") - sql = '''select avg(q_int),sum(q_smallint) from table_1 state_window(q_int);''' - tdSql.query(sql) - tdSql.checkRows(self.num) - sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), - sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), - sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), - sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), - sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), - sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) - from table_1 state_window(q_bigint);''' - tdSql.query(sql) - tdSql.checkRows(self.num) - sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), - sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), - sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), - sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), - sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), - sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) - from regular_table_1 state_window(q_smallint);''' - tdSql.query(sql) - tdSql.checkRows(6*self.num) - - endTime = time.time() - print("total time %ds" % (endTime - startTime)) - - - def stop(self): - tdSql.close() - tdLog.success("%s successfully executed" % __file__) - - -tdCases.addWindows(__file__, TDTestCase()) -tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file From 3647f7defcd52e1ce374e6af3c504e50383cc441 Mon Sep 17 00:00:00 2001 From: 
happyguoxy Date: Wed, 28 Jul 2021 14:05:42 +0800 Subject: [PATCH 26/46] [TD-5074]:test operator cost --- tests/pytest/fulltest.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index eb068b6585..d54c413fba 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -241,7 +241,7 @@ python3 ./test.py -f query/queryStateWindow.py python3 ./test.py -f query/nestedQuery/queryWithOrderLimit.py python3 ./test.py -f query/nestquery_last_row.py python3 ./test.py -f query/queryCnameDisplay.py -python3 ./test.py -f query/operator.py +python3 ./test.py -f query/operator_cost.py python3 test.py -f query/nestedQuery/queryWithSpread.py #stream From e4a67f8046445013895402e47352f55a308a3044 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Wed, 28 Jul 2021 14:22:18 +0800 Subject: [PATCH 27/46] update local CI --- tests/Jenkinsfile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/Jenkinsfile b/tests/Jenkinsfile index c75427b5f4..fb96b669ff 100644 --- a/tests/Jenkinsfile +++ b/tests/Jenkinsfile @@ -48,7 +48,7 @@ pipeline { } } stage('test_b1') { - agent{label 'master'} + agent{label 'slad2'} steps { pre_test() @@ -62,7 +62,7 @@ pipeline { } stage('test_crash_gen') { - agent{label "slad2"} + agent{label "slad3"} steps { pre_test() sh ''' @@ -141,7 +141,7 @@ pipeline { } stage('test_valgrind') { - agent{label "slad3"} + agent{label "slad4"} steps { pre_test() From 8f0be342a8599ac2eb0fdc349d6f0d9d1078f142 Mon Sep 17 00:00:00 2001 From: happyguoxy Date: Wed, 28 Jul 2021 15:06:30 +0800 Subject: [PATCH 28/46] [TD-5074]:test operator cost --- tests/pytest/query/operator_cost.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/tests/pytest/query/operator_cost.py b/tests/pytest/query/operator_cost.py index b94d5fa3b3..774a1e5f42 100644 --- a/tests/pytest/query/operator_cost.py +++ b/tests/pytest/query/operator_cost.py @@ -195,9 +195,6 @@ class TDTestCase: sql = '''select distinct(t_tinyint) from stable_1;''' tdSql.query(sql) tdSql.checkRows(6) - sql = '''select distinct(t_binary) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(6) sql = '''select distinct(t_nchar) from stable_1;''' tdSql.query(sql) tdSql.checkRows(6) From 0b1c661fff52c9ba301edbab66009b50c226edca Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Wed, 28 Jul 2021 16:03:18 +0800 Subject: [PATCH 29/46] [TD-5592]: fix change KV row value coredump --- src/common/inc/tdataformat.h | 3 +- src/common/src/tdataformat.c | 83 +++++++++++++----------------------- 2 files changed, 32 insertions(+), 54 deletions(-) diff --git a/src/common/inc/tdataformat.h b/src/common/inc/tdataformat.h index 829e771c70..f8394dc271 100644 --- a/src/common/inc/tdataformat.h +++ b/src/common/inc/tdataformat.h @@ -55,7 +55,7 @@ extern "C" { typedef struct { int8_t type; // Column type int16_t colId; // column ID - uint16_t bytes; // column bytes + int16_t bytes; // column bytes (restore to int16_t in case of misuse) uint16_t offset; // point offset in SDataRow after the header part. 
} STColumn; @@ -366,6 +366,7 @@ typedef struct { #define kvRowColIdxAt(r, i) (kvRowColIdx(r) + (i)) #define kvRowFree(r) tfree(r) #define kvRowEnd(r) POINTER_SHIFT(r, kvRowLen(r)) +#define kvRowValLen(r) (kvRowLen(r) - TD_KV_ROW_HEAD_SIZE - sizeof(SColIdx) * kvRowNCols(r)) #define kvRowTKey(r) (*(TKEY *)(kvRowValues(r))) #define kvRowKey(r) tdGetKey(kvRowTKey(r)) #define kvRowDeleted(r) TKEY_IS_DELETED(kvRowTKey(r)) diff --git a/src/common/src/tdataformat.c b/src/common/src/tdataformat.c index 9f7432c90d..5a63588611 100644 --- a/src/common/src/tdataformat.c +++ b/src/common/src/tdataformat.c @@ -634,42 +634,28 @@ int tdSetKVRowDataOfCol(SKVRow *orow, int16_t colId, int8_t type, void *value) { SKVRow nrow = NULL; void * ptr = taosbsearch(&colId, kvRowColIdx(row), kvRowNCols(row), sizeof(SColIdx), comparTagId, TD_GE); - if (ptr == NULL || ((SColIdx *)ptr)->colId > colId) { // need to add a column value to the row + if (ptr == NULL || ((SColIdx *)ptr)->colId > colId) { // need to add a column value to the row int diff = IS_VAR_DATA_TYPE(type) ? varDataTLen(value) : TYPE_BYTES[type]; - nrow = malloc(kvRowLen(row) + sizeof(SColIdx) + diff); + int nRowLen = kvRowLen(row) + sizeof(SColIdx) + diff; + int oRowCols = kvRowNCols(row); + + ASSERT(diff > 0); + nrow = malloc(nRowLen); if (nrow == NULL) return -1; - kvRowSetLen(nrow, kvRowLen(row) + (uint16_t)sizeof(SColIdx) + diff); - kvRowSetNCols(nrow, kvRowNCols(row) + 1); + kvRowSetLen(nrow, nRowLen); + kvRowSetNCols(nrow, oRowCols + 1); - if (ptr == NULL) { - memcpy(kvRowColIdx(nrow), kvRowColIdx(row), sizeof(SColIdx) * kvRowNCols(row)); - memcpy(kvRowValues(nrow), kvRowValues(row), POINTER_DISTANCE(kvRowEnd(row), kvRowValues(row))); - int colIdx = kvRowNCols(nrow) - 1; - kvRowColIdxAt(nrow, colIdx)->colId = colId; - kvRowColIdxAt(nrow, colIdx)->offset = (int16_t)(POINTER_DISTANCE(kvRowEnd(row), kvRowValues(row))); - memcpy(kvRowColVal(nrow, kvRowColIdxAt(nrow, colIdx)), value, diff); - } else { - int16_t tlen = (int16_t)(POINTER_DISTANCE(ptr, kvRowColIdx(row))); - if (tlen > 0) { - memcpy(kvRowColIdx(nrow), kvRowColIdx(row), tlen); - memcpy(kvRowValues(nrow), kvRowValues(row), ((SColIdx *)ptr)->offset); - } + memcpy(kvRowColIdx(nrow), kvRowColIdx(row), sizeof(SColIdx) * oRowCols); + memcpy(kvRowValues(nrow), kvRowValues(row), kvRowValLen(row)); - int colIdx = tlen / sizeof(SColIdx); - kvRowColIdxAt(nrow, colIdx)->colId = colId; - kvRowColIdxAt(nrow, colIdx)->offset = ((SColIdx *)ptr)->offset; - memcpy(kvRowColVal(nrow, kvRowColIdxAt(nrow, colIdx)), value, diff); + pColIdx = kvRowColIdxAt(nrow, oRowCols); + pColIdx->colId = colId; + pColIdx->offset = kvRowValLen(row); - for (int i = colIdx; i < kvRowNCols(row); i++) { - kvRowColIdxAt(nrow, i + 1)->colId = kvRowColIdxAt(row, i)->colId; - kvRowColIdxAt(nrow, i + 1)->offset = kvRowColIdxAt(row, i)->offset + diff; - } - memcpy(kvRowColVal(nrow, kvRowColIdxAt(nrow, colIdx + 1)), kvRowColVal(row, kvRowColIdxAt(row, colIdx)), - POINTER_DISTANCE(kvRowEnd(row), kvRowColVal(row, kvRowColIdxAt(row, colIdx))) + memcpy(kvRowColVal(nrow, pColIdx), value, diff); // copy new value - ); - } + tdSortKVRowByColIdx(nrow); *orow = nrow; free(row); @@ -680,9 +666,8 @@ int tdSetKVRowDataOfCol(SKVRow *orow, int16_t colId, int8_t type, void *value) { if (varDataTLen(value) == varDataTLen(pOldVal)) { // just update the column value in place memcpy(pOldVal, value, varDataTLen(value)); - } else { // need to reallocate the memory - uint16_t diff = varDataTLen(value) - varDataTLen(pOldVal); - uint16_t nlen = kvRowLen(row) + 
diff; + } else { // need to reallocate the memory + int16_t nlen = kvRowLen(row) + (varDataTLen(value) - varDataTLen(pOldVal)); ASSERT(nlen > 0); nrow = malloc(nlen); if (nrow == NULL) return -1; @@ -690,30 +675,22 @@ int tdSetKVRowDataOfCol(SKVRow *orow, int16_t colId, int8_t type, void *value) { kvRowSetLen(nrow, nlen); kvRowSetNCols(nrow, kvRowNCols(row)); - // Copy part ahead - nlen = (int16_t)(POINTER_DISTANCE(ptr, kvRowColIdx(row))); - ASSERT(nlen % sizeof(SColIdx) == 0); - if (nlen > 0) { - ASSERT(((SColIdx *)ptr)->offset > 0); - memcpy(kvRowColIdx(nrow), kvRowColIdx(row), nlen); - memcpy(kvRowValues(nrow), kvRowValues(row), ((SColIdx *)ptr)->offset); + int zsize = sizeof(SColIdx) * kvRowNCols(row) + ((SColIdx *)ptr)->offset; + memcpy(kvRowColIdx(nrow), kvRowColIdx(row), zsize); + memcpy(kvRowColVal(nrow, ((SColIdx *)ptr)), value, varDataTLen(value)); + // Copy left value part + int lsize = kvRowLen(row) - TD_KV_ROW_HEAD_SIZE - zsize - varDataTLen(pOldVal); + if (lsize > 0) { + memcpy(POINTER_SHIFT(nrow, TD_KV_ROW_HEAD_SIZE + zsize + varDataTLen(value)), + POINTER_SHIFT(row, TD_KV_ROW_HEAD_SIZE + zsize + varDataTLen(pOldVal)), lsize); } - // Construct current column value - int colIdx = nlen / sizeof(SColIdx); - pColIdx = kvRowColIdxAt(nrow, colIdx); - pColIdx->colId = ((SColIdx *)ptr)->colId; - pColIdx->offset = ((SColIdx *)ptr)->offset; - memcpy(kvRowColVal(nrow, pColIdx), value, varDataTLen(value)); - - // Construct columns after - if (kvRowNCols(nrow) - colIdx - 1 > 0) { - for (int i = colIdx + 1; i < kvRowNCols(nrow); i++) { - kvRowColIdxAt(nrow, i)->colId = kvRowColIdxAt(row, i)->colId; - kvRowColIdxAt(nrow, i)->offset = kvRowColIdxAt(row, i)->offset + diff; + for (int i = 0; i < kvRowNCols(nrow); i++) { + pColIdx = kvRowColIdxAt(nrow, i); + + if (pColIdx->offset > ((SColIdx *)ptr)->offset) { + pColIdx->offset = pColIdx->offset - varDataTLen(pOldVal) + varDataTLen(value); } - memcpy(kvRowColVal(nrow, kvRowColIdxAt(nrow, colIdx + 1)), kvRowColVal(row, kvRowColIdxAt(row, colIdx + 1)), - POINTER_DISTANCE(kvRowEnd(row), kvRowColVal(row, kvRowColIdxAt(row, colIdx + 1)))); } *orow = nrow; From aac0aeddfdf81725e9deb21b79301c3816cbf4bb Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Wed, 28 Jul 2021 16:10:06 +0800 Subject: [PATCH 30/46] add SANITIZER parameter --- tests/Jenkinsfile | 29 ++++++++++++++++++++++++++++- tests/mas/Jenkinsfile | 29 ++++++++++++++++++++++++++++- 2 files changed, 56 insertions(+), 2 deletions(-) diff --git a/tests/Jenkinsfile b/tests/Jenkinsfile index fb96b669ff..ed54515ba8 100644 --- a/tests/Jenkinsfile +++ b/tests/Jenkinsfile @@ -1,5 +1,32 @@ def pre_test(){ + sh ''' + sudo rmtaos||echo 'no taosd installed' + ''' + sh ''' + cd ${WKC} + git reset --hard + git checkout $BRANCH_NAME + git pull + git submodule update + cd ${WK} + git reset --hard + git checkout $BRANCH_NAME + git pull + export TZ=Asia/Harbin + date + rm -rf ${WK}/debug + mkdir debug + cd debug + cmake -DMEMORY_SANITIZER=true .. 
> /dev/null + make > /dev/null + make install > /dev/null + pip3 install ${WKC}/src/connector/python + ''' + return 1 +} +def pre_test_p(){ + sh ''' sudo rmtaos||echo 'no taosd installed' ''' @@ -39,7 +66,7 @@ pipeline { stage('pytest') { agent{label 'slad1'} steps { - pre_test() + pre_test_p() sh ''' cd ${WKC}/tests find pytest -name '*'sql|xargs rm -rf diff --git a/tests/mas/Jenkinsfile b/tests/mas/Jenkinsfile index ae2286298f..4dcc1d9c18 100644 --- a/tests/mas/Jenkinsfile +++ b/tests/mas/Jenkinsfile @@ -1,5 +1,32 @@ def pre_test(){ + sh ''' + sudo rmtaos||echo 'no taosd installed' + ''' + sh ''' + cd ${WKC} + git reset --hard + git checkout $BRANCH_NAME + git pull + git submodule update + cd ${WK} + git reset --hard + git checkout $BRANCH_NAME + git pull + export TZ=Asia/Harbin + date + rm -rf ${WK}/debug + mkdir debug + cd debug + cmake -DMEMORY_SANITIZER=true .. > /dev/null + make > /dev/null + make install > /dev/null + pip3 install ${WKC}/src/connector/python/ || echo 0 + ''' + return 1 +} +def pre_test_p(){ + sh ''' sudo rmtaos||echo 'no taosd installed' ''' @@ -39,7 +66,7 @@ pipeline { stage('pytest') { agent{label 'slam1'} steps { - pre_test() + pre_test_p() sh ''' cd ${WKC}/tests find pytest -name '*'sql|xargs rm -rf From 2892d2624c953b0a2c2fa6865016a791c611ce49 Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Wed, 28 Jul 2021 17:38:38 +0800 Subject: [PATCH 31/46] [TD-2639] : fix typo. --- documentation20/cn/03.architecture/02.replica/docs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation20/cn/03.architecture/02.replica/docs.md b/documentation20/cn/03.architecture/02.replica/docs.md index 183306eed8..27ac7f123c 100644 --- a/documentation20/cn/03.architecture/02.replica/docs.md +++ b/documentation20/cn/03.architecture/02.replica/docs.md @@ -111,7 +111,7 @@ TDengine采取的是Master-Slave模式进行同步,与流行的RAFT一致性 2. 应用将打上版本号的写请求封装一个WAL Head, 写入WAL(Write Ahead Log) 3. 应用调用API syncForwardToPeer,如果vnode B是slave状态,sync模块将包含WAL Head的数据包通过Forward消息发送给vnode B,否则就不转发。 4. vnode B收到Forward消息后,调用回调函数writeToCache, 交给应用处理 -5. vnode B应用在写入成功后,都需要调用syncAckForward通知sync模块已经写入成功。 +5. vnode B应用在写入成功后,都需要调用syncConfirmForward通知sync模块已经写入成功。 6. 如果quorum大于1,vnode B需要等待应用的回复确认,收到确认后,vnode B发送Forward Response消息给node A。 7. 如果quorum大于1,vnode A需要等待vnode B或其他副本对Forward消息的确认。 8. 如果quorum大于1,vnode A收到quorum-1条确认消息后,调用回调函数confirmForward,通知应用写入成功。 From 2e7e28781848510446e33f5e122bcaeb28c90f89 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Wed, 28 Jul 2021 17:53:59 +0800 Subject: [PATCH 32/46] [TD-5591]: taosdemo coredump when query 4096 columns. 
(#7054) --- src/kit/taosdemo/taosdemo.c | 31 +++++++++++++++---------------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index c4dec6a231..32bad230cf 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -92,7 +92,6 @@ extern char configDir[]; #define MAX_SUPER_TABLE_COUNT 200 #define MAX_QUERY_SQL_COUNT 100 -#define MAX_QUERY_SQL_LENGTH BUFFER_SIZE #define MAX_DATABASE_COUNT 256 #define INPUT_BUF_LEN 256 @@ -383,7 +382,7 @@ typedef struct SpecifiedQueryInfo_S { uint64_t queryTimes; bool subscribeRestart; int subscribeKeepProgress; - char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1]; + char sql[MAX_QUERY_SQL_COUNT][BUFFER_SIZE+1]; char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN]; int resubAfterConsume[MAX_QUERY_SQL_COUNT]; int endAfterConsume[MAX_QUERY_SQL_COUNT]; @@ -406,7 +405,7 @@ typedef struct SuperQueryInfo_S { int64_t childTblCount; char childTblPrefix[TBNAME_PREFIX_LEN]; // 20 characters reserved for seq int sqlCount; - char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1]; + char sql[MAX_QUERY_SQL_COUNT][BUFFER_SIZE+1]; char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN]; int resubAfterConsume; int endAfterConsume; @@ -1253,14 +1252,14 @@ static void fetchResult(TAOS_RES *res, threadInfo* pThreadInfo) { // fetch the records row by row while((row = taos_fetch_row(res))) { - if (totalLen >= 100*1024*1024 - 32000) { + if (totalLen >= (100*1024*1024 - HEAD_BUFF_LEN*2)) { if (strlen(pThreadInfo->filePath) > 0) appendResultBufToFile(databuf, pThreadInfo); totalLen = 0; memset(databuf, 0, 100*1024*1024); } num_rows++; - char temp[16000] = {0}; + char temp[HEAD_BUFF_LEN] = {0}; int len = taos_print_row(temp, row, fields, num_fields); len += sprintf(temp + len, "\n"); //printf("query result:%s\n", temp); @@ -2165,15 +2164,15 @@ static void printfDbInfoForQueryToFile( } static void printfQuerySystemInfo(TAOS * taos) { - char filename[MAX_QUERY_SQL_LENGTH+1] = {0}; - char buffer[MAX_QUERY_SQL_LENGTH+1] = {0}; + char filename[BUFFER_SIZE+1] = {0}; + char buffer[BUFFER_SIZE+1] = {0}; TAOS_RES* res; time_t t; struct tm* lt; time(&t); lt = localtime(&t); - snprintf(filename, MAX_QUERY_SQL_LENGTH, "querySystemInfo-%d-%d-%d %d:%d:%d", + snprintf(filename, BUFFER_SIZE, "querySystemInfo-%d-%d-%d %d:%d:%d", lt->tm_year+1900, lt->tm_mon, lt->tm_mday, lt->tm_hour, lt->tm_min, lt->tm_sec); @@ -2205,12 +2204,12 @@ static void printfQuerySystemInfo(TAOS * taos) { printfDbInfoForQueryToFile(filename, dbInfos[i], i); // show db.vgroups - snprintf(buffer, MAX_QUERY_SQL_LENGTH, "show %s.vgroups;", dbInfos[i]->name); + snprintf(buffer, BUFFER_SIZE, "show %s.vgroups;", dbInfos[i]->name); res = taos_query(taos, buffer); xDumpResultToFile(filename, res); // show db.stables - snprintf(buffer, MAX_QUERY_SQL_LENGTH, "show %s.stables;", dbInfos[i]->name); + snprintf(buffer, BUFFER_SIZE, "show %s.stables;", dbInfos[i]->name); res = taos_query(taos, buffer); xDumpResultToFile(filename, res); free(dbInfos[i]); @@ -4549,7 +4548,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { goto PARSE_OVER; } tstrncpy(g_queryInfo.specifiedQueryInfo.sql[j], - sqlStr->valuestring, MAX_QUERY_SQL_LENGTH); + sqlStr->valuestring, BUFFER_SIZE); // default value is -1, which mean infinite loop g_queryInfo.specifiedQueryInfo.endAfterConsume[j] = -1; @@ -4771,7 +4770,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { goto PARSE_OVER; } tstrncpy(g_queryInfo.superQueryInfo.sql[j], sqlStr->valuestring, - MAX_QUERY_SQL_LENGTH); + 
BUFFER_SIZE); cJSON *result = cJSON_GetObjectItem(sql, "result"); if (result != NULL && result->type == cJSON_String @@ -7425,14 +7424,14 @@ static void replaceChildTblName(char* inSql, char* outSql, int tblIndex) { tstrncpy(outSql, inSql, pos - inSql + 1); //printf("1: %s\n", outSql); - strncat(outSql, subTblName, MAX_QUERY_SQL_LENGTH - 1); + strncat(outSql, subTblName, BUFFER_SIZE - 1); //printf("2: %s\n", outSql); - strncat(outSql, pos+strlen(sourceString), MAX_QUERY_SQL_LENGTH - 1); + strncat(outSql, pos+strlen(sourceString), BUFFER_SIZE - 1); //printf("3: %s\n", outSql); } static void *superTableQuery(void *sarg) { - char sqlstr[MAX_QUERY_SQL_LENGTH]; + char sqlstr[BUFFER_SIZE]; threadInfo *pThreadInfo = (threadInfo *)sarg; setThreadName("superTableQuery"); @@ -7735,7 +7734,7 @@ static TAOS_SUB* subscribeImpl( static void *superSubscribe(void *sarg) { threadInfo *pThreadInfo = (threadInfo *)sarg; - char subSqlstr[MAX_QUERY_SQL_LENGTH]; + char subSqlstr[BUFFER_SIZE]; TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT] = {0}; uint64_t tsubSeq; From a9c51e251a3f3c78a67168e63065a6e12283cb7b Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Wed, 28 Jul 2021 18:28:35 +0800 Subject: [PATCH 33/46] remove systemctl --- tests/Jenkinsfile | 4 ++-- tests/mas/Jenkinsfile | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/Jenkinsfile b/tests/Jenkinsfile index ed54515ba8..b5b068381d 100644 --- a/tests/Jenkinsfile +++ b/tests/Jenkinsfile @@ -119,7 +119,7 @@ pipeline { } sh''' - systemctl start taosd + nohup taosd >/dev/null & sleep 10 ''' catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { @@ -155,7 +155,7 @@ pipeline { ''' } sh ''' - systemctl stop taosd + pkill -9 taosd cd ${WKC}/tests ./test-all.sh b2 date diff --git a/tests/mas/Jenkinsfile b/tests/mas/Jenkinsfile index 4dcc1d9c18..52b29da92e 100644 --- a/tests/mas/Jenkinsfile +++ b/tests/mas/Jenkinsfile @@ -119,7 +119,7 @@ pipeline { } sh''' - systemctl start taosd + nohup taosd >/dev/null & sleep 10 ''' catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { @@ -163,7 +163,7 @@ pipeline { ''' } sh ''' - systemctl stop taosd + pkill -9 taosd cd ${WKC}/tests ./test-all.sh b2 date From 175fd1c93d7b4073a0ffe93e4ea236f1cdb32235 Mon Sep 17 00:00:00 2001 From: happyguoxy Date: Wed, 28 Jul 2021 18:32:34 +0800 Subject: [PATCH 34/46] [TD-5074]:test operator cost --- tests/pytest/query/operator_cost.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/pytest/query/operator_cost.py b/tests/pytest/query/operator_cost.py index 774a1e5f42..27de3531eb 100644 --- a/tests/pytest/query/operator_cost.py +++ b/tests/pytest/query/operator_cost.py @@ -207,9 +207,9 @@ class TDTestCase: sql = '''select distinct(t_ts) from stable_1;''' tdSql.query(sql) tdSql.checkRows(3) - sql = '''select distinct(tbname) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(6) + # sql = '''select distinct(tbname) from stable_1;''' + # tdSql.query(sql) + # tdSql.checkRows(6) tdLog.info("========== operator=2(OP_DataBlocksOptScan) ==========") sql = '''select last(q_int),first(q_int) from stable_1;''' From 21bdbb9e507a496cd8e7b68b8f2a67ed35fd6be6 Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Thu, 29 Jul 2021 10:32:20 +0800 Subject: [PATCH 35/46] [TD-3666] : fix description about binary storage length. 
--- documentation20/cn/12.taos-sql/docs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md index 72f4876dcf..6d39c25565 100644 --- a/documentation20/cn/12.taos-sql/docs.md +++ b/documentation20/cn/12.taos-sql/docs.md @@ -48,7 +48,7 @@ TDengine 缺省的时间戳是毫秒精度,但通过在 CREATE DATABASE 时传 | 3 | BIGINT | 8 | 长整型,范围 [-2^63+1, 2^63-1], -2^63 用于 NULL | | 4 | FLOAT | 4 | 浮点型,有效位数 6-7,范围 [-3.4E38, 3.4E38] | | 5 | DOUBLE | 8 | 双精度浮点型,有效位数 15-16,范围 [-1.7E308, 1.7E308] | -| 6 | BINARY | 自定义 | 记录单字节字符串,建议只用于处理 ASCII 可见字符,中文等多字节字符需使用 nchar。理论上,最长可以有 16374 字节,但由于每行数据最多 16K 字节,实际上限一般小于理论值。binary 仅支持字符串输入,字符串两端需使用单引号引用。使用时须指定大小,如 binary(20) 定义了最长为 20 个单字节字符的字符串,每个字符占 1 byte 的存储空间,此时如果用户字符串超出 20 字节将会报错。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示,即 `\’`。 | +| 6 | BINARY | 自定义 | 记录单字节字符串,建议只用于处理 ASCII 可见字符,中文等多字节字符需使用 nchar。理论上,最长可以有 16374 字节,但由于每行数据最多 16K 字节,实际上限一般小于理论值。binary 仅支持字符串输入,字符串两端需使用单引号引用。使用时须指定大小,如 binary(20) 定义了最长为 20 个单字节字符的字符串,每个字符占 1 byte 的存储空间,总共固定占用 20 bytes 的空间,此时如果用户字符串超出 20 字节将会报错。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示,即 `\’`。 | | 7 | SMALLINT | 2 | 短整型, 范围 [-32767, 32767], -32768 用于 NULL | | 8 | TINYINT | 1 | 单字节整型,范围 [-127, 127], -128 用于 NULL | | 9 | BOOL | 1 | 布尔型,{true, false} | From c95ad354387d1a5039919a387620bbb06e837399 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Thu, 29 Jul 2021 11:07:06 +0800 Subject: [PATCH 36/46] revert sani --- tests/Jenkinsfile | 2 +- tests/mas/Jenkinsfile | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/Jenkinsfile b/tests/Jenkinsfile index b5b068381d..3185d9ddba 100644 --- a/tests/Jenkinsfile +++ b/tests/Jenkinsfile @@ -18,7 +18,7 @@ def pre_test(){ rm -rf ${WK}/debug mkdir debug cd debug - cmake -DMEMORY_SANITIZER=true .. > /dev/null + cmake .. > /dev/null make > /dev/null make install > /dev/null pip3 install ${WKC}/src/connector/python diff --git a/tests/mas/Jenkinsfile b/tests/mas/Jenkinsfile index 52b29da92e..e44df002ef 100644 --- a/tests/mas/Jenkinsfile +++ b/tests/mas/Jenkinsfile @@ -18,7 +18,7 @@ def pre_test(){ rm -rf ${WK}/debug mkdir debug cd debug - cmake -DMEMORY_SANITIZER=true .. > /dev/null + cmake .. 
> /dev/null make > /dev/null make install > /dev/null pip3 install ${WKC}/src/connector/python/ || echo 0 From f77874a5f4f9d4822f6a84d1502468dd77db3648 Mon Sep 17 00:00:00 2001 From: markswang <792637585@qq.com> Date: Thu, 29 Jul 2021 13:14:52 +0800 Subject: [PATCH 37/46] [TD-5534]:fix the coverity high risk of client --- src/client/src/tscSQLParser.c | 2 +- src/client/src/tscServer.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index b7d513cfbe..7d4434a922 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -8376,7 +8376,7 @@ static int32_t doValidateSubquery(SSqlNode* pSqlNode, int32_t index, SSqlObj* pS return invalidOperationMsg(msgBuf, "subquery alias name too long"); } - tstrncpy(pTableMetaInfo1->aliasName, subInfo->aliasName.z, MIN(subInfo->aliasName.n, sizeof(pTableMetaInfo1->aliasName))); + tstrncpy(pTableMetaInfo1->aliasName, subInfo->aliasName.z, sizeof(pTableMetaInfo1->aliasName)); } taosArrayPush(pQueryInfo->pUpstream, &pSub); diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 36d3d156db..01c1e4a4a4 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -164,7 +164,7 @@ static void tscUpdateVgroupInfo(SSqlObj *pSql, SRpcEpSet *pEpSet) { vgroupInfo.inUse = pEpSet->inUse; vgroupInfo.numOfEps = pEpSet->numOfEps; for (int32_t i = 0; i < vgroupInfo.numOfEps; i++) { - tstrncpy(vgroupInfo.ep[i].fqdn, pEpSet->fqdn[i], TSDB_FQDN_LEN); // buffer not null terminated risk + tstrncpy(vgroupInfo.ep[i].fqdn, pEpSet->fqdn[i], TSDB_FQDN_LEN); vgroupInfo.ep[i].port = pEpSet->port[i]; } From ba8d429f2ce7239cf17e99dbcc3a7db310bc337a Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Thu, 29 Jul 2021 13:48:37 +0800 Subject: [PATCH 38/46] fix error --- tests/Jenkinsfile | 2 +- tests/mas/Jenkinsfile | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/Jenkinsfile b/tests/Jenkinsfile index 3185d9ddba..eeb56bd3b9 100644 --- a/tests/Jenkinsfile +++ b/tests/Jenkinsfile @@ -155,7 +155,7 @@ pipeline { ''' } sh ''' - pkill -9 taosd + pkill -9 taosd || echo 1 cd ${WKC}/tests ./test-all.sh b2 date diff --git a/tests/mas/Jenkinsfile b/tests/mas/Jenkinsfile index e44df002ef..84c2d8e4c2 100644 --- a/tests/mas/Jenkinsfile +++ b/tests/mas/Jenkinsfile @@ -163,7 +163,7 @@ pipeline { ''' } sh ''' - pkill -9 taosd + pkill -9 taosd || echo 1 cd ${WKC}/tests ./test-all.sh b2 date From 61fb774237bb4d6d9695d4f2f0c5cc1c542a52d1 Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Thu, 29 Jul 2021 14:03:59 +0800 Subject: [PATCH 39/46] [TD-5062] Feature/row partial update (#6970) * [TD-5062]: support partial update --- src/client/src/tscUtil.c | 4 +- src/common/inc/tdataformat.h | 180 ++++++++++-- src/common/src/tdataformat.c | 129 ++++++++- src/common/src/ttypes.c | 32 +-- src/inc/taosdef.h | 8 +- src/inc/tsdb.h | 2 +- src/inc/ttype.h | 8 +- src/tsdb/inc/tsdbBuffer.h | 4 +- src/tsdb/inc/tsdbReadImpl.h | 9 +- src/tsdb/inc/tsdbRowMergeBuf.h | 43 +++ src/tsdb/inc/tsdbint.h | 5 +- src/tsdb/src/tsdbBuffer.c | 4 +- src/tsdb/src/tsdbCommit.c | 4 +- src/tsdb/src/tsdbCommitQueue.c | 2 +- src/tsdb/src/tsdbCompact.c | 2 +- src/tsdb/src/tsdbMain.c | 7 +- src/tsdb/src/tsdbMemTable.c | 366 +++++++++++------------- src/tsdb/src/tsdbRead.c | 507 ++++++++++++++++++++------------- src/tsdb/src/tsdbReadImpl.c | 8 +- src/tsdb/src/tsdbRowMergeBuf.c | 30 ++ src/util/inc/tfunctional.h | 56 ++++ src/util/inc/tskiplist.h | 18 +- src/util/src/tfunctional.c | 48 ++++ 
src/util/src/tskiplist.c | 49 +++- 24 files changed, 1045 insertions(+), 480 deletions(-) create mode 100644 src/tsdb/inc/tsdbRowMergeBuf.h create mode 100644 src/tsdb/src/tsdbRowMergeBuf.c create mode 100644 src/util/inc/tfunctional.h create mode 100644 src/util/src/tfunctional.c diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 9dc541b1a6..1c610b67a5 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -1864,8 +1864,8 @@ static SMemRow tdGenMemRowFromBuilder(SMemRowBuilder* pBuilder) { while (i < nColsBound) { int16_t colId = payloadColId(p); uint8_t colType = payloadColType(p); - tdAppendKvColVal(kvRow, POINTER_SHIFT(pVals,payloadColOffset(p)), colId, colType, toffset); - toffset += sizeof(SColIdx); + tdAppendKvColVal(kvRow, POINTER_SHIFT(pVals,payloadColOffset(p)), colId, colType, &toffset); + //toffset += sizeof(SColIdx); p = payloadNextCol(p); ++i; } diff --git a/src/common/inc/tdataformat.h b/src/common/inc/tdataformat.h index f8394dc271..47bd8a72b2 100644 --- a/src/common/inc/tdataformat.h +++ b/src/common/inc/tdataformat.h @@ -232,6 +232,83 @@ static FORCE_INLINE void *tdGetRowDataOfCol(SDataRow row, int8_t type, int32_t o } } +static FORCE_INLINE void *tdGetPtrToCol(SDataRow row, STSchema *pSchema, int idx) { + return POINTER_SHIFT(row, TD_DATA_ROW_HEAD_SIZE + pSchema->columns[idx].offset); +} + +static FORCE_INLINE void *tdGetColOfRowBySchema(SDataRow row, STSchema *pSchema, int idx) { + int16_t offset = TD_DATA_ROW_HEAD_SIZE + pSchema->columns[idx].offset; + int8_t type = pSchema->columns[idx].type; + + return tdGetRowDataOfCol(row, type, offset); +} + +static FORCE_INLINE bool tdIsColOfRowNullBySchema(SDataRow row, STSchema *pSchema, int idx) { + int16_t offset = TD_DATA_ROW_HEAD_SIZE + pSchema->columns[idx].offset; + int8_t type = pSchema->columns[idx].type; + + return isNull(tdGetRowDataOfCol(row, type, offset), type); +} + +static FORCE_INLINE void tdSetColOfRowNullBySchema(SDataRow row, STSchema *pSchema, int idx) { + int16_t offset = TD_DATA_ROW_HEAD_SIZE + pSchema->columns[idx].offset; + int8_t type = pSchema->columns[idx].type; + int16_t bytes = pSchema->columns[idx].bytes; + + setNull(tdGetRowDataOfCol(row, type, offset), type, bytes); +} + +static FORCE_INLINE void tdCopyColOfRowBySchema(SDataRow dst, STSchema *pDstSchema, int dstIdx, SDataRow src, STSchema *pSrcSchema, int srcIdx) { + int8_t type = pDstSchema->columns[dstIdx].type; + ASSERT(type == pSrcSchema->columns[srcIdx].type); + void *pData = tdGetPtrToCol(dst, pDstSchema, dstIdx); + void *value = tdGetPtrToCol(src, pSrcSchema, srcIdx); + + switch (type) { + case TSDB_DATA_TYPE_BINARY: + case TSDB_DATA_TYPE_NCHAR: + *(VarDataOffsetT *)pData = *(VarDataOffsetT *)value; + pData = POINTER_SHIFT(dst, *(VarDataOffsetT *)pData); + value = POINTER_SHIFT(src, *(VarDataOffsetT *)value); + memcpy(pData, value, varDataTLen(value)); + break; + case TSDB_DATA_TYPE_NULL: + case TSDB_DATA_TYPE_BOOL: + case TSDB_DATA_TYPE_TINYINT: + case TSDB_DATA_TYPE_UTINYINT: + *(uint8_t *)pData = *(uint8_t *)value; + break; + case TSDB_DATA_TYPE_SMALLINT: + case TSDB_DATA_TYPE_USMALLINT: + *(uint16_t *)pData = *(uint16_t *)value; + break; + case TSDB_DATA_TYPE_INT: + case TSDB_DATA_TYPE_UINT: + *(uint32_t *)pData = *(uint32_t *)value; + break; + case TSDB_DATA_TYPE_BIGINT: + case TSDB_DATA_TYPE_UBIGINT: + *(uint64_t *)pData = *(uint64_t *)value; + break; + case TSDB_DATA_TYPE_FLOAT: + SET_FLOAT_PTR(pData, value); + break; + case TSDB_DATA_TYPE_DOUBLE: + SET_DOUBLE_PTR(pData, value); + break; + case 
TSDB_DATA_TYPE_TIMESTAMP: + if (pSrcSchema->columns[srcIdx].colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) { + *(TSKEY *)pData = tdGetKey(*(TKEY *)value); + } else { + *(TSKEY *)pData = *(TSKEY *)value; + } + break; + default: + memcpy(pData, value, pSrcSchema->columns[srcIdx].bytes); + } +} + + // ----------------- Data column structure typedef struct SDataCol { int8_t type; // column type @@ -335,7 +412,7 @@ void tdResetDataCols(SDataCols *pCols); int tdInitDataCols(SDataCols *pCols, STSchema *pSchema); SDataCols *tdDupDataCols(SDataCols *pCols, bool keepData); SDataCols *tdFreeDataCols(SDataCols *pCols); -int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int *pOffset); +int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int *pOffset, bool forceSetNull); // ----------------- K-V data row structure /* |<-------------------------------------- len -------------------------------------------->| @@ -398,9 +475,9 @@ static FORCE_INLINE void *tdGetKVRowIdxOfCol(SKVRow row, int16_t colId) { } // offset here not include kvRow header length -static FORCE_INLINE int tdAppendKvColVal(SKVRow row, const void *value, int16_t colId, int8_t type, int32_t offset) { +static FORCE_INLINE int tdAppendKvColVal(SKVRow row, const void *value, int16_t colId, int8_t type, int32_t *offset) { ASSERT(value != NULL); - int32_t toffset = offset + TD_KV_ROW_HEAD_SIZE; + int32_t toffset = *offset + TD_KV_ROW_HEAD_SIZE; SColIdx *pColIdx = (SColIdx *)POINTER_SHIFT(row, toffset); char * ptr = (char *)POINTER_SHIFT(row, kvRowLen(row)); @@ -411,7 +488,7 @@ static FORCE_INLINE int tdAppendKvColVal(SKVRow row, const void *value, int16_t memcpy(ptr, value, varDataTLen(value)); kvRowLen(row) += varDataTLen(value); } else { - if (offset == 0) { + if (*offset == 0) { ASSERT(type == TSDB_DATA_TYPE_TIMESTAMP); TKEY tvalue = tdGetTKEY(*(TSKEY *)value); memcpy(ptr, (void *)(&tvalue), TYPE_BYTES[type]); @@ -420,9 +497,27 @@ static FORCE_INLINE int tdAppendKvColVal(SKVRow row, const void *value, int16_t } kvRowLen(row) += TYPE_BYTES[type]; } + *offset += sizeof(SColIdx); return 0; } +// NOTE: offset here including the header size +static FORCE_INLINE void *tdGetKvRowDataOfCol(void *row, int32_t offset) { return POINTER_SHIFT(row, offset); } + +static FORCE_INLINE void *tdGetKVRowValOfColEx(SKVRow row, int16_t colId, int32_t *nIdx) { + while (*nIdx < kvRowNCols(row)) { + SColIdx *pColIdx = kvRowColIdxAt(row, *nIdx); + if (pColIdx->colId == colId) { + ++(*nIdx); + return tdGetKvRowDataOfCol(row, pColIdx->offset); + } else if (pColIdx->colId > colId) { + return NULL; + } else { + ++(*nIdx); + } + } + return NULL; +} // ----------------- K-V data row builder typedef struct { @@ -495,7 +590,7 @@ typedef void *SMemRow; #define TD_MEM_ROW_KV_VER_SIZE sizeof(int16_t) #define TD_MEM_ROW_KV_TYPE_VER_SIZE (TD_MEM_ROW_TYPE_SIZE + TD_MEM_ROW_KV_VER_SIZE) #define TD_MEM_ROW_DATA_HEAD_SIZE (TD_MEM_ROW_TYPE_SIZE + TD_DATA_ROW_HEAD_SIZE) -// #define TD_MEM_ROW_KV_HEAD_SIZE (TD_MEM_ROW_TYPE_SIZE + TD_MEM_ROW_KV_VER_SIZE + TD_KV_ROW_HEAD_SIZE) +#define TD_MEM_ROW_KV_HEAD_SIZE (TD_MEM_ROW_TYPE_SIZE + TD_MEM_ROW_KV_VER_SIZE + TD_KV_ROW_HEAD_SIZE) #define SMEM_ROW_DATA 0U // SDataRow #define SMEM_ROW_KV 1U // SKVRow @@ -538,27 +633,80 @@ typedef void *SMemRow; #define memRowSetType(r, t) (memRowType(r) = (t)) #define memRowSetLen(r, l) (isDataRow(r) ? memRowDataLen(r) = (l) : memRowKvLen(r) = (l)) -#define memRowSetVersion(r, v) (isDataRow(r) ? 
dataRowSetVersion(memRowDataBody(r), v) : memRowKvSetVersion(r, v)) +#define memRowSetVersion(r, v) (isDataRow(r) ? dataRowSetVersion(memRowDataBody(r), v) : memRowSetKvVersion(r, v)) #define memRowCpy(dst, r) memcpy((dst), (r), memRowTLen(r)) #define memRowMaxBytesFromSchema(s) (schemaTLen(s) + TD_MEM_ROW_DATA_HEAD_SIZE) #define memRowDeleted(r) TKEY_IS_DELETED(memRowTKey(r)) SMemRow tdMemRowDup(SMemRow row); -void tdAppendMemRowToDataCol(SMemRow row, STSchema *pSchema, SDataCols *pCols); +void tdAppendMemRowToDataCol(SMemRow row, STSchema *pSchema, SDataCols *pCols, bool forceSetNull); + // NOTE: offset here including the header size -static FORCE_INLINE void *tdGetKvRowDataOfCol(void *row, int32_t offset) { return POINTER_SHIFT(row, offset); } -// NOTE: offset here including the header size -static FORCE_INLINE void *tdGetMemRowDataOfCol(void *row, int8_t type, int32_t offset) { +static FORCE_INLINE void *tdGetMemRowDataOfCol(void *row, int16_t colId, int8_t colType, uint16_t offset) { if (isDataRow(row)) { - return tdGetRowDataOfCol(row, type, offset); - } else if (isKvRow(row)) { - return tdGetKvRowDataOfCol(row, offset); + return tdGetRowDataOfCol(memRowDataBody(row), colType, offset); } else { - ASSERT(0); + return tdGetKVRowValOfCol(memRowKvBody(row), colId); } - return NULL; } +/** + * NOTE: + * 1. Applicable to scan columns one by one + * 2. offset here including the header size + */ +static FORCE_INLINE void *tdGetMemRowDataOfColEx(void *row, int16_t colId, int8_t colType, int32_t offset, + int32_t *kvNIdx) { + if (isDataRow(row)) { + return tdGetRowDataOfCol(memRowDataBody(row), colType, offset); + } else { + return tdGetKVRowValOfColEx(memRowKvBody(row), colId, kvNIdx); + } +} + +static FORCE_INLINE int tdAppendMemColVal(SMemRow row, const void *value, int16_t colId, int8_t type, int32_t offset, + int32_t *kvOffset) { + if (isDataRow(row)) { + tdAppendColVal(memRowDataBody(row), value, type, offset); + } else { + tdAppendKvColVal(memRowKvBody(row), value, colId, type, kvOffset); + } + return 0; +} + +// make sure schema->flen appended for SDataRow +static FORCE_INLINE int32_t tdGetColAppendLen(uint8_t rowType, const void *value, int8_t colType) { + int32_t len = 0; + if (IS_VAR_DATA_TYPE(colType)) { + len += varDataTLen(value); + if (rowType == SMEM_ROW_KV) { + len += sizeof(SColIdx); + } + } else { + if (rowType == SMEM_ROW_KV) { + len += TYPE_BYTES[colType]; + len += sizeof(SColIdx); + } + } + return len; +} + + +typedef struct { + int16_t colId; + uint8_t colType; + char* colVal; +} SColInfo; + +static FORCE_INLINE void setSColInfo(SColInfo* colInfo, int16_t colId, uint8_t colType, char* colVal) { + colInfo->colId = colId; + colInfo->colType = colType; + colInfo->colVal = colVal; +} + +SMemRow mergeTwoMemRows(void *buffer, SMemRow row1, SMemRow row2, STSchema *pSchema1, STSchema *pSchema2); + + // ----------------- Raw payload structure for row: /* |<------------ Head ------------->|<----------- body of column data tuple ------------------->| * | |<----------------- flen ------------->|<--- value part --->| @@ -608,4 +756,4 @@ static FORCE_INLINE char *payloadNextCol(char *pCol) { return (char *)POINTER_SH } #endif -#endif // _TD_DATA_FORMAT_H_ \ No newline at end of file +#endif // _TD_DATA_FORMAT_H_ diff --git a/src/common/src/tdataformat.c b/src/common/src/tdataformat.c index 5a63588611..8ef3d083c7 100644 --- a/src/common/src/tdataformat.c +++ b/src/common/src/tdataformat.c @@ -17,9 +17,10 @@ #include "talgo.h" #include "tcoding.h" #include "wchar.h" +#include "tarray.h" 
static void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, int limit1, SDataCols *src2, int *iter2, - int limit2, int tRows); + int limit2, int tRows, bool forceSetNull); /** * Duplicate the schema and return a new object @@ -418,7 +419,8 @@ void tdResetDataCols(SDataCols *pCols) { } } } -static void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols *pCols) { + +static void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols *pCols, bool forceSetNull) { ASSERT(pCols->numOfRows == 0 || dataColsKeyLast(pCols) < dataRowKey(row)); int rcol = 0; @@ -452,8 +454,10 @@ static void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols } else if (pRowCol->colId < pDataCol->colId) { rcol++; } else { - // dataColSetNullAt(pDataCol, pCols->numOfRows); - dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints); + if(forceSetNull) { + //dataColSetNullAt(pDataCol, pCols->numOfRows); + dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints); + } dcol++; } } @@ -461,7 +465,7 @@ static void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols pCols->numOfRows++; } -static void tdAppendKvRowToDataCol(SKVRow row, STSchema *pSchema, SDataCols *pCols) { +static void tdAppendKvRowToDataCol(SKVRow row, STSchema *pSchema, SDataCols *pCols, bool forceSetNull) { ASSERT(pCols->numOfRows == 0 || dataColsKeyLast(pCols) < kvRowKey(row)); int rcol = 0; @@ -498,8 +502,10 @@ static void tdAppendKvRowToDataCol(SKVRow row, STSchema *pSchema, SDataCols *pCo } else if (colIdx->colId < pDataCol->colId) { ++rcol; } else { - // dataColSetNullAt(pDataCol, pCols->numOfRows); - dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints); + if (forceSetNull) { + // dataColSetNullAt(pDataCol, pCols->numOfRows); + dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints); + } ++dcol; } } @@ -507,17 +513,17 @@ static void tdAppendKvRowToDataCol(SKVRow row, STSchema *pSchema, SDataCols *pCo pCols->numOfRows++; } -void tdAppendMemRowToDataCol(SMemRow row, STSchema *pSchema, SDataCols *pCols) { +void tdAppendMemRowToDataCol(SMemRow row, STSchema *pSchema, SDataCols *pCols, bool forceSetNull) { if (isDataRow(row)) { - tdAppendDataRowToDataCol(memRowDataBody(row), pSchema, pCols); + tdAppendDataRowToDataCol(memRowDataBody(row), pSchema, pCols, forceSetNull); } else if (isKvRow(row)) { - tdAppendKvRowToDataCol(memRowKvBody(row), pSchema, pCols); + tdAppendKvRowToDataCol(memRowKvBody(row), pSchema, pCols, forceSetNull); } else { ASSERT(0); } } -int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int *pOffset) { +int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int *pOffset, bool forceSetNull) { ASSERT(rowsToMerge > 0 && rowsToMerge <= source->numOfRows); ASSERT(target->numOfCols == source->numOfCols); int offset = 0; @@ -546,7 +552,7 @@ int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int * int iter1 = 0; tdMergeTwoDataCols(target, pTarget, &iter1, pTarget->numOfRows, source, pOffset, source->numOfRows, - pTarget->numOfRows + rowsToMerge); + pTarget->numOfRows + rowsToMerge, forceSetNull); } tdFreeDataCols(pTarget); @@ -559,7 +565,7 @@ _err: // src2 data has more priority than src1 static void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, int limit1, SDataCols *src2, int *iter2, - int limit2, int tRows) { + int limit2, int tRows, 
bool forceSetNull) { tdResetDataCols(target); ASSERT(limit1 <= src1->numOfRows && limit2 <= src2->numOfRows); @@ -588,7 +594,7 @@ static void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, i if ((key1 > key2) || (key1 == key2 && !TKEY_IS_DELETED(tkey2))) { for (int i = 0; i < src2->numOfCols; i++) { ASSERT(target->cols[i].type == src2->cols[i].type); - if (src2->cols[i].len > 0) { + if (src2->cols[i].len > 0 && (forceSetNull || (!forceSetNull && !isNull(src2->cols[i].pData, src2->cols[i].type)))) { dataColAppendVal(&(target->cols[i]), tdGetColDataOfRow(src2->cols + i, *iter2), target->numOfRows, target->maxPoints); } @@ -761,4 +767,97 @@ SKVRow tdGetKVRowFromBuilder(SKVRowBuilder *pBuilder) { memcpy(kvRowValues(row), pBuilder->buf, pBuilder->size); return row; -} \ No newline at end of file +} + +SMemRow mergeTwoMemRows(void *buffer, SMemRow row1, SMemRow row2, STSchema *pSchema1, STSchema *pSchema2) { +#if 0 + ASSERT(memRowKey(row1) == memRowKey(row2)); + ASSERT(schemaVersion(pSchema1) == memRowVersion(row1)); + ASSERT(schemaVersion(pSchema2) == memRowVersion(row2)); + ASSERT(schemaVersion(pSchema1) >= schemaVersion(pSchema2)); +#endif + + SArray *stashRow = taosArrayInit(pSchema1->numOfCols, sizeof(SColInfo)); + if (stashRow == NULL) { + return NULL; + } + + SMemRow pRow = buffer; + SDataRow dataRow = memRowDataBody(pRow); + memRowSetType(pRow, SMEM_ROW_DATA); + dataRowSetVersion(dataRow, schemaVersion(pSchema1)); // use latest schema version + dataRowSetLen(dataRow, (TDRowLenT)(TD_DATA_ROW_HEAD_SIZE + pSchema1->flen)); + + TDRowTLenT dataLen = 0, kvLen = TD_MEM_ROW_KV_HEAD_SIZE; + + int32_t i = 0; // row1 + int32_t j = 0; // row2 + int32_t nCols1 = schemaNCols(pSchema1); + int32_t nCols2 = schemaNCols(pSchema2); + SColInfo colInfo = {0}; + int32_t kvIdx1 = 0, kvIdx2 = 0; + + while (i < nCols1) { + STColumn *pCol = schemaColAt(pSchema1, i); + void * val1 = tdGetMemRowDataOfColEx(row1, pCol->colId, pCol->type, TD_DATA_ROW_HEAD_SIZE + pCol->offset, &kvIdx1); + // if val1 != NULL, use val1; + if (val1 != NULL && !isNull(val1, pCol->type)) { + tdAppendColVal(dataRow, val1, pCol->type, pCol->offset); + kvLen += tdGetColAppendLen(SMEM_ROW_KV, val1, pCol->type); + setSColInfo(&colInfo, pCol->colId, pCol->type, val1); + taosArrayPush(stashRow, &colInfo); + ++i; // next col + continue; + } + + void *val2 = NULL; + while (j < nCols2) { + STColumn *tCol = schemaColAt(pSchema2, j); + if (tCol->colId < pCol->colId) { + ++j; + continue; + } + if (tCol->colId == pCol->colId) { + val2 = tdGetMemRowDataOfColEx(row2, tCol->colId, tCol->type, TD_DATA_ROW_HEAD_SIZE + tCol->offset, &kvIdx2); + } else if (tCol->colId > pCol->colId) { + // set NULL + } + break; + } // end of while(jtype); + } + tdAppendColVal(dataRow, val2, pCol->type, pCol->offset); + if (!isNull(val2, pCol->type)) { + kvLen += tdGetColAppendLen(SMEM_ROW_KV, val2, pCol->type); + setSColInfo(&colInfo, pCol->colId, pCol->type, val2); + taosArrayPush(stashRow, &colInfo); + } + + ++i; // next col + } + + dataLen = memRowTLen(pRow); + + if (kvLen < dataLen) { + // scan stashRow and generate SKVRow + memset(buffer, 0, sizeof(dataLen)); + SMemRow tRow = buffer; + memRowSetType(tRow, SMEM_ROW_KV); + SKVRow kvRow = (SKVRow)memRowKvBody(tRow); + int16_t nKvNCols = (int16_t) taosArrayGetSize(stashRow); + kvRowSetLen(kvRow, (TDRowLenT)(TD_KV_ROW_HEAD_SIZE + sizeof(SColIdx) * nKvNCols)); + kvRowSetNCols(kvRow, nKvNCols); + memRowSetKvVersion(tRow, pSchema1->version); + + int32_t toffset = 0; + int16_t k; + for (k = 0; k < nKvNCols; 
++k) { + SColInfo *pColInfo = taosArrayGet(stashRow, k); + tdAppendKvColVal(kvRow, pColInfo->colVal, pColInfo->colId, pColInfo->colType, &toffset); + } + ASSERT(kvLen == memRowTLen(tRow)); + } + taosArrayDestroy(stashRow); + return buffer; +} diff --git a/src/common/src/ttypes.c b/src/common/src/ttypes.c index 13bcfe6480..eeffe49adc 100644 --- a/src/common/src/ttypes.c +++ b/src/common/src/ttypes.c @@ -409,7 +409,7 @@ bool isValidDataType(int32_t type) { return type >= TSDB_DATA_TYPE_NULL && type <= TSDB_DATA_TYPE_UBIGINT; } -void setVardataNull(char* val, int32_t type) { +void setVardataNull(void* val, int32_t type) { if (type == TSDB_DATA_TYPE_BINARY) { varDataSetLen(val, sizeof(int8_t)); *(uint8_t*) varDataVal(val) = TSDB_DATA_BINARY_NULL; @@ -421,75 +421,75 @@ void setVardataNull(char* val, int32_t type) { } } -void setNull(char *val, int32_t type, int32_t bytes) { setNullN(val, type, bytes, 1); } +void setNull(void *val, int32_t type, int32_t bytes) { setNullN(val, type, bytes, 1); } -void setNullN(char *val, int32_t type, int32_t bytes, int32_t numOfElems) { +void setNullN(void *val, int32_t type, int32_t bytes, int32_t numOfElems) { switch (type) { case TSDB_DATA_TYPE_BOOL: for (int32_t i = 0; i < numOfElems; ++i) { - *(uint8_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_BOOL_NULL; + *(uint8_t *)(POINTER_SHIFT(val, i * tDataTypes[type].bytes)) = TSDB_DATA_BOOL_NULL; } break; case TSDB_DATA_TYPE_TINYINT: for (int32_t i = 0; i < numOfElems; ++i) { - *(uint8_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_TINYINT_NULL; + *(uint8_t *)(POINTER_SHIFT(val, i * tDataTypes[type].bytes)) = TSDB_DATA_TINYINT_NULL; } break; case TSDB_DATA_TYPE_SMALLINT: for (int32_t i = 0; i < numOfElems; ++i) { - *(uint16_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_SMALLINT_NULL; + *(uint16_t *)(POINTER_SHIFT(val, i * tDataTypes[type].bytes)) = TSDB_DATA_SMALLINT_NULL; } break; case TSDB_DATA_TYPE_INT: for (int32_t i = 0; i < numOfElems; ++i) { - *(uint32_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_INT_NULL; + *(uint32_t *)(POINTER_SHIFT(val, i * tDataTypes[type].bytes)) = TSDB_DATA_INT_NULL; } break; case TSDB_DATA_TYPE_BIGINT: case TSDB_DATA_TYPE_TIMESTAMP: for (int32_t i = 0; i < numOfElems; ++i) { - *(uint64_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_BIGINT_NULL; + *(uint64_t *)(POINTER_SHIFT(val, i * tDataTypes[type].bytes)) = TSDB_DATA_BIGINT_NULL; } break; case TSDB_DATA_TYPE_UTINYINT: for (int32_t i = 0; i < numOfElems; ++i) { - *(uint8_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_UTINYINT_NULL; + *(uint8_t *)(POINTER_SHIFT(val, i * tDataTypes[type].bytes)) = TSDB_DATA_UTINYINT_NULL; } break; case TSDB_DATA_TYPE_USMALLINT: for (int32_t i = 0; i < numOfElems; ++i) { - *(uint16_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_USMALLINT_NULL; + *(uint16_t *)(POINTER_SHIFT(val, i * tDataTypes[type].bytes)) = TSDB_DATA_USMALLINT_NULL; } break; case TSDB_DATA_TYPE_UINT: for (int32_t i = 0; i < numOfElems; ++i) { - *(uint32_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_UINT_NULL; + *(uint32_t *)(POINTER_SHIFT(val, i * tDataTypes[type].bytes)) = TSDB_DATA_UINT_NULL; } break; case TSDB_DATA_TYPE_UBIGINT: for (int32_t i = 0; i < numOfElems; ++i) { - *(uint64_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_UBIGINT_NULL; + *(uint64_t *)(POINTER_SHIFT(val, i * tDataTypes[type].bytes)) = TSDB_DATA_UBIGINT_NULL; } break; case TSDB_DATA_TYPE_FLOAT: for (int32_t i = 0; i < numOfElems; ++i) { - *(uint32_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_FLOAT_NULL; + 
*(uint32_t *)(POINTER_SHIFT(val, i * tDataTypes[type].bytes)) = TSDB_DATA_FLOAT_NULL; } break; case TSDB_DATA_TYPE_DOUBLE: for (int32_t i = 0; i < numOfElems; ++i) { - *(uint64_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_DOUBLE_NULL; + *(uint64_t *)(POINTER_SHIFT(val, i * tDataTypes[type].bytes)) = TSDB_DATA_DOUBLE_NULL; } break; case TSDB_DATA_TYPE_NCHAR: case TSDB_DATA_TYPE_BINARY: for (int32_t i = 0; i < numOfElems; ++i) { - setVardataNull(val + i * bytes, type); + setVardataNull(POINTER_SHIFT(val, i * bytes), type); } break; default: { for (int32_t i = 0; i < numOfElems; ++i) { - *(uint32_t *)(val + i * tDataTypes[TSDB_DATA_TYPE_INT].bytes) = TSDB_DATA_INT_NULL; + *(uint32_t *)(POINTER_SHIFT(val, i * tDataTypes[TSDB_DATA_TYPE_INT].bytes)) = TSDB_DATA_INT_NULL; } break; } diff --git a/src/inc/taosdef.h b/src/inc/taosdef.h index ca8ad3cc09..4a5497795f 100644 --- a/src/inc/taosdef.h +++ b/src/inc/taosdef.h @@ -307,7 +307,7 @@ do { \ #define TSDB_DEFAULT_WAL_LEVEL 1 #define TSDB_MIN_DB_UPDATE 0 -#define TSDB_MAX_DB_UPDATE 1 +#define TSDB_MAX_DB_UPDATE 2 #define TSDB_DEFAULT_DB_UPDATE_OPTION 0 #define TSDB_MIN_DB_CACHE_LAST_ROW 0 @@ -435,6 +435,12 @@ typedef enum { TSDB_CHECK_ITEM_MAX } ECheckItemType; +typedef enum { + TD_ROW_DISCARD_UPDATE = 0, + TD_ROW_OVERWRITE_UPDATE = 1, + TD_ROW_PARTIAL_UPDATE = 2 +} TDUpdateConfig; + extern char *qtypeStr[]; #ifdef __cplusplus diff --git a/src/inc/tsdb.h b/src/inc/tsdb.h index 79d9029dbc..7fd6d8c10c 100644 --- a/src/inc/tsdb.h +++ b/src/inc/tsdb.h @@ -111,7 +111,7 @@ typedef struct { uint64_t superUid; STSchema * schema; STSchema * tagSchema; - SDataRow tagValues; + SKVRow tagValues; char * sql; } STableCfg; diff --git a/src/inc/ttype.h b/src/inc/ttype.h index fcd2651c70..44e666106a 100644 --- a/src/inc/ttype.h +++ b/src/inc/ttype.h @@ -141,7 +141,7 @@ typedef struct { #define IS_VALID_FLOAT(_t) ((_t) >= -FLT_MAX && (_t) <= FLT_MAX) #define IS_VALID_DOUBLE(_t) ((_t) >= -DBL_MAX && (_t) <= DBL_MAX) -static FORCE_INLINE bool isNull(const char *val, int32_t type) { +static FORCE_INLINE bool isNull(const void *val, int32_t type) { switch (type) { case TSDB_DATA_TYPE_BOOL: return *(uint8_t *)val == TSDB_DATA_BOOL_NULL; @@ -193,9 +193,9 @@ extern tDataTypeDescriptor tDataTypes[15]; bool isValidDataType(int32_t type); -void setVardataNull(char* val, int32_t type); -void setNull(char *val, int32_t type, int32_t bytes); -void setNullN(char *val, int32_t type, int32_t bytes, int32_t numOfElems); +void setVardataNull(void* val, int32_t type); +void setNull(void *val, int32_t type, int32_t bytes); +void setNullN(void *val, int32_t type, int32_t bytes, int32_t numOfElems); const void *getNullValue(int32_t type); void assignVal(char *val, const char *src, int32_t len, int32_t type); diff --git a/src/tsdb/inc/tsdbBuffer.h b/src/tsdb/inc/tsdbBuffer.h index 4e18ac711a..ec6b057aef 100644 --- a/src/tsdb/inc/tsdbBuffer.h +++ b/src/tsdb/inc/tsdbBuffer.h @@ -40,7 +40,7 @@ void tsdbFreeBufPool(STsdbBufPool* pBufPool); int tsdbOpenBufPool(STsdbRepo* pRepo); void tsdbCloseBufPool(STsdbRepo* pRepo); SListNode* tsdbAllocBufBlockFromPool(STsdbRepo* pRepo); -int tsdbExpendPool(STsdbRepo* pRepo, int32_t oldTotalBlocks); +int tsdbExpandPool(STsdbRepo* pRepo, int32_t oldTotalBlocks); void tsdbRecycleBufferBlock(STsdbBufPool* pPool, SListNode *pNode); -#endif /* _TD_TSDB_BUFFER_H_ */ \ No newline at end of file +#endif /* _TD_TSDB_BUFFER_H_ */ diff --git a/src/tsdb/inc/tsdbReadImpl.h b/src/tsdb/inc/tsdbReadImpl.h index e7840d9723..814c4d1305 100644 --- 
a/src/tsdb/inc/tsdbReadImpl.h +++ b/src/tsdb/inc/tsdbReadImpl.h @@ -16,6 +16,13 @@ #ifndef _TD_TSDB_READ_IMPL_H_ #define _TD_TSDB_READ_IMPL_H_ +#include "tfs.h" +#include "tsdb.h" +#include "os.h" +#include "tsdbFile.h" +#include "tskiplist.h" +#include "tsdbMeta.h" + typedef struct SReadH SReadH; typedef struct { @@ -143,4 +150,4 @@ static FORCE_INLINE int tsdbMakeRoom(void **ppBuf, size_t size) { return 0; } -#endif /*_TD_TSDB_READ_IMPL_H_*/ \ No newline at end of file +#endif /*_TD_TSDB_READ_IMPL_H_*/ diff --git a/src/tsdb/inc/tsdbRowMergeBuf.h b/src/tsdb/inc/tsdbRowMergeBuf.h new file mode 100644 index 0000000000..302bf25750 --- /dev/null +++ b/src/tsdb/inc/tsdbRowMergeBuf.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TSDB_ROW_MERGE_BUF_H +#define TSDB_ROW_MERGE_BUF_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include "tsdb.h" +#include "tchecksum.h" +#include "tsdbReadImpl.h" + +typedef void* SMergeBuf; + +SDataRow tsdbMergeTwoRows(SMergeBuf *pBuf, SMemRow row1, SMemRow row2, STSchema *pSchema1, STSchema *pSchema2); + +static FORCE_INLINE int tsdbMergeBufMakeSureRoom(SMergeBuf *pBuf, STSchema* pSchema1, STSchema* pSchema2) { + return tsdbMakeRoom(pBuf, MAX(dataRowMaxBytesFromSchema(pSchema1), dataRowMaxBytesFromSchema(pSchema2))); +} + +static FORCE_INLINE void tsdbFreeMergeBuf(SMergeBuf buf) { + taosTZfree(buf); +} + +#ifdef __cplusplus +} +#endif + +#endif /* ifndef TSDB_ROW_MERGE_BUF_H */ diff --git a/src/tsdb/inc/tsdbint.h b/src/tsdb/inc/tsdbint.h index dd43e39310..757a0951e8 100644 --- a/src/tsdb/inc/tsdbint.h +++ b/src/tsdb/inc/tsdbint.h @@ -68,6 +68,8 @@ extern "C" { #include "tsdbCompact.h" // Commit Queue #include "tsdbCommitQueue.h" + +#include "tsdbRowMergeBuf.h" // Main definitions struct STsdbRepo { uint8_t state; @@ -92,6 +94,7 @@ struct STsdbRepo { pthread_mutex_t mutex; bool repoLocked; int32_t code; // Commit code + SMergeBuf mergeBuf; //used when update=2 bool inCompact; // is in compact process? 
}; @@ -139,4 +142,4 @@ static FORCE_INLINE int tsdbGetNextMaxTables(int tid) { } #endif -#endif /* _TD_TSDB_INT_H_ */ \ No newline at end of file +#endif /* _TD_TSDB_INT_H_ */ diff --git a/src/tsdb/src/tsdbBuffer.c b/src/tsdb/src/tsdbBuffer.c index 429ea8e0ce..e675bf6f9d 100644 --- a/src/tsdb/src/tsdbBuffer.c +++ b/src/tsdb/src/tsdbBuffer.c @@ -159,7 +159,7 @@ _err: static void tsdbFreeBufBlock(STsdbBufBlock *pBufBlock) { tfree(pBufBlock); } -int tsdbExpendPool(STsdbRepo* pRepo, int32_t oldTotalBlocks) { +int tsdbExpandPool(STsdbRepo* pRepo, int32_t oldTotalBlocks) { if (oldTotalBlocks == pRepo->config.totalBlocks) { return TSDB_CODE_SUCCESS; } @@ -199,4 +199,4 @@ void tsdbRecycleBufferBlock(STsdbBufPool* pPool, SListNode *pNode) { tsdbFreeBufBlock(pBufBlock); free(pNode); pPool->nBufBlocks--; -} \ No newline at end of file +} diff --git a/src/tsdb/src/tsdbCommit.c b/src/tsdb/src/tsdbCommit.c index 6330da6058..6c98283189 100644 --- a/src/tsdb/src/tsdbCommit.c +++ b/src/tsdb/src/tsdbCommit.c @@ -1290,7 +1290,7 @@ static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIt ASSERT(pSchema != NULL); } - tdAppendMemRowToDataCol(row, pSchema, pTarget); + tdAppendMemRowToDataCol(row, pSchema, pTarget, true); } tSkipListIterNext(pCommitIter->pIter); @@ -1302,7 +1302,7 @@ static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIt ASSERT(pSchema != NULL); } - tdAppendMemRowToDataCol(row, pSchema, pTarget); + tdAppendMemRowToDataCol(row, pSchema, pTarget, update == TD_ROW_OVERWRITE_UPDATE); } } else { ASSERT(!isRowDel); diff --git a/src/tsdb/src/tsdbCommitQueue.c b/src/tsdb/src/tsdbCommitQueue.c index e45ac05e97..59fb4f334d 100644 --- a/src/tsdb/src/tsdbCommitQueue.c +++ b/src/tsdb/src/tsdbCommitQueue.c @@ -138,7 +138,7 @@ static void tsdbApplyRepoConfig(STsdbRepo *pRepo) { pSaveCfg->compression, pSaveCfg->keep,pSaveCfg->keep1, pSaveCfg->keep2, pSaveCfg->totalBlocks, oldCfg.cacheLastRow, pSaveCfg->cacheLastRow, oldTotalBlocks, pSaveCfg->totalBlocks); - int err = tsdbExpendPool(pRepo, oldTotalBlocks); + int err = tsdbExpandPool(pRepo, oldTotalBlocks); if (!TAOS_SUCCEEDED(err)) { tsdbError("vgId:%d expand pool from %d to %d fail,reason:%s", REPO_ID(pRepo), oldTotalBlocks, pSaveCfg->totalBlocks, tstrerror(err)); diff --git a/src/tsdb/src/tsdbCompact.c b/src/tsdb/src/tsdbCompact.c index 5211ee3c61..0490f26b5e 100644 --- a/src/tsdb/src/tsdbCompact.c +++ b/src/tsdb/src/tsdbCompact.c @@ -455,7 +455,7 @@ static int tsdbCompactMeta(STsdbRepo *pRepo) { if (pReadh->pDCols[0]->numOfRows - ridx == 0) break; int rowsToMerge = MIN(pReadh->pDCols[0]->numOfRows - ridx, defaultRows - pComph->pDataCols->numOfRows); - tdMergeDataCols(pComph->pDataCols, pReadh->pDCols[0], rowsToMerge, &ridx); + tdMergeDataCols(pComph->pDataCols, pReadh->pDCols[0], rowsToMerge, &ridx, pCfg->update != TD_ROW_PARTIAL_UPDATE); if (pComph->pDataCols->numOfRows < defaultRows) { break; diff --git a/src/tsdb/src/tsdbMain.c b/src/tsdb/src/tsdbMain.c index f3a7c4b7ee..c877bfc7af 100644 --- a/src/tsdb/src/tsdbMain.c +++ b/src/tsdb/src/tsdbMain.c @@ -14,6 +14,7 @@ */ // no test file errors here +#include "taosdef.h" #include "tsdbint.h" #define IS_VALID_PRECISION(precision) \ @@ -106,6 +107,8 @@ STsdbRepo *tsdbOpenRepo(STsdbCfg *pCfg, STsdbAppH *pAppH) { return NULL; } + pRepo->mergeBuf = NULL; + tsdbStartStream(pRepo); tsdbDebug("vgId:%d, TSDB repository opened", REPO_ID(pRepo)); @@ -518,7 +521,8 @@ static int32_t tsdbCheckAndSetDefaultCfg(STsdbCfg *pCfg) { } // update check - if (pCfg->update != 0) 
pCfg->update = 1; + if (pCfg->update < TD_ROW_DISCARD_UPDATE || pCfg->update > TD_ROW_PARTIAL_UPDATE) + pCfg->update = TD_ROW_DISCARD_UPDATE; // update cacheLastRow if (pCfg->cacheLastRow != 0) { @@ -597,6 +601,7 @@ static void tsdbFreeRepo(STsdbRepo *pRepo) { tsdbFreeFS(pRepo->fs); tsdbFreeBufPool(pRepo->pPool); tsdbFreeMeta(pRepo->tsdbMeta); + tsdbFreeMergeBuf(pRepo->mergeBuf); // tsdbFreeMemTable(pRepo->mem); // tsdbFreeMemTable(pRepo->imem); tsem_destroy(&(pRepo->readyToCommit)); diff --git a/src/tsdb/src/tsdbMemTable.c b/src/tsdb/src/tsdbMemTable.c index ee9dadd9b7..8bb2d1c44e 100644 --- a/src/tsdb/src/tsdbMemTable.c +++ b/src/tsdb/src/tsdbMemTable.c @@ -13,7 +13,11 @@ * along with this program. If not, see . */ +#include "tdataformat.h" +#include "tfunctional.h" #include "tsdbint.h" +#include "tskiplist.h" +#include "tsdbRowMergeBuf.h" #define TSDB_DATA_SKIPLIST_LEVEL 5 #define TSDB_MAX_INSERT_BATCH 512 @@ -30,24 +34,22 @@ typedef struct { void * pMsg; } SSubmitMsgIter; -static SMemTable * tsdbNewMemTable(STsdbRepo *pRepo); -static void tsdbFreeMemTable(SMemTable *pMemTable); -static STableData *tsdbNewTableData(STsdbCfg *pCfg, STable *pTable); -static void tsdbFreeTableData(STableData *pTableData); -static char * tsdbGetTsTupleKey(const void *data); +static SMemTable * tsdbNewMemTable(STsdbRepo *pRepo); +static void tsdbFreeMemTable(SMemTable *pMemTable); +static STableData* tsdbNewTableData(STsdbCfg *pCfg, STable *pTable); +static void tsdbFreeTableData(STableData *pTableData); +static char * tsdbGetTsTupleKey(const void *data); static int tsdbAdjustMemMaxTables(SMemTable *pMemTable, int maxTables); -static int tsdbAppendTableRowToCols(STable *pTable, SDataCols *pCols, STSchema **ppSchema, SMemRow row); +static int tsdbAppendTableRowToCols(STable *pTable, SDataCols *pCols, STSchema **ppSchema, SMemRow row); static int tsdbInitSubmitBlkIter(SSubmitBlk *pBlock, SSubmitBlkIter *pIter); -static SMemRow tsdbGetSubmitBlkNext(SSubmitBlkIter *pIter); +static SMemRow tsdbGetSubmitBlkNext(SSubmitBlkIter *pIter); static int tsdbScanAndConvertSubmitMsg(STsdbRepo *pRepo, SSubmitMsg *pMsg); static int tsdbInsertDataToTable(STsdbRepo *pRepo, SSubmitBlk *pBlock, int32_t *affectedrows); -static int tsdbCopyRowToMem(STsdbRepo *pRepo, SMemRow row, STable *pTable, void **ppRow); static int tsdbInitSubmitMsgIter(SSubmitMsg *pMsg, SSubmitMsgIter *pIter); static int tsdbGetSubmitMsgNext(SSubmitMsgIter *pIter, SSubmitBlk **pPBlock); static int tsdbCheckTableSchema(STsdbRepo *pRepo, SSubmitBlk *pBlock, STable *pTable); -static int tsdbInsertDataToTableImpl(STsdbRepo *pRepo, STable *pTable, void **rows, int rowCounter); -static void tsdbFreeRows(STsdbRepo *pRepo, void **rows, int rowCounter); -static int tsdbUpdateTableLatestInfo(STsdbRepo *pRepo, STable *pTable, SMemRow row); +static int tsdbUpdateTableLatestInfo(STsdbRepo *pRepo, STable *pTable, SMemRow row); + static FORCE_INLINE int tsdbCheckRowRange(STsdbRepo *pRepo, STable *pTable, SMemRow row, TSKEY minKey, TSKEY maxKey, TSKEY now); @@ -342,7 +344,7 @@ int tsdbSyncCommit(STsdbRepo *repo) { * 3. rowsIncreased = rowsInserted - rowsDeleteSucceed >= maxRowsToRead * 4. operations in pCols not exceeds its max capacity if pCols is given * - * The function tries to procceed AS MUSH AS POSSIBLE. + * The function tries to procceed AS MUCH AS POSSIBLE. 
*/ int tsdbLoadDataFromCache(STable *pTable, SSkipListIterator *pIter, TSKEY maxKey, int maxRowsToRead, SDataCols *pCols, TKEY *filterKeys, int nFilterKeys, bool keepDup, SMergeInfo *pMergeInfo) { @@ -523,9 +525,15 @@ static STableData *tsdbNewTableData(STsdbCfg *pCfg, STable *pTable) { pTableData->keyLast = 0; pTableData->numOfRows = 0; + uint8_t skipListCreateFlags; + if(pCfg->update == TD_ROW_DISCARD_UPDATE) + skipListCreateFlags = SL_DISCARD_DUP_KEY; + else + skipListCreateFlags = SL_UPDATE_DUP_KEY; + pTableData->pData = tSkipListCreate(TSDB_DATA_SKIPLIST_LEVEL, TSDB_DATA_TYPE_TIMESTAMP, TYPE_BYTES[TSDB_DATA_TYPE_TIMESTAMP], - tkeyComparFn, pCfg->update ? SL_UPDATE_DUP_KEY : SL_DISCARD_DUP_KEY, tsdbGetTsTupleKey); + tkeyComparFn, skipListCreateFlags, tsdbGetTsTupleKey); if (pTableData->pData == NULL) { terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; free(pTableData); @@ -581,7 +589,7 @@ static int tsdbAppendTableRowToCols(STable *pTable, SDataCols *pCols, STSchema * } } - tdAppendMemRowToDataCol(row, *ppSchema, pCols); + tdAppendMemRowToDataCol(row, *ppSchema, pCols, true); } return 0; @@ -693,45 +701,152 @@ static int tsdbScanAndConvertSubmitMsg(STsdbRepo *pRepo, SSubmitMsg *pMsg) { return 0; } -static int tsdbInsertDataToTable(STsdbRepo *pRepo, SSubmitBlk *pBlock, int32_t *affectedrows) { - STsdbMeta * pMeta = pRepo->tsdbMeta; - int64_t points = 0; - STable * pTable = NULL; - SSubmitBlkIter blkIter = {0}; - SMemRow row = NULL; - void * rows[TSDB_MAX_INSERT_BATCH] = {0}; - int rowCounter = 0; +//row1 has higher priority +static SMemRow tsdbInsertDupKeyMerge(SMemRow row1, SMemRow row2, STsdbRepo* pRepo, STSchema **ppSchema1, STSchema **ppSchema2, STable* pTable, int32_t* pAffectedRows, int64_t* pPoints, SMemRow* pLastRow) { + + //for compatiblity, duplicate key inserted when update=0 should be also calculated as affected rows! 
+ if(row1 == NULL && row2 == NULL && pRepo->config.update == TD_ROW_DISCARD_UPDATE) { + (*pAffectedRows)++; + (*pPoints)++; + return NULL; + } - ASSERT(pBlock->tid < pMeta->maxTables); - pTable = pMeta->tables[pBlock->tid]; - ASSERT(pTable != NULL && TABLE_UID(pTable) == pBlock->uid); + if(row2 == NULL || pRepo->config.update != TD_ROW_PARTIAL_UPDATE) { + void* pMem = tsdbAllocBytes(pRepo, memRowTLen(row1)); + if(pMem == NULL) return NULL; + memRowCpy(pMem, row1); + (*pAffectedRows)++; + (*pPoints)++; + *pLastRow = pMem; + return pMem; + } - tsdbInitSubmitBlkIter(pBlock, &blkIter); - while ((row = tsdbGetSubmitBlkNext(&blkIter)) != NULL) { - if (tsdbCopyRowToMem(pRepo, row, pTable, &(rows[rowCounter])) < 0) { - tsdbFreeRows(pRepo, rows, rowCounter); - goto _err; + STSchema *pSchema1 = *ppSchema1; + STSchema *pSchema2 = *ppSchema2; + SMergeBuf * pBuf = &pRepo->mergeBuf; + int dv1 = memRowVersion(row1); + int dv2 = memRowVersion(row2); + if(pSchema1 == NULL || schemaVersion(pSchema1) != dv1) { + if(pSchema2 != NULL && schemaVersion(pSchema2) == dv1) { + *ppSchema1 = pSchema2; + } else { + *ppSchema1 = tsdbGetTableSchemaImpl(pTable, false, false, memRowVersion(row1)); } + pSchema1 = *ppSchema1; + } - (*affectedrows)++; - points++; - - if (rows[rowCounter] != NULL) { - rowCounter++; - } - - if (rowCounter == TSDB_MAX_INSERT_BATCH) { - if (tsdbInsertDataToTableImpl(pRepo, pTable, rows, rowCounter) < 0) { - goto _err; - } - - rowCounter = 0; - memset(rows, 0, sizeof(rows)); + if(pSchema2 == NULL || schemaVersion(pSchema2) != dv2) { + if(schemaVersion(pSchema1) == dv2) { + pSchema2 = pSchema1; + } else { + *ppSchema2 = tsdbGetTableSchemaImpl(pTable, false, false, memRowVersion(row2)); + pSchema2 = *ppSchema2; } } - if (rowCounter > 0 && tsdbInsertDataToTableImpl(pRepo, pTable, rows, rowCounter) < 0) { - goto _err; + SMemRow tmp = tsdbMergeTwoRows(pBuf, row1, row2, pSchema1, pSchema2); + + void* pMem = tsdbAllocBytes(pRepo, memRowTLen(tmp)); + if(pMem == NULL) return NULL; + memRowCpy(pMem, tmp); + + (*pAffectedRows)++; + (*pPoints)++; + + *pLastRow = pMem; + return pMem; +} + +static void* tsdbInsertDupKeyMergePacked(void** args) { + return tsdbInsertDupKeyMerge(args[0], args[1], args[2], (STSchema**)&args[3], (STSchema**)&args[4], args[5], args[6], args[7], args[8]); +} + +static void tsdbSetupSkipListHookFns(SSkipList* pSkipList, STsdbRepo *pRepo, STable *pTable, int32_t* pAffectedRows, int64_t* pPoints, SMemRow* pLastRow) { + + if(pSkipList->insertHandleFn == NULL) { + tGenericSavedFunc *dupHandleSavedFunc = genericSavedFuncInit((GenericVaFunc)&tsdbInsertDupKeyMergePacked, 9); + dupHandleSavedFunc->args[2] = pRepo; + dupHandleSavedFunc->args[3] = NULL; + dupHandleSavedFunc->args[4] = NULL; + dupHandleSavedFunc->args[5] = pTable; + dupHandleSavedFunc->args[6] = pAffectedRows; + dupHandleSavedFunc->args[7] = pPoints; + dupHandleSavedFunc->args[8] = pLastRow; + pSkipList->insertHandleFn = dupHandleSavedFunc; + } +} + +static int tsdbInsertDataToTable(STsdbRepo* pRepo, SSubmitBlk* pBlock, int32_t *pAffectedRows) { + + STsdbMeta *pMeta = pRepo->tsdbMeta; + int64_t points = 0; + STable *pTable = NULL; + SSubmitBlkIter blkIter = {0}; + SMemTable *pMemTable = NULL; + STableData *pTableData = NULL; + STsdbCfg *pCfg = &(pRepo->config); + + tsdbInitSubmitBlkIter(pBlock, &blkIter); + if(blkIter.row == NULL) return 0; + TSKEY firstRowKey = memRowKey(blkIter.row); + + tsdbAllocBytes(pRepo, 0); + pMemTable = pRepo->mem; + + ASSERT(pMemTable != NULL); + ASSERT(pBlock->tid < pMeta->maxTables); + + pTable = 
pMeta->tables[pBlock->tid]; + + ASSERT(pTable != NULL && TABLE_UID(pTable) == pBlock->uid); + + + if (TABLE_TID(pTable) >= pMemTable->maxTables) { + if (tsdbAdjustMemMaxTables(pMemTable, pMeta->maxTables) < 0) { + return -1; + } + } + pTableData = pMemTable->tData[TABLE_TID(pTable)]; + + if (pTableData == NULL || pTableData->uid != TABLE_UID(pTable)) { + if (pTableData != NULL) { + taosWLockLatch(&(pMemTable->latch)); + pMemTable->tData[TABLE_TID(pTable)] = NULL; + tsdbFreeTableData(pTableData); + taosWUnLockLatch(&(pMemTable->latch)); + } + + pTableData = tsdbNewTableData(pCfg, pTable); + if (pTableData == NULL) { + tsdbError("vgId:%d failed to insert data to table %s uid %" PRId64 " tid %d since %s", REPO_ID(pRepo), + TABLE_CHAR_NAME(pTable), TABLE_UID(pTable), TABLE_TID(pTable), tstrerror(terrno)); + return -1; + } + + pRepo->mem->tData[TABLE_TID(pTable)] = pTableData; + } + + ASSERT((pTableData != NULL) && pTableData->uid == TABLE_UID(pTable)); + + SMemRow lastRow = NULL; + int64_t osize = SL_SIZE(pTableData->pData); + tsdbSetupSkipListHookFns(pTableData->pData, pRepo, pTable, pAffectedRows, &points, &lastRow); + tSkipListPutBatchByIter(pTableData->pData, &blkIter, (iter_next_fn_t)tsdbGetSubmitBlkNext); + int64_t dsize = SL_SIZE(pTableData->pData) - osize; + + + if(lastRow != NULL) { + TSKEY lastRowKey = memRowKey(lastRow); + if (pMemTable->keyFirst > firstRowKey) pMemTable->keyFirst = firstRowKey; + pMemTable->numOfRows += dsize; + + if (pTableData->keyFirst > firstRowKey) pTableData->keyFirst = firstRowKey; + pTableData->numOfRows += dsize; + if (pMemTable->keyLast < lastRowKey) pMemTable->keyLast = lastRowKey; + if (pTableData->keyLast < lastRowKey) pTableData->keyLast = lastRowKey; + if (tsdbUpdateTableLatestInfo(pRepo, pTable, lastRow) < 0) { + return -1; + } } STSchema *pSchema = tsdbGetTableSchemaByVersion(pTable, pBlock->sversion); @@ -739,48 +854,8 @@ static int tsdbInsertDataToTable(STsdbRepo *pRepo, SSubmitBlk *pBlock, int32_t * pRepo->stat.totalStorage += points * schemaVLen(pSchema); return 0; - -_err: - return -1; } -static int tsdbCopyRowToMem(STsdbRepo *pRepo, SMemRow row, STable *pTable, void **ppRow) { - STsdbCfg * pCfg = &pRepo->config; - TKEY tkey = memRowTKey(row); - TSKEY key = memRowKey(row); - bool isRowDelete = TKEY_IS_DELETED(tkey); - - if (isRowDelete) { - if (!pCfg->update) { - tsdbWarn("vgId:%d vnode is not allowed to update but try to delete a data row", REPO_ID(pRepo)); - terrno = TSDB_CODE_TDB_INVALID_ACTION; - return -1; - } - - TSKEY lastKey = tsdbGetTableLastKeyImpl(pTable); - if (key > lastKey) { - tsdbTrace("vgId:%d skip to delete row key %" PRId64 " which is larger than table lastKey %" PRId64, - REPO_ID(pRepo), key, lastKey); - return 0; - } - } - - void *pRow = tsdbAllocBytes(pRepo, memRowTLen(row)); - if (pRow == NULL) { - tsdbError("vgId:%d failed to insert row with key %" PRId64 " to table %s while allocate %" PRIu32 " bytes since %s", - REPO_ID(pRepo), key, TABLE_CHAR_NAME(pTable), memRowTLen(row), tstrerror(terrno)); - return -1; - } - - memRowCpy(pRow, row); - ppRow[0] = pRow; // save the memory address of data rows - - tsdbTrace("vgId:%d a row is %s table %s tid %d uid %" PRIu64 " key %" PRIu64, REPO_ID(pRepo), - isRowDelete ? 
"deleted from" : "updated in", TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable), - key); - - return 0; -} static int tsdbInitSubmitMsgIter(SSubmitMsg *pMsg, SSubmitMsgIter *pIter) { if (pMsg == NULL) { @@ -889,106 +964,6 @@ static int tsdbCheckTableSchema(STsdbRepo *pRepo, SSubmitBlk *pBlock, STable *pT return 0; } -static int tsdbInsertDataToTableImpl(STsdbRepo *pRepo, STable *pTable, void **rows, int rowCounter) { - if (rowCounter < 1) return 0; - - SMemTable * pMemTable = NULL; - STableData *pTableData = NULL; - STsdbMeta * pMeta = pRepo->tsdbMeta; - STsdbCfg * pCfg = &(pRepo->config); - - ASSERT(pRepo->mem != NULL); - pMemTable = pRepo->mem; - - if (TABLE_TID(pTable) >= pMemTable->maxTables) { - if (tsdbAdjustMemMaxTables(pMemTable, pMeta->maxTables) < 0) { - tsdbFreeRows(pRepo, rows, rowCounter); - return -1; - } - } - pTableData = pMemTable->tData[TABLE_TID(pTable)]; - - if (pTableData == NULL || pTableData->uid != TABLE_UID(pTable)) { - if (pTableData != NULL) { - taosWLockLatch(&(pMemTable->latch)); - pMemTable->tData[TABLE_TID(pTable)] = NULL; - tsdbFreeTableData(pTableData); - taosWUnLockLatch(&(pMemTable->latch)); - } - - pTableData = tsdbNewTableData(pCfg, pTable); - if (pTableData == NULL) { - tsdbError("vgId:%d failed to insert data to table %s uid %" PRId64 " tid %d since %s", REPO_ID(pRepo), - TABLE_CHAR_NAME(pTable), TABLE_UID(pTable), TABLE_TID(pTable), tstrerror(terrno)); - tsdbFreeRows(pRepo, rows, rowCounter); - return -1; - } - - pRepo->mem->tData[TABLE_TID(pTable)] = pTableData; - } - - ASSERT((pTableData != NULL) && pTableData->uid == TABLE_UID(pTable)); - - int64_t osize = SL_SIZE(pTableData->pData); - tSkipListPutBatch(pTableData->pData, rows, rowCounter); - int64_t dsize = SL_SIZE(pTableData->pData) - osize; - TSKEY keyFirstRow = memRowKey(rows[0]); - TSKEY keyLastRow = memRowKey(rows[rowCounter - 1]); - - if (pMemTable->keyFirst > keyFirstRow) pMemTable->keyFirst = keyFirstRow; - if (pMemTable->keyLast < keyLastRow) pMemTable->keyLast = keyLastRow; - pMemTable->numOfRows += dsize; - - if (pTableData->keyFirst > keyFirstRow) pTableData->keyFirst = keyFirstRow; - if (pTableData->keyLast < keyLastRow) pTableData->keyLast = keyLastRow; - pTableData->numOfRows += dsize; - - // update table latest info - if (tsdbUpdateTableLatestInfo(pRepo, pTable, rows[rowCounter - 1]) < 0) { - return -1; - } - - return 0; -} - -static void tsdbFreeRows(STsdbRepo *pRepo, void **rows, int rowCounter) { - ASSERT(pRepo->mem != NULL); - STsdbBufPool *pBufPool = pRepo->pPool; - - for (int i = rowCounter - 1; i >= 0; --i) { - SMemRow row = (SMemRow)rows[i]; - int bytes = (int)memRowTLen(row); - - if (pRepo->mem->extraBuffList == NULL) { - STsdbBufBlock *pBufBlock = tsdbGetCurrBufBlock(pRepo); - ASSERT(pBufBlock != NULL && pBufBlock->offset >= bytes); - - pBufBlock->offset -= bytes; - pBufBlock->remain += bytes; - ASSERT(row == POINTER_SHIFT(pBufBlock->data, pBufBlock->offset)); - tsdbTrace("vgId:%d free %d bytes to TSDB buffer pool, nBlocks %d offset %d remain %d", REPO_ID(pRepo), bytes, - listNEles(pRepo->mem->bufBlockList), pBufBlock->offset, pBufBlock->remain); - - if (pBufBlock->offset == 0) { // return the block to buffer pool - if (tsdbLockRepo(pRepo) < 0) return; - SListNode *pNode = tdListPopTail(pRepo->mem->bufBlockList); - tdListPrependNode(pBufPool->bufBlockList, pNode); - if (tsdbUnlockRepo(pRepo) < 0) return; - } - } else { - ASSERT(listNEles(pRepo->mem->extraBuffList) > 0); - SListNode *pNode = tdListPopTail(pRepo->mem->extraBuffList); - ASSERT(row == 
pNode->data); - free(pNode); - tsdbTrace("vgId:%d free %d bytes to SYSTEM buffer pool", REPO_ID(pRepo), bytes); - - if (listNEles(pRepo->mem->extraBuffList) == 0) { - tdListFree(pRepo->mem->extraBuffList); - pRepo->mem->extraBuffList = NULL; - } - } - } -} static void updateTableLatestColumn(STsdbRepo *pRepo, STable *pTable, SMemRow row) { tsdbDebug("vgId:%d updateTableLatestColumn, %s row version:%d", REPO_ID(pRepo), pTable->name->data, @@ -1005,8 +980,8 @@ static void updateTableLatestColumn(STsdbRepo *pRepo, STable *pTable, SMemRow ro } SDataCol *pLatestCols = pTable->lastCols; + int32_t kvIdx = 0; - bool isDataRow = isDataRow(row); for (int16_t j = 0; j < schemaNCols(pSchema); j++) { STColumn *pTCol = schemaColAt(pSchema, j); // ignore not exist colId @@ -1017,16 +992,8 @@ static void updateTableLatestColumn(STsdbRepo *pRepo, STable *pTable, SMemRow ro void *value = NULL; - if (isDataRow) { - value = tdGetRowDataOfCol(memRowDataBody(row), (int8_t)pTCol->type, - TD_DATA_ROW_HEAD_SIZE + pTCol->offset); - } else { - // SKVRow - SColIdx *pColIdx = tdGetKVRowIdxOfCol(memRowKvBody(row), pTCol->colId); - if (pColIdx) { - value = tdGetKvRowDataOfCol(memRowKvBody(row), pColIdx->offset); - } - } + value = tdGetMemRowDataOfColEx(row, pTCol->colId, (int8_t)pTCol->type, + TD_DATA_ROW_HEAD_SIZE + pSchema->columns[j].offset, &kvIdx); if ((value == NULL) || isNull(value, pTCol->type)) { continue; @@ -1055,13 +1022,14 @@ static int tsdbUpdateTableLatestInfo(STsdbRepo *pRepo, STable *pTable, SMemRow r // if cacheLastRow config has been reset, free the lastRow if (!pCfg->cacheLastRow && pTable->lastRow != NULL) { - taosTZfree(pTable->lastRow); + SMemRow cachedLastRow = pTable->lastRow; TSDB_WLOCK_TABLE(pTable); pTable->lastRow = NULL; TSDB_WUNLOCK_TABLE(pTable); + taosTZfree(cachedLastRow); } - if (tsdbGetTableLastKeyImpl(pTable) < memRowKey(row)) { + if (tsdbGetTableLastKeyImpl(pTable) <= memRowKey(row)) { if (CACHE_LAST_ROW(pCfg) || pTable->lastRow != NULL) { SMemRow nrow = pTable->lastRow; if (taosTSizeof(nrow) < memRowTLen(row)) { diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index 7a595da39a..f5c01d86e7 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -14,11 +14,14 @@ */ #include "os.h" +#include "tdataformat.h" +#include "tskiplist.h" #include "tulog.h" #include "talgo.h" #include "tcompare.h" #include "exception.h" +#include "taosdef.h" #include "tlosertree.h" #include "tsdbint.h" #include "texpr.h" @@ -68,6 +71,12 @@ typedef struct SLoadCompBlockInfo { int32_t fileId; } SLoadCompBlockInfo; +enum { + CHECKINFO_CHOSEN_MEM = 0, + CHECKINFO_CHOSEN_IMEM = 1, + CHECKINFO_CHOSEN_BOTH = 2 //for update=2(merge case) +}; + typedef struct STableCheckInfo { STableId tableId; @@ -76,7 +85,7 @@ typedef struct STableCheckInfo { SBlockInfo* pCompInfo; int32_t compSize; int32_t numOfBlocks:29; // number of qualified data blocks not the original blocks - int8_t chosen:2; // indicate which iterator should move forward + uint8_t chosen:2; // indicate which iterator should move forward bool initBuf; // whether to initialize the in-memory skip list iterator or not SSkipListIterator* iter; // mem buffer skip list iterator SSkipListIterator* iiter; // imem buffer skip list iterator @@ -781,7 +790,62 @@ static void destroyTableMemIterator(STableCheckInfo* pCheckInfo) { tSkipListDestroyIter(pCheckInfo->iiter); } -static SMemRow getSDataRowInTableMem(STableCheckInfo* pCheckInfo, int32_t order, int32_t update) { +static TSKEY extractFirstTraverseKey(STableCheckInfo* pCheckInfo, int32_t 
order, int32_t update) { + SMemRow rmem = NULL, rimem = NULL; + if (pCheckInfo->iter) { + SSkipListNode* node = tSkipListIterGet(pCheckInfo->iter); + if (node != NULL) { + rmem = (SMemRow)SL_GET_NODE_DATA(node); + } + } + + if (pCheckInfo->iiter) { + SSkipListNode* node = tSkipListIterGet(pCheckInfo->iiter); + if (node != NULL) { + rimem = (SMemRow)SL_GET_NODE_DATA(node); + } + } + + if (rmem == NULL && rimem == NULL) { + return TSKEY_INITIAL_VAL; + } + + if (rmem != NULL && rimem == NULL) { + pCheckInfo->chosen = CHECKINFO_CHOSEN_MEM; + return memRowKey(rmem); + } + + if (rmem == NULL && rimem != NULL) { + pCheckInfo->chosen = CHECKINFO_CHOSEN_IMEM; + return memRowKey(rimem); + } + + TSKEY r1 = memRowKey(rmem); + TSKEY r2 = memRowKey(rimem); + + if (r1 == r2) { + if(update == TD_ROW_DISCARD_UPDATE){ + pCheckInfo->chosen = CHECKINFO_CHOSEN_IMEM; + tSkipListIterNext(pCheckInfo->iter); + } + else if(update == TD_ROW_OVERWRITE_UPDATE) { + pCheckInfo->chosen = CHECKINFO_CHOSEN_MEM; + tSkipListIterNext(pCheckInfo->iiter); + } else { + pCheckInfo->chosen = CHECKINFO_CHOSEN_BOTH; + } + return r1; + } else if (r1 < r2 && ASCENDING_TRAVERSE(order)) { + pCheckInfo->chosen = CHECKINFO_CHOSEN_MEM; + return r1; + } + else { + pCheckInfo->chosen = CHECKINFO_CHOSEN_IMEM; + return r2; + } +} + +static SMemRow getSMemRowInTableMem(STableCheckInfo* pCheckInfo, int32_t order, int32_t update, SMemRow* extraRow) { SMemRow rmem = NULL, rimem = NULL; if (pCheckInfo->iter) { SSkipListNode* node = tSkipListIterGet(pCheckInfo->iter); @@ -814,31 +878,35 @@ static SMemRow getSDataRowInTableMem(STableCheckInfo* pCheckInfo, int32_t order, TSKEY r1 = memRowKey(rmem); TSKEY r2 = memRowKey(rimem); - if (r1 == r2) { // data ts are duplicated, ignore the data in mem - if (!update) { + if (r1 == r2) { + if (update == TD_ROW_DISCARD_UPDATE) { tSkipListIterNext(pCheckInfo->iter); - pCheckInfo->chosen = 1; + pCheckInfo->chosen = CHECKINFO_CHOSEN_IMEM; return rimem; - } else { + } else if(update == TD_ROW_OVERWRITE_UPDATE){ tSkipListIterNext(pCheckInfo->iiter); - pCheckInfo->chosen = 0; + pCheckInfo->chosen = CHECKINFO_CHOSEN_MEM; + return rmem; + } else { + pCheckInfo->chosen = CHECKINFO_CHOSEN_BOTH; + extraRow = rimem; return rmem; } } else { if (ASCENDING_TRAVERSE(order)) { if (r1 < r2) { - pCheckInfo->chosen = 0; + pCheckInfo->chosen = CHECKINFO_CHOSEN_MEM; return rmem; } else { - pCheckInfo->chosen = 1; + pCheckInfo->chosen = CHECKINFO_CHOSEN_IMEM; return rimem; } } else { if (r1 < r2) { - pCheckInfo->chosen = 1; + pCheckInfo->chosen = CHECKINFO_CHOSEN_IMEM; return rimem; } else { - pCheckInfo->chosen = 0; + pCheckInfo->chosen = CHECKINFO_CHOSEN_IMEM; return rmem; } } @@ -847,7 +915,7 @@ static SMemRow getSDataRowInTableMem(STableCheckInfo* pCheckInfo, int32_t order, static bool moveToNextRowInMem(STableCheckInfo* pCheckInfo) { bool hasNext = false; - if (pCheckInfo->chosen == 0) { + if (pCheckInfo->chosen == CHECKINFO_CHOSEN_MEM) { if (pCheckInfo->iter != NULL) { hasNext = tSkipListIterNext(pCheckInfo->iter); } @@ -859,7 +927,7 @@ static bool moveToNextRowInMem(STableCheckInfo* pCheckInfo) { if (pCheckInfo->iiter != NULL) { return tSkipListIterGet(pCheckInfo->iiter) != NULL; } - } else { //pCheckInfo->chosen == 1 + } else if (pCheckInfo->chosen == CHECKINFO_CHOSEN_IMEM){ if (pCheckInfo->iiter != NULL) { hasNext = tSkipListIterNext(pCheckInfo->iiter); } @@ -871,6 +939,13 @@ static bool moveToNextRowInMem(STableCheckInfo* pCheckInfo) { if (pCheckInfo->iter != NULL) { return tSkipListIterGet(pCheckInfo->iter) != NULL; } + } 
else { + if (pCheckInfo->iter != NULL) { + hasNext = tSkipListIterNext(pCheckInfo->iter); + } + if (pCheckInfo->iiter != NULL) { + hasNext = tSkipListIterNext(pCheckInfo->iiter) || hasNext; + } } return hasNext; @@ -891,7 +966,7 @@ static bool hasMoreDataInCache(STsdbQueryHandle* pHandle) { initTableMemIterator(pHandle, pCheckInfo); } - SMemRow row = getSDataRowInTableMem(pCheckInfo, pHandle->order, pCfg->update); + SMemRow row = getSMemRowInTableMem(pCheckInfo, pHandle->order, pCfg->update, NULL); if (row == NULL) { return false; } @@ -1147,25 +1222,28 @@ static int32_t doCopyRowsFromFileBlock(STsdbQueryHandle* pQueryHandle, int32_t c static void moveDataToFront(STsdbQueryHandle* pQueryHandle, int32_t numOfRows, int32_t numOfCols); static void doCheckGeneratedBlockRange(STsdbQueryHandle* pQueryHandle); static void copyAllRemainRowsFromFileBlock(STsdbQueryHandle* pQueryHandle, STableCheckInfo* pCheckInfo, SDataBlockInfo* pBlockInfo, int32_t endPos); +static TSKEY extractFirstTraverseKey(STableCheckInfo* pCheckInfo, int32_t order, int32_t update); static int32_t handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SBlock* pBlock, STableCheckInfo* pCheckInfo){ SQueryFilePos* cur = &pQueryHandle->cur; STsdbCfg* pCfg = &pQueryHandle->pTsdb->config; SDataBlockInfo binfo = GET_FILE_DATA_BLOCK_INFO(pCheckInfo, pBlock); + TSKEY key; int32_t code = TSDB_CODE_SUCCESS; /*bool hasData = */ initTableMemIterator(pQueryHandle, pCheckInfo); - SMemRow row = getSDataRowInTableMem(pCheckInfo, pQueryHandle->order, pCfg->update); assert(cur->pos >= 0 && cur->pos <= binfo.rows); - TSKEY key = (row != NULL) ? memRowKey(row) : TSKEY_INITIAL_VAL; + key = extractFirstTraverseKey(pCheckInfo, pQueryHandle->order, pCfg->update); + if (key != TSKEY_INITIAL_VAL) { tsdbDebug("%p key in mem:%"PRId64", 0x%"PRIx64, pQueryHandle, key, pQueryHandle->qId); } else { tsdbDebug("%p no data in mem, 0x%"PRIx64, pQueryHandle, pQueryHandle->qId); } + if ((ASCENDING_TRAVERSE(pQueryHandle->order) && (key != TSKEY_INITIAL_VAL && key <= binfo.window.ekey)) || (!ASCENDING_TRAVERSE(pQueryHandle->order) && (key != TSKEY_INITIAL_VAL && key >= binfo.window.skey))) { @@ -1190,6 +1268,7 @@ static int32_t handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SBlock* p return code; } + // return error, add test cases if ((code = doLoadFileDataBlock(pQueryHandle, pBlock, pCheckInfo, cur->slot)) != TSDB_CODE_SUCCESS) { return code; @@ -1452,186 +1531,204 @@ int32_t doCopyRowsFromFileBlock(STsdbQueryHandle* pQueryHandle, int32_t capacity return numOfRows + num; } -static void copyOneRowFromMem(STsdbQueryHandle* pQueryHandle, int32_t capacity, int32_t numOfRows, SMemRow row, - int32_t numOfCols, STable* pTable, STSchema* pSchema) { +// Note: row1 always has high priority +static void mergeTwoRowFromMem(STsdbQueryHandle* pQueryHandle, int32_t capacity, int32_t numOfRows, + SMemRow row1, SMemRow row2, int32_t numOfCols, STable* pTable, + STSchema* pSchema1, STSchema* pSchema2, bool forceSetNull) { char* pData = NULL; + STSchema* pSchema; + SMemRow row; + int16_t colId; + int16_t offset; - // the schema version info is embedded in SDataRow, and use latest schema version for SKVRow - int32_t numOfRowCols = 0; - if (pSchema == NULL) { - pSchema = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row)); - numOfRowCols = schemaNCols(pSchema); + bool isRow1DataRow = isDataRow(row1); + bool isRow2DataRow; + bool isChosenRowDataRow; + int32_t chosen_itr; + void *value; + + // the schema version info is embeded in SDataRow + int32_t numOfColsOfRow1 = 0; + 
+ if (pSchema1 == NULL) { + pSchema1 = tsdbGetTableSchemaByVersion(pTable, dataRowVersion(row1)); + } + if(isRow1DataRow) { + numOfColsOfRow1 = schemaNCols(pSchema1); } else { - numOfRowCols = schemaNCols(pSchema); + numOfColsOfRow1 = kvRowNCols(memRowKvBody(row1)); } - int32_t i = 0; - - if (isDataRow(row)) { - SDataRow dataRow = memRowDataBody(row); - int32_t j = 0; - while (i < numOfCols && j < numOfRowCols) { - SColumnInfoData* pColInfo = taosArrayGet(pQueryHandle->pColumns, i); - if (pSchema->columns[j].colId < pColInfo->info.colId) { - j++; - continue; - } - - if (ASCENDING_TRAVERSE(pQueryHandle->order)) { - pData = (char*)pColInfo->pData + numOfRows * pColInfo->info.bytes; - } else { - pData = (char*)pColInfo->pData + (capacity - numOfRows - 1) * pColInfo->info.bytes; - } - - if (pSchema->columns[j].colId == pColInfo->info.colId) { - void* value = - tdGetRowDataOfCol(dataRow, (int8_t)pColInfo->info.type, TD_DATA_ROW_HEAD_SIZE + pSchema->columns[j].offset); - switch (pColInfo->info.type) { - case TSDB_DATA_TYPE_BINARY: - case TSDB_DATA_TYPE_NCHAR: - memcpy(pData, value, varDataTLen(value)); - break; - case TSDB_DATA_TYPE_NULL: - case TSDB_DATA_TYPE_BOOL: - case TSDB_DATA_TYPE_TINYINT: - case TSDB_DATA_TYPE_UTINYINT: - *(uint8_t*)pData = *(uint8_t*)value; - break; - case TSDB_DATA_TYPE_SMALLINT: - case TSDB_DATA_TYPE_USMALLINT: - *(uint16_t*)pData = *(uint16_t*)value; - break; - case TSDB_DATA_TYPE_INT: - case TSDB_DATA_TYPE_UINT: - *(uint32_t*)pData = *(uint32_t*)value; - break; - case TSDB_DATA_TYPE_BIGINT: - case TSDB_DATA_TYPE_UBIGINT: - *(uint64_t*)pData = *(uint64_t*)value; - break; - case TSDB_DATA_TYPE_FLOAT: - SET_FLOAT_PTR(pData, value); - break; - case TSDB_DATA_TYPE_DOUBLE: - SET_DOUBLE_PTR(pData, value); - break; - case TSDB_DATA_TYPE_TIMESTAMP: - if (pColInfo->info.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) { - *(TSKEY*)pData = tdGetKey(*(TKEY*)value); - } else { - *(TSKEY*)pData = *(TSKEY*)value; - } - break; - default: - memcpy(pData, value, pColInfo->info.bytes); - } - - j++; - i++; - } else { // pColInfo->info.colId < pSchema->columns[j].colId, it is a NULL data - if (pColInfo->info.type == TSDB_DATA_TYPE_BINARY || pColInfo->info.type == TSDB_DATA_TYPE_NCHAR) { - setVardataNull(pData, pColInfo->info.type); - } else { - setNull(pData, pColInfo->info.type, pColInfo->info.bytes); - } - i++; - } + int32_t numOfColsOfRow2 = 0; + if(row2) { + isRow2DataRow = isDataRow(row2); + if (pSchema2 == NULL) { + pSchema2 = tsdbGetTableSchemaByVersion(pTable, dataRowVersion(row2)); } - } else if (isKvRow(row)) { - SKVRow kvRow = memRowKvBody(row); - int32_t k = 0; - int32_t nKvRowCols = kvRowNCols(kvRow); - - while (i < numOfCols && k < nKvRowCols) { - SColumnInfoData* pColInfo = taosArrayGet(pQueryHandle->pColumns, i); - SColIdx* pColIdx = kvRowColIdxAt(kvRow, k); - - if (pColIdx->colId < pColInfo->info.colId) { - ++k; - continue; - } - - if (ASCENDING_TRAVERSE(pQueryHandle->order)) { - pData = (char*)pColInfo->pData + numOfRows * pColInfo->info.bytes; - } else { - pData = (char*)pColInfo->pData + (capacity - numOfRows - 1) * pColInfo->info.bytes; - } - - if (pColIdx->colId == pColInfo->info.colId) { - // offset of pColIdx for SKVRow including the TD_KV_ROW_HEAD_SIZE - void* value = tdGetKvRowDataOfCol(kvRow, pColIdx->offset); - switch (pColInfo->info.type) { - case TSDB_DATA_TYPE_BINARY: - case TSDB_DATA_TYPE_NCHAR: - memcpy(pData, value, varDataTLen(value)); - break; - case TSDB_DATA_TYPE_NULL: - case TSDB_DATA_TYPE_BOOL: - case TSDB_DATA_TYPE_TINYINT: - case 
TSDB_DATA_TYPE_UTINYINT: - *(uint8_t*)pData = *(uint8_t*)value; - break; - case TSDB_DATA_TYPE_SMALLINT: - case TSDB_DATA_TYPE_USMALLINT: - *(uint16_t*)pData = *(uint16_t*)value; - break; - case TSDB_DATA_TYPE_INT: - case TSDB_DATA_TYPE_UINT: - *(uint32_t*)pData = *(uint32_t*)value; - break; - case TSDB_DATA_TYPE_BIGINT: - case TSDB_DATA_TYPE_UBIGINT: - *(uint64_t*)pData = *(uint64_t*)value; - break; - case TSDB_DATA_TYPE_FLOAT: - SET_FLOAT_PTR(pData, value); - break; - case TSDB_DATA_TYPE_DOUBLE: - SET_DOUBLE_PTR(pData, value); - break; - case TSDB_DATA_TYPE_TIMESTAMP: - if (pColInfo->info.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) { - *(TSKEY*)pData = tdGetKey(*(TKEY*)value); - } else { - *(TSKEY*)pData = *(TSKEY*)value; - } - break; - default: - memcpy(pData, value, pColInfo->info.bytes); - } - ++k; - ++i; - continue; - } - // If (pColInfo->info.colId < pColIdx->colId), it is NULL data - if (pColInfo->info.type == TSDB_DATA_TYPE_BINARY || pColInfo->info.type == TSDB_DATA_TYPE_NCHAR) { - setVardataNull(pData, pColInfo->info.type); - } else { - setNull(pData, pColInfo->info.type, pColInfo->info.bytes); - } - ++i; + if(isRow2DataRow) { + numOfColsOfRow2 = schemaNCols(pSchema2); + } else { + numOfColsOfRow2 = kvRowNCols(memRowKvBody(row2)); } - } else { - ASSERT(0); } - while (i < numOfCols) { // the remain columns are all null data + + int32_t i = 0, j = 0, k = 0; + while(i < numOfCols && (j < numOfColsOfRow1 || k < numOfColsOfRow2)) { SColumnInfoData* pColInfo = taosArrayGet(pQueryHandle->pColumns, i); + if (ASCENDING_TRAVERSE(pQueryHandle->order)) { pData = (char*)pColInfo->pData + numOfRows * pColInfo->info.bytes; } else { pData = (char*)pColInfo->pData + (capacity - numOfRows - 1) * pColInfo->info.bytes; } - if (pColInfo->info.type == TSDB_DATA_TYPE_BINARY || pColInfo->info.type == TSDB_DATA_TYPE_NCHAR) { - setVardataNull(pData, pColInfo->info.type); + int32_t colIdOfRow1; + if(j >= numOfColsOfRow1) { + colIdOfRow1 = INT32_MAX; + } else if(isRow1DataRow) { + colIdOfRow1 = pSchema1->columns[j].colId; } else { - setNull(pData, pColInfo->info.type, pColInfo->info.bytes); + void *rowBody = memRowKvBody(row1); + SColIdx *pColIdx = kvRowColIdxAt(rowBody, j); + colIdOfRow1 = pColIdx->colId; } - i++; + int32_t colIdOfRow2; + if(k >= numOfColsOfRow2) { + colIdOfRow2 = INT32_MAX; + } else if(isRow2DataRow) { + colIdOfRow2 = pSchema2->columns[k].colId; + } else { + void *rowBody = memRowKvBody(row2); + SColIdx *pColIdx = kvRowColIdxAt(rowBody, k); + colIdOfRow2 = pColIdx->colId; + } + + if(colIdOfRow1 == colIdOfRow2) { + if(colIdOfRow1 < pColInfo->info.colId) { + j++; + k++; + continue; + } + row = row1; + pSchema = pSchema1; + isChosenRowDataRow = isRow1DataRow; + chosen_itr = j; + } else if(colIdOfRow1 < colIdOfRow2) { + if(colIdOfRow1 < pColInfo->info.colId) { + j++; + continue; + } + row = row1; + pSchema = pSchema1; + isChosenRowDataRow = isRow1DataRow; + chosen_itr = j; + } else { + if(colIdOfRow2 < pColInfo->info.colId) { + k++; + continue; + } + row = row2; + pSchema = pSchema2; + chosen_itr = k; + isChosenRowDataRow = isRow2DataRow; + } + if(isChosenRowDataRow) { + colId = pSchema->columns[chosen_itr].colId; + offset = pSchema->columns[chosen_itr].offset; + void *rowBody = memRowDataBody(row); + value = tdGetRowDataOfCol(rowBody, (int8_t)pColInfo->info.type, TD_DATA_ROW_HEAD_SIZE + offset); + } else { + void *rowBody = memRowKvBody(row); + SColIdx *pColIdx = kvRowColIdxAt(rowBody, chosen_itr); + colId = pColIdx->colId; + offset = pColIdx->offset; + value = tdGetKvRowDataOfCol(rowBody, 
pColIdx->offset); + } + + + if (colId == pColInfo->info.colId) { + if(forceSetNull || (!isNull(value, (int8_t)pColInfo->info.type))) { + switch (pColInfo->info.type) { + case TSDB_DATA_TYPE_BINARY: + case TSDB_DATA_TYPE_NCHAR: + memcpy(pData, value, varDataTLen(value)); + break; + case TSDB_DATA_TYPE_NULL: + case TSDB_DATA_TYPE_BOOL: + case TSDB_DATA_TYPE_TINYINT: + case TSDB_DATA_TYPE_UTINYINT: + *(uint8_t *)pData = *(uint8_t *)value; + break; + case TSDB_DATA_TYPE_SMALLINT: + case TSDB_DATA_TYPE_USMALLINT: + *(uint16_t *)pData = *(uint16_t *)value; + break; + case TSDB_DATA_TYPE_INT: + case TSDB_DATA_TYPE_UINT: + *(uint32_t *)pData = *(uint32_t *)value; + break; + case TSDB_DATA_TYPE_BIGINT: + case TSDB_DATA_TYPE_UBIGINT: + *(uint64_t *)pData = *(uint64_t *)value; + break; + case TSDB_DATA_TYPE_FLOAT: + SET_FLOAT_PTR(pData, value); + break; + case TSDB_DATA_TYPE_DOUBLE: + SET_DOUBLE_PTR(pData, value); + break; + case TSDB_DATA_TYPE_TIMESTAMP: + if (pColInfo->info.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) { + *(TSKEY *)pData = tdGetKey(*(TKEY *)value); + } else { + *(TSKEY *)pData = *(TSKEY *)value; + } + break; + default: + memcpy(pData, value, pColInfo->info.bytes); + } + } + i++; + + if(row == row1) { + j++; + } else { + k++; + } + } else { + if(forceSetNull) { + if (pColInfo->info.type == TSDB_DATA_TYPE_BINARY || pColInfo->info.type == TSDB_DATA_TYPE_NCHAR) { + setVardataNull(pData, pColInfo->info.type); + } else { + setNull(pData, pColInfo->info.type, pColInfo->info.bytes); + } + } + i++; + } + } + + if(forceSetNull) { + while (i < numOfCols) { // the remain columns are all null data + SColumnInfoData* pColInfo = taosArrayGet(pQueryHandle->pColumns, i); + if (ASCENDING_TRAVERSE(pQueryHandle->order)) { + pData = (char*)pColInfo->pData + numOfRows * pColInfo->info.bytes; + } else { + pData = (char*)pColInfo->pData + (capacity - numOfRows - 1) * pColInfo->info.bytes; + } + + if (pColInfo->info.type == TSDB_DATA_TYPE_BINARY || pColInfo->info.type == TSDB_DATA_TYPE_NCHAR) { + setVardataNull(pData, pColInfo->info.type); + } else { + setNull(pData, pColInfo->info.type, pColInfo->info.bytes); + } + + i++; + } } } + static void moveDataToFront(STsdbQueryHandle* pQueryHandle, int32_t numOfRows, int32_t numOfCols) { if (numOfRows == 0 || ASCENDING_TRAVERSE(pQueryHandle->order)) { return; @@ -1798,8 +1895,10 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* // compared with the data from in-memory buffer, to generate the correct timestamp array list int32_t numOfRows = 0; - int16_t rv = -1; - STSchema* pSchema = NULL; + int16_t rv1 = -1; + int16_t rv2 = -1; + STSchema* pSchema1 = NULL; + STSchema* pSchema2 = NULL; int32_t pos = cur->pos; cur->win = TSWINDOW_INITIALIZER; @@ -1811,12 +1910,13 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* } else if (pCheckInfo->iter != NULL || pCheckInfo->iiter != NULL) { SSkipListNode* node = NULL; do { - SMemRow row = getSDataRowInTableMem(pCheckInfo, pQueryHandle->order, pCfg->update); - if (row == NULL) { + SMemRow row2 = NULL; + SMemRow row1 = getSMemRowInTableMem(pCheckInfo, pQueryHandle->order, pCfg->update, &row2); + if (row1 == NULL) { break; } - TSKEY key = memRowKey(row); + TSKEY key = memRowKey(row1); if ((key > pQueryHandle->window.ekey && ASCENDING_TRAVERSE(pQueryHandle->order)) || (key < pQueryHandle->window.ekey && !ASCENDING_TRAVERSE(pQueryHandle->order))) { break; @@ -1829,12 +1929,16 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* if ((key < 
tsArray[pos] && ASCENDING_TRAVERSE(pQueryHandle->order)) || (key > tsArray[pos] && !ASCENDING_TRAVERSE(pQueryHandle->order))) { - if (rv != memRowVersion(row)) { - pSchema = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row)); - rv = memRowVersion(row); + if (rv1 != memRowVersion(row1)) { + pSchema1 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row1)); + rv1 = memRowVersion(row1); } - - copyOneRowFromMem(pQueryHandle, pQueryHandle->outputCapacity, numOfRows, row, numOfCols, pTable, pSchema); + if(row2 && rv2 != memRowVersion(row2)) { + pSchema2 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row2)); + rv2 = memRowVersion(row2); + } + + mergeTwoRowFromMem(pQueryHandle, pQueryHandle->outputCapacity, numOfRows, row1, row2, numOfCols, pTable, pSchema1, pSchema2, true); numOfRows += 1; if (cur->win.skey == TSKEY_INITIAL_VAL) { cur->win.skey = key; @@ -1847,12 +1951,20 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* moveToNextRowInMem(pCheckInfo); } else if (key == tsArray[pos]) { // data in buffer has the same timestamp of data in file block, ignore it if (pCfg->update) { - if (rv != memRowVersion(row)) { - pSchema = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row)); - rv = memRowVersion(row); + if(pCfg->update == TD_ROW_PARTIAL_UPDATE) { + doCopyRowsFromFileBlock(pQueryHandle, pQueryHandle->outputCapacity, numOfRows, pos, pos); } - - copyOneRowFromMem(pQueryHandle, pQueryHandle->outputCapacity, numOfRows, row, numOfCols, pTable, pSchema); + if (rv1 != memRowVersion(row1)) { + pSchema1 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row1)); + rv1 = memRowVersion(row1); + } + if(row2 && rv2 != memRowVersion(row2)) { + pSchema2 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row2)); + rv2 = memRowVersion(row2); + } + + bool forceSetNull = pCfg->update != TD_ROW_PARTIAL_UPDATE; + mergeTwoRowFromMem(pQueryHandle, pQueryHandle->outputCapacity, numOfRows, row1, row2, numOfCols, pTable, pSchema1, pSchema2, forceSetNull); numOfRows += 1; if (cur->win.skey == TSKEY_INITIAL_VAL) { cur->win.skey = key; @@ -1877,7 +1989,7 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* assert(end != -1); if (tsArray[end] == key) { // the value of key in cache equals to the end timestamp value, ignore it - if (!pCfg->update) { + if (pCfg->update == TD_ROW_DISCARD_UPDATE) { moveToNextRowInMem(pCheckInfo); } else { end -= step; @@ -2490,7 +2602,7 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int STSchema* pSchema = NULL; do { - SMemRow row = getSDataRowInTableMem(pCheckInfo, pQueryHandle->order, pCfg->update); + SMemRow row = getSMemRowInTableMem(pCheckInfo, pQueryHandle->order, pCfg->update, NULL); if (row == NULL) { break; } @@ -2512,7 +2624,7 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int pSchema = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row)); rv = memRowVersion(row); } - copyOneRowFromMem(pQueryHandle, maxRowsToRead, numOfRows, row, numOfCols, pTable, pSchema); + mergeTwoRowFromMem(pQueryHandle, maxRowsToRead, numOfRows, row, NULL, numOfCols, pTable, pSchema, NULL, true); if (++numOfRows >= maxRowsToRead) { moveToNextRowInMem(pCheckInfo); @@ -2637,7 +2749,7 @@ static bool loadCachedLastRow(STsdbQueryHandle* pQueryHandle) { if (ret != TSDB_CODE_SUCCESS) { return false; } - copyOneRowFromMem(pQueryHandle, pQueryHandle->outputCapacity, 0, pRow, numOfCols, pCheckInfo->pTableObj, NULL); + mergeTwoRowFromMem(pQueryHandle, pQueryHandle->outputCapacity, 
0, pRow, NULL, numOfCols, pCheckInfo->pTableObj, NULL, NULL, true); tfree(pRow); // update the last key value @@ -2920,7 +3032,7 @@ static int32_t doGetExternalRow(STsdbQueryHandle* pQueryHandle, int16_t type, SM } } - SArray* row = (type == TSDB_PREV_ROW)? pQueryHandle->prev:pQueryHandle->next; + SArray* row = (type == TSDB_PREV_ROW)? pQueryHandle->prev : pQueryHandle->next; for (int32_t i = 0; i < numOfCols; ++i) { SColumnInfoData* pCol = taosArrayGet(pQueryHandle->pColumns, i); @@ -3787,10 +3899,6 @@ static void* doFreeColumnInfoData(SArray* pColumnInfoData) { } static void* destroyTableCheckInfo(SArray* pTableCheckInfo) { - if (pTableCheckInfo == NULL) { - return NULL; - } - size_t size = taosArrayGetSize(pTableCheckInfo); for (int32_t i = 0; i < size; ++i) { STableCheckInfo* p = taosArrayGet(pTableCheckInfo, i); @@ -3834,6 +3942,7 @@ void tsdbCleanupQueryHandle(TsdbQueryHandleT queryHandle) { pQueryHandle->next = doFreeColumnInfoData(pQueryHandle->next); SIOCostSummary* pCost = &pQueryHandle->cost; + tsdbDebug("%p :io-cost summary: head-file read cnt:%"PRIu64", head-file time:%"PRIu64" us, statis-info:%"PRId64" us, datablock:%" PRId64" us, check data:%"PRId64" us, 0x%"PRIx64, pQueryHandle, pCost->headFileLoad, pCost->headFileLoadTime, pCost->statisInfoLoadTime, pCost->blockLoadTime, pCost->checkForNextTime, pQueryHandle->qId); diff --git a/src/tsdb/src/tsdbReadImpl.c b/src/tsdb/src/tsdbReadImpl.c index dd14dc700f..666a2d3571 100644 --- a/src/tsdb/src/tsdbReadImpl.c +++ b/src/tsdb/src/tsdbReadImpl.c @@ -244,6 +244,7 @@ int tsdbLoadBlockInfo(SReadH *pReadh, void *pTarget) { int tsdbLoadBlockData(SReadH *pReadh, SBlock *pBlock, SBlockInfo *pBlkInfo) { ASSERT(pBlock->numOfSubBlocks > 0); + int8_t update = pReadh->pRepo->config.update; SBlock *iBlock = pBlock; if (pBlock->numOfSubBlocks > 1) { @@ -258,7 +259,7 @@ int tsdbLoadBlockData(SReadH *pReadh, SBlock *pBlock, SBlockInfo *pBlkInfo) { for (int i = 1; i < pBlock->numOfSubBlocks; i++) { iBlock++; if (tsdbLoadBlockDataImpl(pReadh, iBlock, pReadh->pDCols[1]) < 0) return -1; - if (tdMergeDataCols(pReadh->pDCols[0], pReadh->pDCols[1], pReadh->pDCols[1]->numOfRows, NULL) < 0) return -1; + if (tdMergeDataCols(pReadh->pDCols[0], pReadh->pDCols[1], pReadh->pDCols[1]->numOfRows, NULL, update != TD_ROW_PARTIAL_UPDATE) < 0) return -1; } ASSERT(pReadh->pDCols[0]->numOfRows == pBlock->numOfRows); @@ -270,6 +271,7 @@ int tsdbLoadBlockData(SReadH *pReadh, SBlock *pBlock, SBlockInfo *pBlkInfo) { int tsdbLoadBlockDataCols(SReadH *pReadh, SBlock *pBlock, SBlockInfo *pBlkInfo, int16_t *colIds, int numOfColsIds) { ASSERT(pBlock->numOfSubBlocks > 0); + int8_t update = pReadh->pRepo->config.update; SBlock *iBlock = pBlock; if (pBlock->numOfSubBlocks > 1) { @@ -284,7 +286,7 @@ int tsdbLoadBlockDataCols(SReadH *pReadh, SBlock *pBlock, SBlockInfo *pBlkInfo, for (int i = 1; i < pBlock->numOfSubBlocks; i++) { iBlock++; if (tsdbLoadBlockDataColsImpl(pReadh, iBlock, pReadh->pDCols[1], colIds, numOfColsIds) < 0) return -1; - if (tdMergeDataCols(pReadh->pDCols[0], pReadh->pDCols[1], pReadh->pDCols[1]->numOfRows, NULL) < 0) return -1; + if (tdMergeDataCols(pReadh->pDCols[0], pReadh->pDCols[1], pReadh->pDCols[1]->numOfRows, NULL, update != TD_ROW_PARTIAL_UPDATE) < 0) return -1; } ASSERT(pReadh->pDCols[0]->numOfRows == pBlock->numOfRows); @@ -657,4 +659,4 @@ static int tsdbLoadColData(SReadH *pReadh, SDFile *pDFile, SBlock *pBlock, SBloc } return 0; -} \ No newline at end of file +} diff --git a/src/tsdb/src/tsdbRowMergeBuf.c b/src/tsdb/src/tsdbRowMergeBuf.c new file 
mode 100644 index 0000000000..5ce580f70f --- /dev/null +++ b/src/tsdb/src/tsdbRowMergeBuf.c @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include "tsdbRowMergeBuf.h" +#include "tdataformat.h" + +// row1 has higher priority +SMemRow tsdbMergeTwoRows(SMergeBuf *pBuf, SMemRow row1, SMemRow row2, STSchema *pSchema1, STSchema *pSchema2) { + if(row2 == NULL) return row1; + if(row1 == NULL) return row2; + ASSERT(pSchema1->version == memRowVersion(row1)); + ASSERT(pSchema2->version == memRowVersion(row2)); + + if(tsdbMergeBufMakeSureRoom(pBuf, pSchema1, pSchema2) < 0) { + return NULL; + } + return mergeTwoMemRows(*pBuf, row1, row2, pSchema1, pSchema2); +} diff --git a/src/util/inc/tfunctional.h b/src/util/inc/tfunctional.h new file mode 100644 index 0000000000..70f54e921d --- /dev/null +++ b/src/util/inc/tfunctional.h @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ +#ifndef TD_TFUNCTIONAL_H +#define TD_TFUNCTIONAL_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include "os.h" + +//TODO: hard to use, trying to rewrite it using va_list + +typedef void* (*GenericVaFunc)(void* args[]); +typedef int32_t (*I32VaFunc) (void* args[]); +typedef void (*VoidVaFunc) (void* args[]); + +typedef struct GenericSavedFunc { + GenericVaFunc func; + void * args[]; +} tGenericSavedFunc; + +typedef struct I32SavedFunc { + I32VaFunc func; + void * args[]; +} tI32SavedFunc; + +typedef struct VoidSavedFunc { + VoidVaFunc func; + void * args[]; +} tVoidSavedFunc; + +tGenericSavedFunc* genericSavedFuncInit(GenericVaFunc func, int numOfArgs); +tI32SavedFunc* i32SavedFuncInit(I32VaFunc func, int numOfArgs); +tVoidSavedFunc* voidSavedFuncInit(VoidVaFunc func, int numOfArgs); +void* genericInvoke(tGenericSavedFunc* const pSavedFunc); +int32_t i32Invoke(tI32SavedFunc* const pSavedFunc); +void voidInvoke(tVoidSavedFunc* const pSavedFunc); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/util/inc/tskiplist.h b/src/util/inc/tskiplist.h index 17f5940b49..d9dc001ccd 100644 --- a/src/util/inc/tskiplist.h +++ b/src/util/inc/tskiplist.h @@ -23,6 +23,7 @@ extern "C" { #include "os.h" #include "taosdef.h" #include "tarray.h" +#include "tfunctional.h" #define MAX_SKIP_LIST_LEVEL 15 #define SKIP_LIST_RECORD_PERFORMANCE 0 @@ -30,13 +31,17 @@ extern "C" { // For key property setting #define SL_ALLOW_DUP_KEY (uint8_t)0x0 // Allow duplicate key exists (for tag index usage) #define SL_DISCARD_DUP_KEY (uint8_t)0x1 // Discard duplicate key (for data update=0 case) -#define SL_UPDATE_DUP_KEY (uint8_t)0x2 // Update duplicate key by remove/insert (for data update=1 case) +#define SL_UPDATE_DUP_KEY (uint8_t)0x2 // Update duplicate key by remove/insert (for data update!=0 case) + // For thread safety setting #define SL_THREAD_SAFE (uint8_t)0x4 typedef char *SSkipListKey; typedef char *(*__sl_key_fn_t)(const void *); +typedef void (*sl_patch_row_fn_t)(void * pDst, const void * pSrc); +typedef void* (*iter_next_fn_t)(void *iter); + typedef struct SSkipListNode { uint8_t level; void * pData; @@ -95,6 +100,12 @@ typedef struct tSkipListState { uint64_t nTotalElapsedTimeForInsert; } tSkipListState; +typedef enum { + SSkipListPutSuccess = 0, + SSkipListPutEarlyStop = 1, + SSkipListPutSkipOne = 2 +} SSkipListPutStatus; + typedef struct SSkipList { unsigned int seed; __compar_fn_t comparFn; @@ -111,6 +122,7 @@ typedef struct SSkipList { #if SKIP_LIST_RECORD_PERFORMANCE tSkipListState state; // skiplist state #endif + tGenericSavedFunc* insertHandleFn; } SSkipList; typedef struct SSkipListIterator { @@ -118,7 +130,7 @@ typedef struct SSkipListIterator { SSkipListNode *cur; int32_t step; // the number of nodes that have been checked already int32_t order; // order of the iterator - SSkipListNode *next; // next points to the true qualified node in skip list + SSkipListNode *next; // next points to the true qualified node in skiplist } SSkipListIterator; #define SL_IS_THREAD_SAFE(s) (((s)->flags) & SL_THREAD_SAFE) @@ -132,7 +144,7 @@ SSkipList *tSkipListCreate(uint8_t maxLevel, uint8_t keyType, uint16_t keyLen, _ __sl_key_fn_t fn); void tSkipListDestroy(SSkipList *pSkipList); SSkipListNode * tSkipListPut(SSkipList *pSkipList, void *pData); -void tSkipListPutBatch(SSkipList *pSkipList, void **ppData, int ndata); +void tSkipListPutBatchByIter(SSkipList *pSkipList, void *iter, iter_next_fn_t iterate); SArray * tSkipListGet(SSkipList *pSkipList, SSkipListKey pKey); void tSkipListPrint(SSkipList 
*pSkipList, int16_t nlevel); SSkipListIterator *tSkipListCreateIter(SSkipList *pSkipList); diff --git a/src/util/src/tfunctional.c b/src/util/src/tfunctional.c new file mode 100644 index 0000000000..c470a2b8ae --- /dev/null +++ b/src/util/src/tfunctional.c @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include "tfunctional.h" +#include "tarray.h" + + +tGenericSavedFunc* genericSavedFuncInit(GenericVaFunc func, int numOfArgs) { + tGenericSavedFunc* pSavedFunc = malloc(sizeof(tGenericSavedFunc) + numOfArgs * (sizeof(void*))); + pSavedFunc->func = func; + return pSavedFunc; +} + +tI32SavedFunc* i32SavedFuncInit(I32VaFunc func, int numOfArgs) { + tI32SavedFunc* pSavedFunc = malloc(sizeof(tI32SavedFunc) + numOfArgs * sizeof(void *)); + pSavedFunc->func = func; + return pSavedFunc; +} + +tVoidSavedFunc* voidSavedFuncInit(VoidVaFunc func, int numOfArgs) { + tVoidSavedFunc* pSavedFunc = malloc(sizeof(tVoidSavedFunc) + numOfArgs * sizeof(void*)); + pSavedFunc->func = func; + return pSavedFunc; +} + +FORCE_INLINE void* genericInvoke(tGenericSavedFunc* const pSavedFunc) { + return pSavedFunc->func(pSavedFunc->args); +} + +FORCE_INLINE int32_t i32Invoke(tI32SavedFunc* const pSavedFunc) { + return pSavedFunc->func(pSavedFunc->args); +} + +FORCE_INLINE void voidInvoke(tVoidSavedFunc* const pSavedFunc) { + if(pSavedFunc) pSavedFunc->func(pSavedFunc->args); +} diff --git a/src/util/src/tskiplist.c b/src/util/src/tskiplist.c index 082b454bb5..b464519ba6 100644 --- a/src/util/src/tskiplist.c +++ b/src/util/src/tskiplist.c @@ -16,6 +16,7 @@ #include "tskiplist.h" #include "os.h" #include "tcompare.h" +#include "tdataformat.h" #include "tulog.h" #include "tutil.h" @@ -31,6 +32,7 @@ static SSkipListNode *tSkipListNewNode(uint8_t level); static SSkipListNode *tSkipListPutImpl(SSkipList *pSkipList, void *pData, SSkipListNode **direction, bool isForward, bool hasDup); + static FORCE_INLINE int tSkipListWLock(SSkipList *pSkipList); static FORCE_INLINE int tSkipListRLock(SSkipList *pSkipList); static FORCE_INLINE int tSkipListUnlock(SSkipList *pSkipList); @@ -80,6 +82,7 @@ SSkipList *tSkipListCreate(uint8_t maxLevel, uint8_t keyType, uint16_t keyLen, _ #if SKIP_LIST_RECORD_PERFORMANCE pSkipList->state.nTotalMemSize += sizeof(SSkipList); #endif + pSkipList->insertHandleFn = NULL; return pSkipList; } @@ -97,6 +100,8 @@ void tSkipListDestroy(SSkipList *pSkipList) { tSkipListFreeNode(pTemp); } + tfree(pSkipList->insertHandleFn); + tSkipListUnlock(pSkipList); if (pSkipList->lock != NULL) { pthread_rwlock_destroy(pSkipList->lock); @@ -124,8 +129,7 @@ SSkipListNode *tSkipListPut(SSkipList *pSkipList, void *pData) { return pNode; } -// Put a batch of data into skiplist. 
The batch of data must be in ascending order -void tSkipListPutBatch(SSkipList *pSkipList, void **ppData, int ndata) { +void tSkipListPutBatchByIter(SSkipList *pSkipList, void *iter, iter_next_fn_t iterate) { SSkipListNode *backward[MAX_SKIP_LIST_LEVEL] = {0}; SSkipListNode *forward[MAX_SKIP_LIST_LEVEL] = {0}; bool hasDup = false; @@ -135,17 +139,21 @@ void tSkipListPutBatch(SSkipList *pSkipList, void **ppData, int ndata) { tSkipListWLock(pSkipList); + void* pData = iterate(iter); + if(pData == NULL) return; + // backward to put the first data - hasDup = tSkipListGetPosToPut(pSkipList, backward, ppData[0]); - tSkipListPutImpl(pSkipList, ppData[0], backward, false, hasDup); + hasDup = tSkipListGetPosToPut(pSkipList, backward, pData); + + tSkipListPutImpl(pSkipList, pData, backward, false, hasDup); for (int level = 0; level < pSkipList->maxLevel; level++) { forward[level] = SL_NODE_GET_BACKWARD_POINTER(backward[level], level); } // forward to put the rest of data - for (int idata = 1; idata < ndata; idata++) { - pDataKey = pSkipList->keyFn(ppData[idata]); + while ((pData = iterate(iter)) != NULL) { + pDataKey = pSkipList->keyFn(pData); hasDup = false; // Compare max key @@ -186,9 +194,8 @@ void tSkipListPutBatch(SSkipList *pSkipList, void **ppData, int ndata) { } } - tSkipListPutImpl(pSkipList, ppData[idata], forward, true, hasDup); + tSkipListPutImpl(pSkipList, pData, forward, true, hasDup); } - tSkipListUnlock(pSkipList); } @@ -661,18 +668,40 @@ static SSkipListNode *tSkipListPutImpl(SSkipList *pSkipList, void *pData, SSkipL uint8_t dupMode = SL_DUP_MODE(pSkipList); SSkipListNode *pNode = NULL; - if (hasDup && (dupMode == SL_DISCARD_DUP_KEY || dupMode == SL_UPDATE_DUP_KEY)) { + if (hasDup && (dupMode != SL_ALLOW_DUP_KEY)) { if (dupMode == SL_UPDATE_DUP_KEY) { if (isForward) { pNode = SL_NODE_GET_FORWARD_POINTER(direction[0], 0); } else { pNode = SL_NODE_GET_BACKWARD_POINTER(direction[0], 0); } - atomic_store_ptr(&(pNode->pData), pData); + if (pSkipList->insertHandleFn) { + pSkipList->insertHandleFn->args[0] = pData; + pSkipList->insertHandleFn->args[1] = pNode->pData; + pData = genericInvoke(pSkipList->insertHandleFn); + } + if(pData) { + atomic_store_ptr(&(pNode->pData), pData); + } + } else { + //for compatiblity, duplicate key inserted when update=0 should be also calculated as affected rows! + if(pSkipList->insertHandleFn) { + pSkipList->insertHandleFn->args[0] = NULL; + pSkipList->insertHandleFn->args[1] = NULL; + genericInvoke(pSkipList->insertHandleFn); + } } } else { pNode = tSkipListNewNode(getSkipListRandLevel(pSkipList)); if (pNode != NULL) { + // insertHandleFn will be assigned only for timeseries data, + // in which case, pData is pointed to an memory to be freed later; + // while for metadata, the mem alloc will not be called. 
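+ // Two-slot calling convention used with insertHandleFn: args[0] is the
+ // incoming row and args[1] the row already stored at this position; args[1]
+ // is NULL here because a freshly created node has no previous row to merge with.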
+ if (pSkipList->insertHandleFn) { + pSkipList->insertHandleFn->args[0] = pData; + pSkipList->insertHandleFn->args[1] = NULL; + pData = genericInvoke(pSkipList->insertHandleFn); + } pNode->pData = pData; tSkipListDoInsert(pSkipList, direction, pNode, isForward); From 9fc4f7466f14527ebec5e7a3631b70f370327a48 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Thu, 29 Jul 2021 14:11:09 +0800 Subject: [PATCH 40/46] make CI happy --- tests/pytest/update/merge_commit_data2.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/pytest/update/merge_commit_data2.py b/tests/pytest/update/merge_commit_data2.py index 3f0fc718ad..5fc58e3b4f 100644 --- a/tests/pytest/update/merge_commit_data2.py +++ b/tests/pytest/update/merge_commit_data2.py @@ -28,6 +28,7 @@ class TDTestCase: def restart_taosd(self,db): tdDnodes.stop(1) tdDnodes.startWithoutSleep(1) + tdsql.sleep(2) tdSql.execute("use %s;" % db) def date_to_timestamp_microseconds(self, date): From ab885b6b8c57d68b41bf4234d4b4507549cc7727 Mon Sep 17 00:00:00 2001 From: tickduan <417921451@qq.com> Date: Thu, 29 Jul 2021 14:52:33 +0800 Subject: [PATCH 41/46] fixed BUG TD-5596 --- src/util/src/tcompression.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/util/src/tcompression.c b/src/util/src/tcompression.c index 4472633304..b6b63f46c8 100644 --- a/src/util/src/tcompression.c +++ b/src/util/src/tcompression.c @@ -480,6 +480,10 @@ int tsCompressTimestampImp(const char *const input, const int nelements, char *c int64_t *istream = (int64_t *)input; int64_t prev_value = istream[0]; + if(prev_value >= 0x8000000000000000) { + uWarn("compression timestamp is over signed long long range. ts = 0x%"PRIx64" \n", prev_value); + goto _exit_over; + } int64_t prev_delta = -prev_value; uint8_t flags = 0, flag1 = 0, flag2 = 0; uint64_t dd1 = 0, dd2 = 0; From 8ad74438229b6597e7b8f071ad06decc2768e48e Mon Sep 17 00:00:00 2001 From: xiaolei li <85657333+xleili@users.noreply.github.com> Date: Thu, 29 Jul 2021 16:37:58 +0800 Subject: [PATCH 42/46] [TD-4944]:c# connector nanosecond support (#7041) * [TD-4944]:c# connector nanosecond support * [TD-4944]:c# connector nanosecond support * [TD-4944]:c# connector nanosecond support fix indent --- src/connector/C#/TDengineDriver.cs | 3 + tests/examples/C#/C#checker/C#checker.cs | 55 +++- tests/examples/C#/C#checker/TDengineDriver.cs | 267 ++++++++++-------- tests/examples/C#/TDengineDriver.cs | 4 + tests/examples/C#/taosdemo/TDengineDriver.cs | 3 + 5 files changed, 196 insertions(+), 136 deletions(-) diff --git a/src/connector/C#/TDengineDriver.cs b/src/connector/C#/TDengineDriver.cs index 2c150341f6..e6c3a598ad 100644 --- a/src/connector/C#/TDengineDriver.cs +++ b/src/connector/C#/TDengineDriver.cs @@ -163,5 +163,8 @@ namespace TDengineDriver [DllImport("taos", EntryPoint = "taos_close", CallingConvention = CallingConvention.Cdecl)] static extern public int Close(IntPtr taos); + //get precisionin parameter restultset + [DllImport("taos", EntryPoint = "taos_result_precision", CallingConvention = CallingConvention.Cdecl)] + static extern public int ResultPrecision(IntPtr taos); } } diff --git a/tests/examples/C#/C#checker/C#checker.cs b/tests/examples/C#/C#checker/C#checker.cs index 80fa3b8386..29ad290343 100644 --- a/tests/examples/C#/C#checker/C#checker.cs +++ b/tests/examples/C#/C#checker/C#checker.cs @@ -33,7 +33,7 @@ namespace TDengineDriver //sql parameters private string dbName; private string tbName; - + private string precision; private bool isInsertData; private bool isQueryData; @@ -61,9 +61,9 @@ namespace 
TDengineDriver tester.checkInsert(); tester.checkSelect(); tester.checkDropTable(); - + tester.dropDatabase(); tester.CloseConnection(); - + tester.cleanup(); } @@ -156,7 +156,9 @@ namespace TDengineDriver Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "How many rows to insert, default is 100"); Console.WriteLine("{0:G}{1:G}", indent, "-c"); Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "Configuration directory"); - + // + Console.WriteLine("{0:G}{1:G}", indent, "-ps"); + Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "Configurate db precision,default millisecond"); ExitProgram(); } } @@ -168,9 +170,9 @@ namespace TDengineDriver host = this.GetArgumentAsString(argv, "-h", "127.0.0.1"); user = this.GetArgumentAsString(argv, "-u", "root"); password = this.GetArgumentAsString(argv, "-p", "taosdata"); - dbName = this.GetArgumentAsString(argv, "-db", "test"); + dbName = this.GetArgumentAsString(argv, "-d", "test"); tbName = this.GetArgumentAsString(argv, "-s", "weather"); - + precision = this.GetArgumentAsString(argv, "-ps", "ms"); isInsertData = this.GetArgumentAsLong(argv, "-w", 0, 1, 1) != 0; isQueryData = this.GetArgumentAsLong(argv, "-r", 0, 1, 1) != 0; tableCount = this.GetArgumentAsLong(argv, "-n", 1, 10000, 10); @@ -183,6 +185,7 @@ namespace TDengineDriver { TDengine.Options((int)TDengineInitOption.TDDB_OPTION_CONFIGDIR, this.configDir); TDengine.Options((int)TDengineInitOption.TDDB_OPTION_SHELL_ACTIVITY_TIMER, "60"); + Console.WriteLine("init..."); TDengine.Init(); Console.WriteLine("get connection starting..."); } @@ -204,7 +207,7 @@ namespace TDengineDriver public void createDatabase() { StringBuilder sql = new StringBuilder(); - sql.Append("create database if not exists ").Append(this.dbName); + sql.Append("create database if not exists ").Append(this.dbName).Append(" precision '").Append(this.precision).Append("'"); execute(sql.ToString()); } public void useDatabase() @@ -216,8 +219,8 @@ namespace TDengineDriver public void checkSelect() { StringBuilder sql = new StringBuilder(); - sql.Append("select * from test.weather"); - execute(sql.ToString()); + sql.Append("select * from ").Append(this.dbName).Append(".").Append(this.tbName); + ExecuteQuery(sql.ToString()); } public void createTable() { @@ -228,7 +231,7 @@ namespace TDengineDriver public void checkInsert() { StringBuilder sql = new StringBuilder(); - sql.Append("insert into test.weather (ts, temperature, humidity) values(now, 20.5, 34)"); + sql.Append("insert into ").Append(this.dbName).Append(".").Append(this.tbName).Append("(ts, temperature, humidity) values(now, 20.5, 34)"); execute(sql.ToString()); } public void checkDropTable() @@ -237,6 +240,12 @@ namespace TDengineDriver sql.Append("drop table if exists ").Append(this.dbName).Append(".").Append(this.tbName).Append(""); execute(sql.ToString()); } + public void dropDatabase() + { + StringBuilder sql = new StringBuilder(); + sql.Append("drop database if exists ").Append(this.dbName); + execute(sql.ToString()); + } public void execute(string sql) { DateTime dt1 = DateTime.Now; @@ -266,6 +275,7 @@ namespace TDengineDriver DateTime dt1 = DateTime.Now; long queryRows = 0; IntPtr res = TDengine.Query(conn, sql); + getPrecision(res); if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0)) { Console.Write(sql.ToString() + " failure, "); @@ -379,8 +389,31 @@ namespace TDengineDriver static void ExitProgram() { - TDengine.Cleanup(); System.Environment.Exit(0); } + + public void cleanup() + { + Console.WriteLine("clean up..."); + System.Environment.Exit(0); + } 
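+ // Precision codes returned by taos_result_precision(): 0 = millisecond,
+ // 1 = microsecond, 2 = nanosecond (mapped to readable text below).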
+ // method to get db precision + public void getPrecision(IntPtr res) + { + int psc=TDengine.ResultPrecision(res); + switch(psc) + { + case 0: + Console.WriteLine("db:[{0:G}]'s precision is {1:G}",this.dbName,"millisecond"); + break; + case 1: + Console.WriteLine("db:[{0:G}]'s precision is {1:G}",this.dbName,"microsecond"); + break; + case 2: + Console.WriteLine("db:[{0:G}]'s precision is {1:G}",this.dbName,"nanosecond"); + break; + } + + } } } diff --git a/tests/examples/C#/C#checker/TDengineDriver.cs b/tests/examples/C#/C#checker/TDengineDriver.cs index b6f143e181..2864b7bcdd 100644 --- a/tests/examples/C#/C#checker/TDengineDriver.cs +++ b/tests/examples/C#/C#checker/TDengineDriver.cs @@ -19,136 +19,153 @@ using System.Runtime.InteropServices; namespace TDengineDriver { - enum TDengineDataType { - TSDB_DATA_TYPE_NULL = 0, // 1 bytes - TSDB_DATA_TYPE_BOOL = 1, // 1 bytes - TSDB_DATA_TYPE_TINYINT = 2, // 1 bytes - TSDB_DATA_TYPE_SMALLINT = 3, // 2 bytes - TSDB_DATA_TYPE_INT = 4, // 4 bytes - TSDB_DATA_TYPE_BIGINT = 5, // 8 bytes - TSDB_DATA_TYPE_FLOAT = 6, // 4 bytes - TSDB_DATA_TYPE_DOUBLE = 7, // 8 bytes - TSDB_DATA_TYPE_BINARY = 8, // string - TSDB_DATA_TYPE_TIMESTAMP = 9,// 8 bytes - TSDB_DATA_TYPE_NCHAR = 10 // unicode string - } - - enum TDengineInitOption - { - TSDB_OPTION_LOCALE = 0, - TSDB_OPTION_CHARSET = 1, - TSDB_OPTION_TIMEZONE = 2, - TDDB_OPTION_CONFIGDIR = 3, - TDDB_OPTION_SHELL_ACTIVITY_TIMER = 4 - } - - class TDengineMeta - { - public string name; - public short size; - public byte type; - public string TypeName() + enum TDengineDataType { - switch ((TDengineDataType)type) - { - case TDengineDataType.TSDB_DATA_TYPE_BOOL: - return "BOOLEAN"; - case TDengineDataType.TSDB_DATA_TYPE_TINYINT: - return "BYTE"; - case TDengineDataType.TSDB_DATA_TYPE_SMALLINT: - return "SHORT"; - case TDengineDataType.TSDB_DATA_TYPE_INT: - return "INT"; - case TDengineDataType.TSDB_DATA_TYPE_BIGINT: - return "LONG"; - case TDengineDataType.TSDB_DATA_TYPE_FLOAT: - return "FLOAT"; - case TDengineDataType.TSDB_DATA_TYPE_DOUBLE: - return "DOUBLE"; - case TDengineDataType.TSDB_DATA_TYPE_BINARY: - return "STRING"; - case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP: - return "TIMESTAMP"; - case TDengineDataType.TSDB_DATA_TYPE_NCHAR: - return "NCHAR"; - default: - return "undefine"; - } - } - } - - class TDengine - { - public const int TSDB_CODE_SUCCESS = 0; - - [DllImport("taos.dll", EntryPoint = "taos_init", CallingConvention = CallingConvention.Cdecl)] - static extern public void Init(); - - [DllImport("taos.dll", EntryPoint = "taos_cleanup", CallingConvention = CallingConvention.Cdecl)] - static extern public void Cleanup(); - - [DllImport("taos.dll", EntryPoint = "taos_options", CallingConvention = CallingConvention.Cdecl)] - static extern public void Options(int option, string value); - - [DllImport("taos.dll", EntryPoint = "taos_connect", CallingConvention = CallingConvention.Cdecl)] - static extern public IntPtr Connect(string ip, string user, string password, string db, short port); - - [DllImport("taos.dll", EntryPoint = "taos_errstr", CallingConvention = CallingConvention.Cdecl)] - static extern private IntPtr taos_errstr(IntPtr res); - static public string Error(IntPtr res) - { - IntPtr errPtr = taos_errstr(res); - return Marshal.PtrToStringAnsi(errPtr); + TSDB_DATA_TYPE_NULL = 0, // 1 bytes + TSDB_DATA_TYPE_BOOL = 1, // 1 bytes + TSDB_DATA_TYPE_TINYINT = 2, // 1 bytes + TSDB_DATA_TYPE_SMALLINT = 3, // 2 bytes + TSDB_DATA_TYPE_INT = 4, // 4 bytes + TSDB_DATA_TYPE_BIGINT = 5, // 8 bytes + 
TSDB_DATA_TYPE_FLOAT = 6, // 4 bytes + TSDB_DATA_TYPE_DOUBLE = 7, // 8 bytes + TSDB_DATA_TYPE_BINARY = 8, // string + TSDB_DATA_TYPE_TIMESTAMP = 9,// 8 bytes + TSDB_DATA_TYPE_NCHAR = 10, // unicode string + TSDB_DATA_TYPE_UTINYINT = 11,// 1 byte + TSDB_DATA_TYPE_USMALLINT= 12,// 2 bytes + TSDB_DATA_TYPE_UINT = 13, // 4 bytes + TSDB_DATA_TYPE_UBIGINT= 14 // 8 bytes } - [DllImport("taos.dll", EntryPoint = "taos_errno", CallingConvention = CallingConvention.Cdecl)] - static extern public int ErrorNo(IntPtr res); - - [DllImport("taos.dll", EntryPoint = "taos_query", CallingConvention = CallingConvention.Cdecl)] - static extern public IntPtr Query(IntPtr conn, string sqlstr); - - [DllImport("taos.dll", EntryPoint = "taos_affected_rows", CallingConvention = CallingConvention.Cdecl)] - static extern public int AffectRows(IntPtr res); - - [DllImport("taos.dll", EntryPoint = "taos_field_count", CallingConvention = CallingConvention.Cdecl)] - static extern public int FieldCount(IntPtr res); - - [DllImport("taos.dll", EntryPoint = "taos_fetch_fields", CallingConvention = CallingConvention.Cdecl)] - static extern private IntPtr taos_fetch_fields(IntPtr res); - static public List FetchFields(IntPtr res) + enum TDengineInitOption { - const int fieldSize = 68; - - List metas = new List(); - if (res == IntPtr.Zero) - { - return metas; - } - - int fieldCount = FieldCount(res); - IntPtr fieldsPtr = taos_fetch_fields(res); - - for (int i = 0; i < fieldCount; ++i) - { - int offset = i * fieldSize; - - TDengineMeta meta = new TDengineMeta(); - meta.name = Marshal.PtrToStringAnsi(fieldsPtr + offset); - meta.type = Marshal.ReadByte(fieldsPtr + offset + 65); - meta.size = Marshal.ReadInt16(fieldsPtr + offset + 66); - metas.Add(meta); - } - - return metas; + TSDB_OPTION_LOCALE = 0, + TSDB_OPTION_CHARSET = 1, + TSDB_OPTION_TIMEZONE = 2, + TDDB_OPTION_CONFIGDIR = 3, + TDDB_OPTION_SHELL_ACTIVITY_TIMER = 4 } - [DllImport("taos.dll", EntryPoint = "taos_fetch_row", CallingConvention = CallingConvention.Cdecl)] - static extern public IntPtr FetchRows(IntPtr res); + class TDengineMeta + { + public string name; + public short size; + public byte type; + public string TypeName() + { + switch ((TDengineDataType)type) + { + case TDengineDataType.TSDB_DATA_TYPE_BOOL: + return "BOOL"; + case TDengineDataType.TSDB_DATA_TYPE_TINYINT: + return "TINYINT"; + case TDengineDataType.TSDB_DATA_TYPE_SMALLINT: + return "SMALLINT"; + case TDengineDataType.TSDB_DATA_TYPE_INT: + return "INT"; + case TDengineDataType.TSDB_DATA_TYPE_BIGINT: + return "BIGINT"; + case TDengineDataType.TSDB_DATA_TYPE_UTINYINT: + return "TINYINT UNSIGNED"; + case TDengineDataType.TSDB_DATA_TYPE_USMALLINT: + return "SMALLINT UNSIGNED"; + case TDengineDataType.TSDB_DATA_TYPE_UINT: + return "INT UNSIGNED"; + case TDengineDataType.TSDB_DATA_TYPE_UBIGINT: + return "BIGINT UNSIGNED"; + case TDengineDataType.TSDB_DATA_TYPE_FLOAT: + return "FLOAT"; + case TDengineDataType.TSDB_DATA_TYPE_DOUBLE: + return "DOUBLE"; + case TDengineDataType.TSDB_DATA_TYPE_BINARY: + return "STRING"; + case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP: + return "TIMESTAMP"; + case TDengineDataType.TSDB_DATA_TYPE_NCHAR: + return "NCHAR"; + default: + return "undefine"; + } + } + } - [DllImport("taos.dll", EntryPoint = "taos_free_result", CallingConvention = CallingConvention.Cdecl)] - static extern public IntPtr FreeResult(IntPtr res); + class TDengine + { + public const int TSDB_CODE_SUCCESS = 0; - [DllImport("taos.dll", EntryPoint = "taos_close", CallingConvention = CallingConvention.Cdecl)] - 
static extern public int Close(IntPtr taos); - } -} \ No newline at end of file + [DllImport("taos", EntryPoint = "taos_init", CallingConvention = CallingConvention.Cdecl)] + static extern public void Init(); + + [DllImport("taos", EntryPoint = "taos_cleanup", CallingConvention = CallingConvention.Cdecl)] + static extern public void Cleanup(); + + [DllImport("taos", EntryPoint = "taos_options", CallingConvention = CallingConvention.Cdecl)] + static extern public void Options(int option, string value); + + [DllImport("taos", EntryPoint = "taos_connect", CallingConvention = CallingConvention.Cdecl)] + static extern public IntPtr Connect(string ip, string user, string password, string db, short port); + + [DllImport("taos", EntryPoint = "taos_errstr", CallingConvention = CallingConvention.Cdecl)] + static extern private IntPtr taos_errstr(IntPtr res); + static public string Error(IntPtr res) + { + IntPtr errPtr = taos_errstr(res); + return Marshal.PtrToStringAnsi(errPtr); + } + + [DllImport("taos", EntryPoint = "taos_errno", CallingConvention = CallingConvention.Cdecl)] + static extern public int ErrorNo(IntPtr res); + + [DllImport("taos", EntryPoint = "taos_query", CallingConvention = CallingConvention.Cdecl)] + static extern public IntPtr Query(IntPtr conn, string sqlstr); + + [DllImport("taos", EntryPoint = "taos_affected_rows", CallingConvention = CallingConvention.Cdecl)] + static extern public int AffectRows(IntPtr res); + + [DllImport("taos", EntryPoint = "taos_field_count", CallingConvention = CallingConvention.Cdecl)] + static extern public int FieldCount(IntPtr res); + + [DllImport("taos", EntryPoint = "taos_fetch_fields", CallingConvention = CallingConvention.Cdecl)] + static extern private IntPtr taos_fetch_fields(IntPtr res); + static public List FetchFields(IntPtr res) + { + const int fieldSize = 68; + + List metas = new List(); + if (res == IntPtr.Zero) + { + return metas; + } + + int fieldCount = FieldCount(res); + IntPtr fieldsPtr = taos_fetch_fields(res); + + for (int i = 0; i < fieldCount; ++i) + { + int offset = i * fieldSize; + + TDengineMeta meta = new TDengineMeta(); + meta.name = Marshal.PtrToStringAnsi(fieldsPtr + offset); + meta.type = Marshal.ReadByte(fieldsPtr + offset + 65); + meta.size = Marshal.ReadInt16(fieldsPtr + offset + 66); + metas.Add(meta); + } + + return metas; + } + + [DllImport("taos", EntryPoint = "taos_fetch_row", CallingConvention = CallingConvention.Cdecl)] + static extern public IntPtr FetchRows(IntPtr res); + + [DllImport("taos", EntryPoint = "taos_free_result", CallingConvention = CallingConvention.Cdecl)] + static extern public IntPtr FreeResult(IntPtr res); + + [DllImport("taos", EntryPoint = "taos_close", CallingConvention = CallingConvention.Cdecl)] + static extern public int Close(IntPtr taos); + + //get precisionin parameter restultset + [DllImport("taos", EntryPoint = "taos_result_precision", CallingConvention = CallingConvention.Cdecl)] + static extern public int ResultPrecision(IntPtr taos); + } +} diff --git a/tests/examples/C#/TDengineDriver.cs b/tests/examples/C#/TDengineDriver.cs index 2c150341f6..2864b7bcdd 100644 --- a/tests/examples/C#/TDengineDriver.cs +++ b/tests/examples/C#/TDengineDriver.cs @@ -163,5 +163,9 @@ namespace TDengineDriver [DllImport("taos", EntryPoint = "taos_close", CallingConvention = CallingConvention.Cdecl)] static extern public int Close(IntPtr taos); + + //get precisionin parameter restultset + [DllImport("taos", EntryPoint = "taos_result_precision", CallingConvention = CallingConvention.Cdecl)] + static 
extern public int ResultPrecision(IntPtr taos); } } diff --git a/tests/examples/C#/taosdemo/TDengineDriver.cs b/tests/examples/C#/taosdemo/TDengineDriver.cs index 2c150341f6..e6c3a598ad 100644 --- a/tests/examples/C#/taosdemo/TDengineDriver.cs +++ b/tests/examples/C#/taosdemo/TDengineDriver.cs @@ -163,5 +163,8 @@ namespace TDengineDriver [DllImport("taos", EntryPoint = "taos_close", CallingConvention = CallingConvention.Cdecl)] static extern public int Close(IntPtr taos); + //get precisionin parameter restultset + [DllImport("taos", EntryPoint = "taos_result_precision", CallingConvention = CallingConvention.Cdecl)] + static extern public int ResultPrecision(IntPtr taos); } } From 3f7239b916d7a349024a009a1f73652a0a02263f Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Thu, 29 Jul 2021 18:18:06 +0800 Subject: [PATCH 43/46] change --- tests/pytest/update/merge_commit_data2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/pytest/update/merge_commit_data2.py b/tests/pytest/update/merge_commit_data2.py index 5fc58e3b4f..d4dbfec554 100644 --- a/tests/pytest/update/merge_commit_data2.py +++ b/tests/pytest/update/merge_commit_data2.py @@ -28,7 +28,7 @@ class TDTestCase: def restart_taosd(self,db): tdDnodes.stop(1) tdDnodes.startWithoutSleep(1) - tdsql.sleep(2) + tdSql.sleep(2) tdSql.execute("use %s;" % db) def date_to_timestamp_microseconds(self, date): From b9db84586cd29cfbfbed767bec46f624c6555c36 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Fri, 30 Jul 2021 02:17:10 +0800 Subject: [PATCH 44/46] [TD-5534]:fix high risk strncpy to tstrncpy --- src/client/src/tscSQLParser.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 8be38be504..0a018c863f 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -8415,7 +8415,7 @@ static int32_t doValidateSubquery(SSqlNode* pSqlNode, int32_t index, SSqlObj* pS return invalidOperationMsg(msgBuf, "subquery alias name too long"); } - tstrncpy(pTableMetaInfo1->aliasName, subInfo->aliasName.z, sizeof(pTableMetaInfo1->aliasName)); + tstrncpy(pTableMetaInfo1->aliasName, subInfo->aliasName.z, subInfo->aliasName.n + 1); } taosArrayPush(pQueryInfo->pUpstream, &pSub); From 72a05d1e809b6e28ea71737a0c080dfb9cb50129 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Fri, 30 Jul 2021 09:32:13 +0800 Subject: [PATCH 45/46] fix --- tests/pytest/update/merge_commit_data2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/pytest/update/merge_commit_data2.py b/tests/pytest/update/merge_commit_data2.py index d4dbfec554..a334f39e86 100644 --- a/tests/pytest/update/merge_commit_data2.py +++ b/tests/pytest/update/merge_commit_data2.py @@ -28,7 +28,7 @@ class TDTestCase: def restart_taosd(self,db): tdDnodes.stop(1) tdDnodes.startWithoutSleep(1) - tdSql.sleep(2) + tdLog.sleep(2) tdSql.execute("use %s;" % db) def date_to_timestamp_microseconds(self, date): From 5f37d8a2a5038dcf600432a12bd83ada29a3fc4a Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Fri, 30 Jul 2021 11:27:34 +0800 Subject: [PATCH 46/46] [TD-5534]:fix test case out of date error --- tests/script/general/parser/function.sim | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/script/general/parser/function.sim b/tests/script/general/parser/function.sim index a3470b1763..5edadad3a6 100644 --- a/tests/script/general/parser/function.sim +++ b/tests/script/general/parser/function.sim @@ -783,7 +783,7 @@ endi sql create stable st1 (ts timestamp, f1 int, f2 
int) tags (id int); sql create table tb1 using st1 tags(1); -sql insert into tb1 values (now, 1, 1); +sql insert into tb1 values ('2021-07-02 00:00:00', 1, 1); sql select stddev(f1) from st1 group by f1;
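The core of patch 39 is mergeTwoRowFromMem(), which walks the output columns and the two in-memory rows in parallel, all ordered by column id: row1 wins whenever both rows carry the same column, row2 fills the gaps, and forceSetNull decides whether columns provided by neither row are reset to NULL (ordinary update) or keep the value already copied from the file block (TD_ROW_PARTIAL_UPDATE). A minimal model of that walk, leaving out the per-value NULL checks and the SDataRow/SKVRow layouts, is sketched below; SColValLike, mergeRowsModel and the integer stand-ins are illustrative assumptions, not code from the patch.

#include <stdio.h>

typedef struct { int colId; int val; } SColValLike;

/* out[] describes the result columns (sorted by colId) and already holds the
 * values taken from the file block; row1/row2 are the two in-memory rows,
 * also sorted by colId, with row1 taking priority on a shared column id. */
static void mergeRowsModel(const SColValLike *row1, int n1,
                           const SColValLike *row2, int n2,
                           SColValLike *out, int nOut, int forceSetNull) {
  int j = 0, k = 0;
  for (int i = 0; i < nOut; ++i) {
    while (j < n1 && row1[j].colId < out[i].colId) j++;
    while (k < n2 && row2[k].colId < out[i].colId) k++;
    if (j < n1 && row1[j].colId == out[i].colId) {
      out[i].val = row1[j].val;              /* row1 always wins the tie */
    } else if (k < n2 && row2[k].colId == out[i].colId) {
      out[i].val = row2[k].val;              /* fall back to row2        */
    } else if (forceSetNull) {
      out[i].val = 0;                        /* stand-in for a NULL fill */
    }                                        /* else: keep the old value */
  }
}

int main(void) {
  SColValLike r1[]  = {{1, 10}, {3, 30}};
  SColValLike r2[]  = {{1, 11}, {2, 22}};
  SColValLike out[] = {{1, -1}, {2, -1}, {3, -1}, {4, -1}};
  mergeRowsModel(r1, 2, r2, 2, out, 4, 1);
  for (int i = 0; i < 4; ++i) printf("col %d = %d\n", out[i].colId, out[i].val);
  return 0;
}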
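Patch 39 also replaces tSkipListPutBatch(), which took a pre-sorted array plus a count, with tSkipListPutBatchByIter(), which pulls rows one at a time through an iter_next_fn_t callback and stops at the first NULL. A caller that still holds an array in ascending key order could adapt with a thin wrapper along these lines; SArrayIter, arrayIterNext and putBatchFromArray are assumed helper names, only the new signature and the NULL-terminated iteration contract come from the patch.

#include "tskiplist.h"

typedef struct SArrayIter {
  void **ppData;   /* rows, already sorted in ascending key order */
  int    pos;
  int    ndata;
} SArrayIter;

/* iter_next_fn_t: hand back the next element, or NULL once the batch is done */
static void *arrayIterNext(void *arg) {
  SArrayIter *pIter = (SArrayIter *)arg;
  if (pIter->pos >= pIter->ndata) return NULL;
  return pIter->ppData[pIter->pos++];
}

/* stands in for a former tSkipListPutBatch(pSkipList, ppData, ndata) call */
static void putBatchFromArray(SSkipList *pSkipList, void **ppData, int ndata) {
  SArrayIter iter = {ppData, 0, ndata};
  tSkipListPutBatchByIter(pSkipList, &iter, arrayIterNext);
}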
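Patch 44 changes the copy of the subquery alias so that it is bounded by the token length rather than by the size of the destination buffer. SStrToken points into the middle of the SQL text and is not NUL-terminated, so the old bound could drag trailing characters of the statement into aliasName. The standalone sketch below reproduces the effect under assumed definitions; StrTokenLike and tstrncpy_like are stand-ins for SStrToken and tstrncpy, the latter assumed to behave like strncpy plus a forced terminating NUL.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct { char *z; uint32_t n; } StrTokenLike;   /* like SStrToken */

/* assumed behaviour of tstrncpy: strncpy plus a forced terminating NUL */
#define tstrncpy_like(dst, src, size) \
  do { strncpy((dst), (src), (size)); (dst)[(size) - 1] = 0; } while (0)

int main(void) {
  char sql[] = "t1 where ts > now";   /* the alias "t1" is followed by more SQL */
  StrTokenLike alias = { sql, 2 };    /* z points into the statement, n = 2     */
  char aliasName[16];

  /* before the fix: bounded only by the destination size, so text after the
   * two-byte alias leaks into aliasName */
  tstrncpy_like(aliasName, alias.z, sizeof(aliasName));
  printf("old: '%s'\n", aliasName);   /* old: 't1 where ts > n' */

  /* after the fix: bounded by the token length + 1, copying exactly the alias */
  tstrncpy_like(aliasName, alias.z, alias.n + 1);
  printf("new: '%s'\n", aliasName);   /* new: 't1' */
  return 0;
}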