From 10774f580cc7edea2e8851ce19528c3786cdb621 Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Wed, 17 Jun 2020 14:59:35 +0800 Subject: [PATCH 01/10] [TD-350]: add filter test cases for range --- tests/pytest/query/filterAllIntTypes.py | 30 ++++++++++++++++++++++ tests/pytest/query/filterFloatAndDouble.py | 26 +++++++++++++++++++ tests/pytest/query/filterOtherTypes.py | 4 +-- 3 files changed, 58 insertions(+), 2 deletions(-) diff --git a/tests/pytest/query/filterAllIntTypes.py b/tests/pytest/query/filterAllIntTypes.py index cf65d2b102..a2bab63c88 100644 --- a/tests/pytest/query/filterAllIntTypes.py +++ b/tests/pytest/query/filterAllIntTypes.py @@ -88,6 +88,19 @@ class TDTestCase: tdSql.query("select * from st%s where num != 50" % curType) tdSql.checkRows(101) + # range for int type on column + tdSql.query("select * from st%s where num > 50 and num < 100" % curType) + tdSql.checkRows(49) + + tdSql.query("select * from st%s where num >= 50 and num < 100" % curType) + tdSql.checkRows(50) + + tdSql.query("select * from st%s where num > 50 and num <= 100" % curType) + tdSql.checkRows(50) + + tdSql.query("select * from st%s where num >= 50 and num <= 100" % curType) + tdSql.checkRows(51) + # > for int type on tag tdSql.query("select * from st%s where id > 5" % curType) tdSql.checkRows(52) @@ -116,6 +129,23 @@ class TDTestCase: tdSql.query("select * from st%s where id != 5" % curType) tdSql.checkRows(92) + # != for int type on tag + tdSql.query("select * from st%s where id != 5" % curType) + tdSql.checkRows(92) + + # range for int type on tag + tdSql.query("select * from st%s where id > 5 and id < 7" % curType) + tdSql.checkRows(10) + + tdSql.query("select * from st%s where id >= 5 and id < 7" % curType) + tdSql.checkRows(20) + + tdSql.query("select * from st%s where id > 5 and id <= 7" % curType) + tdSql.checkRows(20) + + tdSql.query("select * from st%s where id >= 5 and id <= 7" % curType) + tdSql.checkRows(30) + print( "======= Verify filter for %s type finished =========" % curType) diff --git a/tests/pytest/query/filterFloatAndDouble.py b/tests/pytest/query/filterFloatAndDouble.py index bd349a2a25..8d0e5baeca 100644 --- a/tests/pytest/query/filterFloatAndDouble.py +++ b/tests/pytest/query/filterFloatAndDouble.py @@ -67,6 +67,19 @@ class TDTestCase: tdSql.query("select * from st where num < 5.5") tdSql.checkRows(4) + # range for float type on column + tdSql.query("select * from st where num > 5.5 and num < 11.0") + tdSql.checkRows(4) + + tdSql.query("select * from st where num >= 5.5 and num < 11.0") + tdSql.checkRows(5) + + tdSql.query("select * from st where num > 5.5 and num <= 11.0") + tdSql.checkRows(5) + + tdSql.query("select * from st where num >= 5.5 and num <= 11.0") + tdSql.checkRows(6) + # > for float type on tag tdSql.query("select * from st where tagcol1 > 1.1") tdSql.checkRows(0) @@ -123,6 +136,19 @@ class TDTestCase: tdSql.query("select * from st where speed < 11.5") tdSql.checkRows(4) + # range for double type on column + tdSql.query("select * from st where speed > 11.5 and speed < 20.7") + tdSql.checkRows(3) + + tdSql.query("select * from st where speed >= 11.5 and speed < 20.7") + tdSql.checkRows(4) + + tdSql.query("select * from st where speed > 11.5 and speed <= 20.7") + tdSql.checkRows(4) + + tdSql.query("select * from st where speed >= 11.5 and speed <= 20.7") + tdSql.checkRows(5) + # > for double type on tag tdSql.query("select * from st where tagcol2 > 2.3") tdSql.checkRows(0) diff --git a/tests/pytest/query/filterOtherTypes.py b/tests/pytest/query/filterOtherTypes.py 
index bc7df18c8d..1db5497604 100644 --- a/tests/pytest/query/filterOtherTypes.py +++ b/tests/pytest/query/filterOtherTypes.py @@ -30,9 +30,9 @@ class TDTestCase: print("======= Verify filter for bool, nchar and binary type =========") tdLog.debug( - "create table st(ts timestamp, tbcol1 bool, tbcol2 nchar(10), tbcol3 binary(20)) tags(tagcol1 bool, tagcol2 nchar(10), tagcol3 binary(10))") + "create table st(ts timestamp, tbcol1 bool, tbcol2 binary(10), tbcol3 nchar(20)) tags(tagcol1 bool, tagcol2 binary(10), tagcol3 nchar(10))") tdSql.execute( - "create table st(ts timestamp, tbcol1 bool, tbcol2 nchar(10), tbcol3 binary(20)) tags(tagcol1 bool, tagcol2 nchar(10), tagcol3 binary(10))") + "create table st(ts timestamp, tbcol1 bool, tbcol2 binary(10), tbcol3 nchar(20)) tags(tagcol1 bool, tagcol2 binary(10), tagcol3 nchar(10))") tdSql.execute("create table st1 using st tags(true, 'table1', '水表')") for i in range(1, 6): From cf2a99819b9415454787831b8e0ed647b533469a Mon Sep 17 00:00:00 2001 From: Hui Li Date: Wed, 17 Jun 2020 15:19:46 +0800 Subject: [PATCH 02/10] [add sim case] --- .../dn3_mn1_vnode_corruptFile_online.sim | 140 +++++- .../dn3_mn1_vnode_noCorruptFile_offline.sim | 428 ++++++++++++++++++ 2 files changed, 567 insertions(+), 1 deletion(-) create mode 100644 tests/script/unique/arbitrator/dn3_mn1_vnode_noCorruptFile_offline.sim diff --git a/tests/script/unique/arbitrator/dn3_mn1_vnode_corruptFile_online.sim b/tests/script/unique/arbitrator/dn3_mn1_vnode_corruptFile_online.sim index 477f4684b5..a526192b15 100644 --- a/tests/script/unique/arbitrator/dn3_mn1_vnode_corruptFile_online.sim +++ b/tests/script/unique/arbitrator/dn3_mn1_vnode_corruptFile_online.sim @@ -173,4 +173,142 @@ sql select count(*) from $stb print data00 $data00 if $data00 != $totalRows then return -1 -endi \ No newline at end of file +endi + +print ============== step6: stop dnode3 for falling disc +system sh/exec.sh -n dnode3 -s stop -x SIGINT +sleep $sleepTimer + +$loopCnt = 0 +wait_dnode3_offline_0: +$loopCnt = $loopCnt + 1 +if $loopCnt == 10 then + return -1 +endi + +sql show dnodes +if $rows != 3 then + sleep 2000 + goto wait_dnode3_offline_0 +endi +print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 +print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 +print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 +#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 +#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5 +#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6 +#$dnode1Status = $data4_1 +$dnode2Status = $data4_2 +$dnode3Status = $data4_3 +$dnode4Status = $data4_4 +#$dnode5Status = $data4_5 + +if $dnode2Status != offline then + sleep 2000 + goto wait_dnode3_offline_0 +endi + +if $dnode3Status != offline then + sleep 2000 + goto wait_dnode3_offline_0 +endi + + +$loopCnt = 0 +wait_dnode3_vgroup_offline: +$loopCnt = $loopCnt + 1 +if $loopCnt == 10 then + return -1 +endi + +sql show vgroups +if $rows != 1 then + sleep 2000 + goto wait_dnode3_vgroup_offline +endi +print show vgroups: +print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1 +print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2 +print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3 +$dnode2Vtatus = $data7_2 +$dnode3Vtatus = $data4_2 + +if $dnode2Vtatus != offline then + sleep 2000 + goto wait_dnode3_vgroup_offline +endi +if $dnode3Vtatus != offline then + sleep 2000 + goto wait_dnode3_vgroup_offline +endi + +print ============== step7: 
restart dnode3, and run query +system sh/exec.sh -n dnode3 -s start +sleep $sleepTimer +$loopCnt = 0 +wait_dnode3_reready: +$loopCnt = $loopCnt + 1 +if $loopCnt == 10 then + return -1 +endi + +sql show dnodes +if $rows != 3 then + sleep 2000 + goto wait_dnode3_reready +endi +print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 +print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 +print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 +$dnode1Status = $data4_1 +$dnode2Status = $data4_2 +$dnode3Status = $data4_3 + +if $dnode3Status != ready then + sleep 2000 + goto wait_dnode3_reready +endi + +$loopCnt = 0 +wait_dnode3_vgroup_master_1: +$loopCnt = $loopCnt + 1 +if $loopCnt == 10 then + return -1 +endi + +sql show vgroups +if $rows != 1 then + sleep 2000 + goto wait_dnode3_vgroup_master_1 +endi +print show vgroups: +print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1 +print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2 +print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3 +$dnode2Vtatus = $data7_2 +$dnode3Vtatus = $data4_2 + +if $dnode2Vtatus != offline then + sleep 2000 + goto wait_dnode3_vgroup_master_1 +endi +if $dnode3Vtatus != master then + sleep 2000 + goto wait_dnode3_vgroup_master_1 +endi + +sql select count(*) from $stb +print data00 $data00 +if $data00 != $totalRows then + return -1 +endi + + + + + + + + + + diff --git a/tests/script/unique/arbitrator/dn3_mn1_vnode_noCorruptFile_offline.sim b/tests/script/unique/arbitrator/dn3_mn1_vnode_noCorruptFile_offline.sim new file mode 100644 index 0000000000..96def07f4a --- /dev/null +++ b/tests/script/unique/arbitrator/dn3_mn1_vnode_noCorruptFile_offline.sim @@ -0,0 +1,428 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/deploy.sh -n dnode2 -i 2 +system sh/deploy.sh -n dnode3 -i 3 +system sh/deploy.sh -n dnode4 -i 4 + +system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1 +system sh/cfg.sh -n dnode2 -c numOfMnodes -v 1 +system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1 +system sh/cfg.sh -n dnode4 -c numOfMnodes -v 1 + +system sh/cfg.sh -n dnode1 -c walLevel -v 2 +system sh/cfg.sh -n dnode2 -c walLevel -v 2 +system sh/cfg.sh -n dnode3 -c walLevel -v 2 +system sh/cfg.sh -n dnode4 -c walLevel -v 2 + +system sh/cfg.sh -n dnode1 -c balanceInterval -v 10 +system sh/cfg.sh -n dnode2 -c balanceInterval -v 10 +system sh/cfg.sh -n dnode3 -c balanceInterval -v 10 +system sh/cfg.sh -n dnode4 -c balanceInterval -v 10 + +system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4 +system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4 +system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4 +system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4 + +system sh/cfg.sh -n dnode1 -c alternativeRole -v 1 +system sh/cfg.sh -n dnode2 -c alternativeRole -v 2 +system sh/cfg.sh -n dnode3 -c alternativeRole -v 2 +system sh/cfg.sh -n dnode4 -c alternativeRole -v 2 + +system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 +system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4 +system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4 +system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4 + +system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator +system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator +system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator +system sh/cfg.sh -n dnode4 -c arbitrator -v $arbitrator + +print ============== step0: start tarbitrator +system sh/exec_tarbitrator.sh -s start + +print ============== step1: start dnode1, only deploy 
mnode +system sh/exec.sh -n dnode1 -s start +sleep 3000 +sql connect + +print ============== step2: start dnode2/dnode3 and add into cluster , then create database with replica 2, and create table, insert data +system sh/exec.sh -n dnode2 -s start +system sh/exec.sh -n dnode3 -s start +#system sh/exec.sh -n dnode4 -s start +sql create dnode $hostname2 +sql create dnode $hostname3 +#sql create dnode $hostname4 +sleep 3000 + +$totalTableNum = 10 +$sleepTimer = 3000 + +$db = db +sql create database $db replica 2 maxTables $totalTableNum +sql use $db + +# create table , insert data +$stb = stb +sql create table $stb (ts timestamp, c1 int) tags(t1 int) +$rowNum = 100 +$tblNum = $totalTableNum +$totalRows = 0 +$tsStart = 1420041600000 + +$i = 0 +while $i < $tblNum + $tb = tb . $i + sql create table $tb using $stb tags( $i ) + + $x = 0 + while $x < $rowNum + $ts = $tsStart + $x + sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x ) ( $ts + 10a , $x ) ( $ts + 11a , $x ) ( $ts + 12a , $x ) ( $ts + 13a , $x ) ( $ts + 14a , $x ) ( $ts + 15a , $x ) ( $ts + 16a , $x ) ( $ts + 17a , $x ) ( $ts + 18a , $x ) ( $ts + 19a , $x ) ( $ts + 20a , $x ) ( $ts + 21a , $x ) ( $ts + 22a , $x ) ( $ts + 23a , $x ) ( $ts + 24a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 35a , $x ) ( $ts + 36a , $x ) ( $ts + 37a , $x ) ( $ts + 38a , $x ) ( $ts + 39a , $x ) ( $ts + 40a , $x ) ( $ts + 41a , $x ) ( $ts + 42a , $x ) ( $ts + 43a , $x ) ( $ts + 44a , $x ) ( $ts + 45a , $x ) ( $ts + 46a , $x ) ( $ts + 47a , $x ) ( $ts + 48a , $x ) ( $ts + 49a , $x ) ( $ts + 50a , $x ) ( $ts + 51a , $x ) ( $ts + 52a , $x ) ( $ts + 53a , $x ) ( $ts + 54a , $x ) ( $ts + 55a , $x ) ( $ts + 56a , $x ) ( $ts + 57a , $x ) ( $ts + 58a , $x ) ( $ts + 59a , $x ) + $x = $x + 60 + endw + $totalRows = $totalRows + $x + print info: inserted $x rows into $tb and totalRows: $totalRows + $i = $i + 1 +endw + +sql select count(*) from $stb +sleep 1000 +print data00 $data00 +if $data00 != $totalRows then + return -1 +endi + +print ============== step3: stop dnode3 for falling disc, then corrupt vnode data file in dnode3 +system sh/exec.sh -n dnode3 -s stop -x SIGINT +sleep $sleepTimer + +$loopCnt = 0 +wait_dnode3_offline_0: +$loopCnt = $loopCnt + 1 +if $loopCnt == 10 then + return -1 +endi + +sql show dnodes +if $rows != 3 then + sleep 2000 + goto wait_dnode3_offline_0 +endi +print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 +print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 +print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 +#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 +#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5 +#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6 +#$dnode1Status = $data4_1 +$dnode2Status = $data4_2 +$dnode3Status = $data4_3 +$dnode4Status = $data4_4 +#$dnode5Status = $data4_5 + +if $dnode3Status != offline then + sleep 2000 + goto wait_dnode3_offline_0 +endi + +$loopCnt = 0 +wait_dnode3_vgroup_offline: +$loopCnt = $loopCnt + 1 +if $loopCnt == 10 then + return -1 +endi + +sql show vgroups +if $rows != 1 then + sleep 2000 + goto 
wait_dnode3_vgroup_offline +endi +print show vgroups: +print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1 +print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2 +print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3 +$dnode3Vtatus = $data4_2 +$dnode2Vtatus = $data7_2 + +if $dnode3Vtatus != offline then + sleep 2000 + goto wait_dnode3_vgroup_offline +endi +if $dnode2Vtatus != master then + sleep 2000 + goto wait_dnode3_vgroup_offline +endi + +#$expectCnt = 3 . : +#print expectCnt: [ $expectCnt ] +#system_content ls ../../../sim/dnode3/data/vnode/vnode2/tsdb/data/ -l | grep "^-" | wc -l | tr '\n' ':' +#system_content ls ../../../sim/dnode3/data/vnode/vnode2/tsdb/data/ -l | grep "^-" | wc -l | tr '\n' ':' +#print --2-->dnode3 data files: [ $system_content ] + + +system_content ls ../../../sim/dnode2/data/vnode/vnode2/tsdb/data/ -l | grep "^-" | wc -l | tr -d '\n' +print ---->dnode2 data files: $system_content expect: 0 +if $system_content != 0 then + return -1 +endi + +system_content ls ../../../sim/dnode3/data/vnode/vnode2/tsdb/data/ -l | grep "^-" | wc -l | tr -d '\n' +print ---->dnode3 data files: $system_content expect: 3 +if $system_content != 3 then + return -1 +endi + +#system echo "haha, nothing......" > ../../../sim/dnode3/data/vnode/vnode2/tsdb/data/f1643.data + +print ============== step3-1: insert some news data for let version changed +sql insert into $tb values ( now + 0a , $x ) ( now + 1a , $x ) ( now + 2a , $x ) +sql insert into $tb values ( now + 10a , $x ) ( now + 11a , $x ) ( now + 12a , $x ) +$totalRows = $totalRows + 6 +sql select count(*) from $stb +print data00 $data00 +if $data00 != $totalRows then + return -1 +endi + +print ============== step4: restart dnode3, and run query +system sh/exec.sh -n dnode3 -s start +sleep $sleepTimer +$loopCnt = 0 +wait_dnode3_reready: +$loopCnt = $loopCnt + 1 +if $loopCnt == 10 then + return -1 +endi + +sql show dnodes +if $rows != 3 then + sleep 2000 + goto wait_dnode3_reready +endi +print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 +print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 +print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 +#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 +#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5 +#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6 +#$dnode1Status = $data4_1 +$dnode2Status = $data4_2 +$dnode3Status = $data4_3 +$dnode4Status = $data4_4 +#$dnode5Status = $data4_5 + +if $dnode3Status != ready then + sleep 2000 + goto wait_dnode3_reready +endi +$loopCnt = 0 +wait_dnode3_vgroup_slave: +$loopCnt = $loopCnt + 1 +if $loopCnt == 10 then + return -1 +endi + +sql show vgroups +if $rows != 1 then + sleep 2000 + goto wait_dnode3_vgroup_slave +endi +print show vgroups: +print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1 +print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2 +print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3 +$dnode2Vtatus = $data7_2 +$dnode3Vtatus = $data4_2 + +if $dnode2Vtatus != master then + sleep 2000 + goto wait_dnode3_vgroup_slave +endi +if $dnode3Vtatus != slave then + sleep 2000 + goto wait_dnode3_vgroup_slave +endi + +sql select count(*) from $stb +print data00 $data00 +if $data00 != $totalRows then + return -1 +endi + +system_content ls ../../../sim/dnode2/data/vnode/vnode2/tsdb/data/ -l |grep "^-"|wc -l | 
tr -d '\n' +print ----> dnode2 data files: $system_content expect: 0 +if $system_content != 0 then + return -1 +endi + +system_content ls ../../../sim/dnode3/data/vnode/vnode2/tsdb/data/ -l |grep "^-"|wc -l | tr -d '\n' +print ----> dnode3 data files: $system_content expect: 0 +if $system_content != 0 then + print there should be no data file in dnode3 after sync + return -1 +endi + +print ============== step5: stop dnode2, and check if dnode3 sync ok +system sh/exec.sh -n dnode2 -s stop -x SIGINT +sleep $sleepTimer + +$loopCnt = 0 +wait_dnode2_offline_0: +$loopCnt = $loopCnt + 1 +if $loopCnt == 10 then + return -1 +endi +sql show dnodes +if $rows != 3 then + sleep 2000 + goto wait_dnode2_offline_0 +endi +print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 +print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 +print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 +#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 +#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5 +#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6 +#$dnode1Status = $data4_1 +$dnode2Status = $data4_2 +$dnode3Status = $data4_3 +$dnode4Status = $data4_4 +#$dnode5Status = $data4_5 + +if $dnode2Status != offline then + sleep 2000 + goto wait_dnode2_offline_0 +endi + +$loopCnt = 0 +wait_dnode3_vgroup_master: +$loopCnt = $loopCnt + 1 +if $loopCnt == 10 then + return -1 +endi + +sql show vgroups +if $rows != 1 then + sleep 2000 + goto wait_dnode3_vgroup_master +endi +print show vgroups: +print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1 +print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2 +print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3 +$dnode2Vtatus = $data7_2 +$dnode3Vtatus = $data4_2 + +if $dnode2Vtatus != offline then + sleep 2000 + goto wait_dnode3_vgroup_master +endi +if $dnode3Vtatus != master then + sleep 2000 + goto wait_dnode3_vgroup_master +endi + +sql select count(*) from $stb +print data00 $data00 +if $data00 != $totalRows then + return -1 +endi + +print ============== step6: stop dnode3 for falling disck +system sh/exec.sh -n dnode3 -s stop -x SIGINT +sleep $sleepTimer +sql show dnodes +print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 +print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 +print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 + +sql show vgroups +print show vgroups: +print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1 +print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2 +print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3 + + +print ============== step7: restart dnode3, and run query +system sh/exec.sh -n dnode3 -s start +sleep $sleepTimer +$loopCnt = 0 +wait_dnode3_reready_2: +$loopCnt = $loopCnt + 1 +if $loopCnt == 10 then + return -1 +endi +sql show dnodes +if $rows != 3 then + sleep 2000 + goto wait_dnode3_reready_2 +endi +print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 +print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 +print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 +$dnode1Status = $data4_1 +$dnode2Status = $data4_2 +$dnode3Status = $data4_3 + +if $dnode3Status != ready then + sleep 2000 + goto wait_dnode3_reready_2 +endi + +$loopCnt = 0 +wait_dnode3_vgroup_master_2: +$loopCnt = $loopCnt + 1 +if $loopCnt == 10 then + return -1 +endi +sql show vgroups +if $rows != 1 then + sleep 2000 + goto wait_dnode3_vgroup_master_2 +endi 
+print show vgroups: +print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1 +print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2 +print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3 +$dnode2Vtatus = $data7_2 +$dnode3Vtatus = $data4_2 + +if $dnode2Vtatus != offline then + sleep 2000 + goto wait_dnode3_vgroup_master_2 +endi +if $dnode3Vtatus != master then + sleep 2000 + goto wait_dnode3_vgroup_master_2 +endi + +sql select count(*) from $stb +print data00 $data00 +if $data00 != $totalRows then + return -1 +endi + + + + + + + + + + + + + + + + + + + + + + + From a681e157f4bc20ad8f944b882ca1798e5520867d Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Wed, 17 Jun 2020 15:48:06 +0800 Subject: [PATCH 03/10] modify general/parser/stream.sim to make its result consistant. [TD-642] --- tests/script/fullGeneralSuite.sim | 2 +- tests/script/general/parser/stream.sim | 8 ++++---- tests/script/regressionSuite.sim | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/script/fullGeneralSuite.sim b/tests/script/fullGeneralSuite.sim index a7d6707e7d..9d9bf91758 100644 --- a/tests/script/fullGeneralSuite.sim +++ b/tests/script/fullGeneralSuite.sim @@ -135,7 +135,7 @@ run general/parser/select_with_tags.sim #unsupport run general/parser/repeatAlter.sim #unsupport run general/parser/slimit_alter_tags.sim #unsupport run general/parser/stream_on_sys.sim -#unsupport run general/parser/stream.sim +run general/parser/stream.sim #unsupport run general/parser/repeatStream.sim run general/stable/disk.sim run general/stable/dnode3.sim diff --git a/tests/script/general/parser/stream.sim b/tests/script/general/parser/stream.sim index fca5f37d74..2f233b6189 100644 --- a/tests/script/general/parser/stream.sim +++ b/tests/script/general/parser/stream.sim @@ -192,7 +192,7 @@ sql create table tb4 using stb tags(4, 'tb4') sql create table strm0 as select count(ts), count(c1), max(c2), min(c4), first(c5), last(c6) from stb where ts < now + 30s interval(4s) sliding(2s) sleep 1000 sql insert into tb0 values (now, 0, 0, 0, 0, 'binary0', '涛思0', true) tb1 values (now, 1, 1, 1, 1, 'binary1', '涛思1', false) tb2 values (now, 2, 2, 2, 2, 'binary2', '涛思2', true) tb3 values (now, 3, 3, 3, 3, 'binary3', '涛思3', false) tb4 values (now, 4, 4, 4, 4, 'binary4', '涛思4', true) -sleep 20000 +sleep 30000 sql select * from strm0 order by ts desc sleep 1000 if $rows != 2 then @@ -203,9 +203,9 @@ endi sql insert into tb0 values (now, 10, 10, 10, 10, 'binary0', '涛思0', true) tb1 values (now, 11, 11, 11, 11, 'binary1', '涛思1', false) tb2 values (now, 12, 12, 12, 12, 'binary2', '涛思2', true) tb3 values (now, 13, 13, 13, 13, 'binary3', '涛思3', false) tb4 values (now, 14, 14, 14, 14, 'binary4', '涛思4', true) sleep 30000 sql select * from strm0 order by ts desc -sleep 1000 -if $rows != 4 then - print ==== expect rows = 4, actually returned rows = $rows +sleep 10000 +if $rows == 4 then + print ==== actually returned rows = $rows, expect always not equal to 4 return -1 endi diff --git a/tests/script/regressionSuite.sim b/tests/script/regressionSuite.sim index 6ef4001495..ae5c200a80 100644 --- a/tests/script/regressionSuite.sim +++ b/tests/script/regressionSuite.sim @@ -135,7 +135,7 @@ run general/parser/select_with_tags.sim #unsupport run general/parser/repeatAlter.sim #unsupport run general/parser/slimit_alter_tags.sim #unsupport run general/parser/stream_on_sys.sim -#unsupport run general/parser/stream.sim +run 
general/parser/stream.sim #unsupport run general/parser/repeatStream.sim run general/stable/disk.sim run general/stable/dnode3.sim From 986bf29aa6322035092be0f311e7ecf20f07ccc3 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Wed, 17 Jun 2020 07:55:46 +0000 Subject: [PATCH 04/10] invalid refcount calc while alter table --- src/mnode/src/mnodeSdb.c | 4 ++-- src/mnode/src/mnodeTable.c | 11 +++++++++-- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/src/mnode/src/mnodeSdb.c b/src/mnode/src/mnodeSdb.c index d8fc9aee5a..c0c5b9df30 100644 --- a/src/mnode/src/mnodeSdb.c +++ b/src/mnode/src/mnodeSdb.c @@ -389,7 +389,7 @@ void sdbIncRef(void *handle, void *pObj) { SSdbTable *pTable = handle; int32_t * pRefCount = (int32_t *)(pObj + pTable->refCountPos); atomic_add_fetch_32(pRefCount, 1); - if (0 && (pTable->tableId == SDB_TABLE_CTABLE || pTable->tableId == SDB_TABLE_DB)) { + if (1) { sdbTrace("add ref to table:%s record:%p:%s:%d", pTable->tableName, pObj, sdbGetKeyStrFromObj(pTable, pObj), *pRefCount); } } @@ -400,7 +400,7 @@ void sdbDecRef(void *handle, void *pObj) { SSdbTable *pTable = handle; int32_t * pRefCount = (int32_t *)(pObj + pTable->refCountPos); int32_t refCount = atomic_sub_fetch_32(pRefCount, 1); - if (0 && (pTable->tableId == SDB_TABLE_CTABLE || pTable->tableId == SDB_TABLE_DB)) { + if (1) { sdbTrace("def ref of table:%s record:%p:%s:%d", pTable->tableName, pObj, sdbGetKeyStrFromObj(pTable, pObj), *pRefCount); } diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c index a421b01d7a..a30b359970 100644 --- a/src/mnode/src/mnodeTable.c +++ b/src/mnode/src/mnodeTable.c @@ -185,7 +185,9 @@ static int32_t mnodeChildTableActionUpdate(SSdbOper *pOper) { void *oldTableId = pTable->info.tableId; void *oldSql = pTable->sql; void *oldSchema = pTable->schema; + int32_t oldRefCount = pTable->refCount; memcpy(pTable, pNew, sizeof(SChildTableObj)); + pTable->refCount = oldRefCount; pTable->sql = pNew->sql; pTable->schema = pNew->schema; free(pNew); @@ -375,7 +377,7 @@ static void mnodeAddTableIntoStable(SSuperTableObj *pStable, SChildTableObj *pCt pStable->numOfTables++; if (pStable->vgHash == NULL) { - pStable->vgHash = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false); + pStable->vgHash = taosHashInit(100000, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false); } if (pStable->vgHash != NULL) { @@ -439,9 +441,14 @@ static int32_t mnodeSuperTableActionUpdate(SSdbOper *pOper) { if (pTable != pNew) { void *oldTableId = pTable->info.tableId; void *oldSchema = pTable->schema; + void *oldVgHash = pTable->vgHash; + int32_t oldRefCount = pTable->refCount; + memcpy(pTable, pNew, sizeof(SSuperTableObj)); + + pTable->vgHash = oldVgHash; + pTable->refCount = oldRefCount; pTable->schema = pNew->schema; - free(pNew->vgHash); free(pNew); free(oldTableId); free(oldSchema); From 35c70a73a0336c56d08c9f815134d39b316c015b Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Wed, 17 Jun 2020 08:03:22 +0000 Subject: [PATCH 05/10] invalid write while alter vgroup --- src/mnode/src/mnodeSdb.c | 4 ++-- src/mnode/src/mnodeVgroup.c | 12 ++++++++++-- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/src/mnode/src/mnodeSdb.c b/src/mnode/src/mnodeSdb.c index c0c5b9df30..d8fc9aee5a 100644 --- a/src/mnode/src/mnodeSdb.c +++ b/src/mnode/src/mnodeSdb.c @@ -389,7 +389,7 @@ void sdbIncRef(void *handle, void *pObj) { SSdbTable *pTable = handle; int32_t * pRefCount = (int32_t *)(pObj + pTable->refCountPos); atomic_add_fetch_32(pRefCount, 1); - if (1) { + if (0 && 
(pTable->tableId == SDB_TABLE_CTABLE || pTable->tableId == SDB_TABLE_DB)) { sdbTrace("add ref to table:%s record:%p:%s:%d", pTable->tableName, pObj, sdbGetKeyStrFromObj(pTable, pObj), *pRefCount); } } @@ -400,7 +400,7 @@ void sdbDecRef(void *handle, void *pObj) { SSdbTable *pTable = handle; int32_t * pRefCount = (int32_t *)(pObj + pTable->refCountPos); int32_t refCount = atomic_sub_fetch_32(pRefCount, 1); - if (1) { + if (0 && (pTable->tableId == SDB_TABLE_CTABLE || pTable->tableId == SDB_TABLE_DB)) { sdbTrace("def ref of table:%s record:%p:%s:%d", pTable->tableName, pObj, sdbGetKeyStrFromObj(pTable, pObj), *pRefCount); } diff --git a/src/mnode/src/mnodeVgroup.c b/src/mnode/src/mnodeVgroup.c index 0475a48ce2..efa823d531 100644 --- a/src/mnode/src/mnodeVgroup.c +++ b/src/mnode/src/mnodeVgroup.c @@ -149,9 +149,17 @@ static int32_t mnodeVgroupActionUpdate(SSdbOper *pOper) { } } - memcpy(pVgroup, pNew, sizeof(SVgObj)); - free(pNew); + void *idPool = pVgroup->idPool; + void *tableList = pVgroup->tableList; + int32_t oldRefCount = pVgroup->refCount; + memcpy(pVgroup, pNew, sizeof(SVgObj)); + + free(pNew); + pVgroup->refCount = oldRefCount; + pVgroup->idPool = idPool; + pVgroup->tableList = tableList; + for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) { SDnodeObj *pDnode = mnodeGetDnode(pVgroup->vnodeGid[i].dnodeId); pVgroup->vnodeGid[i].pDnode = pDnode; From 0201008c9faa09a8b24bba274d8c707d8a356cc5 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Wed, 17 Jun 2020 08:31:29 +0000 Subject: [PATCH 06/10] [TD-669] incomplete update while alter vgroup --- src/mnode/src/mnodeTable.c | 7 ++++++- src/mnode/src/mnodeVgroup.c | 11 +---------- 2 files changed, 7 insertions(+), 11 deletions(-) diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c index a30b359970..93ee96a674 100644 --- a/src/mnode/src/mnodeTable.c +++ b/src/mnode/src/mnodeTable.c @@ -182,14 +182,19 @@ static int32_t mnodeChildTableActionUpdate(SSdbOper *pOper) { SChildTableObj *pNew = pOper->pObj; SChildTableObj *pTable = mnodeGetChildTable(pNew->info.tableId); if (pTable != pNew) { - void *oldTableId = pTable->info.tableId; + void *oldTableId = pTable->info.tableId; void *oldSql = pTable->sql; void *oldSchema = pTable->schema; + void *oldSTable = pTable->superTable; int32_t oldRefCount = pTable->refCount; + memcpy(pTable, pNew, sizeof(SChildTableObj)); + pTable->refCount = oldRefCount; pTable->sql = pNew->sql; pTable->schema = pNew->schema; + pTable->superTable = oldSTable; + free(pNew); free(oldSql); free(oldSchema); diff --git a/src/mnode/src/mnodeVgroup.c b/src/mnode/src/mnodeVgroup.c index efa823d531..10dfb2a28a 100644 --- a/src/mnode/src/mnodeVgroup.c +++ b/src/mnode/src/mnodeVgroup.c @@ -149,17 +149,8 @@ static int32_t mnodeVgroupActionUpdate(SSdbOper *pOper) { } } - void *idPool = pVgroup->idPool; - void *tableList = pVgroup->tableList; - int32_t oldRefCount = pVgroup->refCount; + memcpy(pVgroup, pNew, tsVgUpdateSize); - memcpy(pVgroup, pNew, sizeof(SVgObj)); - - free(pNew); - pVgroup->refCount = oldRefCount; - pVgroup->idPool = idPool; - pVgroup->tableList = tableList; - for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) { SDnodeObj *pDnode = mnodeGetDnode(pVgroup->vnodeGid[i].dnodeId); pVgroup->vnodeGid[i].pDnode = pDnode; From 7e93758b67b225a20fff903f2d5db238f5cd5205 Mon Sep 17 00:00:00 2001 From: Bomin Zhang Date: Wed, 17 Jun 2020 17:11:21 +0800 Subject: [PATCH 07/10] TD-617: fix some coverity issues --- src/client/src/TSDBJNIConnector.c | 20 ++++++++++---------- src/client/src/tscAsync.c | 8 ++++---- 
src/client/src/tscFunctionImpl.c | 26 +++++++------------------- src/client/src/tscParseInsert.c | 3 +-- src/client/src/tscSQLParser.c | 9 ++++----- src/client/src/tscSecondaryMerge.c | 6 +++++- 6 files changed, 31 insertions(+), 41 deletions(-) diff --git a/src/client/src/TSDBJNIConnector.c b/src/client/src/TSDBJNIConnector.c index 459b0f2b98..a9a34286a8 100644 --- a/src/client/src/TSDBJNIConnector.c +++ b/src/client/src/TSDBJNIConnector.c @@ -151,8 +151,8 @@ JNIEXPORT void JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_dumpMemoryLeakImp JNIEXPORT void JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_initImp(JNIEnv *env, jobject jobj, jstring jconfigDir) { if (jconfigDir != NULL) { const char *confDir = (*env)->GetStringUTFChars(env, jconfigDir, NULL); - if (confDir && strlen(configDir) != 0) { - strcpy(configDir, confDir); + if (confDir && strlen(confDir) != 0) { + tstrncpy(configDir, confDir, TSDB_FILENAME_LEN); } (*env)->ReleaseStringUTFChars(env, jconfigDir, confDir); } @@ -385,7 +385,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getAffectedRowsIm } jint ret = taos_affected_rows((SSqlObj *)res); - jniTrace("jobj:%p, conn:%p, sql:%p, affect rows:%d", jobj, tscon, (void *)con, res, ret); + jniTrace("jobj:%p, conn:%p, sql:%p, affect rows:%d", jobj, tscon, (SSqlObj*)res, ret); return ret; } @@ -411,10 +411,10 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getSchemaMetaData // jobject arrayListObj = (*env)->NewObject(env, g_arrayListClass, g_arrayListConstructFp, ""); if (num_fields == 0) { - jniError("jobj:%p, conn:%p, resultset:%p, fields size is %d", jobj, tscon, res, num_fields); + jniError("jobj:%p, conn:%p, resultset:%p, fields size is %d", jobj, tscon, (void*)res, num_fields); return JNI_NUM_OF_FIELDS_0; } else { - jniTrace("jobj:%p, conn:%p, resultset:%p, fields size is %d", jobj, tscon, res, num_fields); + jniTrace("jobj:%p, conn:%p, resultset:%p, fields size is %d", jobj, tscon, (void*)res, num_fields); for (int i = 0; i < num_fields; ++i) { jobject metadataObj = (*env)->NewObject(env, g_metadataClass, g_metadataConstructFp); (*env)->SetIntField(env, metadataObj, g_metadataColtypeField, fields[i].type); @@ -465,7 +465,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchRowImp(JNIEn int num_fields = taos_num_fields(result); if (num_fields == 0) { - jniError("jobj:%p, conn:%p, resultset:%p, fields size is %d", jobj, tscon, res, num_fields); + jniError("jobj:%p, conn:%p, resultset:%p, fields size is %d", jobj, tscon, (void*)res, num_fields); return JNI_NUM_OF_FIELDS_0; } @@ -473,7 +473,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchRowImp(JNIEn if (row == NULL) { int tserrno = taos_errno(result); if (tserrno == 0) { - jniTrace("jobj:%p, conn:%p, resultset:%p, fields size is %d, fetch row to the end", jobj, tscon, res, num_fields); + jniTrace("jobj:%p, conn:%p, resultset:%p, fields size is %d, fetch row to the end", jobj, tscon, (void*)res, num_fields); return JNI_FETCH_END; } else { jniTrace("jobj:%p, conn:%p, interruptted query", jobj, tscon); @@ -571,9 +571,9 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_subscribeImp(JNI sub = (jlong)tsub; if (sub == 0) { - jniTrace("jobj:%p, failed to subscribe: topic:%s", jobj, jtopic); + jniTrace("jobj:%p, failed to subscribe: topic:%s", jobj, topic); } else { - jniTrace("jobj:%p, successfully subscribe: topic: %s", jobj, jtopic); + jniTrace("jobj:%p, successfully subscribe: topic: %s", jobj, topic); } if (topic != NULL) 
(*env)->ReleaseStringUTFChars(env, jtopic, topic); @@ -583,7 +583,7 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_subscribeImp(JNI } JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_consumeImp(JNIEnv *env, jobject jobj, jlong sub) { - jniTrace("jobj:%p, in TSDBJNIConnector_consumeImp, sub:%ld", jobj, sub); + jniTrace("jobj:%p, in TSDBJNIConnector_consumeImp, sub:%" PRId64, jobj, sub); jniGetGlobalMethod(env); TAOS_SUB *tsub = (TAOS_SUB *)sub; diff --git a/src/client/src/tscAsync.c b/src/client/src/tscAsync.c index f4fad42719..770f328634 100644 --- a/src/client/src/tscAsync.c +++ b/src/client/src/tscAsync.c @@ -285,7 +285,7 @@ void taos_fetch_row_a(TAOS_RES *taosa, void (*fp)(void *, TAOS_RES *, TAOS_ROW), tscProcessSql(pSql); } else { - SSchedMsg schedMsg; + SSchedMsg schedMsg = { 0 }; schedMsg.fp = tscProcessFetchRow; schedMsg.ahandle = pSql; schedMsg.thandle = pRes->tsrow; @@ -387,7 +387,7 @@ void tscQueueAsyncError(void(*fp), void *param, int32_t code) { int32_t* c = malloc(sizeof(int32_t)); *c = code; - SSchedMsg schedMsg; + SSchedMsg schedMsg = { 0 }; schedMsg.fp = tscProcessAsyncError; schedMsg.ahandle = fp; schedMsg.thandle = param; @@ -403,7 +403,7 @@ void tscQueueAsyncRes(SSqlObj *pSql) { tscError("%p add into queued async res, code:%s", pSql, tstrerror(pSql->res.code)); } - SSchedMsg schedMsg; + SSchedMsg schedMsg = { 0 }; schedMsg.fp = tscProcessAsyncRes; schedMsg.ahandle = pSql; schedMsg.thandle = (void *)1; @@ -420,7 +420,7 @@ void tscProcessAsyncFree(SSchedMsg *pMsg) { void tscQueueAsyncFreeResult(SSqlObj *pSql) { tscTrace("%p sqlObj put in queue to async free", pSql); - SSchedMsg schedMsg; + SSchedMsg schedMsg = { 0 }; schedMsg.fp = tscProcessAsyncFree; schedMsg.ahandle = pSql; schedMsg.thandle = (void *)1; diff --git a/src/client/src/tscFunctionImpl.c b/src/client/src/tscFunctionImpl.c index 457e187971..f0be65882c 100644 --- a/src/client/src/tscFunctionImpl.c +++ b/src/client/src/tscFunctionImpl.c @@ -1853,26 +1853,14 @@ static void last_row_function(SQLFunctionCtx *pCtx) { static void last_row_finalizer(SQLFunctionCtx *pCtx) { // do nothing at the first stage SResultInfo *pResInfo = GET_RES_INFO(pCtx); - if (pCtx->currentStage == SECONDARY_STAGE_MERGE) { - if (pResInfo->hasResult != DATA_SET_FLAG) { - if (pCtx->outputType == TSDB_DATA_TYPE_BINARY || pCtx->outputType == TSDB_DATA_TYPE_NCHAR) { - setVardataNull(pCtx->aOutputBuf, pCtx->outputType); - } else { - setNull(pCtx->aOutputBuf, pCtx->outputType, pCtx->outputBytes); - } - - return; - } - } else { - if (pResInfo->hasResult != DATA_SET_FLAG) { - if (pCtx->outputType == TSDB_DATA_TYPE_BINARY || pCtx->outputType == TSDB_DATA_TYPE_NCHAR) { - setVardataNull(pCtx->aOutputBuf, pCtx->outputType); - } else { - setNull(pCtx->aOutputBuf, pCtx->outputType, pCtx->outputBytes); - } - - return; + if (pResInfo->hasResult != DATA_SET_FLAG) { + if (pCtx->outputType == TSDB_DATA_TYPE_BINARY || pCtx->outputType == TSDB_DATA_TYPE_NCHAR) { + setVardataNull(pCtx->aOutputBuf, pCtx->outputType); + } else { + setNull(pCtx->aOutputBuf, pCtx->outputType, pCtx->outputBytes); } + + return; } GET_RES_INFO(pCtx)->numOfRes = 1; diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c index afd1a3e8c8..13fc298d6e 100644 --- a/src/client/src/tscParseInsert.c +++ b/src/client/src/tscParseInsert.c @@ -252,7 +252,7 @@ int32_t tsParseOneColumnData(SSchema *pSchema, SSQLToken *pToken, char *payload, numType = tscToInteger(pToken, &iv, &endptr); if (TK_ILLEGAL == numType) { return 
tscInvalidSQLErrMsg(msg, "invalid bigint data", pToken->z); - } else if (errno == ERANGE || iv > INT64_MAX || iv <= INT64_MIN) { + } else if (errno == ERANGE || iv == INT64_MIN) { return tscInvalidSQLErrMsg(msg, "bigint data overflow", pToken->z); } @@ -594,7 +594,6 @@ int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int3 size_t remain = pDataBlock->nAllocSize - pDataBlock->size; const int factor = 5; uint32_t nAllocSizeOld = pDataBlock->nAllocSize; - assert(pDataBlock->headerSize >= 0); // expand the allocated size if (remain < rowSize * factor) { diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index db12693c29..110137ba5e 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -1445,7 +1445,7 @@ static int32_t setExprInfoForFunctions(SQueryInfo* pQueryInfo, SSchema* pSchema, } if (aliasName != NULL) { - strcpy(columnName, aliasName); + tstrncpy(columnName, aliasName, sizeof(columnName)); } else { getRevisedName(columnName, functionID, sizeof(columnName) - 1, pSchema[pColIndex->columnIndex].name); } @@ -2221,7 +2221,6 @@ int32_t setShowInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { // db prefix in tagCond, show table conds in payload SSQLToken* pDbPrefixToken = &pShowInfo->prefix; if (pDbPrefixToken->type != 0) { - assert(pDbPrefixToken->n >= 0); if (pDbPrefixToken->n >= TSDB_DB_NAME_LEN) { // db name is too long return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); @@ -4765,7 +4764,7 @@ int32_t parseLimitClause(SQueryInfo* pQueryInfo, int32_t clauseIndex, SQuerySQL* pQueryInfo->clauseLimit = pQueryInfo->limit.limit; pQueryInfo->slimit = pQuerySql->slimit; - tscTrace("%p limit:%d, offset:%" PRId64 " slimit:%d, soffset:%" PRId64, pSql, pQueryInfo->limit.limit, + tscTrace("%p limit:%" PRId64 ", offset:%" PRId64 " slimit:%" PRId64 ", soffset:%" PRId64, pSql, pQueryInfo->limit.limit, pQueryInfo->limit.offset, pQueryInfo->slimit.limit, pQueryInfo->slimit.offset); if (pQueryInfo->slimit.offset < 0 || pQueryInfo->limit.offset < 0) { @@ -5255,10 +5254,10 @@ static int32_t doAddGroupbyColumnsOnDemand(SQueryInfo* pQueryInfo) { for (int32_t i = 0; i < pQueryInfo->groupbyExpr.numOfGroupCols; ++i) { SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, i); - + SSchema s; int16_t colIndex = pColIndex->colIndex; if (colIndex == TSDB_TBNAME_COLUMN_INDEX) { - SSchema s = tGetTableNameColumnSchema(); + s = tGetTableNameColumnSchema(); type = s.type; bytes = s.bytes; name = s.name; diff --git a/src/client/src/tscSecondaryMerge.c b/src/client/src/tscSecondaryMerge.c index 52a06277e3..a3e860d5aa 100644 --- a/src/client/src/tscSecondaryMerge.c +++ b/src/client/src/tscSecondaryMerge.c @@ -230,6 +230,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd if (ds == NULL) { tscError("%p failed to create merge structure", pSql); pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY; + tfree(pReducer); return; } @@ -266,6 +267,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd // no data actually, no need to merge result. 
if (idx == 0) { + tfree(pReducer); return; } @@ -282,6 +284,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd pRes->code = tLoserTreeCreate(&pReducer->pLoserTree, pReducer->numOfBuffer, param, treeComparator); if (pReducer->pLoserTree == NULL || pRes->code != 0) { + tfree(pReducer); return; } @@ -325,7 +328,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd tfree(pReducer->pResultBuf); tfree(pReducer->pFinalRes); tfree(pReducer->prevRowOfInput); - + tfree(pReducer); pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY; return; } @@ -685,6 +688,7 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr if (createOrderDescriptor(pOrderDesc, pCmd, pModel) != TSDB_CODE_SUCCESS) { pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY; + tfree(pSchema); return pRes->code; } From 74d15ce380f752fbe00348e23cad07e9bfdce359 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Wed, 17 Jun 2020 09:18:12 +0000 Subject: [PATCH 08/10] [TD-669] wrong errno while drop column from normal table --- src/mnode/src/mnodeTable.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c index 93ee96a674..d5d9cbe207 100644 --- a/src/mnode/src/mnodeTable.c +++ b/src/mnode/src/mnodeTable.c @@ -1748,9 +1748,9 @@ static int32_t mnodeFindNormalTableColumnIndex(SChildTableObj *pTable, char *col } static int32_t mnodeAddNormalTableColumnCb(SMnodeMsg *pMsg, int32_t code) { - SSuperTableObj *pTable = (SSuperTableObj *)pMsg->pTable; + SChildTableObj *pTable = (SChildTableObj *)pMsg->pTable; mLPrint("app:%p:%p, ctable %s, add column result:%s", pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId, - tstrerror(code)); + tstrerror(code)); return code; } @@ -1808,7 +1808,7 @@ static int32_t mnodeAddNormalTableColumn(SMnodeMsg *pMsg, SSchema schema[], int3 } static int32_t mnodeDropNormalTableColumnCb(SMnodeMsg *pMsg, int32_t code) { - SSuperTableObj *pTable = (SSuperTableObj *)pMsg->pTable; + SChildTableObj *pTable = (SChildTableObj *)pMsg->pTable; mLPrint("app:%p:%p, ctable %s, drop column result:%s", pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId, tstrerror(code)); return code; @@ -1845,7 +1845,7 @@ static int32_t mnodeDropNormalTableColumn(SMnodeMsg *pMsg, char *colName) { }; int32_t code = sdbUpdateRow(&oper); - if (code != TSDB_CODE_SUCCESS) { + if (code == TSDB_CODE_SUCCESS) { return TSDB_CODE_MND_ACTION_IN_PROGRESS; } From 33ac244c0d1e0bd397ae26fc95734453ccbd6999 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Wed, 17 Jun 2020 17:36:53 +0800 Subject: [PATCH 09/10] [TD-548] memory link while save query info --- src/mnode/src/mnodeProfile.c | 26 +++++++++++++++++++------- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/src/mnode/src/mnodeProfile.c b/src/mnode/src/mnodeProfile.c index 31ca47929c..d035f19b6f 100644 --- a/src/mnode/src/mnodeProfile.c +++ b/src/mnode/src/mnodeProfile.c @@ -39,6 +39,7 @@ #define CONN_KEEP_TIME (tsShellActivityTimer * 3) #define CONN_CHECK_TIME (tsShellActivityTimer * 2) #define QUERY_ID_SIZE 20 +#define QUERY_STREAM_SAVE_SIZE 20 extern void *tsMnodeTmr; static SCacheObj *tsMnodeConnCache = NULL; @@ -264,16 +265,27 @@ static int32_t mnodeRetrieveConns(SShowObj *pShow, char *data, int32_t rows, voi // not thread safe, need optimized int32_t mnodeSaveQueryStreamList(SConnObj *pConn, SCMHeartBeatMsg *pHBMsg) { pConn->numOfQueries = htonl(pHBMsg->numOfQueries); - if (pConn->numOfQueries > 0 && pConn->numOfQueries < 20) { - pConn->pQueries = 
calloc(sizeof(SQueryDesc), pConn->numOfQueries); - memcpy(pConn->pQueries, pHBMsg->pData, pConn->numOfQueries * sizeof(SQueryDesc)); + if (pConn->numOfQueries > 0) { + if (pConn->pQueries == NULL) { + pConn->pQueries = calloc(sizeof(SQueryDesc), QUERY_STREAM_SAVE_SIZE); + } + + int32_t saveSize = MIN(QUERY_STREAM_SAVE_SIZE, pConn->numOfQueries) * sizeof(SQueryDesc); + if (saveSize > 0 && pConn->pQueries != NULL) { + memcpy(pConn->pQueries, pHBMsg->pData, saveSize); + } } pConn->numOfStreams = htonl(pHBMsg->numOfStreams); - if (pConn->numOfStreams > 0 && pConn->numOfStreams < 20) { - pConn->pStreams = calloc(sizeof(SStreamDesc), pConn->numOfStreams); - memcpy(pConn->pStreams, pHBMsg->pData + pConn->numOfQueries * sizeof(SQueryDesc), - pConn->numOfStreams * sizeof(SStreamDesc)); + if (pConn->numOfStreams > 0) { + if (pConn->pStreams == NULL) { + pConn->pStreams = calloc(sizeof(SStreamDesc), QUERY_STREAM_SAVE_SIZE); + } + + int32_t saveSize = MIN(QUERY_STREAM_SAVE_SIZE, pConn->numOfStreams) * sizeof(SStreamDesc); + if (saveSize > 0 && pConn->pStreams != NULL) { + memcpy(pConn->pStreams, pHBMsg->pData + pConn->numOfQueries * sizeof(SQueryDesc), saveSize); + } } return TSDB_CODE_SUCCESS; From d59295763b97c0a894cdaf5fad2793e05301bca4 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Wed, 17 Jun 2020 18:05:30 +0800 Subject: [PATCH 10/10] fix insert/nchar-unicode.py case. --- tests/pytest/insert/nchar-unicode.py | 1 - tests/pytest/regressiontest.sh | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/pytest/insert/nchar-unicode.py b/tests/pytest/insert/nchar-unicode.py index 152a09723e..c417a6bca2 100644 --- a/tests/pytest/insert/nchar-unicode.py +++ b/tests/pytest/insert/nchar-unicode.py @@ -25,7 +25,6 @@ class TDTestCase: def run(self): tdSql.prepare() - tdSql.error('create table tb (ts timestamp, col nchar(1022))') tdSql.execute('create table tb (ts timestamp, col nchar(1021))') tdSql.execute("insert into tb values (now, 'taosdata')") tdSql.query("select * from tb") diff --git a/tests/pytest/regressiontest.sh b/tests/pytest/regressiontest.sh index 1fc3edf4a9..955d6aa2ef 100755 --- a/tests/pytest/regressiontest.sh +++ b/tests/pytest/regressiontest.sh @@ -13,7 +13,7 @@ python3 ./test.py -f insert/date.py python3 ./test.py -f insert/binary.py python3 ./test.py -f insert/nchar.py # python3 ./test.py -f insert/nchar-boundary.py -# python3 ./test.py -f insert/nchar-unicode.py +python3 ./test.py -f insert/nchar-unicode.py python3 ./test.py -f insert/multi.py python3 ./test.py -f insert/randomNullCommit.py
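
Note on the mnode fixes (patches 04 through 06): they all apply the same pattern in the sdb update callbacks, where a freshly decoded object is memcpy'd over the cached one, so any fields owned by the cached copy (refCount, vgHash, idPool, tableList, superTable) must be saved before the copy and restored after it. The sketch below is illustrative only; the struct and field names are placeholders standing in for objects like SSuperTableObj or SVgObj and are not TDengine's actual definitions.

/*
 * Illustrative sketch, not part of the patches above: save/copy/restore of
 * cache-owned fields when overwriting a cached object with a decoded one.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
  int   refCount;     /* owned by the cached object, must survive the update */
  void *vgHash;       /* owned pointer, must survive the update */
  int   numOfTables;  /* payload field that the incoming object should win  */
} CachedObj;

/* Overwrite 'cached' with 'incoming' while keeping the cached copy's
 * reference count and owned pointers intact. */
static void updateCachedObj(CachedObj *cached, CachedObj *incoming) {
  int   oldRefCount = cached->refCount;
  void *oldVgHash   = cached->vgHash;

  memcpy(cached, incoming, sizeof(CachedObj));

  cached->refCount = oldRefCount;  /* readers may still hold references */
  cached->vgHash   = oldVgHash;    /* keep the hash the cache allocated */
  free(incoming);                  /* the decoded copy is no longer needed */
}

int main(void) {
  CachedObj *cached = calloc(1, sizeof(CachedObj));
  cached->refCount    = 2;
  cached->vgHash      = malloc(16);
  cached->numOfTables = 1;

  CachedObj *incoming = calloc(1, sizeof(CachedObj));
  incoming->numOfTables = 5;       /* only the payload fields should win */

  updateCachedObj(cached, incoming);
  printf("refCount=%d numOfTables=%d hashKept=%s\n",
         cached->refCount, cached->numOfTables,
         cached->vgHash != NULL ? "yes" : "no");

  free(cached->vgHash);
  free(cached);
  return 0;
}

The same idea presumably explains why patch 06 drops the field-by-field restore in mnodeVgroupActionUpdate in favor of copying only tsVgUpdateSize bytes: if that size covers just the serialized prefix of SVgObj, the owned fields at the tail are never touched by the copy in the first place.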