ci: adjust test && fix issue

ci: adjust test && fix issue
This commit is contained in:
liuyao 2025-03-20 17:54:20 +08:00 committed by GitHub
parent 9b7434d0ab
commit f7beaeebfe
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
6 changed files with 207 additions and 9 deletions

View File

@ -857,8 +857,12 @@ static int32_t addAggTask(SStreamObj* pStream, SMnode* pMnode, SSubplan* plan, S
} }
if (needHistoryTask(pStream)) { if (needHistoryTask(pStream)) {
EStreamTaskType type = (pStream->conf.trigger == STREAM_TRIGGER_CONTINUOUS_WINDOW_CLOSE) ? STREAM_RECALCUL_TASK EStreamTaskType type = 0;
: STREAM_HISTORY_TASK; if (pStream->conf.trigger == STREAM_TRIGGER_CONTINUOUS_WINDOW_CLOSE && (pStream->conf.fillHistory == 0)) {
type = STREAM_RECALCUL_TASK; // only the recalculating task
} else {
type = STREAM_HISTORY_TASK; // set the fill-history option
}
code = doAddAggTask(pStream, pMnode, plan, pEpset, pVgroup, pSnode, type, useTriggerParam); code = doAddAggTask(pStream, pMnode, plan, pEpset, pVgroup, pSnode, type, useTriggerParam);
if (code != 0) { if (code != 0) {
goto END; goto END;

View File

@ -998,6 +998,7 @@ static int32_t doStreamExecTask(SStreamTask* pTask) {
streamMetaReleaseTask(pTask->pMeta, pHTask); streamMetaReleaseTask(pTask->pMeta, pHTask);
} else if ((taskLevel == TASK_LEVEL__SOURCE) && pTask->info.hasAggTasks) { } else if ((taskLevel == TASK_LEVEL__SOURCE) && pTask->info.hasAggTasks) {
code = continueDispatchRecalculateStart((SStreamDataBlock*)pInput, pTask); code = continueDispatchRecalculateStart((SStreamDataBlock*)pInput, pTask);
pInput = NULL;
} }
} }

View File

@ -9,6 +9,7 @@
#,,n,system-test,python3 ./test.py -f 8-stream/stream_basic.py #,,n,system-test,python3 ./test.py -f 8-stream/stream_basic.py
,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/continuous_window_close_interval_basic.py ,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/continuous_window_close_interval_basic.py
,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/continuous_window_close_interval.py ,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/continuous_window_close_interval.py
,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/continuous_window_close_interval_checkpoint.py
# army-test # army-test
#,,y,army,./pytest.sh python3 ./test.py -f multi-level/mlevel_basic.py -N 3 -L 3 -D 2 #,,y,army,./pytest.sh python3 ./test.py -f multi-level/mlevel_basic.py -N 3 -L 3 -D 2

View File

@ -204,7 +204,7 @@ sql insert into t1 values(1648791269001,30,2,3,1.0);
$loop_count = 0 $loop_count = 0
loop11: loop110:
sleep 200 sleep 200
@ -218,17 +218,17 @@ sql select * from streamt3;
if $rows != 30 then if $rows != 30 then
print =====rows=$rows print =====rows=$rows
goto loop11 goto loop110
endi endi
if $data[20][1] != 2 then if $data[20][1] != 2 then
print =====[20][1]=$[20][1] print =====[20][1]=$[20][1]
goto loop11 goto loop110
endi endi
if $data[29][1] != 2 then if $data[29][1] != 2 then
print =====[29][1]=$[29][1] print =====[29][1]=$[29][1]
goto loop11 goto loop110
endi endi
print step2============= print step2=============

View File

@ -153,6 +153,7 @@ sql create stream streams12 trigger continuous_window_close fill_history 1 ignor
run tsim/stream/checkTaskStatus.sim run tsim/stream/checkTaskStatus.sim
$loop_count = 0
loop3: loop3:
sleep 500 sleep 500
sql select * from streamt12 order by 1,2; sql select * from streamt12 order by 1,2;
@ -177,7 +178,7 @@ endi
sql insert into t1 values(1648791224001,2,2,3); sql insert into t1 values(1648791224001,2,2,3);
sql insert into t1 values(1648791225001,2,2,3); sql insert into t1 values(1648791225001,2,2,3);
$loop_count = 0
loop4: loop4:
sleep 500 sleep 500
sql select * from streamt12 where c3 == "t1" order by 1,2; sql select * from streamt12 where c3 == "t1" order by 1,2;
@ -239,6 +240,7 @@ print $data30 $data31 $data32 $data33 $data34
print $data40 $data41 $data42 $data43 $data44 print $data40 $data41 $data42 $data43 $data44
print $data50 $data51 $data52 $data53 $data54 print $data50 $data51 $data52 $data53 $data54
$loop_count = 0
loop5: loop5:
sleep 500 sleep 500
print sql loop5 select * from streamt3 order by 1,2; print sql loop5 select * from streamt3 order by 1,2;
@ -271,8 +273,6 @@ if $data11 != 2 then
goto loop5 goto loop5
endi endi
return 1
sql insert into t1 values(1648791221001,3,5,3); sql insert into t1 values(1648791221001,3,5,3);
sql insert into t1 values(1648791241001,3,6,3); sql insert into t1 values(1648791241001,3,6,3);

View File

@ -0,0 +1,192 @@
import sys
import threading
from util.log import *
from util.sql import *
from util.cases import *
from util.common import *
from util.cluster import *
class TDTestCase:
    """Stream continuous_window_close test that restarts the dnode after a
    checkpoint interval to verify stream tasks recover from the checkpoint.

    NOTE(review): this file was recovered from a whitespace-stripped dump; the
    indentation below is reconstructed from the statement syntax — confirm
    against the committed test before relying on exact nesting.
    """

    # checkpointinterval=60s is deliberately shorter than the 70s wait in
    # docontinuous(), so at least one checkpoint is taken before the restart.
    updatecfgDict = {"debugFlag": 135, "asynclog": 0, "checkpointinterval": 60}

    def init(self, conn, logSql, replicaVar=1):
        # Standard TDengine test-case initialization: bind the connection and
        # keep a handle to the shared tdCom helper.
        self.replicaVar = int(replicaVar)
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)
        self.tdCom = tdCom

    def check_stream_all_task_status(self, stream_timeout=0):
        """check stream status

        Polls information_schema.ins_stream_tasks once per second until every
        stream task reports status "ready" (and at least one task exists), or
        until the timeout is exhausted.

        Args:
            stream_timeout (int): seconds to keep polling; when None, falls
                back to self.stream_timeout.
                NOTE(review): self.stream_timeout is never assigned in this
                class, so passing None would raise AttributeError — confirm.
        """
        timeout = self.stream_timeout if stream_timeout is None else stream_timeout
        # check stream task rows
        sql_task_status = f"select * from information_schema.ins_stream_tasks where status != \"ready\";"
        sql_task_all = f"select * from information_schema.ins_stream_tasks;"
        # check stream task status
        checktimes = 0
        while checktimes <= timeout:
            tdLog.notice(f"checktimes:{checktimes}")
            try:
                tdSql.query(sql_task_status, row_tag=True)
                result_task_status_rows = tdSql.getRows()
                if result_task_status_rows == 0:
                    # No task is in a non-ready state; also require that at
                    # least one task exists before declaring success.
                    tdSql.query(sql_task_all, row_tag=True)
                    result_task_status_rows = tdSql.getRows()
                    if result_task_status_rows > 0:
                        break
                time.sleep(1)
                checktimes += 1
            except Exception as e:
                tdLog.notice(f"Try to check stream status again, check times: {checktimes}")
                checktimes += 1
                tdSql.print_error_frame_info(f"status is not ready")
        else:
            # while-else: reached only when the loop exhausted the timeout
            # without hitting the break above.
            tdLog.notice(f"it has spend {checktimes} for checking stream task status but it failed")
            if checktimes == timeout:
                tdSql.print_error_frame_info(f"status is ready,")

    def docontinuous(
        self,
        interval,
        watermark=None,
        partition=None,
        fill_value=None,
        ignore_expired=0,
        ignore_update=0,
        use_exist_stb=None,
        tag_value=None,
        fill_history_value=None,
    ):
        """Create a continuous_window_close stream, feed it data, restart the
        dnode after a checkpoint, then compare the stream's destination table
        against an equivalent batch query.

        Args:
            interval (int): window interval in seconds.
            watermark: optional watermark in seconds.
            partition (str): partition-by expression ("tbname" or tag list).
            fill_value: optional FILL clause value for the stream/query.
            ignore_expired / ignore_update: stream option passthroughs
                (ignore_update is currently unused in the body).
            use_exist_stb: create the stream into an existing super table.
            tag_value: tag mapping when use_exist_stb is set.
            fill_history_value: enable fill_history on the stream.
        """
        tdLog.info(f"*** testing stream continuous window close: interval: {interval}, partition: {partition}, fill_history: {fill_history_value}, use_exist_stb: {use_exist_stb}, fill: {fill_value}, tag_value: {tag_value} ***")
        self.tdCom.case_name = sys._getframe().f_code.co_name
        if watermark is not None:
            self.tdCom.case_name = "watermark" + sys._getframe().f_code.co_name
        self.tdCom.prepare_data(interval=interval, watermark=watermark, ext_stb=use_exist_stb, fill_history_value=fill_history_value)
        tdLog.info(
            f"testing stream continue_window_close finish prepare_data"
        )
        sqlstr = "alter local 'streamCoverage' '1'"
        tdSql.query(sqlstr)
        # Wait (70s) must exceed checkpointinterval (60s) so a checkpoint is
        # taken before the dnode restart below.
        checkpointtime = 70
        recalculatetime = 120
        recalculatetimeStr = f"recalculate {recalculatetime}s"
        # tdCom names are db-qualified; strip the "<db>." prefix.
        self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "")
        self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "")
        self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "")
        self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}'
        self.delete = True
        if partition == "tbname":
            partition_elm_alias = self.tdCom.partition_tbname_alias
        else:
            partition_elm_alias = self.tdCom.partition_tag_alias
        # Subtable naming expression differs: tbname is already a string,
        # tag aliases must be cast to varchar first.
        if partition == "tbname":
            stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
        else:
            stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", cast({partition_elm_alias} as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
        if watermark is not None:
            watermark_value = f'{self.tdCom.dataDict["watermark"]}s'
        else:
            watermark_value = None
        # no subtable
        # create stream super table and child table
        tdLog.info("create stream super table and child table")
        self.des_select_str = self.tdCom.stb_source_select_str
        self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.stb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.des_select_str} from {self.stb_name} partition by {partition} {partition_elm_alias} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="continuous_window_close", watermark=watermark_value, ignore_expired=ignore_expired, subtable_value=stb_subtable_value, fill_value=fill_value, use_exist_stb=use_exist_stb, tag_value=tag_value, max_delay=recalculatetimeStr, fill_history_value=fill_history_value)
        # insert data
        start_time = self.tdCom.date_time
        print(f"range count:{self.tdCom.range_count}")
        for i in range(self.tdCom.range_count):
            if i == 0:
                # First window: compute its close timestamp from the start time.
                if watermark is not None:
                    window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(self.tdCom.date_time, self.tdCom.dataDict['interval'], self.tdCom.dataDict['watermark'])
                else:
                    window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(self.tdCom.date_time, self.tdCom.dataDict['interval'])
            else:
                # Subsequent windows: advance both cursor and close timestamp.
                self.tdCom.date_time = window_close_ts + self.tdCom.offset
                window_close_ts += self.tdCom.dataDict['interval']*self.tdCom.offset
            for num in range(int(window_close_ts/self.tdCom.offset-self.tdCom.date_time/self.tdCom.offset)):
                ts_value=self.tdCom.date_time+num*self.tdCom.offset
                self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value)
                if i%2 == 0:
                    # even windows: insert each row twice (update path)
                    self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value)
                if self.delete and i%2 != 0:
                    # odd windows: exercise the delete path
                    ts_cast_delete_value = self.tdCom.time_cast(ts_value)
                    self.tdCom.sdelete_rows(tbname=self.ctb_name, start_ts=ts_cast_delete_value)
            # One row just before the close timestamp so the window closes.
            self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts-1)
        start_ts = start_time
        # A far-future row forces all earlier windows to close.
        future_ts = str(self.tdCom.date_time)+f'+{self.tdCom.dataDict["interval"]*(self.tdCom.range_count+2)}s'
        end_ts = self.tdCom.time_cast(future_ts)
        self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=future_ts)
        future_ts_bigint = self.tdCom.str_ts_trans_bigint(future_ts)
        if watermark is not None:
            window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(future_ts_bigint, self.tdCom.dataDict['interval'], self.tdCom.dataDict['watermark'])
        else:
            window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(future_ts_bigint, self.tdCom.dataDict['interval'])
        self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts)
        waitTime = checkpointtime
        tdLog.info(f"sleep {waitTime} s")
        time.sleep(waitTime)
        # Restart the dnode: stream tasks must recover from the checkpoint.
        tdDnodes.stoptaosd(1)
        tdDnodes.starttaosd(1)
        self.check_stream_all_task_status(
            stream_timeout=120
        )
        # Verify: stream destination table must match the equivalent batch query.
        if fill_value:
            for tbname in [self.stb_name]:
                if "value" in fill_value.lower():
                    fill_value='VALUE,1,2,3,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11'
                additional_options = f"where ts >= {start_ts} and ts <= {end_ts}"
                self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.fill_stb_source_select_str} from {tbname} {additional_options} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart', fill_value=fill_value)
        else:
            for tbname in [self.stb_name]:
                additional_options = f"where ts <= {end_ts}"
                self.tdCom.check_query_data(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {tbname} {additional_options} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) order by wstart', fill_value=fill_value)
        tdLog.info("=====end======================================")

    def run(self):
        # Exercise both tbname partitioning and tag-alias partitioning.
        for partition in ["tbname", "t1 as t5,t2 as t11,t3 as t13, t4"]:
            self.docontinuous(
                interval=random.randint(10, 15),
                partition=partition,
                fill_value=None,
                fill_history_value=1,
            )

    def stop(self):
        tdLog.info("stop========================================")
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")
# Module-level test registration for the TDengine test framework.
# NOTE(review): `event` is created but never used in this file — presumably
# framework boilerplate; confirm before removing.
event = threading.Event()
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())