From 91269075c72cf4798888e608eadc798d17e5a76e Mon Sep 17 00:00:00 2001 From: jiajingbin Date: Thu, 17 Aug 2023 19:49:07 +0800 Subject: [PATCH 01/16] test: add stream cases --- tests/pytest/util/common.py | 740 +++++++++++++++++- tests/system-test/8-stream/interval.py | 240 ++++++ tests/system-test/8-stream/scalar_function.py | 180 +++++ 3 files changed, 1129 insertions(+), 31 deletions(-) create mode 100644 tests/system-test/8-stream/interval.py create mode 100644 tests/system-test/8-stream/scalar_function.py diff --git a/tests/pytest/util/common.py b/tests/pytest/util/common.py index 6d813a4166..1bf2668e9a 100644 --- a/tests/pytest/util/common.py +++ b/tests/pytest/util/common.py @@ -132,6 +132,13 @@ class TDCom: self.full_type_list = ["tinyint", "smallint", "int", "bigint", "tinyint unsigned", "smallint unsigned", "int unsigned", "bigint unsigned", "float", "double", "binary", "nchar", "bool"] self.white_list = ["statsd", "node_exporter", "collectd", "icinga2", "tcollector", "information_schema", "performance_schema"] self.Boundary = DataBoundary() + self.white_list = ["statsd", "node_exporter", "collectd", "icinga2", "tcollector", "information_schema", "performance_schema"] + self.case_name = str() + self.des_table_suffix = "_output" + self.stream_suffix = "_stream" + self.range_count = 5 + self.default_interval = 5 + self.stream_timeout = 12 # def init(self, conn, logSql): # # tdSql.init(conn.cursor(), logSql) @@ -745,40 +752,711 @@ class TDCom: """ return ','.join(map(lambda i: f'{gen_type}{i} {data_type}', range(count))) -def is_json(msg): - if isinstance(msg, str): - try: - json.loads(msg) - return True - except: - return False - else: - return False - -def get_path(tool="taosd"): - selfPath = os.path.dirname(os.path.realpath(__file__)) - - if ("community" in selfPath): - projPath = selfPath[:selfPath.find("community")] + def is_json(msg): + if isinstance(msg, str): + try: + json.loads(msg) + return True + except: + return False else: - projPath = selfPath[:selfPath.find("tests")] + return False - paths = [] - for root, dirs, files in os.walk(projPath): - if ((tool) in files or ("%s.exe"%tool) in files): - rootRealPath = os.path.dirname(os.path.realpath(root)) - if ("packaging" not in rootRealPath): - paths.append(os.path.join(root, tool)) - break - if (len(paths) == 0): - return "" - return paths[0] + def get_path(tool="taosd"): + selfPath = os.path.dirname(os.path.realpath(__file__)) -def dict2toml(in_dict: dict, file:str): - if not isinstance(in_dict, dict): - return "" - with open(file, 'w') as f: - toml.dump(in_dict, f) + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + paths = [] + for root, dirs, files in os.walk(projPath): + if ((tool) in files or ("%s.exe"%tool) in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + paths.append(os.path.join(root, tool)) + break + if (len(paths) == 0): + return "" + return paths[0] + + def dict2toml(in_dict: dict, file:str): + if not isinstance(in_dict, dict): + return "" + with open(file, 'w') as f: + toml.dump(in_dict, f) + + # stream + def create_stream(self, stream_name, des_table, source_sql, trigger_mode=None, watermark=None, max_delay=None, ignore_expired=None, ignore_update=None, subtable_value=None, fill_value=None, fill_history_value=None, stb_field_name_value=None, tag_value=None, use_exist_stb=False, use_except=False): + if subtable_value is None: + subtable = "" + else: + subtable = 
f'subtable({subtable_value})' + + if fill_value is None: + fill = "" + else: + fill = f'fill({fill_value})' + + if fill_history_value is None: + fill_history = "" + else: + fill_history = f'fill_history {fill_history_value}' + + if use_exist_stb: + if stb_field_name_value is None: + stb_field_name = "" + else: + stb_field_name = f'({stb_field_name_value})' + + if tag_value is None: + tags = "" + else: + tags = f'tags({tag_value})' + else: + stb_field_name = "" + tags = "" + + + if trigger_mode is None: + stream_options = "" + if watermark is not None: + stream_options = f'watermark {watermark}' + if ignore_expired: + stream_options += f" ignore expired {ignore_expired}" + else: + stream_options += f" ignore expired 0" + if ignore_update: + stream_options += f" ignore update {ignore_update}" + else: + stream_options += f" ignore update 0" + if not use_except: + tdSql.execute(f'create stream if not exists {stream_name} trigger at_once {stream_options} {fill_history} into {des_table} {subtable} as {source_sql} {fill};') + return None + else: + return f'create stream if not exists {stream_name} {stream_options} {fill_history} into {des_table} {subtable} as {source_sql} {fill};' + else: + if watermark is None: + if trigger_mode == "max_delay": + stream_options = f'trigger {trigger_mode} {max_delay}' + else: + stream_options = f'trigger {trigger_mode}' + else: + if trigger_mode == "max_delay": + stream_options = f'trigger {trigger_mode} {max_delay} watermark {watermark}' + else: + stream_options = f'trigger {trigger_mode} watermark {watermark}' + if ignore_expired: + stream_options += f" ignore expired {ignore_expired}" + else: + stream_options += f" ignore expired 0" + + if ignore_update: + stream_options += f" ignore update {ignore_update}" + else: + stream_options += f" ignore update 0" + if not use_except: + tdSql.execute(f'create stream if not exists {stream_name} {stream_options} {fill_history} into {des_table}{stb_field_name} {tags} {subtable} as {source_sql} {fill};') + return None + else: + return f'create stream if not exists {stream_name} {stream_options} {fill_history} into {des_table}{stb_field_name} {tags} {subtable} as {source_sql} {fill};' + + + def drop_all_streams(self): + tdSql.query("show streams") + stream_name_list = list(map(lambda x: x[0], tdSql.queryResult)) + for stream_name in stream_name_list: + tdSql.execute(f'drop stream if exists {stream_name};') + + def drop_db(self, dbname="test"): + if dbname[0].isdigit(): + tdSql.execute(f'drop database if exists `{dbname}`') + else: + tdSql.execute(f'drop database if exists {dbname}') + + def drop_all_db(self): + tdSql.query("show databases;") + db_list = list(map(lambda x: x[0], tdSql.queryResult)) + for dbname in db_list: + if dbname not in self.white_list and "telegraf" not in dbname: + tdSql.execute(f'drop database if exists `{dbname}`') + + def time_cast(self, time_value, split_symbol="+"): + ts_value = str(time_value).split(split_symbol)[0] + if split_symbol in str(time_value): + ts_value_offset = str(time_value).split(split_symbol)[1] + else: + ts_value_offset = "0s" + return f'cast({ts_value} as timestamp){split_symbol}{ts_value_offset}' + + def clean_env(self): + self.drop_all_streams() + self.drop_all_db() + + def set_precision_offset(self, precision): + if precision == "ms": + self.offset = 1000 + elif precision == "us": + self.offset = 1000000 + elif precision == "ns": + self.offset = 1000000000 + else: + pass + + def genTs(self, precision="ms", ts="", protype="taosc", ns_tag=None): + """ + protype = "taosc" or 
"restful" + gen ts and datetime + """ + if precision == "ns": + if ts == "" or ts is None: + ts = time.time_ns() + else: + ts = ts + if ns_tag is None: + dt = ts + else: + dt = datetime.fromtimestamp(ts // 1000000000) + dt = dt.strftime('%Y-%m-%d %H:%M:%S') + '.' + str(int(ts % 1000000000)).zfill(9) + if protype == "restful": + dt = datetime.fromtimestamp(ts // 1000000000) + dt = dt.strftime('%Y-%m-%d %H:%M:%S') + '.' + str(int(ts % 1000000000)).zfill(9) + else: + if ts == "" or ts is None: + ts = time.time() + else: + ts = ts + if precision == "ms" or precision is None: + ts = int(round(ts * 1000)) + dt = datetime.fromtimestamp(ts // 1000) + if protype == "taosc": + dt = dt.strftime('%Y-%m-%d %H:%M:%S') + '.' + str(int(ts % 1000)).zfill(3) + '000' + elif protype == "restful": + dt = dt.strftime('%Y-%m-%d %H:%M:%S') + '.' + str(int(ts % 1000)).zfill(3) + else: + pass + elif precision == "us": + ts = int(round(ts * 1000000)) + dt = datetime.fromtimestamp(ts // 1000000) + dt = dt.strftime('%Y-%m-%d %H:%M:%S') + '.' + str(int(ts % 1000000)).zfill(6) + return ts, dt + + def sgen_column_type_str(self, column_elm_list): + self.column_type_str = "" + if column_elm_list is None: + self.column_type_str = self.gen_default_column_str() + else: + for column_elm in column_elm_list: + if "count" in column_elm: + total_count = int(column_elm["count"]) + else: + total_count = 1 + if total_count > 0: + for _ in range(total_count): + self.column_type_str += f'{self.default_colname_prefix}{self.default_column_index_start_num} {column_elm["type"]}, ' + if column_elm["type"] in ["varchar", "binary", "nchar"]: + self.column_type_str = self.column_type_str.rstrip()[:-1] + f'({column_elm["len"]}), ' + self.default_column_index_start_num += 1 + else: + continue + self.column_type_str = self.default_colts_name + " timestamp, " + self.column_type_str.rstrip()[:-1] + + def sgen_tag_type_str(self, tag_elm_list): + self.tag_type_str = "" + if tag_elm_list is None: + self.tag_type_str = self.gen_default_tag_str() + else: + for tag_elm in tag_elm_list: + if "count" in tag_elm: + total_count = int(tag_elm["count"]) + else: + total_count = 1 + if total_count > 0: + for _ in range(total_count): + self.tag_type_str += f'{self.default_tagname_prefix}{self.default_tag_index_start_num} {tag_elm["type"]}, ' + if tag_elm["type"] in ["varchar", "binary", "nchar"]: + self.tag_type_str = self.tag_type_str.rstrip()[:-1] + f'({tag_elm["len"]}), ' + self.default_tag_index_start_num += 1 + else: + continue + self.tag_type_str = self.tag_type_str.rstrip()[:-1] + if self.need_tagts: + self.tag_type_str = self.default_tagts_name + " timestamp, " + self.tag_type_str + def sgen_tag_value_list(self, tag_elm_list, ts_value=None): + if self.need_tagts: + self.ts_value = self.genTs()[0] + if ts_value is not None: + self.ts_value = ts_value + + if tag_elm_list is None: + self.tag_value_list = list(map(lambda i: self.gen_random_type_value(i, self.default_varchar_length, self.default_varchar_datatype, self.default_nchar_length, self.default_nchar_datatype), self.full_type_list)) + else: + for tag_elm in tag_elm_list: + if "count" in tag_elm: + total_count = int(tag_elm["count"]) + else: + total_count = 1 + if total_count > 0: + for _ in range(total_count): + if tag_elm["type"] in ["varchar", "binary", "nchar"]: + self.tag_value_list.append(self.gen_random_type_value(tag_elm["type"], tag_elm["len"], self.default_varchar_datatype, tag_elm["len"], self.default_nchar_datatype)) + else: + 
self.tag_value_list.append(self.gen_random_type_value(tag_elm["type"], "", "", "", "")) + else: + continue + # if self.need_tagts and self.ts_value is not None and len(str(self.ts_value)) > 0: + if self.need_tagts: + self.tag_value_list = [self.ts_value] + self.tag_value_list + + def screateDb(self, dbname="test", drop_db=True, **kwargs): + tdLog.info("creating db ...") + db_params = "" + if len(kwargs) > 0: + for param, value in kwargs.items(): + if param == "precision": + db_params += f'{param} "{value}" ' + else: + db_params += f'{param} {value} ' + if drop_db: + self.drop_db(dbname) + tdSql.execute(f'create database if not exists {dbname} {db_params}') + tdSql.execute(f'use {dbname}') + + def screate_stable(self, dbname=None, stbname="stb", use_name="table", column_elm_list=None, tag_elm_list=None, + need_tagts=False, count=1, default_stbname_prefix="stb", default_stbname_index_start_num=1, + default_column_index_start_num=1, default_tag_index_start_num=1, **kwargs): + tdLog.info("creating stable ...") + if dbname is not None: + self.dbname = dbname + self.need_tagts = need_tagts + self.default_stbname_prefix = default_stbname_prefix + self.default_stbname_index_start_num = default_stbname_index_start_num + self.default_column_index_start_num = default_column_index_start_num + self.default_tag_index_start_num = default_tag_index_start_num + stb_params = "" + if len(kwargs) > 0: + for param, value in kwargs.items(): + stb_params += f'{param} "{value}" ' + self.sgen_column_type_str(column_elm_list) + self.sgen_tag_type_str(tag_elm_list) + if self.dbname is not None: + self.stb_name = f'{self.dbname}.{stbname}' + else: + self.stb_name = stbname + if int(count) <= 1: + create_stable_sql = f'create {use_name} {self.stb_name} ({self.column_type_str}) tags ({self.tag_type_str}) {stb_params};' + tdSql.execute(create_stable_sql) + else: + for _ in range(count): + create_stable_sql = f'create {use_name} {self.dbname}.{default_stbname_prefix}{default_stbname_index_start_num} ({self.column_type_str}) tags ({self.tag_type_str}) {stb_params};' + default_stbname_index_start_num += 1 + tdSql.execute(create_stable_sql) + + def screate_ctable(self, dbname=None, stbname=None, ctbname="ctb", use_name="table", tag_elm_list=None, ts_value=None, count=1, default_varchar_datatype="letters", default_nchar_datatype="letters", default_ctbname_prefix="ctb", default_ctbname_index_start_num=1, **kwargs): + tdLog.info("creating childtable ...") + self.default_varchar_datatype = default_varchar_datatype + self.default_nchar_datatype = default_nchar_datatype + self.default_ctbname_prefix = default_ctbname_prefix + self.default_ctbname_index_start_num = default_ctbname_index_start_num + ctb_params = "" + if len(kwargs) > 0: + for param, value in kwargs.items(): + ctb_params += f'{param} "{value}" ' + self.sgen_tag_value_list(tag_elm_list, ts_value) + tag_value_str = "" + # tag_value_str = ", ".join(str(v) for v in self.tag_value_list) + for tag_value in self.tag_value_list: + if isinstance(tag_value, str): + tag_value_str += f'"{tag_value}", ' + else: + tag_value_str += f'{tag_value}, ' + tag_value_str = tag_value_str.rstrip()[:-1] + if dbname is not None: + self.dbname = dbname + self.ctb_name = f'{self.dbname}.{ctbname}' + else: + self.ctb_name = ctbname + if stbname is not None: + self.stb_name = stbname + if int(count) <= 1: + create_ctable_sql = f'create {use_name} {self.ctb_name} using {self.stb_name} tags ({tag_value_str}) {ctb_params};' + tdSql.execute(create_ctable_sql) + else: + for _ in range(count): + 
create_stable_sql = f'create {use_name} {self.dbname}.{default_ctbname_prefix}{default_ctbname_index_start_num} using {self.stb_name} tags ({tag_value_str}) {ctb_params};' + default_ctbname_index_start_num += 1 + tdSql.execute(create_stable_sql) + + def sgen_column_value_list(self, column_elm_list, need_null, ts_value=None): + self.column_value_list = list() + self.ts_value = self.genTs()[0] + if ts_value is not None: + self.ts_value = ts_value + + if column_elm_list is None: + self.column_value_list = list(map(lambda i: self.gen_random_type_value(i, self.default_varchar_length, self.default_varchar_datatype, self.default_nchar_length, self.default_nchar_datatype), self.full_type_list)) + else: + for column_elm in column_elm_list: + if "count" in column_elm: + total_count = int(column_elm["count"]) + else: + total_count = 1 + if total_count > 0: + for _ in range(total_count): + if column_elm["type"] in ["varchar", "binary", "nchar"]: + self.column_value_list.append(self.gen_random_type_value(column_elm["type"], column_elm["len"], self.default_varchar_datatype, column_elm["len"], self.default_nchar_datatype)) + else: + self.column_value_list.append(self.gen_random_type_value(column_elm["type"], "", "", "", "")) + else: + continue + if need_null: + for i in range(int(len(self.column_value_list)/2)): + index_num = random.randint(0, len(self.column_value_list)-1) + self.column_value_list[index_num] = None + self.column_value_list = [self.ts_value] + self.column_value_list + + def screate_table(self, dbname=None, tbname="tb", use_name="table", column_elm_list=None, + count=1, default_tbname_prefix="tb", default_tbname_index_start_num=1, + default_column_index_start_num=1, **kwargs): + tdLog.info("creating table ...") + if dbname is not None: + self.dbname = dbname + self.default_tbname_prefix = default_tbname_prefix + self.default_tbname_index_start_num = default_tbname_index_start_num + self.default_column_index_start_num = default_column_index_start_num + tb_params = "" + if len(kwargs) > 0: + for param, value in kwargs.items(): + tb_params += f'{param} "{value}" ' + self.sgen_column_type_str(column_elm_list) + if self.dbname is not None: + self.tb_name = f'{self.dbname}.{tbname}' + else: + self.tb_name = tbname + if int(count) <= 1: + create_table_sql = f'create {use_name} {self.tb_name} ({self.column_type_str}) {tb_params};' + tdSql.execute(create_table_sql) + else: + for _ in range(count): + create_table_sql = f'create {use_name} {self.dbname}.{default_tbname_prefix}{default_tbname_index_start_num} ({self.column_type_str}) {tb_params};' + default_tbname_index_start_num += 1 + tdSql.execute(create_table_sql) + + def sinsert_rows(self, dbname=None, tbname=None, column_ele_list=None, ts_value=None, count=1, need_null=False): + tdLog.info("stream inserting ...") + if dbname is not None: + self.dbname = dbname + if tbname is not None: + self.tb_name = f'{self.dbname}.{tbname}' + else: + if tbname is not None: + self.tb_name = tbname + + self.sgen_column_value_list(column_ele_list, need_null, ts_value) + # column_value_str = ", ".join(str(v) for v in self.column_value_list) + column_value_str = "" + for column_value in self.column_value_list: + if column_value is None: + column_value_str += 'Null, ' + elif isinstance(column_value, str) and "+" not in column_value and "-" not in column_value: + column_value_str += f'"{column_value}", ' + else: + column_value_str += f'{column_value}, ' + column_value_str = column_value_str.rstrip()[:-1] + if int(count) <= 1: + insert_sql = f'insert into 
{self.tb_name} values ({column_value_str});' + tdSql.execute(insert_sql) + else: + for num in range(count): + ts_value = self.genTs()[0] + self.sgen_column_value_list(column_ele_list, need_null, f'{ts_value}+{num}s') + column_value_str = "" + for column_value in self.column_value_list: + if column_value is None: + column_value_str += 'Null, ' + elif isinstance(column_value, str) and "+" not in column_value: + column_value_str += f'"{column_value}", ' + else: + column_value_str += f'{column_value}, ' + column_value_str = column_value_str.rstrip()[:-1] + insert_sql = f'insert into {self.tb_name} values ({column_value_str});' + print(insert_sql) + tdSql.execute(insert_sql) + + def sdelete_rows(self, dbname=None, tbname=None, start_ts=None, end_ts=None, ts_key=None): + if dbname is not None: + self.dbname = dbname + if tbname is not None: + self.tb_name = f'{self.dbname}.{tbname}' + else: + if tbname is not None: + self.tb_name = tbname + if ts_key is None: + ts_col_name = self.default_colts_name + else: + ts_col_name = ts_key + + base_del_sql = f'delete from {self.tb_name} ' + if end_ts is not None: + if ":" in start_ts and "-" in start_ts: + start_ts = f"{start_ts}" + if ":" in end_ts and "-" in end_ts: + end_ts = f"{end_ts}" + base_del_sql += f'where {ts_col_name} between {start_ts} and {end_ts};' + else: + if start_ts is not None: + if ":" in start_ts and "-" in start_ts: + start_ts = f"{start_ts}" + base_del_sql += f'where {ts_col_name} = {start_ts};' + tdSql.execute(base_del_sql) + + + def check_stream_field_type(self, sql, input_function): + tdSql.query(sql) + res = tdSql.queryResult + if input_function in ["acos", "asin", "atan", "cos", "log", "pow", "sin", "sqrt", "tan"]: + tdSql.checkEqual(res[1][1], "DOUBLE") + tdSql.checkEqual(res[2][1], "DOUBLE") + elif input_function in ["lower", "ltrim", "rtrim", "upper"]: + tdSql.checkEqual(res[1][1], "VARCHAR") + tdSql.checkEqual(res[2][1], "VARCHAR") + tdSql.checkEqual(res[3][1], "NCHAR") + elif input_function in ["char_length", "length"]: + tdSql.checkEqual(res[1][1], "BIGINT") + tdSql.checkEqual(res[2][1], "BIGINT") + tdSql.checkEqual(res[3][1], "BIGINT") + elif input_function in ["concat", "concat_ws"]: + tdSql.checkEqual(res[1][1], "VARCHAR") + tdSql.checkEqual(res[2][1], "NCHAR") + tdSql.checkEqual(res[3][1], "NCHAR") + tdSql.checkEqual(res[4][1], "NCHAR") + elif input_function in ["substr"]: + tdSql.checkEqual(res[1][1], "VARCHAR") + tdSql.checkEqual(res[2][1], "VARCHAR") + tdSql.checkEqual(res[3][1], "VARCHAR") + tdSql.checkEqual(res[4][1], "NCHAR") + else: + tdSql.checkEqual(res[1][1], "INT") + tdSql.checkEqual(res[2][1], "DOUBLE") + + def round_handle(self, input_list): + tdLog.info("round rows ...") + final_list = list() + for i in input_list: + tmpl = list() + for j in i: + if type(j) != datetime and type(j) != str: + tmpl.append(round(j, 1)) + else: + tmpl.append(j) + final_list.append(tmpl) + return final_list + + def float_handle(self, input_list): + tdLog.info("float rows ...") + final_list = list() + for i in input_list: + tmpl = list() + for j_i,j_v in enumerate(i): + if type(j_v) != datetime and j_v is not None and str(j_v).isdigit() and j_i <= 12: + tmpl.append(float(j_v)) + else: + tmpl.append(j_v) + final_list.append(tuple(tmpl)) + return final_list + + def cast_query_data(self, query_data): + tdLog.info("cast query data ...") + col_type_list = self.column_type_str.split(',') + tag_type_list = self.tag_type_str.split(',') + col_tag_type_list = col_type_list + tag_type_list + nl = list() + for query_data_t in query_data: + 
query_data_l = list(query_data_t) + for i,v in enumerate(query_data_l): + if v is not None: + if " ".join(col_tag_type_list[i].strip().split(" ")[1:]) == "nchar(256)": + tdSql.query(f'select cast("{v}" as binary(256))') + else: + tdSql.query(f'select cast("{v}" as {" ".join(col_tag_type_list[i].strip().split(" ")[1:])})') + query_data_l[i] = tdSql.queryResult[0][0] + else: + query_data_l[i] = v + nl.append(tuple(query_data_l)) + return nl + + def check_query_data(self, sql1, sql2, sorted=False, fill_value=None, tag_value_list=None, defined_tag_count=None, partition=True, use_exist_stb=False, subtable=None, reverse_check=False): + tdLog.info("checking query data ...") + if tag_value_list: + dvalue = len(self.tag_type_str.split(',')) - defined_tag_count + tdSql.query(sql1) + res1 = tdSql.queryResult + tdSql.query(sql2) + res2 = self.cast_query_data(tdSql.queryResult) if tag_value_list or use_exist_stb else tdSql.queryResult + new_list = list() + + if tag_value_list: + res1 = self.float_handle(res1) + res2 = self.float_handle(res2) + for i,v in enumerate(res2): + if i < len(tag_value_list): + if partition: + new_list.append(tuple(list(v)[:-(dvalue+defined_tag_count)] + list(tag_value_list[i]) + [None]*dvalue)) + else: + new_list.append(tuple(list(v)[:-(dvalue+defined_tag_count)] + [None]*len(self.tag_type_str.split(',')))) + res2 = new_list + else: + if use_exist_stb: + res1 = self.float_handle(res1) + res2 = self.float_handle(res2) + for i,v in enumerate(res2): + new_list.append(tuple(list(v)[:-(13)] + [None]*len(self.tag_type_str.split(',')))) + res2 = new_list + + latency = 0 + if sorted: + res1.sort() + res2.sort() + if fill_value == "LINEAR": + res1 = self.round_handle(res1) + res2 = self.round_handle(res2) + if not reverse_check: + while res1 != res2: + tdLog.info("query retrying ...") + new_list = list() + tdSql.query(sql1) + res1 = tdSql.queryResult + tdSql.query(sql2) + # res2 = tdSql.queryResult + res2 = self.cast_query_data(tdSql.queryResult) if tag_value_list or use_exist_stb else tdSql.queryResult + + if tag_value_list: + res1 = self.float_handle(res1) + res2 = self.float_handle(res2) + for i,v in enumerate(res2): + if i < len(tag_value_list): + if partition: + new_list.append(tuple(list(v)[:-(dvalue+defined_tag_count)] + list(tag_value_list[i]) + [None]*dvalue)) + else: + new_list.append(tuple(list(v)[:-(dvalue+defined_tag_count)] + [None]*len(self.tag_type_str.split(',')))) + res2 = new_list + else: + if use_exist_stb: + res1 = self.float_handle(res1) + res2 = self.float_handle(res2) + for i,v in enumerate(res2): + new_list.append(tuple(list(v)[:-(13)] + [None]*len(self.tag_type_str.split(',')))) + res2 = new_list + if sorted or tag_value_list: + res1.sort() + res2.sort() + if fill_value == "LINEAR": + res1 = self.round_handle(res1) + res2 = self.round_handle(res2) + if latency < self.stream_timeout: + latency += 0.2 + time.sleep(0.2) + else: + if latency == 0: + return False + tdSql.checkEqual(res1, res2) + # tdSql.checkEqual(res1, res2) if not reverse_check else tdSql.checkNotEqual(res1, res2) + else: + while res1 == res2: + tdLog.info("query retrying ...") + new_list = list() + tdSql.query(sql1) + res1 = tdSql.queryResult + tdSql.query(sql2) + # res2 = tdSql.queryResult + res2 = self.cast_query_data(tdSql.queryResult) if tag_value_list or use_exist_stb else tdSql.queryResult + + if tag_value_list: + res1 = self.float_handle(res1) + res2 = self.float_handle(res2) + for i,v in enumerate(res2): + if i < len(tag_value_list): + if partition: + 
new_list.append(tuple(list(v)[:-(dvalue+defined_tag_count)] + list(tag_value_list[i]) + [None]*dvalue)) + else: + new_list.append(tuple(list(v)[:-(dvalue+defined_tag_count)] + [None]*len(self.tag_type_str.split(',')))) + res2 = new_list + else: + if use_exist_stb: + res1 = self.float_handle(res1) + res2 = self.float_handle(res2) + for i,v in enumerate(res2): + new_list.append(tuple(list(v)[:-(13)] + [None]*len(self.tag_type_str.split(',')))) + res2 = new_list + if sorted or tag_value_list: + res1.sort() + res2.sort() + if fill_value == "LINEAR": + res1 = self.round_handle(res1) + res2 = self.round_handle(res2) + if latency < self.stream_timeout: + latency += 0.2 + time.sleep(0.2) + else: + if latency == 0: + return False + tdSql.checkNotEqual(res1, res2) + # tdSql.checkEqual(res1, res2) if not reverse_check else tdSql.checkNotEqual(res1, res2) + + + def prepare_data(self, interval=None, watermark=None, session=None, state_window=None, state_window_max=127, interation=3, range_count=None, precision="ms", fill_history_value=0, ext_stb=None): + self.clean_env() + self.dataDict = { + "stb_name" : f"{self.case_name}_stb", + "ctb_name" : f"{self.case_name}_ct1", + "tb_name" : f"{self.case_name}_tb1", + "ext_stb_name" : f"ext_{self.case_name}_stb", + "ext_ctb_name" : f"ext_{self.case_name}_ct1", + "ext_tb_name" : f"ext_{self.case_name}_tb1", + "interval" : interval, + "watermark": watermark, + "session": session, + "state_window": state_window, + "state_window_max": state_window_max, + "iteration": interation, + "range_count": range_count, + "start_ts": 1655903478508, + } + if range_count is not None: + self.range_count = range_count + if precision is not None: + self.precision = precision + self.set_precision_offset(self.precision) + + self.stb_name = self.dataDict["stb_name"] + self.ctb_name = self.dataDict["ctb_name"] + self.tb_name = self.dataDict["tb_name"] + self.ext_stb_name = self.dataDict["ext_stb_name"] + self.ext_ctb_name = self.dataDict["ext_ctb_name"] + self.ext_tb_name = self.dataDict["ext_tb_name"] + self.stb_stream_des_table = f'{self.stb_name}{self.des_table_suffix}' + self.ctb_stream_des_table = f'{self.ctb_name}{self.des_table_suffix}' + self.tb_stream_des_table = f'{self.tb_name}{self.des_table_suffix}' + self.ext_stb_stream_des_table = f'{self.ext_stb_name}{self.des_table_suffix}' + self.ext_ctb_stream_des_table = f'{self.ext_ctb_name}{self.des_table_suffix}' + self.ext_tb_stream_des_table = f'{self.ext_tb_name}{self.des_table_suffix}' + self.date_time = self.genTs(precision=self.precision)[0] + + self.screateDb(dbname=self.dbname, precision=self.precision) + self.screate_stable(dbname=self.dbname, stbname=self.stb_name) + self.screate_ctable(dbname=self.dbname, stbname=self.stb_name, ctbname=self.ctb_name) + self.screate_table(dbname=self.dbname, tbname=self.tb_name) + if fill_history_value == 1: + for i in range(self.range_count): + ts_value = str(self.date_time)+f'-{self.default_interval*(i+1)}s' + self.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value) + self.sinsert_rows(tbname=self.tb_name, ts_value=ts_value) + if i == 1: + self.record_history_ts = ts_value + if ext_stb: + self.screate_stable(dbname=self.dbname, stbname=self.ext_stb_stream_des_table) + self.screate_ctable(dbname=self.dbname, stbname=self.ext_stb_stream_des_table, ctbname=self.ext_ctb_stream_des_table) + self.screate_table(dbname=self.dbname, tbname=self.ext_tb_stream_des_table) tdCom = TDCom() diff --git a/tests/system-test/8-stream/interval.py b/tests/system-test/8-stream/interval.py new file mode 
100644 index 0000000000..92cd28c44d --- /dev/null +++ b/tests/system-test/8-stream/interval.py @@ -0,0 +1,240 @@ +import sys +import time +import threading +from taos.tmq import Consumer +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.common import * + +class TDTestCase: + updatecfgDict = {'debugFlag': 135, 'asynclog': 0} + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self.tdCom = tdCom + self.subtable = True + self.partition_tbname_alias = "ptn_alias" if self.subtable else "" + self.partition_col_alias = "pcol_alias" if self.subtable else "" + self.partition_tag_alias = "ptag_alias" if self.subtable else "" + self.partition_expression_alias = "pexp_alias" if self.subtable else "" + self.stb_name = str() + self.ctb_name = str() + self.tb_name = str() + self.des_table_suffix = "_output" + self.stream_suffix = "_stream" + self.subtable_prefix = "prefix_" if self.subtable else "" + self.subtable_suffix = "_suffix" if self.subtable else "" + self.stb_stream_des_table = str() + self.ctb_stream_des_table = str() + self.tb_stream_des_table = str() + self.downsampling_function_list = ["min(c1)", "max(c2)", "sum(c3)", "first(c4)", "last(c5)", "apercentile(c6, 50)", "avg(c7)", "count(c8)", "spread(c1)", + "stddev(c2)", "hyperloglog(c11)", "timediff(1, 0, 1h)", "timezone()", "to_iso8601(1)", 'to_unixtimestamp("1970-01-01T08:00:00+08:00")', "min(t1)", "max(t2)", "sum(t3)", + "first(t4)", "last(t5)", "apercentile(t6, 50)", "avg(t7)", "count(t8)", "spread(t1)", "stddev(t2)", "hyperloglog(t11)"] + self.stb_output_select_str = ','.join(list(map(lambda x:f'`{x}`', self.downsampling_function_list))) + self.stb_source_select_str = ','.join(self.downsampling_function_list) + self.tb_source_select_str = ','.join(self.downsampling_function_list[0:15]) + + def at_once_interval(self, interval, partition="tbname", delete=False, fill_value=None, fill_history_value=None, case_when=None): + tdLog.info(f"testing stream at_once+interval: interval: {interval}, partition: {partition}, fill_history: {fill_history_value}") + self.delete = delete + self.tdCom.case_name = sys._getframe().f_code.co_name + self.tdCom.prepare_data(interval=interval, fill_history_value=fill_history_value) + self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "") + self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "") + self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "") + self.stb_stream_des_table = f'{self.stb_name}{self.des_table_suffix}' + self.ctb_stream_des_table = f'{self.ctb_name}{self.des_table_suffix}' + self.tb_stream_des_table = f'{self.tb_name}{self.des_table_suffix}' + self.tb_output_select_str = ','.join(list(map(lambda x:f'`{x}`', self.downsampling_function_list[0:15]))) + + if partition == "tbname": + if case_when: + stream_case_when_partition = case_when + else: + stream_case_when_partition = self.partition_tbname_alias + + partition_elm_alias = self.partition_tbname_alias + elif partition == "c1": + if case_when: + stream_case_when_partition = case_when + else: + stream_case_when_partition = self.partition_col_alias + partition_elm_alias = self.partition_col_alias + elif partition == "abs(c1)": + partition_elm_alias = self.partition_expression_alias + elif partition is None: + partition_elm_alias = '"no_partition"' + else: + partition_elm_alias = self.partition_tag_alias + if partition == 
"tbname" or partition is None: + if case_when: + stb_subtable_value = f'concat(concat("{self.stb_name}_{self.subtable_prefix}", {stream_case_when_partition}), "{self.subtable_suffix}")' if self.subtable else None + ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.subtable_prefix}", {stream_case_when_partition}), "{self.subtable_suffix}")' if self.subtable else None + tb_subtable_value = f'concat(concat("{self.tb_name}_{self.subtable_prefix}", {stream_case_when_partition}), "{self.subtable_suffix}")' if self.subtable else None + else: + stb_subtable_value = f'concat(concat("{self.stb_name}_{self.subtable_prefix}", {partition_elm_alias}), "{self.subtable_suffix}")' if self.subtable else None + ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.subtable_prefix}", {partition_elm_alias}), "{self.subtable_suffix}")' if self.subtable else None + tb_subtable_value = f'concat(concat("{self.tb_name}_{self.subtable_prefix}", {partition_elm_alias}), "{self.subtable_suffix}")' if self.subtable else None + else: + stb_subtable_value = f'concat(concat("{self.stb_name}_{self.subtable_prefix}", cast(cast(abs(cast({partition_elm_alias} as int)) as bigint) as varchar(100))), "{self.subtable_suffix}")' if self.subtable else None + ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.subtable_prefix}", cast(cast(abs(cast({partition_elm_alias} as int)) as bigint) as varchar(100))), "{self.subtable_suffix}")' if self.subtable else None + tb_subtable_value = f'concat(concat("{self.tb_name}_{self.subtable_prefix}", cast(cast(abs(cast({partition_elm_alias} as int)) as bigint) as varchar(100))), "{self.subtable_suffix}")' if self.subtable else None + if partition: + partition_elm = f'partition by {partition} {partition_elm_alias}' + else: + partition_elm = "" + if fill_value: + if "value" in fill_value.lower(): + fill_value='VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11' + self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.stream_suffix}', des_table=self.stb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.stb_source_select_str} from {self.stb_name} {partition_elm} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="at_once", subtable_value=stb_subtable_value, fill_value=fill_value, fill_history_value=fill_history_value) + self.tdCom.create_stream(stream_name=f'{self.ctb_name}{self.stream_suffix}', des_table=self.ctb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.stb_source_select_str} from {self.ctb_name} {partition_elm} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="at_once", subtable_value=ctb_subtable_value, fill_value=fill_value, fill_history_value=fill_history_value) + if fill_value: + if "value" in fill_value.lower(): + fill_value='VALUE,1,2,3,4,5,6,7,8,9,10,11' + self.tdCom.create_stream(stream_name=f'{self.tb_name}{self.stream_suffix}', des_table=self.tb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tb_source_select_str} from {self.tb_name} {partition_elm} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="at_once", subtable_value=tb_subtable_value, fill_value=fill_value, fill_history_value=fill_history_value) + start_time = self.tdCom.date_time + for i in range(self.tdCom.range_count): + ts_value = str(self.tdCom.date_time+self.tdCom.dataDict["interval"])+f'+{i*10}s' + ts_cast_delete_value = self.tdCom.time_cast(ts_value) + self.tdCom.sinsert_rows(tbname=self.tdCom.ctb_name, ts_value=ts_value) + if i%2 == 0: + self.tdCom.sinsert_rows(tbname=self.tdCom.ctb_name, ts_value=ts_value) + if 
self.delete and i%2 != 0: + self.tdCom.sdelete_rows(tbname=self.tdCom.ctb_name, start_ts=ts_cast_delete_value) + self.tdCom.date_time += 1 + self.tdCom.sinsert_rows(tbname=self.tdCom.tb_name, ts_value=ts_value) + if i%2 == 0: + self.tdCom.sinsert_rows(tbname=self.tdCom.tb_name, ts_value=ts_value) + if self.delete and i%2 != 0: + self.tdCom.sdelete_rows(tbname=self.tdCom.tb_name, start_ts=ts_cast_delete_value) + self.tdCom.date_time += 1 + if partition: + partition_elm = f'partition by {partition}' + else: + partition_elm = "" + + if not fill_value: + for tbname in [self.stb_name, self.ctb_name, self.tb_name]: + if tbname != self.tb_name: + self.tdCom.check_query_data(f'select wstart, {self.stb_output_select_str} from {tbname}{self.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.stb_source_select_str} from {tbname} {partition_elm} interval({self.tdCom.dataDict["interval"]}s) order by wstart', sorted=True) + else: + self.tdCom.check_query_data(f'select wstart, {self.tb_output_select_str} from {tbname}{self.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tb_source_select_str} from {tbname} {partition_elm} interval({self.tdCom.dataDict["interval"]}s) order by wstart', sorted=True) + + if self.subtable: + for tname in [self.stb_name, self.ctb_name]: + tdSql.query(f'select * from {self.ctb_name}') + ptn_counter = 0 + for c1_value in tdSql.queryResult: + if partition == "c1": + tdSql.query(f'select count(*) from `{tname}_{self.subtable_prefix}{abs(c1_value[1])}{self.subtable_suffix}`;') + elif partition is None: + tdSql.query(f'select count(*) from `{tname}_{self.subtable_prefix}no_partition{self.subtable_suffix}`;') + elif partition == "abs(c1)": + abs_c1_value = abs(c1_value[1]) + tdSql.query(f'select count(*) from `{tname}_{self.subtable_prefix}{abs_c1_value}{self.subtable_suffix}`;') + elif partition == "tbname" and ptn_counter == 0: + tdSql.query(f'select count(*) from `{tname}_{self.subtable_prefix}{self.ctb_name}{self.subtable_suffix}`;') + ptn_counter += 1 + tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True) + + tdSql.query(f'select * from {self.tb_name}') + ptn_counter = 0 + for c1_value in tdSql.queryResult: + if partition == "c1": + tdSql.query(f'select count(*) from `{self.tb_name}_{self.subtable_prefix}{abs(c1_value[1])}{self.subtable_suffix}`;') + elif partition is None: + tdSql.query(f'select count(*) from `{self.tb_name}_{self.subtable_prefix}no_partition{self.subtable_suffix}`;') + elif partition == "abs(c1)": + abs_c1_value = abs(c1_value[1]) + tdSql.query(f'select count(*) from `{self.tb_name}_{self.subtable_prefix}{abs_c1_value}{self.subtable_suffix}`;') + elif partition == "tbname" and ptn_counter == 0: + tdSql.query(f'select count(*) from `{self.tb_name}_{self.subtable_prefix}{self.tb_name}{self.subtable_suffix}`;') + ptn_counter += 1 + + tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True) + if fill_value: + end_date_time = self.tdCom.date_time + final_range_count = self.tdCom.range_count + history_ts = str(start_time)+f'-{self.tdCom.dataDict["interval"]*(final_range_count+2)}s' + start_ts = self.tdCom.time_cast(history_ts, "-") + future_ts = str(end_date_time)+f'+{self.tdCom.dataDict["interval"]*(final_range_count+2)}s' + end_ts = self.tdCom.time_cast(future_ts) + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=history_ts) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=history_ts) + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=future_ts) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=future_ts) 
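+            # The rows written at history_ts / future_ts land (range_count + 2) intervals
+            # before and after the originally inserted data, so the fill() comparisons
+            # further below also have to cover windows outside that original range.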
+ self.tdCom.date_time = start_time + # update + history_ts = str(start_time)+f'-{self.tdCom.dataDict["interval"]*(final_range_count+2)}s' + start_ts = self.tdCom.time_cast(history_ts, "-") + future_ts = str(end_date_time)+f'+{self.tdCom.dataDict["interval"]*(final_range_count+2)}s' + end_ts = self.tdCom.time_cast(future_ts) + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=history_ts) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=history_ts) + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=future_ts) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=future_ts) + self.tdCom.date_time = start_time + for i in range(self.tdCom.range_count): + ts_value = str(self.tdCom.date_time+self.tdCom.dataDict["interval"])+f'+{i*10}s' + ts_cast_delete_value = self.tdCom.time_cast(ts_value) + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value) + self.tdCom.date_time += 1 + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value) + self.tdCom.date_time += 1 + if self.delete: + self.tdCom.sdelete_rows(tbname=self.ctb_name, start_ts=self.tdCom.time_cast(start_time), end_ts=ts_cast_delete_value) + self.tdCom.sdelete_rows(tbname=self.tb_name, start_ts=self.tdCom.time_cast(start_time), end_ts=ts_cast_delete_value) + for tbname in [self.stb_name, self.ctb_name, self.tb_name]: + if tbname != self.tb_name: + if "value" in fill_value.lower(): + fill_value='VALUE,1,2,3,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11' + if partition == "tbname": + self.tdCom.check_query_data(f'select wstart, {self.fill_stb_output_select_str} from {tbname}{self.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.fill_stb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart', fill_value=fill_value) + else: + self.tdCom.check_query_data(f'select wstart, {self.fill_stb_output_select_str} from {tbname}{self.des_table_suffix} where `min(c1)` is not Null order by wstart,`min(c1)`', f'select * from (select _wstart AS wstart, {self.fill_stb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart) where `min(c1)` is not Null order by wstart,`min(c1)`', fill_value=fill_value) + else: + if "value" in fill_value.lower(): + fill_value='VALUE,1,2,3,6,7,8,9,10,11' + if partition == "tbname": + self.tdCom.check_query_data(f'select wstart, {self.fill_tb_output_select_str} from {tbname}{self.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.fill_tb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart', fill_value=fill_value) + else: + self.tdCom.check_query_data(f'select wstart, {self.fill_tb_output_select_str} from {tbname}{self.des_table_suffix} where `min(c1)` is not Null order by wstart,`min(c1)`', f'select * from (select _wstart AS wstart, {self.fill_tb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart) where `min(c1)` is not Null order by wstart,`min(c1)`', fill_value=fill_value) + + if self.delete: + self.tdCom.sdelete_rows(tbname=self.ctb_name, start_ts=start_ts, end_ts=ts_cast_delete_value) + self.tdCom.sdelete_rows(tbname=self.tb_name, start_ts=start_ts, 
end_ts=ts_cast_delete_value) + for tbname in [self.stb_name, self.ctb_name, self.tb_name]: + if tbname != self.tb_name: + if "value" in fill_value.lower(): + fill_value='VALUE,1,2,3,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11' + if partition == "tbname": + self.tdCom.check_query_data(f'select wstart, {self.fill_stb_output_select_str} from {tbname}{self.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.fill_stb_source_select_str} from {tbname} where ts >= {start_ts.replace("-", "+")} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart', fill_value=fill_value) + else: + self.tdCom.check_query_data(f'select wstart, {self.fill_stb_output_select_str} from {tbname}{self.des_table_suffix} order by wstart,`min(c1)`', f'select * from (select _wstart AS wstart, {self.fill_stb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart) where `min(c1)` is not Null order by wstart,`min(c1)`', fill_value=fill_value) + + else: + if "value" in fill_value.lower(): + fill_value='VALUE,1,2,3,6,7,8,9,10,11' + if partition == "tbname": + self.tdCom.check_query_data(f'select wstart, {self.fill_tb_output_select_str} from {tbname}{self.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.fill_tb_source_select_str} from {tbname} where ts >= {start_ts.replace("-", "+")} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart', fill_value=fill_value) + else: + self.tdCom.check_query_data(f'select wstart, {self.fill_tb_output_select_str} from {tbname}{self.des_table_suffix} order by wstart,`min(c1)`', f'select * from (select _wstart AS wstart, {self.fill_tb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart) where `min(c1)` is not Null order by wstart,`min(c1)`', fill_value=fill_value) + + + def run(self): + self.at_once_interval(interval=random.randint(10, 15), partition="tbname", delete=True) + self.at_once_interval(interval=random.randint(10, 15), partition="c1", delete=True) + self.at_once_interval(interval=random.randint(10, 15), partition="abs(c1)", delete=True) + self.at_once_interval(interval=random.randint(10, 15), partition=None, delete=True) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/8-stream/scalar_function.py b/tests/system-test/8-stream/scalar_function.py new file mode 100644 index 0000000000..6583dcd8f6 --- /dev/null +++ b/tests/system-test/8-stream/scalar_function.py @@ -0,0 +1,180 @@ +import sys +import time +import threading +from taos.tmq import Consumer +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.common import * + +class TDTestCase: + updatecfgDict = {'debugFlag': 135, 'asynclog': 0} + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self.tdCom = tdCom + + def scalar_function(self, partition="tbname", fill_history_value=None): + tdLog.info(f"testing stream scalar funtion partition: 
{partition}, fill_history_value: {fill_history_value}") + self.tdCom.case_name = sys._getframe().f_code.co_name + tdLog.info("preparing data ...") + self.tdCom.prepare_data(fill_history_value=fill_history_value) + # return + tdSql.execute('create table if not exists scalar_stb (ts timestamp, c1 int, c2 double, c3 binary(20), c4 binary(20), c5 nchar(20)) tags (t1 int);') + tdSql.execute('create table scalar_ct1 using scalar_stb tags(10);') + tdSql.execute('create table if not exists scalar_tb (ts timestamp, c1 int, c2 double, c3 binary(20), c4 binary(20), c5 nchar(20));') + if fill_history_value is None: + fill_history = "" + else: + tdLog.info("inserting fill_history data ...") + fill_history = f'fill_history {fill_history_value}' + for i in range(self.tdCom.range_count): + tdSql.execute(f'insert into scalar_ct1 values ({self.tdCom.date_time}-{i}s, 100, -100.1, "hebei", Null, "Bigdata");') + tdSql.execute(f'insert into scalar_tb values ({self.tdCom.date_time}-{i}s, 100, -100.1, "heBei", Null, "Bigdata");') + + # self.tdCom.write_latency(self.case_name) + math_function_list = ["abs", "acos", "asin", "atan", "ceil", "cos", "floor", "log", "pow", "round", "sin", "sqrt", "tan"] + string_function_list = ["char_length", "concat", "concat_ws", "length", "lower", "ltrim", "rtrim", "substr", "upper"] + for math_function in math_function_list: + tdLog.info(f"testing function {math_function} ...") + tdLog.info(f"creating stream for function {math_function} ...") + if math_function in ["log", "pow"]: + tdSql.execute(f'create stream stb_{math_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{math_function}_stb as select ts, {math_function}(c1, 2), {math_function}(c2, 2), c3 from scalar_stb partition by {partition};') + tdSql.execute(f'create stream ctb_{math_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{math_function}_ctb as select ts, {math_function}(c1, 2), {math_function}(c2, 2), c3 from scalar_ct1;') + tdSql.execute(f'create stream tb_{math_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{math_function}_tb as select ts, {math_function}(c1, 2), {math_function}(c2, 2), c3 from scalar_tb;') + else: + tdSql.execute(f'create stream stb_{math_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{math_function}_stb as select ts, {math_function}(c1), {math_function}(c2), c3 from scalar_stb partition by {partition};') + tdSql.execute(f'create stream ctb_{math_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{math_function}_ctb as select ts, {math_function}(c1), {math_function}(c2), c3 from scalar_ct1;') + tdSql.execute(f'create stream tb_{math_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{math_function}_tb as select ts, {math_function}(c1), {math_function}(c2), c3 from scalar_tb;') + self.tdCom.check_stream_field_type(f"describe output_{math_function}_stb", math_function) + self.tdCom.check_stream_field_type(f"describe output_{math_function}_ctb", math_function) + self.tdCom.check_stream_field_type(f"describe output_{math_function}_tb", math_function) + for tbname in ["scalar_ct1", "scalar_tb"]: + tdLog.info(f"function {math_function}: inserting data for tb --- {tbname} ...") + tdSql.execute(f'insert into {tbname} values ({self.tdCom.date_time}, 100, 100.1, "beijing", "taos", "Taos");') + tdSql.execute(f'insert into {tbname} values 
({self.tdCom.date_time}+1s, -50, -50.1, "tianjin", "taosdata", "Taosdata");') + tdSql.execute(f'insert into {tbname} values ({self.tdCom.date_time}+2s, 0, Null, "hebei", "TDengine", Null);') + for i in range(self.tdCom.range_count): + tdSql.execute(f'insert into scalar_ct1 values ({self.tdCom.date_time}+{i}s, 100, -100.1, "hebei", Null, "Bigdata");') + tdSql.execute(f'insert into scalar_tb values ({self.tdCom.date_time}+{i}s, 100, -100.1, "heBei", Null, "Bigdata");') + if i%2 == 0: + tdLog.info(f"function {math_function}: update testing ...") + tdSql.execute(f'insert into scalar_ct1 values ({self.tdCom.date_time}+{i}s, 50, -50.1, Null, "heBei", "Bigdata1");') + tdSql.execute(f'insert into scalar_tb values ({self.tdCom.date_time}+{i}s, 50, -50.1, Null, "heBei", "Bigdata1");') + else: + tdLog.info(f"function {math_function}: delete testing ...") + dt = f'cast({self.tdCom.date_time-1} as timestamp)' + tdSql.execute(f'delete from scalar_ct1 where ts = {dt};') + tdSql.execute(f'delete from scalar_tb where ts = {dt};') + + if fill_history_value: + tdLog.info(f"function {math_function}: disorder testing ...") + tdSql.execute(f'insert into scalar_ct1 values ({self.tdCom.date_time}-{self.tdCom.range_count-1}s, 50, -50.1, Null, "heBei", "Bigdata1");') + tdSql.execute(f'insert into scalar_tb values ({self.tdCom.date_time}-{self.tdCom.range_count-1}s, 50, -50.1, Null, "heBei", "Bigdata1");') + dt = f'cast({self.tdCom.date_time-(self.tdCom.range_count-1)} as timestamp)' + tdSql.execute(f'delete from scalar_ct1 where ts = {dt};') + tdSql.execute(f'delete from scalar_tb where ts = {dt};') + if math_function == "log" or math_function == "pow": + tdLog.info(f"function {math_function}: confirming query result ...") + self.tdCom.check_query_data(f'select `{math_function}(c1, 2)`, `{math_function}(c2, 2)` from output_{math_function}_stb order by ts;', f'select {math_function}(c1, 2), {math_function}(c2, 2) from scalar_stb partition by {partition} order by ts;') + self.tdCom.check_query_data(f'select `{math_function}(c1, 2)`, `{math_function}(c2, 2)` from output_{math_function}_ctb;', f'select {math_function}(c1, 2), {math_function}(c2, 2) from scalar_ct1;') + self.tdCom.check_query_data(f'select `{math_function}(c1, 2)`, `{math_function}(c2, 2)` from output_{math_function}_tb;', f'select {math_function}(c1, 2), {math_function}(c2, 2) from scalar_tb;') + else: + tdLog.info(f"function {math_function}: confirming query result ...") + self.tdCom.check_query_data(f'select `{math_function}(c1)`, `{math_function}(c2)` from output_{math_function}_stb order by ts;', f'select {math_function}(c1), {math_function}(c2) from scalar_stb partition by {partition} order by ts;') + self.tdCom.check_query_data(f'select `{math_function}(c1)`, `{math_function}(c2)` from output_{math_function}_ctb;', f'select {math_function}(c1), {math_function}(c2) from scalar_ct1;') + self.tdCom.check_query_data(f'select `{math_function}(c1)`, `{math_function}(c2)` from output_{math_function}_tb;', f'select {math_function}(c1), {math_function}(c2) from scalar_tb;') + tdSql.execute(f'drop stream if exists stb_{math_function}_stream') + tdSql.execute(f'drop stream if exists ctb_{math_function}_stream') + tdSql.execute(f'drop stream if exists tb_{math_function}_stream') + + for string_function in string_function_list: + tdLog.info(f"testing function {string_function} ...") + tdLog.info(f"creating stream for function {string_function} ...") + if string_function == "concat": + tdSql.execute(f'create stream stb_{string_function}_stream trigger at_once 
ignore expired 0 ignore update 0 {fill_history} into output_{string_function}_stb as select ts, {string_function}(c3, c4), {string_function}(c3, c5), {string_function}(c4, c5), {string_function}(c3, c4, c5) from scalar_stb partition by {partition};') + tdSql.execute(f'create stream ctb_{string_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{string_function}_ctb as select ts, {string_function}(c3, c4), {string_function}(c3, c5), {string_function}(c4, c5), {string_function}(c3, c4, c5) from scalar_ct1;') + tdSql.execute(f'create stream tb_{string_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{string_function}_tb as select ts, {string_function}(c3, c4), {string_function}(c3, c5), {string_function}(c4, c5), {string_function}(c3, c4, c5) from scalar_tb;') + elif string_function == "concat_ws": + tdSql.execute(f'create stream stb_{string_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{string_function}_stb as select ts, {string_function}("aND", c3, c4), {string_function}("and", c3, c5), {string_function}("And", c4, c5), {string_function}("AND", c3, c4, c5) from scalar_stb partition by {partition};') + tdSql.execute(f'create stream ctb_{string_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{string_function}_ctb as select ts, {string_function}("aND", c3, c4), {string_function}("and", c3, c5), {string_function}("And", c4, c5), {string_function}("AND", c3, c4, c5) from scalar_ct1;') + tdSql.execute(f'create stream tb_{string_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{string_function}_tb as select ts, {string_function}("aND", c3, c4), {string_function}("and", c3, c5), {string_function}("And", c4, c5), {string_function}("AND", c3, c4, c5) from scalar_tb;') + elif string_function == "substr": + tdSql.execute(f'create stream stb_{string_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{string_function}_stb as select ts, {string_function}(c3, 2), {string_function}(c3, 2, 2), {string_function}(c4, 5, 1), {string_function}(c5, 3, 4) from scalar_stb partition by {partition};') + tdSql.execute(f'create stream ctb_{string_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{string_function}_ctb as select ts, {string_function}(c3, 2), {string_function}(c3, 2, 2), {string_function}(c4, 5, 1), {string_function}(c5, 3, 4) from scalar_ct1;') + tdSql.execute(f'create stream tb_{string_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{string_function}_tb as select ts, {string_function}(c3, 2), {string_function}(c3, 2, 2), {string_function}(c4, 5, 1), {string_function}(c5, 3, 4) from scalar_tb;') + else: + tdSql.execute(f'create stream stb_{string_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{string_function}_stb as select ts, {string_function}(c3), {string_function}(c4), {string_function}(c5) from scalar_stb partition by {partition};') + tdSql.execute(f'create stream ctb_{string_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{string_function}_ctb as select ts, {string_function}(c3), {string_function}(c4), {string_function}(c5) from scalar_ct1;') + tdSql.execute(f'create stream tb_{string_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} 
into output_{string_function}_tb as select ts, {string_function}(c3), {string_function}(c4), {string_function}(c5) from scalar_tb;') + self.tdCom.check_stream_field_type(f"describe output_{string_function}_stb", string_function) + self.tdCom.check_stream_field_type(f"describe output_{string_function}_ctb", string_function) + self.tdCom.check_stream_field_type(f"describe output_{string_function}_tb", string_function) + for tbname in ["scalar_ct1", "scalar_tb"]: + tdLog.info(f"function {string_function}: inserting data for tb --- {tbname} ...") + tdSql.execute(f'insert into {tbname} values ({self.tdCom.date_time}, 100, 100.1, "beijing", "taos", "Taos");') + tdSql.execute(f'insert into {tbname} values ({self.tdCom.date_time}+1s, -50, -50.1, "tianjin", "taosdata", "Taosdata");') + tdSql.execute(f'insert into {tbname} values ({self.tdCom.date_time}+2s, 0, Null, "hebei", "TDengine", Null);') + + + for i in range(self.tdCom.range_count): + tdSql.execute(f'insert into scalar_ct1 values ({self.tdCom.date_time}+{i}s, 100, -100.1, "hebei", Null, "Bigdata");') + tdSql.execute(f'insert into scalar_tb values ({self.tdCom.date_time}+{i}s, 100, -100.1, "heBei", Null, "Bigdata");') + if i%2 == 0: + tdLog.info(f"function {string_function}: update testing...") + tdSql.execute(f'insert into scalar_ct1 values ({self.tdCom.date_time}+{i}s, 50, -50.1, Null, "heBei", "Bigdata1");') + tdSql.execute(f'insert into scalar_tb values ({self.tdCom.date_time}+{i}s, 50, -50.1, Null, "heBei", "Bigdata1");') + else: + tdLog.info(f"function {string_function}: delete testing ...") + dt = f'cast({self.tdCom.date_time-1} as timestamp)' + tdSql.execute(f'delete from scalar_ct1 where ts = {dt};') + tdSql.execute(f'delete from scalar_tb where ts = {dt};') + + if fill_history_value: + tdLog.info(f"function {string_function}: disorder testing ...") + tdSql.execute(f'insert into scalar_ct1 values ({self.tdCom.date_time}-{self.tdCom.range_count-1}s, 50, -50.1, Null, "heBei", "Bigdata1");') + tdSql.execute(f'insert into scalar_tb values ({self.tdCom.date_time}-{self.tdCom.range_count-1}s, 50, -50.1, Null, "heBei", "Bigdata1");') + dt = f'cast({self.tdCom.date_time-(self.tdCom.range_count-1)} as timestamp)' + tdSql.execute(f'delete from scalar_ct1 where ts = {dt};') + tdSql.execute(f'delete from scalar_tb where ts = {dt};') + + + if string_function == "concat": + tdLog.info(f"function {string_function}: confirming query result ...") + self.tdCom.check_query_data(f'select `{string_function}(c3, c4)`, `{string_function}(c3, c5)`, `{string_function}(c4, c5)`, `{string_function}(c3, c4, c5)` from output_{string_function}_stb order by ts;', f'select {string_function}(c3, c4), {string_function}(c3, c5), {string_function}(c4, c5), {string_function}(c3, c4, c5) from scalar_stb order by ts;') + self.tdCom.check_query_data(f'select `{string_function}(c3, c4)`, `{string_function}(c3, c5)`, `{string_function}(c4, c5)`, `{string_function}(c3, c4, c5)` from output_{string_function}_ctb;', f'select {string_function}(c3, c4), {string_function}(c3, c5), {string_function}(c4, c5), {string_function}(c3, c4, c5) from scalar_ct1;') + self.tdCom.check_query_data(f'select `{string_function}(c3, c4)`, `{string_function}(c3, c5)`, `{string_function}(c4, c5)`, `{string_function}(c3, c4, c5)` from output_{string_function}_tb;', f'select {string_function}(c3, c4), {string_function}(c3, c5), {string_function}(c4, c5), {string_function}(c3, c4, c5) from scalar_tb;') + elif string_function == "concat_ws": + tdLog.info(f"function {string_function}: confirming query 
result ...") + self.tdCom.check_query_data(f'select `{string_function}("aND", c3, c4)`, `{string_function}("and", c3, c5)`, `{string_function}("And", c4, c5)`, `{string_function}("AND", c3, c4, c5)` from output_{string_function}_stb order by ts;', f'select {string_function}("aND", c3, c4), {string_function}("and", c3, c5), {string_function}("And", c4, c5), {string_function}("AND", c3, c4, c5) from scalar_stb order by ts;') + self.tdCom.check_query_data(f'select `{string_function}("aND", c3, c4)`, `{string_function}("and", c3, c5)`, `{string_function}("And", c4, c5)`, `{string_function}("AND", c3, c4, c5)` from output_{string_function}_ctb;', f'select {string_function}("aND", c3, c4), {string_function}("and", c3, c5), {string_function}("And", c4, c5), {string_function}("AND", c3, c4, c5) from scalar_ct1;') + self.tdCom.check_query_data(f'select `{string_function}("aND", c3, c4)`, `{string_function}("and", c3, c5)`, `{string_function}("And", c4, c5)`, `{string_function}("AND", c3, c4, c5)` from output_{string_function}_tb;', f'select {string_function}("aND", c3, c4), {string_function}("and", c3, c5), {string_function}("And", c4, c5), {string_function}("AND", c3, c4, c5) from scalar_tb;') + elif string_function == "substr": + tdLog.info(f"function {string_function}: confirming query result ...") + self.tdCom.check_query_data(f'select `{string_function}(c3, 2)`, `{string_function}(c3, 2, 2)`, `{string_function}(c4, 5, 1)`, `{string_function}(c5, 3, 4)` from output_{string_function}_stb order by ts;', f'select {string_function}(c3, 2), {string_function}(c3, 2, 2), {string_function}(c4, 5, 1), {string_function}(c5, 3, 4) from scalar_stb order by ts;') + self.tdCom.check_query_data(f'select `{string_function}(c3, 2)`, `{string_function}(c3, 2, 2)`, `{string_function}(c4, 5, 1)`, `{string_function}(c5, 3, 4)` from output_{string_function}_ctb;', f'select {string_function}(c3, 2), {string_function}(c3, 2, 2), {string_function}(c4, 5, 1), {string_function}(c5, 3, 4) from scalar_ct1;') + self.tdCom.check_query_data(f'select `{string_function}(c3, 2)`, `{string_function}(c3, 2, 2)`, `{string_function}(c4, 5, 1)`, `{string_function}(c5, 3, 4)` from output_{string_function}_tb;', f'select {string_function}(c3, 2), {string_function}(c3, 2, 2), {string_function}(c4, 5, 1), {string_function}(c5, 3, 4) from scalar_tb;') + else: + tdLog.info(f"function {string_function}: confirming query result ...") + self.tdCom.check_query_data(f'select `{string_function}(c3)`, `{string_function}(c4)`, `{string_function}(c5)` from output_{string_function}_stb order by ts;', f'select {string_function}(c3), {string_function}(c4), {string_function}(c5) from scalar_stb order by ts;') + self.tdCom.check_query_data(f'select `{string_function}(c3)`, `{string_function}(c4)`, `{string_function}(c5)` from output_{string_function}_ctb;', f'select {string_function}(c3), {string_function}(c4), {string_function}(c5) from scalar_ct1;') + self.tdCom.check_query_data(f'select `{string_function}(c3)`, `{string_function}(c4)`, `{string_function}(c5)` from output_{string_function}_tb;', f'select {string_function}(c3), {string_function}(c4), {string_function}(c5) from scalar_tb;') + + tdSql.execute(f'drop stream if exists stb_{string_function}_stream') + tdSql.execute(f'drop stream if exists ctb_{string_function}_stream') + tdSql.execute(f'drop stream if exists tb_{string_function}_stream') + + def run(self): + self.scalar_function(partition="tbname", fill_history_value=1) + self.scalar_function(partition="tbname,c1,t1", fill_history_value=1) 
+ + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file From d8659d7a374d847a77fd30bb5d4e6d9bcad9739d Mon Sep 17 00:00:00 2001 From: jiajingbin Date: Mon, 21 Aug 2023 18:13:38 +0800 Subject: [PATCH 02/16] test: update --- .../system-test/8-stream/at_once_interval.py | 219 +++++++++++++++ tests/system-test/8-stream/at_once_session.py | 223 +++++++++++++++ .../8-stream/at_once_state_window.py | 144 ++++++++++ tests/system-test/8-stream/scalar_function.py | 5 +- .../8-stream/window_close_interval.py | 256 ++++++++++++++++++ .../8-stream/window_close_session.py | 127 +++++++++ .../8-stream/window_close_state_window.py | 73 +++++ 7 files changed, 1043 insertions(+), 4 deletions(-) create mode 100644 tests/system-test/8-stream/at_once_interval.py create mode 100644 tests/system-test/8-stream/at_once_session.py create mode 100644 tests/system-test/8-stream/at_once_state_window.py create mode 100644 tests/system-test/8-stream/window_close_interval.py create mode 100644 tests/system-test/8-stream/window_close_session.py create mode 100644 tests/system-test/8-stream/window_close_state_window.py diff --git a/tests/system-test/8-stream/at_once_interval.py b/tests/system-test/8-stream/at_once_interval.py new file mode 100644 index 0000000000..7e082b9ef1 --- /dev/null +++ b/tests/system-test/8-stream/at_once_interval.py @@ -0,0 +1,219 @@ +import sys +import threading +from util.log import * +from util.sql import * +from util.cases import * +from util.common import * + +class TDTestCase: + updatecfgDict = {'debugFlag': 135, 'asynclog': 0} + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self.tdCom = tdCom + + def at_once_interval(self, interval, partition="tbname", delete=False, fill_value=None, fill_history_value=None, case_when=None): + tdLog.info(f"*** testing stream at_once+interval: interval: {interval}, partition: {partition}, fill_history: {fill_history_value}, fill: {fill_value}, delete: {delete}, case_when: {case_when} ***") + self.delete = delete + self.tdCom.case_name = sys._getframe().f_code.co_name + self.tdCom.prepare_data(interval=interval, fill_history_value=fill_history_value) + self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "") + self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "") + self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "") + self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}' + self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}' + self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}' + if partition == "tbname": + if case_when: + stream_case_when_partition = case_when + else: + stream_case_when_partition = self.tdCom.partition_tbname_alias + + partition_elm_alias = self.tdCom.partition_tbname_alias + elif partition == "c1": + if case_when: + stream_case_when_partition = case_when + else: + stream_case_when_partition = self.tdCom.partition_col_alias + partition_elm_alias = self.tdCom.partition_col_alias + elif partition == "abs(c1)": + partition_elm_alias = self.tdCom.partition_expression_alias + elif partition is None: + partition_elm_alias = '"no_partition"' + else: + partition_elm_alias = self.tdCom.partition_tag_alias + if partition == "tbname" or 
partition is None: + if case_when: + stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", {stream_case_when_partition}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", {stream_case_when_partition}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", {stream_case_when_partition}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + else: + stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + else: + stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", cast(cast(abs(cast({partition_elm_alias} as int)) as bigint) as varchar(100))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", cast(cast(abs(cast({partition_elm_alias} as int)) as bigint) as varchar(100))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", cast(cast(abs(cast({partition_elm_alias} as int)) as bigint) as varchar(100))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + if partition: + partition_elm = f'partition by {partition} {partition_elm_alias}' + else: + partition_elm = "" + if fill_value: + if "value" in fill_value.lower(): + fill_value='VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11' + self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.stb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} {partition_elm} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="at_once", subtable_value=stb_subtable_value, fill_value=fill_value, fill_history_value=fill_history_value) + self.tdCom.create_stream(stream_name=f'{self.ctb_name}{self.tdCom.stream_suffix}', des_table=self.ctb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.ctb_name} {partition_elm} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="at_once", subtable_value=ctb_subtable_value, fill_value=fill_value, fill_history_value=fill_history_value) + if fill_value: + if "value" in fill_value.lower(): + fill_value='VALUE,1,2,3,4,5,6,7,8,9,10,11' + self.tdCom.create_stream(stream_name=f'{self.tb_name}{self.tdCom.stream_suffix}', des_table=self.tb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {self.tb_name} {partition_elm} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="at_once", subtable_value=tb_subtable_value, fill_value=fill_value, fill_history_value=fill_history_value) + start_time = self.tdCom.date_time + for i in range(self.tdCom.range_count): + ts_value = str(self.tdCom.date_time+self.tdCom.dataDict["interval"])+f'+{i*10}s' + ts_cast_delete_value = 
self.tdCom.time_cast(ts_value) + self.tdCom.sinsert_rows(tbname=self.tdCom.ctb_name, ts_value=ts_value) + if i%2 == 0: + self.tdCom.sinsert_rows(tbname=self.tdCom.ctb_name, ts_value=ts_value) + if self.delete and i%2 != 0: + self.tdCom.sdelete_rows(tbname=self.tdCom.ctb_name, start_ts=ts_cast_delete_value) + self.tdCom.date_time += 1 + self.tdCom.sinsert_rows(tbname=self.tdCom.tb_name, ts_value=ts_value) + if i%2 == 0: + self.tdCom.sinsert_rows(tbname=self.tdCom.tb_name, ts_value=ts_value) + if self.delete and i%2 != 0: + self.tdCom.sdelete_rows(tbname=self.tdCom.tb_name, start_ts=ts_cast_delete_value) + self.tdCom.date_time += 1 + if partition: + partition_elm = f'partition by {partition}' + else: + partition_elm = "" + + if not fill_value: + for tbname in [self.stb_name, self.ctb_name, self.tb_name]: + if tbname != self.tb_name: + self.tdCom.check_query_data(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {tbname} {partition_elm} interval({self.tdCom.dataDict["interval"]}s) order by wstart', sorted=True) + else: + self.tdCom.check_query_data(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {tbname} {partition_elm} interval({self.tdCom.dataDict["interval"]}s) order by wstart', sorted=True) + + if self.tdCom.subtable: + for tname in [self.stb_name, self.ctb_name]: + tdSql.query(f'select * from {self.ctb_name}') + ptn_counter = 0 + for c1_value in tdSql.queryResult: + if partition == "c1": + tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{abs(c1_value[1])}{self.tdCom.subtable_suffix}`;') + elif partition is None: + tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}no_partition{self.tdCom.subtable_suffix}`;') + elif partition == "abs(c1)": + abs_c1_value = abs(c1_value[1]) + tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;') + elif partition == "tbname" and ptn_counter == 0: + tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}`;') + ptn_counter += 1 + tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True) + + tdSql.query(f'select * from {self.tb_name}') + ptn_counter = 0 + for c1_value in tdSql.queryResult: + if partition == "c1": + tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{abs(c1_value[1])}{self.tdCom.subtable_suffix}`;') + elif partition is None: + tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}no_partition{self.tdCom.subtable_suffix}`;') + elif partition == "abs(c1)": + abs_c1_value = abs(c1_value[1]) + tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;') + elif partition == "tbname" and ptn_counter == 0: + tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{self.tb_name}{self.tdCom.subtable_suffix}`;') + ptn_counter += 1 + + tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True) + if fill_value: + end_date_time = self.tdCom.date_time + final_range_count = self.tdCom.range_count + history_ts = str(start_time)+f'-{self.tdCom.dataDict["interval"]*(final_range_count+2)}s' + start_ts = self.tdCom.time_cast(history_ts, "-") + future_ts = 
str(end_date_time)+f'+{self.tdCom.dataDict["interval"]*(final_range_count+2)}s' + end_ts = self.tdCom.time_cast(future_ts) + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=history_ts) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=history_ts) + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=future_ts) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=future_ts) + self.tdCom.date_time = start_time + # update + history_ts = str(start_time)+f'-{self.tdCom.dataDict["interval"]*(final_range_count+2)}s' + start_ts = self.tdCom.time_cast(history_ts, "-") + future_ts = str(end_date_time)+f'+{self.tdCom.dataDict["interval"]*(final_range_count+2)}s' + end_ts = self.tdCom.time_cast(future_ts) + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=history_ts) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=history_ts) + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=future_ts) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=future_ts) + self.tdCom.date_time = start_time + for i in range(self.tdCom.range_count): + ts_value = str(self.tdCom.date_time+self.tdCom.dataDict["interval"])+f'+{i*10}s' + ts_cast_delete_value = self.tdCom.time_cast(ts_value) + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value) + self.tdCom.date_time += 1 + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value) + self.tdCom.date_time += 1 + if self.delete: + self.tdCom.sdelete_rows(tbname=self.ctb_name, start_ts=self.tdCom.time_cast(start_time), end_ts=ts_cast_delete_value) + self.tdCom.sdelete_rows(tbname=self.tb_name, start_ts=self.tdCom.time_cast(start_time), end_ts=ts_cast_delete_value) + for tbname in [self.stb_name, self.ctb_name, self.tb_name]: + if tbname != self.tb_name: + if "value" in fill_value.lower(): + fill_value='VALUE,1,2,3,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11' + if partition == "tbname": + self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.fill_stb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart', fill_value=fill_value) + else: + self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} where `min(c1)` is not Null order by wstart,`min(c1)`', f'select * from (select _wstart AS wstart, {self.tdCom.fill_stb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart) where `min(c1)` is not Null order by wstart,`min(c1)`', fill_value=fill_value) + else: + if "value" in fill_value.lower(): + fill_value='VALUE,1,2,3,6,7,8,9,10,11' + if partition == "tbname": + self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.fill_tb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart', fill_value=fill_value) + else: + self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} where `min(c1)` is not Null order by wstart,`min(c1)`', f'select * from (select _wstart AS wstart, 
{self.tdCom.fill_tb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart) where `min(c1)` is not Null order by wstart,`min(c1)`', fill_value=fill_value) + + if self.delete: + self.tdCom.sdelete_rows(tbname=self.ctb_name, start_ts=start_ts, end_ts=ts_cast_delete_value) + self.tdCom.sdelete_rows(tbname=self.tb_name, start_ts=start_ts, end_ts=ts_cast_delete_value) + for tbname in [self.stb_name, self.ctb_name, self.tb_name]: + if tbname != self.tb_name: + if "value" in fill_value.lower(): + fill_value='VALUE,1,2,3,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11' + if partition == "tbname": + self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.fill_stb_source_select_str} from {tbname} where ts >= {start_ts.replace("-", "+")} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart', fill_value=fill_value) + else: + self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart,`min(c1)`', f'select * from (select _wstart AS wstart, {self.tdCom.fill_stb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart) where `min(c1)` is not Null order by wstart,`min(c1)`', fill_value=fill_value) + + else: + if "value" in fill_value.lower(): + fill_value='VALUE,1,2,3,6,7,8,9,10,11' + if partition == "tbname": + self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.fill_tb_source_select_str} from {tbname} where ts >= {start_ts.replace("-", "+")} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart', fill_value=fill_value) + else: + self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart,`min(c1)`', f'select * from (select _wstart AS wstart, {self.tdCom.fill_tb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart) where `min(c1)` is not Null order by wstart,`min(c1)`', fill_value=fill_value) + + + def run(self): + self.at_once_interval(interval=random.randint(10, 15), partition="tbname", delete=True) + self.at_once_interval(interval=random.randint(10, 15), partition="c1", delete=True) + self.at_once_interval(interval=random.randint(10, 15), partition="abs(c1)", delete=True) + self.at_once_interval(interval=random.randint(10, 15), partition=None, delete=True) + self.at_once_interval(interval=random.randint(10, 15), partition=self.tdCom.stream_case_when_tbname, case_when=f'case when {self.tdCom.stream_case_when_tbname} = tbname then {self.tdCom.partition_tbname_alias} else tbname end') + self.at_once_interval(interval=random.randint(10, 15), partition="tbname", fill_history_value=1, fill_value="NULL") + for fill_value in ["NULL", "PREV", "NEXT", "LINEAR", "VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11"]: + self.at_once_interval(interval=random.randint(10, 15), 
partition="tbname", fill_value=fill_value) + self.at_once_interval(interval=random.randint(10, 15), partition="tbname", fill_value=fill_value, delete=True) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/8-stream/at_once_session.py b/tests/system-test/8-stream/at_once_session.py new file mode 100644 index 0000000000..9a253a187f --- /dev/null +++ b/tests/system-test/8-stream/at_once_session.py @@ -0,0 +1,223 @@ +import sys +import threading +from util.log import * +from util.sql import * +from util.cases import * +from util.common import * + +class TDTestCase: + updatecfgDict = {'debugFlag': 135, 'asynclog': 0} + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self.tdCom = tdCom + + def at_once_session(self, session, ignore_expired=None, ignore_update=None, partition="tbname", delete=False, fill_history_value=None, case_when=None, subtable=True): + tdLog.info(f"*** testing stream at_once+interval: session: {session}, ignore_expired: {ignore_expired}, ignore_update: {ignore_update}, partition: {partition}, delete: {delete}, fill_history: {fill_history_value}, case_when: {case_when}, subtable: {subtable} ***") + self.delete = delete + self.tdCom.case_name = sys._getframe().f_code.co_name + self.tdCom.prepare_data(session=session, fill_history_value=fill_history_value) + self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "") + self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "") + self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "") + self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}' + self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}' + self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}' + if partition == "tbname": + if case_when: + stream_case_when_partition = case_when + else: + stream_case_when_partition = self.tdCom.partition_tbname_alias + partition_elm_alias = self.tdCom.partition_tbname_alias + elif partition == "c1": + partition_elm_alias = self.tdCom.partition_col_alias + elif partition == "abs(c1)": + if subtable: + partition_elm_alias = self.tdCom.partition_expression_alias + else: + partition_elm_alias = "constant" + else: + partition_elm_alias = self.tdCom.partition_tag_alias + if partition == "tbname" or subtable is None: + if case_when: + ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", {stream_case_when_partition}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", {stream_case_when_partition}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + else: + if subtable: + ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + else: + ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", "{partition_elm_alias}"), "{self.tdCom.subtable_suffix}")' if 
self.tdCom.subtable else None + tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", "{partition_elm_alias}"), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + else: + if 'abs' in partition: + ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", cast(cast(abs(cast({partition_elm_alias} as int)) as bigint) as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", cast(cast(abs(cast({partition_elm_alias} as int)) as bigint) as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + + else: + ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", cast(cast({partition_elm_alias} as bigint) as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", cast(cast({partition_elm_alias} as bigint) as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + + + # create stb/ctb/tb stream + self.tdCom.create_stream(stream_name=f'{self.ctb_name}{self.tdCom.stream_suffix}', des_table=self.ctb_stream_des_table, source_sql=f'select _wstart AS wstart, _wend AS wend, {self.tdCom.stb_source_select_str} from {self.ctb_name} partition by {partition} {partition_elm_alias} session(ts, {self.tdCom.dataDict["session"]}s)', trigger_mode="at_once", ignore_expired=ignore_expired, ignore_update=ignore_update, subtable_value=ctb_subtable_value, fill_history_value=fill_history_value) + self.tdCom.create_stream(stream_name=f'{self.tb_name}{self.tdCom.stream_suffix}', des_table=self.tb_stream_des_table, source_sql=f'select _wstart AS wstart, _wend AS wend, {self.tdCom.tb_source_select_str} from {self.tb_name} partition by {partition} {partition_elm_alias} session(ts, {self.tdCom.dataDict["session"]}s)', trigger_mode="at_once", ignore_expired=ignore_expired, ignore_update=ignore_update, subtable_value=tb_subtable_value, fill_history_value=fill_history_value) + for i in range(self.tdCom.range_count): + ctb_name = self.tdCom.get_long_name() + self.tdCom.screate_ctable(stbname=self.stb_name, ctbname=ctb_name) + + if i == 0: + window_close_ts = self.tdCom.cal_watermark_window_close_session_endts(self.tdCom.date_time, session=session) + else: + self.tdCom.date_time = window_close_ts + 1 + window_close_ts = self.tdCom.cal_watermark_window_close_session_endts(self.tdCom.date_time, session=session) + if i == 0: + record_window_close_ts = window_close_ts + for ts_value in [self.tdCom.date_time, window_close_ts]: + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value, need_null=True) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value, need_null=True) + if self.tdCom.update and i%2 == 0: + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value, need_null=True) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value, need_null=True) + if self.delete and i%2 != 0: + dt = f'cast({self.tdCom.date_time-1} as timestamp)' + self.tdCom.sdelete_rows(tbname=self.ctb_name, start_ts=dt) + self.tdCom.sdelete_rows(tbname=self.tb_name, start_ts=dt) + ts_value += 1 + + # check result + if partition != "tbname": + for colname in self.tdCom.partition_by_downsampling_function_list: + if "first" not in colname and "last" not in colname: + self.tdCom.check_query_data(f'select wstart, wend-{self.tdCom.dataDict["session"]}s, 
{self.tdCom.tb_output_select_str} from {self.ctb_stream_des_table} order by wstart, `min(c1)`,`max(c2)`,`sum(c3)`;', f'select _wstart AS wstart, _wend AS wend, {self.tdCom.tb_source_select_str} from {self.ctb_name} partition by {partition} session(ts, {self.tdCom.dataDict["session"]}s) order by wstart, `min(c1)`,`max(c2)`,`sum(c3)`;', sorted=True) + self.tdCom.check_query_data(f'select wstart, wend-{self.tdCom.dataDict["session"]}s, {self.tdCom.tb_output_select_str} from {self.tb_stream_des_table} order by wstart, `min(c1)`,`max(c2)`,`sum(c3)`;', f'select _wstart AS wstart, _wend AS wend, {self.tdCom.tb_source_select_str} from {self.tb_name} partition by {partition} session(ts, {self.tdCom.dataDict["session"]}s) order by wstart, `min(c1)`,`max(c2)`,`sum(c3)`;') + else: + for tbname in [self.tb_name]: + if tbname != self.tb_name: + self.tdCom.check_query_data(f'select wstart, wend-{self.tdCom.dataDict["session"]}s, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, _wend AS wend, {self.tdCom.stb_source_select_str} from {tbname} partition by {partition} session(ts, {self.tdCom.dataDict["session"]}s)', sorted=True) + else: + self.tdCom.check_query_data(f'select wstart, wend-{self.tdCom.dataDict["session"]}s, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, _wend AS wend, {self.tdCom.tb_source_select_str} from {tbname} partition by {partition} session(ts, {self.tdCom.dataDict["session"]}s)', sorted=True) + + if self.tdCom.disorder: + if ignore_expired: + for tbname in [self.ctb_name, self.tb_name]: + if tbname != self.tb_name: + tdSql.query(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}') + res2 = tdSql.queryResult + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=str(self.tdCom.date_time)+f'-{self.tdCom.default_interval*(self.tdCom.range_count+session)}s') + tdSql.query(f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {tbname} session(ts, {self.tdCom.dataDict["session"]}s)') + res1 = tdSql.queryResult + tdSql.checkNotEqual(res1, res2) + tdSql.query(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}') + res1 = tdSql.queryResult + tdSql.checkEqual(res1, res2) + else: + tdSql.query(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}') + res2 = tdSql.queryResult + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=str(self.tdCom.date_time)+f'-{self.tdCom.default_interval*(self.tdCom.range_count+session)}s') + tdSql.query(f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {tbname} session(ts, {self.tdCom.dataDict["session"]}s)') + res1 = tdSql.queryResult + tdSql.checkNotEqual(res1, res2) + tdSql.query(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}') + res1 = tdSql.queryResult + tdSql.checkEqual(res1, res2) + else: + if ignore_update: + for tbname in [self.ctb_name, self.tb_name]: + if tbname != self.tb_name: + tdSql.query(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}') + res2 = tdSql.queryResult + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=record_window_close_ts) + tdSql.query(f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {tbname} session(ts, {self.tdCom.dataDict["session"]}s)') + res1 = tdSql.queryResult + tdSql.checkNotEqual(res1, res2) + else: + tdSql.query(f'select wstart, 
{self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}') + res2 = tdSql.queryResult + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=record_window_close_ts) + tdSql.query(f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {tbname} session(ts, {self.tdCom.dataDict["session"]}s)') + res1 = tdSql.queryResult + tdSql.checkNotEqual(res1, res2) + else: + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=record_window_close_ts) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=record_window_close_ts) + if partition != "tbname": + for colname in self.tdCom.partition_by_downsampling_function_list: + if "first" not in colname and "last" not in colname: + self.tdCom.check_query_data(f'select wstart, {self.tdCom.tb_output_select_str} from {self.ctb_stream_des_table} order by wstart, `min(c1)`,`max(c2)`,`sum(c3)`;', f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {self.ctb_name} partition by {partition} session(ts, {self.tdCom.dataDict["session"]}s) order by wstart, `min(c1)`,`max(c2)`,`sum(c3)`;', sorted=True) + self.tdCom.check_query_data(f'select wstart, {self.tdCom.tb_output_select_str} from {self.tb_stream_des_table} order by wstart, `min(c1)`,`max(c2)`,`sum(c3)`;', f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {self.tb_name} partition by {partition} session(ts, {self.tdCom.dataDict["session"]}s) order by wstart, `min(c1)`,`max(c2)`,`sum(c3)`;') + else: + for tbname in [self.tb_name]: + if tbname != self.tb_name: + self.tdCom.check_query_data(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {tbname} partition by {partition} session(ts, {self.tdCom.dataDict["session"]}s)', sorted=True) + else: + self.tdCom.check_query_data(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {tbname} partition by {partition} session(ts, {self.tdCom.dataDict["session"]}s)', sorted=True) + + if fill_history_value: + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=self.tdCom.record_history_ts) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=self.tdCom.record_history_ts) + if self.delete: + self.tdCom.sdelete_rows(tbname=self.ctb_name, start_ts=self.tdCom.time_cast(self.tdCom.record_history_ts, "-")) + self.tdCom.sdelete_rows(tbname=self.tb_name, start_ts=self.tdCom.time_cast(self.tdCom.record_history_ts, "-")) + + if self.tdCom.subtable: + tdSql.query(f'select * from {self.ctb_name}') + ptn_counter = 0 + for c1_value in tdSql.queryResult: + if c1_value[1] is not None: + if partition == "c1": + tdSql.query(f'select count(*) from `{self.ctb_name}_{self.tdCom.subtable_prefix}{c1_value[1]}{self.tdCom.subtable_suffix}`;') + elif partition == "abs(c1)": + if subtable: + abs_c1_value = abs(c1_value[1]) + tdSql.query(f'select count(*) from `{self.ctb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;') + else: + tdSql.query(f'select count(*) from `{self.ctb_name}_{self.tdCom.subtable_prefix}{partition_elm_alias}{self.tdCom.subtable_suffix}`;') + elif partition == "tbname" and ptn_counter == 0: + tdSql.query(f'select count(*) from `{self.ctb_name}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}`;') + ptn_counter += 1 + tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True) + + tdSql.query(f'select * from {self.tb_name}') + ptn_counter = 0 + for 
c1_value in tdSql.queryResult: + if c1_value[1] is not None: + if partition == "c1": + tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{c1_value[1]}{self.tdCom.subtable_suffix}`;') + elif partition == "abs(c1)": + if subtable: + abs_c1_value = abs(c1_value[1]) + tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;') + else: + tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{partition_elm_alias}{self.tdCom.subtable_suffix}`;') + elif partition == "tbname" and ptn_counter == 0: + tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{self.tb_name}{self.tdCom.subtable_suffix}`;') + ptn_counter += 1 + + tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True) + + + + def run(self): + self.at_once_session(session=random.randint(10, 15), partition=self.tdCom.stream_case_when_tbname, delete=True, case_when=f'case when {self.tdCom.stream_case_when_tbname} = tbname then {self.tdCom.partition_tbname_alias} else tbname end') + for subtable in [None, True]: + self.at_once_session(session=random.randint(10, 15), subtable=subtable, partition="abs(c1)") + for ignore_expired in [None, 0, 1]: + for fill_history_value in [None, 1]: + self.at_once_session(session=random.randint(10, 15), ignore_expired=ignore_expired, fill_history_value=fill_history_value) + for fill_history_value in [None, 1]: + self.at_once_session(session=random.randint(10, 15), partition="tbname", delete=True, fill_history_value=fill_history_value) + self.at_once_session(session=random.randint(10, 15), partition="c1", delete=True, fill_history_value=fill_history_value) + self.at_once_session(session=random.randint(10, 15), partition="abs(c1)", delete=True, fill_history_value=fill_history_value) + self.at_once_session(session=random.randint(10, 15), partition="abs(c1)", delete=True, subtable=None, fill_history_value=fill_history_value) + self.at_once_session(session=random.randint(10, 15), ignore_update=1, fill_history_value=fill_history_value) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/8-stream/at_once_state_window.py b/tests/system-test/8-stream/at_once_state_window.py new file mode 100644 index 0000000000..fa9f4ddd78 --- /dev/null +++ b/tests/system-test/8-stream/at_once_state_window.py @@ -0,0 +1,144 @@ +import sys +import threading +from util.log import * +from util.sql import * +from util.cases import * +from util.common import * + +class TDTestCase: + updatecfgDict = {'debugFlag': 135, 'asynclog': 0} + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self.tdCom = tdCom + + def at_once_state_window(self, state_window, partition="tbname", delete=False, fill_history_value=None, case_when=None, subtable=True): + tdLog.info(f"*** testing stream at_once+interval: state_window: {state_window}, partition: {partition}, fill_history: {fill_history_value}, case_when: {case_when}***, delete: {delete}") + self.delete = delete + self.tdCom.case_name = sys._getframe().f_code.co_name + self.tdCom.prepare_data(state_window=state_window, fill_history_value=fill_history_value) + self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "") + self.ctb_name = 
self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "") + self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "") + self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}' + self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}' + + if partition == "tbname": + partition_elm_alias = self.tdCom.partition_tbname_alias + elif partition == "c1" and subtable is not None: + partition_elm_alias = self.tdCom.partition_col_alias + elif partition == "c1" and subtable is None: + partition_elm_alias = 'constant' + elif partition == "abs(c1)": + partition_elm_alias = self.tdCom.partition_expression_alias + else: + partition_elm_alias = self.tdCom.partition_tag_alias + if partition == "tbname" or subtable is None: + if partition == "tbname": + ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + else: + ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", "{partition_elm_alias}"), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", "{partition_elm_alias}"), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + else: + if 'abs' in partition: + ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", cast(cast(abs(cast({partition_elm_alias} as int)) as bigint) as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", cast(cast(abs(cast({partition_elm_alias} as int)) as bigint) as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + + else: + ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", cast(cast({partition_elm_alias} as bigint) as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", cast(cast({partition_elm_alias} as bigint) as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + + state_window_col_name = self.tdCom.dataDict["state_window"] + if case_when: + stream_state_window = case_when + else: + stream_state_window = state_window_col_name + self.tdCom.create_stream(stream_name=f'{self.ctb_name}{self.tdCom.stream_suffix}', des_table=self.ctb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.ctb_name} partition by {partition} {partition_elm_alias} state_window({stream_state_window})', trigger_mode="at_once", subtable_value=ctb_subtable_value, fill_history_value=fill_history_value) + self.tdCom.create_stream(stream_name=f'{self.tb_name}{self.tdCom.stream_suffix}', des_table=self.tb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {self.tb_name} partition by {partition} {partition_elm_alias} state_window({stream_state_window})', trigger_mode="at_once", subtable_value=tb_subtable_value, fill_history_value=fill_history_value) + range_times = self.tdCom.range_count + state_window_max = self.tdCom.dataDict['state_window_max'] + for i in range(range_times): + state_window_value 
= random.randint(int((i)*state_window_max/range_times), int((i+1)*state_window_max/range_times)) + for i in range(2, range_times+3): + tdSql.execute(f'insert into {self.ctb_name} (ts, {state_window_col_name}) values ({self.tdCom.date_time}, {state_window_value})') + if self.tdCom.update and i%2 == 0: + tdSql.execute(f'insert into {self.ctb_name} (ts, {state_window_col_name}) values ({self.tdCom.date_time}, {state_window_value})') + if self.delete and i%2 != 0: + dt = f'cast({self.tdCom.date_time-1} as timestamp)' + tdSql.execute(f'delete from {self.ctb_name} where ts = {dt}') + tdSql.execute(f'insert into {self.tb_name} (ts, {state_window_col_name}) values ({self.tdCom.date_time}, {state_window_value})') + if self.tdCom.update and i%2 == 0: + tdSql.execute(f'insert into {self.tb_name} (ts, {state_window_col_name}) values ({self.tdCom.date_time}, {state_window_value})') + if self.delete and i%2 != 0: + tdSql.execute(f'delete from {self.tb_name} where ts = {dt}') + self.tdCom.date_time += 1 + + # for tbname in [self.stb_name, self.ctb_name, self.tb_name]: + for tbname in [self.ctb_name, self.tb_name]: + if tbname != self.tb_name: + self.tdCom.check_query_data(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {tbname} partition by {partition} state_window({state_window_col_name}) order by wstart,{state_window}', sorted=True) + else: + self.tdCom.check_query_data(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {tbname} partition by {partition} state_window({state_window_col_name}) order by wstart,{state_window}', sorted=True) + + if fill_history_value: + self.tdCom.update_delete_history_data(self.delete) + + if self.tdCom.subtable: + tdSql.query(f'select * from {self.ctb_name}') + ptn_counter = 0 + for c1_value in tdSql.queryResult: + if partition == "c1": + if subtable: + tdSql.query(f'select count(*) from `{self.ctb_name}_{self.tdCom.subtable_prefix}{c1_value[1]}{self.tdCom.subtable_suffix}`;') + else: + tdSql.query(f'select count(*) from `{self.ctb_name}_{self.tdCom.subtable_prefix}{partition_elm_alias}{self.tdCom.subtable_suffix}`;') + return + elif partition == "abs(c1)": + abs_c1_value = abs(c1_value[1]) + tdSql.query(f'select count(*) from `{self.ctb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;') + elif partition == "tbname" and ptn_counter == 0: + tdSql.query(f'select count(*) from `{self.ctb_name}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}`;') + ptn_counter += 1 + tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True) + + tdSql.query(f'select * from {self.tb_name}') + ptn_counter = 0 + for c1_value in tdSql.queryResult: + if partition == "c1": + if subtable: + tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{c1_value[1]}{self.tdCom.subtable_suffix}`;') + else: + tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{partition_elm_alias}{self.tdCom.subtable_suffix}`;') + return + elif partition == "abs(c1)": + abs_c1_value = abs(c1_value[1]) + tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;') + elif partition == "tbname" and ptn_counter == 0: + tdSql.query(f'select count(*) from 
`{self.tb_name}_{self.tdCom.subtable_prefix}{self.tb_name}{self.tdCom.subtable_suffix}`;') + ptn_counter += 1 + + tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True) + + + def run(self): + self.at_once_state_window(state_window="c2", partition="tbname", case_when="case when c1 < 0 then c1 else c2 end") + self.at_once_state_window(state_window="c1", partition="tbname", case_when="case when c1 >= 0 then c1 else c2 end") + for fill_history_value in [None, 1]: + self.at_once_state_window(state_window="c1", partition="tbname", fill_history_value=fill_history_value) + self.at_once_state_window(state_window="c1", partition="c1", fill_history_value=fill_history_value) + self.at_once_state_window(state_window="c1", partition="abs(c1)", fill_history_value=fill_history_value) + self.at_once_state_window(state_window="c1", partition="tbname", delete=True, fill_history_value=fill_history_value) + self.at_once_state_window(state_window="c1", partition="c1", delete=True, fill_history_value=fill_history_value) + self.at_once_state_window(state_window="c1", partition="abs(c1)", delete=True, fill_history_value=fill_history_value) + self.at_once_state_window(state_window="c1", partition="c1", subtable=None, fill_history_value=fill_history_value) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/8-stream/scalar_function.py b/tests/system-test/8-stream/scalar_function.py index 6583dcd8f6..56537e2f54 100644 --- a/tests/system-test/8-stream/scalar_function.py +++ b/tests/system-test/8-stream/scalar_function.py @@ -1,11 +1,8 @@ import sys -import time import threading -from taos.tmq import Consumer from util.log import * from util.sql import * from util.cases import * -from util.dnodes import * from util.common import * class TDTestCase: @@ -17,7 +14,7 @@ class TDTestCase: self.tdCom = tdCom def scalar_function(self, partition="tbname", fill_history_value=None): - tdLog.info(f"testing stream scalar funtion partition: {partition}, fill_history_value: {fill_history_value}") + tdLog.info(f"*** testing stream scalar funtion partition: {partition}, fill_history_value: {fill_history_value} ***") self.tdCom.case_name = sys._getframe().f_code.co_name tdLog.info("preparing data ...") self.tdCom.prepare_data(fill_history_value=fill_history_value) diff --git a/tests/system-test/8-stream/window_close_interval.py b/tests/system-test/8-stream/window_close_interval.py new file mode 100644 index 0000000000..31a566a0f8 --- /dev/null +++ b/tests/system-test/8-stream/window_close_interval.py @@ -0,0 +1,256 @@ +import sys +import threading +from util.log import * +from util.sql import * +from util.cases import * +from util.common import * + +class TDTestCase: + updatecfgDict = {'debugFlag': 135, 'asynclog': 0} + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self.tdCom = tdCom + + def window_close_interval(self, interval, watermark=None, ignore_expired=None, partition="tbname", fill_value=None, delete=False): + tdLog.info(f"*** testing stream window_close+interval: interval: {interval}, watermark: {watermark}, ignore_expired: {ignore_expired}, partition: {partition}, fill: {fill_value}, delete: {delete} ***") + self.delete = delete + self.tdCom.case_name = sys._getframe().f_code.co_name + if watermark is 
not None: + self.tdCom.case_name = "watermark" + sys._getframe().f_code.co_name + self.tdCom.prepare_data(interval=interval, watermark=watermark) + self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "") + self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "") + self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "") + self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}' + self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}' + self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}' + + if partition == "tbname": + partition_elm_alias = self.tdCom.partition_tbname_alias + elif partition == "c1": + partition_elm_alias = self.tdCom.partition_col_alias + elif partition == "abs(c1)": + partition_elm_alias = self.tdCom.partition_expression_alias + else: + partition_elm_alias = self.tdCom.partition_tag_alias + if partition == "tbname": + stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + else: + stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", cast({partition_elm_alias} as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", cast({partition_elm_alias} as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", cast({partition_elm_alias} as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + + if watermark is not None: + watermark_value = f'{self.tdCom.dataDict["watermark"]}s' + else: + watermark_value = None + # create stb/ctb/tb stream + if fill_value: + if "value" in fill_value.lower(): + fill_value='VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11' + self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.stb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} partition by {partition} {partition_elm_alias} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="window_close", watermark=watermark_value, ignore_expired=ignore_expired, subtable_value=stb_subtable_value, fill_value=fill_value) + self.tdCom.create_stream(stream_name=f'{self.ctb_name}{self.tdCom.stream_suffix}', des_table=self.ctb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.ctb_name} partition by {partition} {partition_elm_alias} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="window_close", watermark=watermark_value, ignore_expired=ignore_expired, subtable_value=ctb_subtable_value, fill_value=fill_value) + if fill_value: + if "value" in fill_value.lower(): + fill_value='VALUE,1,2,3,4,5,6,7,8,9,10,11' + self.tdCom.create_stream(stream_name=f'{self.tb_name}{self.tdCom.stream_suffix}', des_table=self.tb_stream_des_table, source_sql=f'select _wstart AS wstart, 
{self.tdCom.tb_source_select_str} from {self.tb_name} partition by {partition} {partition_elm_alias} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="window_close", watermark=watermark_value, ignore_expired=ignore_expired, subtable_value=tb_subtable_value, fill_value=fill_value) + + start_time = self.tdCom.date_time + for i in range(self.tdCom.range_count): + if i == 0: + if watermark is not None: + window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(self.tdCom.date_time, self.tdCom.dataDict['interval'], self.tdCom.dataDict['watermark']) + else: + window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(self.tdCom.date_time, self.tdCom.dataDict['interval']) + else: + self.tdCom.date_time = window_close_ts + self.tdCom.offset + window_close_ts += self.tdCom.dataDict['interval']*self.tdCom.offset + if i == 0: + record_window_close_ts = window_close_ts + for num in range(int(window_close_ts/self.tdCom.offset-self.tdCom.date_time/self.tdCom.offset)): + ts_value=self.tdCom.date_time+num*self.tdCom.offset + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value) + if self.tdCom.update and i%2 == 0: + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value) + + if self.delete and i%2 != 0: + dt = f'cast({ts_value-num*self.tdCom.offset} as timestamp)' + self.tdCom.sdelete_rows(tbname=self.ctb_name, start_ts=dt) + self.tdCom.sdelete_rows(tbname=self.tb_name, start_ts=dt) + if not fill_value: + for tbname in [self.stb_stream_des_table, self.ctb_stream_des_table, self.tb_stream_des_table]: + if tbname != self.tb_stream_des_table: + tdSql.query(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}') + else: + tdSql.query(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}') + tdSql.checkEqual(tdSql.queryRows, i) + + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts-1) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts-1) + if self.tdCom.update and i%2 == 0: + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts-1) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts-1) + if not fill_value: + for tbname in [self.stb_stream_des_table, self.ctb_stream_des_table, self.tb_stream_des_table]: + if tbname != self.tb_stream_des_table: + tdSql.query(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}') + else: + tdSql.query(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}') + + tdSql.checkEqual(tdSql.queryRows, i) + + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts) + if self.tdCom.update and i%2 == 0: + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts) + + if not fill_value: + for tbname in [self.stb_name, self.ctb_name, self.tb_name]: + if tbname != self.tb_name: + self.tdCom.check_stream(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {tbname} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) order by wstart limit {i+1}', i+1) + else: + self.tdCom.check_stream(f'select wstart, {self.tdCom.tb_output_select_str} from 
{tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {tbname} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) order by wstart limit {i+1}', i+1) + if self.tdCom.disorder and not fill_value: + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=record_window_close_ts) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=record_window_close_ts) + if ignore_expired: + for tbname in [self.stb_name, self.ctb_name, self.tb_name]: + if tbname != self.tb_name: + tdSql.query(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}') + res1 = tdSql.queryResult + tdSql.query(f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {tbname} interval({self.tdCom.dataDict["interval"]}s) limit {i+1}') + res2 = tdSql.queryResult + tdSql.checkNotEqual(res1, res2) + else: + tdSql.query(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}') + res1 = tdSql.queryResult + tdSql.query(f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {tbname} interval({self.tdCom.dataDict["interval"]}s) limit {i+1}') + res2 = tdSql.queryResult + tdSql.checkNotEqual(res1, res2) + else: + for tbname in [self.stb_name, self.ctb_name, self.tb_name]: + if tbname != self.tb_name: + self.tdCom.check_stream(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {tbname} interval({self.tdCom.dataDict["interval"]}s) limit {i+1}', i+1) + else: + self.tdCom.check_stream(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {tbname} interval({self.tdCom.dataDict["interval"]}s) limit {i+1}', i+1) + if self.tdCom.subtable: + tdSql.query(f'select * from {self.ctb_name}') + for tname in [self.stb_name, self.ctb_name]: + ptn_counter = 0 + for c1_value in tdSql.queryResult: + if partition == "c1": + tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{c1_value[1]}{self.tdCom.subtable_suffix}`;', count_expected_res=self.tdCom.range_count) + elif partition == "abs(c1)": + abs_c1_value = abs(c1_value[1]) + tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;', count_expected_res=self.tdCom.range_count) + elif partition == "tbname" and ptn_counter == 0: + tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}`;', count_expected_res=self.tdCom.range_count) + ptn_counter += 1 + + tdSql.checkEqual(tdSql.queryResult[0][0] , self.tdCom.range_count) + tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True) + + tdSql.query(f'select * from {self.tb_name}') + ptn_counter = 0 + for c1_value in tdSql.queryResult: + if partition == "c1": + tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{c1_value[1]}{self.tdCom.subtable_suffix}`;') + elif partition == "abs(c1)": + abs_c1_value = abs(c1_value[1]) + tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;') + elif partition == "tbname" and ptn_counter == 0: + tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{self.tb_name}{self.tdCom.subtable_suffix}`;') + ptn_counter += 1 + + tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True) + + if 
fill_value: + history_ts = str(start_time)+f'-{self.tdCom.dataDict["interval"]*(self.tdCom.range_count+2)}s' + start_ts = self.tdCom.time_cast(history_ts, "-") + future_ts = str(self.tdCom.date_time)+f'+{self.tdCom.dataDict["interval"]*(self.tdCom.range_count+2)}s' + end_ts = self.tdCom.time_cast(future_ts) + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=history_ts) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=history_ts) + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=future_ts) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=future_ts) + future_ts_bigint = self.tdCom.str_ts_trans_bigint(future_ts) + if watermark is not None: + window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(future_ts_bigint, self.tdCom.dataDict['interval'], self.tdCom.dataDict['watermark']) + else: + window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(future_ts_bigint, self.tdCom.dataDict['interval']) + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts) + + + if self.tdCom.update: + for i in range(self.tdCom.range_count): + if i == 0: + if watermark is not None: + window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(self.tdCom.date_time, self.tdCom.dataDict['interval'], self.tdCom.dataDict['watermark']) + else: + window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(self.tdCom.date_time, self.tdCom.dataDict['interval']) + else: + self.tdCom.date_time = window_close_ts + self.tdCom.offset + window_close_ts += self.tdCom.dataDict['interval']*self.tdCom.offset + if i == 0: + record_window_close_ts = window_close_ts + for num in range(int(window_close_ts/self.tdCom.offset-self.tdCom.date_time/self.tdCom.offset)): + ts_value=self.tdCom.date_time+num*self.tdCom.offset + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value) + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts-1) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts-1) + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts) + if self.delete: + self.tdCom.sdelete_rows(tbname=self.ctb_name, start_ts=self.tdCom.time_cast(start_time), end_ts=self.tdCom.time_cast(window_close_ts)) + self.tdCom.sdelete_rows(tbname=self.tb_name, start_ts=self.tdCom.time_cast(start_time), end_ts=self.tdCom.time_cast(window_close_ts)) + self.tdCom.date_time = start_time + for tbname in [self.stb_name, self.ctb_name, self.tb_name]: + if tbname != self.tb_name: + if "value" in fill_value.lower(): + fill_value='VALUE,1,2,3,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11' + if (fill_value == "NULL" or fill_value == "NEXT" or fill_value == "LINEAR") and self.delete: + self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select * from (select _wstart AS wstart, {self.tdCom.fill_stb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart) where `min(c1)` is not Null', fill_value=fill_value) + else: + if self.delete and (fill_value == "PREV" or "value" in fill_value.lower()): + additional_options = f"where ts >= {start_ts}-1s and ts <= {start_ts}" + else: + additional_options = 
f"where ts >= {start_ts} and ts <= {end_ts}" + self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.fill_stb_source_select_str} from {tbname} {additional_options} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart', fill_value=fill_value) + else: + if "value" in fill_value.lower(): + fill_value='VALUE,1,2,3,6,7,8,9,10,11' + if (fill_value == "NULL" or fill_value == "NEXT" or fill_value == "LINEAR") and self.delete: + self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select * from (select _wstart AS wstart, {self.tdCom.fill_tb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart) where `min(c1)` is not Null', fill_value=fill_value) + else: + if self.delete and (fill_value == "PREV" or "value" in fill_value.lower()): + additional_options = f"where ts >= {start_ts}-1s and ts <= {start_ts}" + else: + additional_options = f"where ts >= {start_ts} and ts <= {end_ts}" + self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.fill_tb_source_select_str} from {tbname} {additional_options} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart', fill_value=fill_value) + + + def run(self): + for watermark in [None, random.randint(15, 20)]: + for ignore_expired in [0, 1]: + self.window_close_interval(interval=random.randint(10, 15), watermark=watermark, ignore_expired=ignore_expired) + for fill_value in ["NULL", "PREV", "NEXT", "LINEAR", "VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11"]: + for watermark in [None, random.randint(15, 20)]: + self.window_close_interval(interval=random.randint(10, 12), watermark=watermark, fill_value=fill_value) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/8-stream/window_close_session.py b/tests/system-test/8-stream/window_close_session.py new file mode 100644 index 0000000000..8ee097ca10 --- /dev/null +++ b/tests/system-test/8-stream/window_close_session.py @@ -0,0 +1,127 @@ +import sys +import threading +from util.log import * +from util.sql import * +from util.cases import * +from util.common import * + +class TDTestCase: + updatecfgDict = {'debugFlag': 135, 'asynclog': 0} + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self.tdCom = tdCom + self.tdCom.subtable = True + self.tdCom.update = True + self.tdCom.disorder = True + if self.tdCom.disorder: + self.tdCom.update = False + self.tdCom.partition_tbname_alias = "ptn_alias" if self.tdCom.subtable else "" + self.tdCom.partition_col_alias = "pcol_alias" if self.tdCom.subtable else "" + self.tdCom.partition_tag_alias = "ptag_alias" if self.tdCom.subtable else "" + self.tdCom.partition_expression_alias = "pexp_alias" if self.tdCom.subtable else "" + self.stb_name = str() + self.ctb_name = str() + 
self.tb_name = str() + self.tdCom.des_table_suffix = "_output" + self.tdCom.stream_suffix = "_stream" + self.tdCom.stream_case_when_tbname = "tbname" + self.tdCom.subtable_prefix = "prefix_" if self.tdCom.subtable else "" + self.tdCom.subtable_suffix = "_suffix" if self.tdCom.subtable else "" + self.stb_stream_des_table = str() + self.ctb_stream_des_table = str() + self.tb_stream_des_table = str() + self.downsampling_function_list = ["min(c1)", "max(c2)", "sum(c3)", "first(c4)", "last(c5)", "apercentile(c6, 50)", "avg(c7)", "count(c8)", "spread(c1)", + "stddev(c2)", "hyperloglog(c11)", "timediff(1, 0, 1h)", "timezone()", "to_iso8601(1)", 'to_unixtimestamp("1970-01-01T08:00:00+08:00")', "min(t1)", "max(t2)", "sum(t3)", + "first(t4)", "last(t5)", "apercentile(t6, 50)", "avg(t7)", "count(t8)", "spread(t1)", "stddev(t2)", "hyperloglog(t11)"] + self.tdCom.stb_output_select_str = ','.join(list(map(lambda x:f'`{x}`', self.downsampling_function_list))) + self.tdCom.stb_source_select_str = ','.join(self.downsampling_function_list) + self.tdCom.tb_source_select_str = ','.join(self.downsampling_function_list[0:15]) + self.tdCom.partition_by_downsampling_function_list = ["min(c1)", "max(c2)", "sum(c3)", "first(c4)", "last(c5)", "count(c8)", "spread(c1)", + "stddev(c2)", "hyperloglog(c11)", "min(t1)", "max(t2)", "sum(t3)", "first(t4)", "last(t5)", "count(t8)", "spread(t1)", "stddev(t2)"] + + def watermark_window_close_session(self, session, watermark, fill_history_value=None, delete=True): + tdLog.info(f"*** testing stream window_close+session: session: {session}, watermark: {watermark}, fill_history: {fill_history_value}, delete: {delete} ***") + self.case_name = sys._getframe().f_code.co_name + if watermark is not None: + self.case_name = "watermark" + sys._getframe().f_code.co_name + self.tdCom.prepare_data(session=session, watermark=watermark, fill_history_value=fill_history_value) + self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "") + self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "") + self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "") + self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}' + self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}' + self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}' + self.tdCom.tb_output_select_str = ','.join(list(map(lambda x:f'`{x}`', self.downsampling_function_list[0:15]))) + self.tdCom.date_time = self.tdCom.dataDict["start_ts"] + if watermark is not None: + watermark_value = f'{self.tdCom.dataDict["watermark"]}s' + else: + watermark_value = None + # create stb/ctb/tb stream + # self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.stb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} session(ts, {self.tdCom.dataDict["session"]}s)', trigger_mode="window_close", watermark=watermark_value) + self.tdCom.create_stream(stream_name=f'{self.ctb_name}{self.tdCom.stream_suffix}', des_table=self.ctb_stream_des_table, source_sql=f'select _wstart AS wstart, _wend AS wend, {self.tdCom.stb_source_select_str} from {self.ctb_name} session(ts, {self.tdCom.dataDict["session"]}s)', trigger_mode="window_close", watermark=watermark_value, fill_history_value=fill_history_value) + self.tdCom.create_stream(stream_name=f'{self.tb_name}{self.tdCom.stream_suffix}', des_table=self.tb_stream_des_table, source_sql=f'select _wstart AS wstart, _wend AS wend, 
{self.tdCom.tb_source_select_str} from {self.tb_name} session(ts, {self.tdCom.dataDict["session"]}s)', trigger_mode="window_close", watermark=watermark_value, fill_history_value=fill_history_value) + for i in range(self.tdCom.range_count): + if i == 0: + window_close_ts = self.tdCom.cal_watermark_window_close_session_endts(self.tdCom.date_time, self.tdCom.dataDict['watermark'], self.tdCom.dataDict['session']) + else: + self.tdCom.date_time = window_close_ts + 1 + window_close_ts = self.tdCom.cal_watermark_window_close_session_endts(self.tdCom.date_time, self.tdCom.dataDict['watermark'], self.tdCom.dataDict['session']) + if watermark_value is not None: + expected_value = i + 1 + for ts_value in [self.tdCom.date_time, window_close_ts-1]: + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value) + if self.tdCom.update and i%2 == 0: + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value) + # for tbname in [self.stb_stream_des_table, self.ctb_stream_des_table, self.tb_stream_des_table]: + for tbname in [self.ctb_stream_des_table, self.tb_stream_des_table]: + if tbname != self.tb_stream_des_table: + tdSql.query(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}') + else: + tdSql.query(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}') + if not fill_history_value: + tdSql.checkEqual(tdSql.queryRows, i) + else: + expected_value = i + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts) + if self.tdCom.update and i%2 == 0: + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts) + + if fill_history_value: + self.tdCom.update_delete_history_data(delete=delete) + + # for tbname in [self.stb_name, self.ctb_name, self.tb_name]: + if not fill_history_value: + for tbname in [self.ctb_name, self.tb_name]: + if tbname != self.tb_name: + self.tdCom.check_stream(f'select wstart, wend-{self.tdCom.dataDict["session"]}s, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, _wend AS wend, {self.tdCom.stb_source_select_str} from {tbname} session(ts, {self.tdCom.dataDict["session"]}s) limit {expected_value}', expected_value) + else: + self.tdCom.check_stream(f'select wstart, wend-{self.tdCom.dataDict["session"]}s, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, _wend AS wend, {self.tdCom.tb_source_select_str} from {tbname} session(ts, {self.tdCom.dataDict["session"]}s) limit {expected_value}', expected_value) + else: + for tbname in [self.ctb_name, self.tb_name]: + if tbname != self.tb_name: + self.tdCom.check_query_data(f'select wstart, wend-{self.tdCom.dataDict["session"]}s, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, _wend AS wend, {self.tdCom.stb_source_select_str} from {tbname} session(ts, {self.tdCom.dataDict["session"]}s) limit {expected_value+1}') + else: + self.tdCom.check_query_data(f'select wstart, wend-{self.tdCom.dataDict["session"]}s, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, _wend AS wend, {self.tdCom.tb_source_select_str} from {tbname} session(ts, {self.tdCom.dataDict["session"]}s) limit 
{expected_value+1}')
+
+
+
+    def run(self):
+        for fill_history_value in [None, 1]:
+            for watermark in [None, random.randint(20, 25)]:
+                self.watermark_window_close_session(session=random.randint(10, 15), watermark=watermark, fill_history_value=fill_history_value)
+
+    def stop(self):
+        tdSql.close()
+        tdLog.success(f"{__file__} successfully executed")
+
+event = threading.Event()
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
\ No newline at end of file
diff --git a/tests/system-test/8-stream/window_close_state_window.py b/tests/system-test/8-stream/window_close_state_window.py
new file mode 100644
index 0000000000..d6e6a2c093
--- /dev/null
+++ b/tests/system-test/8-stream/window_close_state_window.py
@@ -0,0 +1,73 @@
+import sys
+import threading
+from util.log import *
+from util.sql import *
+from util.cases import *
+from util.common import *
+
+class TDTestCase:
+    updatecfgDict = {'debugFlag': 135, 'asynclog': 0}
+    def init(self, conn, logSql, replicaVar=1):
+        self.replicaVar = int(replicaVar)
+        tdLog.debug("start to execute %s" % __file__)
+        tdSql.init(conn.cursor(), logSql)
+        self.tdCom = tdCom
+
+    def window_close_state_window(self, state_window, delete=True):
+        tdLog.info(f"*** testing stream window_close+state_window: state_window: {state_window}, delete: {delete} ***")
+        self.case_name = sys._getframe().f_code.co_name
+        self.delete = delete
+        self.tdCom.prepare_data(state_window=state_window)
+        self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "")
+        self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "")
+        self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "")
+        self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}'
+        self.tdCom.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}'
+        self.tdCom.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}'
+        state_window_col_name = self.tdCom.dataDict["state_window"]
+        self.tdCom.create_stream(stream_name=f'{self.ctb_name}{self.tdCom.stream_suffix}', des_table=self.tdCom.ctb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.ctb_name} state_window({state_window_col_name})', trigger_mode="window_close")
+        self.tdCom.create_stream(stream_name=f'{self.tb_name}{self.tdCom.stream_suffix}', des_table=self.tdCom.tb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {self.tb_name} state_window({state_window_col_name})', trigger_mode="window_close")
+        state_window_max = self.tdCom.dataDict['state_window_max']
+        state_window_value_inmem = 0
+        sleep_step = 0
+        for i in range(self.tdCom.range_count):
+            state_window_value = random.randint(int((i)*state_window_max/self.tdCom.range_count), int((i+1)*state_window_max/self.tdCom.range_count))
+            while state_window_value == state_window_value_inmem:
+                state_window_value = random.randint(int((i)*state_window_max/self.tdCom.range_count), int((i+1)*state_window_max/self.tdCom.range_count))
+                if sleep_step < self.tdCom.default_interval:
+                    sleep_step += 1
+                    time.sleep(1)
+                else:
+                    return
+            for j in range(2, self.tdCom.range_count+3):
+                tdSql.execute(f'insert into {self.ctb_name} (ts, 
{state_window_col_name}) values ({self.tdCom.date_time}, {state_window_value})') + tdSql.execute(f'insert into {self.tb_name} (ts, {state_window_col_name}) values ({self.tdCom.date_time}, {state_window_value})') + if self.delete and i%2 != 0: + dt = f'cast({self.tdCom.date_time-1} as timestamp)' + self.tdCom.sdelete_rows(tbname=self.ctb_name, start_ts=dt) + self.tdCom.sdelete_rows(tbname=self.tb_name, start_ts=dt) + self.tdCom.date_time += 1 + for tbname in [self.ctb_name, self.tb_name]: + if tbname != self.tb_name: + self.tdCom.check_stream(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {tbname} state_window({state_window_col_name}) limit {i}', i) + else: + self.tdCom.check_stream(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {tbname} state_window({state_window_col_name}) limit {i}', i) + state_window_value_inmem = state_window_value + + + + def run(self): + for delete in [True, False]: + self.window_close_state_window(state_window="c1", delete=delete) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file From ae100c06e32b596a0a70ec37e7c319f3208e94d0 Mon Sep 17 00:00:00 2001 From: jiajingbin Date: Mon, 21 Aug 2023 18:14:35 +0800 Subject: [PATCH 03/16] update --- tests/pytest/util/common.py | 117 ++++++++++-- tests/system-test/8-stream/interval.py | 240 ------------------------- 2 files changed, 105 insertions(+), 252 deletions(-) delete mode 100644 tests/system-test/8-stream/interval.py diff --git a/tests/pytest/util/common.py b/tests/pytest/util/common.py index 1bf2668e9a..e6e2822f0e 100644 --- a/tests/pytest/util/common.py +++ b/tests/pytest/util/common.py @@ -102,8 +102,8 @@ class TDCom: self.smlChildTableName_value = None self.defaultJSONStrType_value = None self.smlTagNullName_value = None - self.default_varchar_length = 256 - self.default_nchar_length = 256 + self.default_varchar_length = 6 + self.default_nchar_length = 6 self.default_varchar_datatype = "letters" self.default_nchar_datatype = "letters" self.default_tagname_prefix = "t" @@ -122,6 +122,7 @@ class TDCom: self.stb_name = "stb" self.ctb_name = "ctb" self.tb_name = "tb" + self.tbname = str() self.need_tagts = False self.tag_type_str = "" self.column_type_str = "" @@ -139,6 +140,38 @@ class TDCom: self.range_count = 5 self.default_interval = 5 self.stream_timeout = 12 + self.record_history_ts = str() + self.subtable = True + self.partition_tbname_alias = "ptn_alias" if self.subtable else "" + self.partition_col_alias = "pcol_alias" if self.subtable else "" + self.partition_tag_alias = "ptag_alias" if self.subtable else "" + self.partition_expression_alias = "pexp_alias" if self.subtable else "" + self.des_table_suffix = "_output" + self.stream_suffix = "_stream" + self.subtable_prefix = "prefix_" if self.subtable else "" + self.subtable_suffix = "_suffix" if self.subtable else "" + self.downsampling_function_list = ["min(c1)", "max(c2)", "sum(c3)", "first(c4)", "last(c5)", "apercentile(c6, 50)", "avg(c7)", "count(c8)", "spread(c1)", + "stddev(c2)", "hyperloglog(c11)", "timediff(1, 0, 1h)", "timezone()", "to_iso8601(1)", 'to_unixtimestamp("1970-01-01T08:00:00+08:00")', "min(t1)", "max(t2)", "sum(t3)", + "first(t4)", 
"last(t5)", "apercentile(t6, 50)", "avg(t7)", "count(t8)", "spread(t1)", "stddev(t2)", "hyperloglog(t11)"] + self.stb_output_select_str = ','.join(list(map(lambda x:f'`{x}`', self.downsampling_function_list))) + self.tb_output_select_str = ','.join(list(map(lambda x:f'`{x}`', self.downsampling_function_list[0:15]))) + self.stb_source_select_str = ','.join(self.downsampling_function_list) + self.tb_source_select_str = ','.join(self.downsampling_function_list[0:15]) + self.fill_function_list = ["min(c1)", "max(c2)", "sum(c3)", "apercentile(c6, 50)", "avg(c7)", "count(c8)", "spread(c1)", + "stddev(c2)", "hyperloglog(c11)", "timediff(1, 0, 1h)", "timezone()", "to_iso8601(1)", 'to_unixtimestamp("1970-01-01T08:00:00+08:00")', "min(t1)", "max(t2)", "sum(t3)", + "first(t4)", "last(t5)", "apercentile(t6, 50)", "avg(t7)", "count(t8)", "spread(t1)", "stddev(t2)", "hyperloglog(t11)"] + self.fill_stb_output_select_str = ','.join(list(map(lambda x:f'`{x}`', self.fill_function_list))) + self.fill_stb_source_select_str = ','.join(self.fill_function_list) + self.fill_tb_output_select_str = ','.join(list(map(lambda x:f'`{x}`', self.fill_function_list[0:13]))) + self.fill_tb_source_select_str = ','.join(self.fill_function_list[0:13]) + self.stream_case_when_tbname = "tbname" + + self.update = True + self.disorder = True + if self.disorder: + self.update = False + self.partition_by_downsampling_function_list = ["min(c1)", "max(c2)", "sum(c3)", "first(c4)", "last(c5)", "count(c8)", "spread(c1)", + "stddev(c2)", "hyperloglog(c11)", "min(t1)", "max(t2)", "sum(t3)", "first(t4)", "last(t5)", "count(t8)", "spread(t1)", "stddev(t2)"] # def init(self, conn, logSql): # # tdSql.init(conn.cursor(), logSql) @@ -1148,10 +1181,10 @@ class TDCom: if dbname is not None: self.dbname = dbname if tbname is not None: - self.tb_name = f'{self.dbname}.{tbname}' + self.tbname = f'{self.dbname}.{tbname}' else: if tbname is not None: - self.tb_name = tbname + self.tbname = tbname self.sgen_column_value_list(column_ele_list, need_null, ts_value) # column_value_str = ", ".join(str(v) for v in self.column_value_list) @@ -1165,7 +1198,7 @@ class TDCom: column_value_str += f'{column_value}, ' column_value_str = column_value_str.rstrip()[:-1] if int(count) <= 1: - insert_sql = f'insert into {self.tb_name} values ({column_value_str});' + insert_sql = f'insert into {self.tbname} values ({column_value_str});' tdSql.execute(insert_sql) else: for num in range(count): @@ -1180,24 +1213,23 @@ class TDCom: else: column_value_str += f'{column_value}, ' column_value_str = column_value_str.rstrip()[:-1] - insert_sql = f'insert into {self.tb_name} values ({column_value_str});' - print(insert_sql) + insert_sql = f'insert into {self.tbname} values ({column_value_str});' tdSql.execute(insert_sql) def sdelete_rows(self, dbname=None, tbname=None, start_ts=None, end_ts=None, ts_key=None): if dbname is not None: self.dbname = dbname if tbname is not None: - self.tb_name = f'{self.dbname}.{tbname}' + self.tbname = f'{self.dbname}.{tbname}' else: if tbname is not None: - self.tb_name = tbname + self.tbname = tbname if ts_key is None: ts_col_name = self.default_colts_name else: ts_col_name = ts_key - base_del_sql = f'delete from {self.tb_name} ' + base_del_sql = f'delete from {self.tbname} ' if end_ts is not None: if ":" in start_ts and "-" in start_ts: start_ts = f"{start_ts}" @@ -1266,6 +1298,10 @@ class TDCom: final_list.append(tuple(tmpl)) return final_list + def str_ts_trans_bigint(self, str_ts): + tdSql.query(f'select cast({str_ts} as bigint)') + return 
tdSql.queryResult[0][0] + def cast_query_data(self, query_data): tdLog.info("cast query data ...") col_type_list = self.column_type_str.split(',') @@ -1276,8 +1312,8 @@ class TDCom: query_data_l = list(query_data_t) for i,v in enumerate(query_data_l): if v is not None: - if " ".join(col_tag_type_list[i].strip().split(" ")[1:]) == "nchar(256)": - tdSql.query(f'select cast("{v}" as binary(256))') + if " ".join(col_tag_type_list[i].strip().split(" ")[1:]) == "nchar(6)": + tdSql.query(f'select cast("{v}" as binary(6))') else: tdSql.query(f'select cast("{v}" as {" ".join(col_tag_type_list[i].strip().split(" ")[1:])})') query_data_l[i] = tdSql.queryResult[0][0] @@ -1404,6 +1440,63 @@ class TDCom: tdSql.checkNotEqual(res1, res2) # tdSql.checkEqual(res1, res2) if not reverse_check else tdSql.checkNotEqual(res1, res2) + def check_stream_res(self, sql, expected_res, max_delay): + tdSql.query(sql) + latency = 0 + + while tdSql.queryRows != expected_res: + tdSql.query(sql) + if latency < self.stream_timeout: + latency += 0.2 + time.sleep(0.2) + else: + if max_delay is not None: + if latency == 0: + return False + tdSql.checkEqual(tdSql.queryRows, expected_res) + + def check_stream(self, sql1, sql2, expected_count, max_delay=None): + self.check_stream_res(sql1, expected_count, max_delay) + self.check_query_data(sql1, sql2) + + def cal_watermark_window_close_session_endts(self, start_ts, watermark=None, session=None): + """cal endts for close window + + :param start_ts: [start timestamp: self.date_time] + :type start_ts: [epoch time] + :param watermark: [second level and > session] + :type watermark: [s] + :param precision: [default "ms" and only support "ms" now] + :type precision: str, optional + """ + if watermark is not None: + return start_ts + watermark*self.offset + 1 + else: + return start_ts + session*self.offset + 1 + + def cal_watermark_window_close_interval_endts(self, start_ts, interval, watermark=None): + """cal endts for close window + + :param start_ts: [start timestamp: self.date_time] + :type start_ts: [epoch time] + :param interval: [second level] + :type interval: [s] + :param watermark: [second level and > interval] + :type watermark: [s] + :param precision: [default "ms" and only support "ms" now] + :type precision: str, optional + """ + if watermark is not None: + return int(start_ts/self.offset)*self.offset + (interval - (int(start_ts/self.offset))%interval)*self.offset + watermark*self.offset + else: + return int(start_ts/self.offset)*self.offset + (interval - (int(start_ts/self.offset))%interval)*self.offset + + def update_delete_history_data(self, delete): + self.sinsert_rows(tbname=self.ctb_name, ts_value=self.record_history_ts) + self.sinsert_rows(tbname=self.tb_name, ts_value=self.record_history_ts) + if delete: + self.sdelete_rows(tbname=self.ctb_name, start_ts=self.time_cast(self.record_history_ts, "-")) + self.sdelete_rows(tbname=self.tb_name, start_ts=self.time_cast(self.record_history_ts, "-")) def prepare_data(self, interval=None, watermark=None, session=None, state_window=None, state_window_max=127, interation=3, range_count=None, precision="ms", fill_history_value=0, ext_stb=None): self.clean_env() diff --git a/tests/system-test/8-stream/interval.py b/tests/system-test/8-stream/interval.py deleted file mode 100644 index 92cd28c44d..0000000000 --- a/tests/system-test/8-stream/interval.py +++ /dev/null @@ -1,240 +0,0 @@ -import sys -import time -import threading -from taos.tmq import Consumer -from util.log import * -from util.sql import * -from util.cases import * 
-from util.dnodes import * -from util.common import * - -class TDTestCase: - updatecfgDict = {'debugFlag': 135, 'asynclog': 0} - def init(self, conn, logSql, replicaVar=1): - self.replicaVar = int(replicaVar) - tdLog.debug("start to execute %s" % __file__) - tdSql.init(conn.cursor(), logSql) - self.tdCom = tdCom - self.subtable = True - self.partition_tbname_alias = "ptn_alias" if self.subtable else "" - self.partition_col_alias = "pcol_alias" if self.subtable else "" - self.partition_tag_alias = "ptag_alias" if self.subtable else "" - self.partition_expression_alias = "pexp_alias" if self.subtable else "" - self.stb_name = str() - self.ctb_name = str() - self.tb_name = str() - self.des_table_suffix = "_output" - self.stream_suffix = "_stream" - self.subtable_prefix = "prefix_" if self.subtable else "" - self.subtable_suffix = "_suffix" if self.subtable else "" - self.stb_stream_des_table = str() - self.ctb_stream_des_table = str() - self.tb_stream_des_table = str() - self.downsampling_function_list = ["min(c1)", "max(c2)", "sum(c3)", "first(c4)", "last(c5)", "apercentile(c6, 50)", "avg(c7)", "count(c8)", "spread(c1)", - "stddev(c2)", "hyperloglog(c11)", "timediff(1, 0, 1h)", "timezone()", "to_iso8601(1)", 'to_unixtimestamp("1970-01-01T08:00:00+08:00")', "min(t1)", "max(t2)", "sum(t3)", - "first(t4)", "last(t5)", "apercentile(t6, 50)", "avg(t7)", "count(t8)", "spread(t1)", "stddev(t2)", "hyperloglog(t11)"] - self.stb_output_select_str = ','.join(list(map(lambda x:f'`{x}`', self.downsampling_function_list))) - self.stb_source_select_str = ','.join(self.downsampling_function_list) - self.tb_source_select_str = ','.join(self.downsampling_function_list[0:15]) - - def at_once_interval(self, interval, partition="tbname", delete=False, fill_value=None, fill_history_value=None, case_when=None): - tdLog.info(f"testing stream at_once+interval: interval: {interval}, partition: {partition}, fill_history: {fill_history_value}") - self.delete = delete - self.tdCom.case_name = sys._getframe().f_code.co_name - self.tdCom.prepare_data(interval=interval, fill_history_value=fill_history_value) - self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "") - self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "") - self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "") - self.stb_stream_des_table = f'{self.stb_name}{self.des_table_suffix}' - self.ctb_stream_des_table = f'{self.ctb_name}{self.des_table_suffix}' - self.tb_stream_des_table = f'{self.tb_name}{self.des_table_suffix}' - self.tb_output_select_str = ','.join(list(map(lambda x:f'`{x}`', self.downsampling_function_list[0:15]))) - - if partition == "tbname": - if case_when: - stream_case_when_partition = case_when - else: - stream_case_when_partition = self.partition_tbname_alias - - partition_elm_alias = self.partition_tbname_alias - elif partition == "c1": - if case_when: - stream_case_when_partition = case_when - else: - stream_case_when_partition = self.partition_col_alias - partition_elm_alias = self.partition_col_alias - elif partition == "abs(c1)": - partition_elm_alias = self.partition_expression_alias - elif partition is None: - partition_elm_alias = '"no_partition"' - else: - partition_elm_alias = self.partition_tag_alias - if partition == "tbname" or partition is None: - if case_when: - stb_subtable_value = f'concat(concat("{self.stb_name}_{self.subtable_prefix}", {stream_case_when_partition}), "{self.subtable_suffix}")' if self.subtable else None - ctb_subtable_value = 
f'concat(concat("{self.ctb_name}_{self.subtable_prefix}", {stream_case_when_partition}), "{self.subtable_suffix}")' if self.subtable else None - tb_subtable_value = f'concat(concat("{self.tb_name}_{self.subtable_prefix}", {stream_case_when_partition}), "{self.subtable_suffix}")' if self.subtable else None - else: - stb_subtable_value = f'concat(concat("{self.stb_name}_{self.subtable_prefix}", {partition_elm_alias}), "{self.subtable_suffix}")' if self.subtable else None - ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.subtable_prefix}", {partition_elm_alias}), "{self.subtable_suffix}")' if self.subtable else None - tb_subtable_value = f'concat(concat("{self.tb_name}_{self.subtable_prefix}", {partition_elm_alias}), "{self.subtable_suffix}")' if self.subtable else None - else: - stb_subtable_value = f'concat(concat("{self.stb_name}_{self.subtable_prefix}", cast(cast(abs(cast({partition_elm_alias} as int)) as bigint) as varchar(100))), "{self.subtable_suffix}")' if self.subtable else None - ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.subtable_prefix}", cast(cast(abs(cast({partition_elm_alias} as int)) as bigint) as varchar(100))), "{self.subtable_suffix}")' if self.subtable else None - tb_subtable_value = f'concat(concat("{self.tb_name}_{self.subtable_prefix}", cast(cast(abs(cast({partition_elm_alias} as int)) as bigint) as varchar(100))), "{self.subtable_suffix}")' if self.subtable else None - if partition: - partition_elm = f'partition by {partition} {partition_elm_alias}' - else: - partition_elm = "" - if fill_value: - if "value" in fill_value.lower(): - fill_value='VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11' - self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.stream_suffix}', des_table=self.stb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.stb_source_select_str} from {self.stb_name} {partition_elm} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="at_once", subtable_value=stb_subtable_value, fill_value=fill_value, fill_history_value=fill_history_value) - self.tdCom.create_stream(stream_name=f'{self.ctb_name}{self.stream_suffix}', des_table=self.ctb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.stb_source_select_str} from {self.ctb_name} {partition_elm} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="at_once", subtable_value=ctb_subtable_value, fill_value=fill_value, fill_history_value=fill_history_value) - if fill_value: - if "value" in fill_value.lower(): - fill_value='VALUE,1,2,3,4,5,6,7,8,9,10,11' - self.tdCom.create_stream(stream_name=f'{self.tb_name}{self.stream_suffix}', des_table=self.tb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tb_source_select_str} from {self.tb_name} {partition_elm} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="at_once", subtable_value=tb_subtable_value, fill_value=fill_value, fill_history_value=fill_history_value) - start_time = self.tdCom.date_time - for i in range(self.tdCom.range_count): - ts_value = str(self.tdCom.date_time+self.tdCom.dataDict["interval"])+f'+{i*10}s' - ts_cast_delete_value = self.tdCom.time_cast(ts_value) - self.tdCom.sinsert_rows(tbname=self.tdCom.ctb_name, ts_value=ts_value) - if i%2 == 0: - self.tdCom.sinsert_rows(tbname=self.tdCom.ctb_name, ts_value=ts_value) - if self.delete and i%2 != 0: - self.tdCom.sdelete_rows(tbname=self.tdCom.ctb_name, start_ts=ts_cast_delete_value) - self.tdCom.date_time += 1 - self.tdCom.sinsert_rows(tbname=self.tdCom.tb_name, ts_value=ts_value) - if i%2 == 0: - 
self.tdCom.sinsert_rows(tbname=self.tdCom.tb_name, ts_value=ts_value) - if self.delete and i%2 != 0: - self.tdCom.sdelete_rows(tbname=self.tdCom.tb_name, start_ts=ts_cast_delete_value) - self.tdCom.date_time += 1 - if partition: - partition_elm = f'partition by {partition}' - else: - partition_elm = "" - - if not fill_value: - for tbname in [self.stb_name, self.ctb_name, self.tb_name]: - if tbname != self.tb_name: - self.tdCom.check_query_data(f'select wstart, {self.stb_output_select_str} from {tbname}{self.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.stb_source_select_str} from {tbname} {partition_elm} interval({self.tdCom.dataDict["interval"]}s) order by wstart', sorted=True) - else: - self.tdCom.check_query_data(f'select wstart, {self.tb_output_select_str} from {tbname}{self.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tb_source_select_str} from {tbname} {partition_elm} interval({self.tdCom.dataDict["interval"]}s) order by wstart', sorted=True) - - if self.subtable: - for tname in [self.stb_name, self.ctb_name]: - tdSql.query(f'select * from {self.ctb_name}') - ptn_counter = 0 - for c1_value in tdSql.queryResult: - if partition == "c1": - tdSql.query(f'select count(*) from `{tname}_{self.subtable_prefix}{abs(c1_value[1])}{self.subtable_suffix}`;') - elif partition is None: - tdSql.query(f'select count(*) from `{tname}_{self.subtable_prefix}no_partition{self.subtable_suffix}`;') - elif partition == "abs(c1)": - abs_c1_value = abs(c1_value[1]) - tdSql.query(f'select count(*) from `{tname}_{self.subtable_prefix}{abs_c1_value}{self.subtable_suffix}`;') - elif partition == "tbname" and ptn_counter == 0: - tdSql.query(f'select count(*) from `{tname}_{self.subtable_prefix}{self.ctb_name}{self.subtable_suffix}`;') - ptn_counter += 1 - tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True) - - tdSql.query(f'select * from {self.tb_name}') - ptn_counter = 0 - for c1_value in tdSql.queryResult: - if partition == "c1": - tdSql.query(f'select count(*) from `{self.tb_name}_{self.subtable_prefix}{abs(c1_value[1])}{self.subtable_suffix}`;') - elif partition is None: - tdSql.query(f'select count(*) from `{self.tb_name}_{self.subtable_prefix}no_partition{self.subtable_suffix}`;') - elif partition == "abs(c1)": - abs_c1_value = abs(c1_value[1]) - tdSql.query(f'select count(*) from `{self.tb_name}_{self.subtable_prefix}{abs_c1_value}{self.subtable_suffix}`;') - elif partition == "tbname" and ptn_counter == 0: - tdSql.query(f'select count(*) from `{self.tb_name}_{self.subtable_prefix}{self.tb_name}{self.subtable_suffix}`;') - ptn_counter += 1 - - tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True) - if fill_value: - end_date_time = self.tdCom.date_time - final_range_count = self.tdCom.range_count - history_ts = str(start_time)+f'-{self.tdCom.dataDict["interval"]*(final_range_count+2)}s' - start_ts = self.tdCom.time_cast(history_ts, "-") - future_ts = str(end_date_time)+f'+{self.tdCom.dataDict["interval"]*(final_range_count+2)}s' - end_ts = self.tdCom.time_cast(future_ts) - self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=history_ts) - self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=history_ts) - self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=future_ts) - self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=future_ts) - self.tdCom.date_time = start_time - # update - history_ts = str(start_time)+f'-{self.tdCom.dataDict["interval"]*(final_range_count+2)}s' - start_ts = self.tdCom.time_cast(history_ts, "-") - future_ts = 
str(end_date_time)+f'+{self.tdCom.dataDict["interval"]*(final_range_count+2)}s' - end_ts = self.tdCom.time_cast(future_ts) - self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=history_ts) - self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=history_ts) - self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=future_ts) - self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=future_ts) - self.tdCom.date_time = start_time - for i in range(self.tdCom.range_count): - ts_value = str(self.tdCom.date_time+self.tdCom.dataDict["interval"])+f'+{i*10}s' - ts_cast_delete_value = self.tdCom.time_cast(ts_value) - self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value) - self.tdCom.date_time += 1 - self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value) - self.tdCom.date_time += 1 - if self.delete: - self.tdCom.sdelete_rows(tbname=self.ctb_name, start_ts=self.tdCom.time_cast(start_time), end_ts=ts_cast_delete_value) - self.tdCom.sdelete_rows(tbname=self.tb_name, start_ts=self.tdCom.time_cast(start_time), end_ts=ts_cast_delete_value) - for tbname in [self.stb_name, self.ctb_name, self.tb_name]: - if tbname != self.tb_name: - if "value" in fill_value.lower(): - fill_value='VALUE,1,2,3,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11' - if partition == "tbname": - self.tdCom.check_query_data(f'select wstart, {self.fill_stb_output_select_str} from {tbname}{self.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.fill_stb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart', fill_value=fill_value) - else: - self.tdCom.check_query_data(f'select wstart, {self.fill_stb_output_select_str} from {tbname}{self.des_table_suffix} where `min(c1)` is not Null order by wstart,`min(c1)`', f'select * from (select _wstart AS wstart, {self.fill_stb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart) where `min(c1)` is not Null order by wstart,`min(c1)`', fill_value=fill_value) - else: - if "value" in fill_value.lower(): - fill_value='VALUE,1,2,3,6,7,8,9,10,11' - if partition == "tbname": - self.tdCom.check_query_data(f'select wstart, {self.fill_tb_output_select_str} from {tbname}{self.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.fill_tb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart', fill_value=fill_value) - else: - self.tdCom.check_query_data(f'select wstart, {self.fill_tb_output_select_str} from {tbname}{self.des_table_suffix} where `min(c1)` is not Null order by wstart,`min(c1)`', f'select * from (select _wstart AS wstart, {self.fill_tb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart) where `min(c1)` is not Null order by wstart,`min(c1)`', fill_value=fill_value) - - if self.delete: - self.tdCom.sdelete_rows(tbname=self.ctb_name, start_ts=start_ts, end_ts=ts_cast_delete_value) - self.tdCom.sdelete_rows(tbname=self.tb_name, start_ts=start_ts, end_ts=ts_cast_delete_value) - for tbname in [self.stb_name, self.ctb_name, self.tb_name]: - if tbname != self.tb_name: - if "value" in fill_value.lower(): - 
fill_value='VALUE,1,2,3,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11' - if partition == "tbname": - self.tdCom.check_query_data(f'select wstart, {self.fill_stb_output_select_str} from {tbname}{self.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.fill_stb_source_select_str} from {tbname} where ts >= {start_ts.replace("-", "+")} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart', fill_value=fill_value) - else: - self.tdCom.check_query_data(f'select wstart, {self.fill_stb_output_select_str} from {tbname}{self.des_table_suffix} order by wstart,`min(c1)`', f'select * from (select _wstart AS wstart, {self.fill_stb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart) where `min(c1)` is not Null order by wstart,`min(c1)`', fill_value=fill_value) - - else: - if "value" in fill_value.lower(): - fill_value='VALUE,1,2,3,6,7,8,9,10,11' - if partition == "tbname": - self.tdCom.check_query_data(f'select wstart, {self.fill_tb_output_select_str} from {tbname}{self.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.fill_tb_source_select_str} from {tbname} where ts >= {start_ts.replace("-", "+")} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart', fill_value=fill_value) - else: - self.tdCom.check_query_data(f'select wstart, {self.fill_tb_output_select_str} from {tbname}{self.des_table_suffix} order by wstart,`min(c1)`', f'select * from (select _wstart AS wstart, {self.fill_tb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart) where `min(c1)` is not Null order by wstart,`min(c1)`', fill_value=fill_value) - - - def run(self): - self.at_once_interval(interval=random.randint(10, 15), partition="tbname", delete=True) - self.at_once_interval(interval=random.randint(10, 15), partition="c1", delete=True) - self.at_once_interval(interval=random.randint(10, 15), partition="abs(c1)", delete=True) - self.at_once_interval(interval=random.randint(10, 15), partition=None, delete=True) - - def stop(self): - tdSql.close() - tdLog.success(f"{__file__} successfully executed") - -event = threading.Event() - -tdCases.addLinux(__file__, TDTestCase()) -tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file From 7450a8584d0284dd5ec346c9eb5fbd1107dd93da Mon Sep 17 00:00:00 2001 From: jiajingbin Date: Mon, 21 Aug 2023 19:42:52 +0800 Subject: [PATCH 04/16] update --- tests/pytest/util/common.py | 45 ++++- .../8-stream/max_delay_interval.py | 161 ++++++++++++++++++ .../system-test/8-stream/max_delay_session.py | 100 +++++++++++ .../8-stream/partition_interval.py | 105 ++++++++++++ .../8-stream/window_close_session.py | 29 ---- 5 files changed, 410 insertions(+), 30 deletions(-) create mode 100644 tests/system-test/8-stream/max_delay_interval.py create mode 100644 tests/system-test/8-stream/max_delay_session.py create mode 100644 tests/system-test/8-stream/partition_interval.py diff --git a/tests/pytest/util/common.py b/tests/pytest/util/common.py index e6e2822f0e..a512ae605f 100644 --- a/tests/pytest/util/common.py +++ b/tests/pytest/util/common.py @@ -29,7 +29,7 @@ from util.constant import * from dataclasses import dataclass,field from typing import List from datetime import 
datetime - +import re @dataclass class DataSet: ts_data : List[int] = field(default_factory=list) @@ -141,6 +141,8 @@ class TDCom: self.default_interval = 5 self.stream_timeout = 12 self.record_history_ts = str() + self.precision = "ms" + self.date_time = self.genTs(precision=self.precision)[0] self.subtable = True self.partition_tbname_alias = "ptn_alias" if self.subtable else "" self.partition_col_alias = "pcol_alias" if self.subtable else "" @@ -172,6 +174,31 @@ class TDCom: self.update = False self.partition_by_downsampling_function_list = ["min(c1)", "max(c2)", "sum(c3)", "first(c4)", "last(c5)", "count(c8)", "spread(c1)", "stddev(c2)", "hyperloglog(c11)", "min(t1)", "max(t2)", "sum(t3)", "first(t4)", "last(t5)", "count(t8)", "spread(t1)", "stddev(t2)"] + + self.stb_data_filter_sql = f'ts >= {self.date_time}+1s and c1 = 1 or c2 > 1 and c3 != 4 or c4 <= 3 and c9 <> 0 or c10 is not Null or c11 is Null or \ + c12 between "na" and "nchar4" and c11 not between "bi" and "binary" and c12 match "nchar[19]" and c12 nmatch "nchar[25]" or c13 = True or \ + c5 in (1, 2, 3) or c6 not in (6, 7) and c12 like "nch%" and c11 not like "bina_" and c6 < 10 or c12 is Null or c8 >= 4 and t1 = 1 or t2 > 1 \ + and t3 != 4 or c4 <= 3 and t9 <> 0 or t10 is not Null or t11 is Null or t12 between "na" and "nchar4" and t11 not between "bi" and "binary" \ + or t12 match "nchar[19]" or t12 nmatch "nchar[25]" or t13 = True or t5 in (1, 2, 3) or t6 not in (6, 7) and t12 like "nch%" \ + and t11 not like "bina_" and t6 <= 10 or t12 is Null or t8 >= 4' + self.tb_data_filter_sql = self.stb_data_filter_sql.partition(" and t1")[0] + + self.filter_source_select_elm = "*" + self.stb_filter_des_select_elm = "ts, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13" + self.partitial_stb_filter_des_select_elm = ",".join(self.stb_filter_des_select_elm.split(",")[:3]) + self.exchange_stb_filter_des_select_elm = ",".join([self.stb_filter_des_select_elm.split(",")[0], self.stb_filter_des_select_elm.split(",")[2], self.stb_filter_des_select_elm.split(",")[1]]) + self.partitial_ext_tb_source_select_str = ','.join(self.downsampling_function_list[0:2]) + self.tb_filter_des_select_elm = self.stb_filter_des_select_elm.partition(", t1")[0] + self.tag_filter_des_select_elm = self.stb_filter_des_select_elm.partition("c13, ")[2] + self.partition_by_stb_output_select_str = ','.join(list(map(lambda x:f'`{x}`', self.partition_by_downsampling_function_list))) + self.partition_by_stb_source_select_str = ','.join(self.partition_by_downsampling_function_list) + self.exchange_tag_filter_des_select_elm = ",".join([self.stb_filter_des_select_elm.partition("c13, ")[2].split(",")[0], self.stb_filter_des_select_elm.partition("c13, ")[2].split(",")[2], self.stb_filter_des_select_elm.partition("c13, ")[2].split(",")[1]]) + self.partitial_tag_filter_des_select_elm = ",".join(self.stb_filter_des_select_elm.partition("c13, ")[2].split(",")[:3]) + self.partitial_tag_stb_filter_des_select_elm = "ts, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, t1, t3, t2, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13" + self.cast_tag_filter_des_select_elm = "t5,t11,t13" + self.cast_tag_stb_filter_des_select_elm = "ts, t1, t2, t3, t4, cast(t1 as TINYINT UNSIGNED), t6, t7, t8, t9, t10, cast(t2 as varchar(256)), t12, cast(t3 as bool)" + self.tag_count = len(self.tag_filter_des_select_elm.split(",")) + self.state_window_range = list() # def init(self, conn, logSql): # # tdSql.init(conn.cursor(), logSql) @@ -1322,6 
+1349,22 @@ class TDCom: nl.append(tuple(query_data_l)) return nl + def trans_time_to_s(self, runtime): + if "d" in str(runtime).lower(): + d_num = re.findall("\d+\.?\d*", runtime.replace(" ", ""))[0] + s_num = float(d_num) * 24 * 60 * 60 + elif "h" in str(runtime).lower(): + h_num = re.findall("\d+\.?\d*", runtime.replace(" ", ""))[0] + s_num = float(h_num) * 60 * 60 + elif "m" in str(runtime).lower(): + m_num = re.findall("\d+\.?\d*", runtime.replace(" ", ""))[0] + s_num = float(m_num) * 60 + elif "s" in str(runtime).lower(): + s_num = re.findall("\d+\.?\d*", runtime.replace(" ", ""))[0] + else: + s_num = 60 + return int(s_num) + def check_query_data(self, sql1, sql2, sorted=False, fill_value=None, tag_value_list=None, defined_tag_count=None, partition=True, use_exist_stb=False, subtable=None, reverse_check=False): tdLog.info("checking query data ...") if tag_value_list: diff --git a/tests/system-test/8-stream/max_delay_interval.py b/tests/system-test/8-stream/max_delay_interval.py new file mode 100644 index 0000000000..9306118e30 --- /dev/null +++ b/tests/system-test/8-stream/max_delay_interval.py @@ -0,0 +1,161 @@ +import sys +import threading +from util.log import * +from util.sql import * +from util.cases import * +from util.common import * + +class TDTestCase: + updatecfgDict = {'debugFlag': 135, 'asynclog': 0} + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self.tdCom = tdCom + + def watermark_max_delay_interval(self, interval, max_delay, watermark=None, fill_value=None, delete=False): + tdLog.info(f"*** testing stream max_delay+interval: interval: {interval}, watermark: {watermark}, fill_value: {fill_value}, delete: {delete} ***") + self.delete = delete + self.case_name = sys._getframe().f_code.co_name + if watermark is not None: + self.case_name = "watermark" + sys._getframe().f_code.co_name + self.tdCom.prepare_data(interval=interval, watermark=watermark) + self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "") + self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "") + self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "") + self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}' + self.tdCom.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}' + self.tdCom.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}' + self.tdCom.date_time = 1658921623245 + if watermark is not None: + watermark_value = f'{self.tdCom.dataDict["watermark"]}s' + fill_watermark_value = watermark_value + else: + watermark_value = None + fill_watermark_value = "0s" + + max_delay_value = f'{self.tdCom.trans_time_to_s(max_delay)}s' + if fill_value: + if "value" in fill_value.lower(): + fill_value='VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11' + # create stb/ctb/tb stream + self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.stb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="max_delay", watermark=watermark_value, max_delay=max_delay_value, fill_value=fill_value) + self.tdCom.create_stream(stream_name=f'{self.ctb_name}{self.tdCom.stream_suffix}', des_table=self.tdCom.ctb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.ctb_name} 
interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="max_delay", watermark=watermark_value, max_delay=max_delay_value, fill_value=fill_value) + if fill_value: + if "value" in fill_value.lower(): + fill_value='VALUE,1,2,3,4,5,6,7,8,9,10,11' + self.tdCom.create_stream(stream_name=f'{self.tb_name}{self.tdCom.stream_suffix}', des_table=self.tdCom.tb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {self.tb_name} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="max_delay", watermark=watermark_value, max_delay=max_delay_value, fill_value=fill_value) + init_num = 0 + start_time = self.tdCom.date_time + for i in range(self.tdCom.range_count): + if i == 0: + if watermark is not None: + window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(self.tdCom.date_time, self.tdCom.dataDict['interval'], self.tdCom.dataDict['watermark']) + else: + window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(self.tdCom.date_time, self.tdCom.dataDict['interval']) + else: + self.tdCom.date_time = window_close_ts + self.tdCom.offset + window_close_ts += self.tdCom.dataDict['interval']*self.tdCom.offset + for num in range(int(window_close_ts/self.tdCom.offset-self.tdCom.date_time/self.tdCom.offset)): + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=self.tdCom.date_time+num*self.tdCom.offset) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=self.tdCom.date_time+num*self.tdCom.offset) + if self.tdCom.update and i%2 == 0: + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=self.tdCom.date_time+num*self.tdCom.offset) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=self.tdCom.date_time+num*self.tdCom.offset) + if not fill_value: + for tbname in [self.stb_stream_des_table, self.tdCom.ctb_stream_des_table, self.tdCom.tb_stream_des_table]: + if tbname != self.tdCom.tb_stream_des_table: + tdSql.query(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}') + else: + tdSql.query(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}') + tdSql.checkEqual(tdSql.queryRows, init_num) + + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts-1) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts-1) + if self.tdCom.update and i%2 == 0: + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts-1) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts-1) + + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts) + if self.tdCom.update and i%2 == 0: + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts) + + if i == 0: + init_num = 2 + i + if watermark is not None: + init_num += 1 + else: + init_num += 1 + time.sleep(int(max_delay.replace("s", ""))) + if not fill_value: + for tbname in [self.stb_name, self.ctb_name, self.tb_name]: + if tbname != self.tb_name: + self.tdCom.check_query_data(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {tbname} interval({self.tdCom.dataDict["interval"]}s)') + else: + self.tdCom.check_query_data(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {tbname} 
interval({self.tdCom.dataDict["interval"]}s)') + if fill_value: + history_ts = str(start_time)+f'-{self.tdCom.dataDict["interval"]*(self.tdCom.range_count+2)}s' + start_ts = self.tdCom.time_cast(history_ts, "-") + future_ts = str(self.tdCom.date_time)+f'+{self.tdCom.dataDict["interval"]*(self.tdCom.range_count+2)}s' + end_ts = self.tdCom.time_cast(future_ts) + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=history_ts) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=history_ts) + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=future_ts) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=future_ts) + future_ts_bigint = self.tdCom.str_ts_trans_bigint(future_ts) + if watermark is not None: + window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(future_ts_bigint, self.tdCom.dataDict['interval'], self.tdCom.dataDict['watermark']) + else: + window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(future_ts_bigint, self.tdCom.dataDict['interval']) + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts) + + if self.tdCom.update: + for i in range(self.tdCom.range_count): + if i == 0: + if watermark is not None: + window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(self.tdCom.date_time, self.tdCom.dataDict['interval'], self.tdCom.dataDict['watermark']) + else: + window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(self.tdCom.date_time, self.tdCom.dataDict['interval']) + else: + self.tdCom.date_time = window_close_ts + self.tdCom.offset + window_close_ts += self.tdCom.dataDict['interval']*self.tdCom.offset + for num in range(int(window_close_ts/self.tdCom.offset-self.tdCom.date_time/self.tdCom.offset)): + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=self.tdCom.date_time+num*self.tdCom.offset) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=self.tdCom.date_time+num*self.tdCom.offset) + + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts-1) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts-1) + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts) + if self.delete: + self.tdCom.sdelete_rows(tbname=self.ctb_name, start_ts=self.tdCom.time_cast(start_time), end_ts=self.tdCom.time_cast(window_close_ts)) + self.tdCom.sdelete_rows(tbname=self.tb_name, start_ts=self.tdCom.time_cast(start_time), end_ts=self.tdCom.time_cast(window_close_ts)) + time.sleep(int(max_delay.replace("s", ""))) + for tbname in [self.stb_name, self.ctb_name, self.tb_name]: + if tbname != self.tb_name: + if "value" in fill_value.lower(): + fill_value='VALUE,1,2,3,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11' + self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, {self.tdCom.fill_stb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts}+{self.tdCom.dataDict["interval"]}s+{fill_watermark_value} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value})', fill_value=fill_value) + else: + if "value" in fill_value.lower(): + fill_value='VALUE,1,2,3,6,7,8,9,10,11' + self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, {self.tdCom.fill_tb_source_select_str} from {tbname} where ts >= 
{start_ts} and ts <= {end_ts}+{self.tdCom.dataDict["interval"]}s+{fill_watermark_value} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value})', fill_value=fill_value) + + + def run(self): + for watermark in [None, random.randint(20, 25)]: + self.watermark_max_delay_interval(interval=random.choice([15]), watermark=watermark, max_delay=f"{random.randint(5, 6)}s") + for fill_value in ["NULL", "PREV", "NEXT", "LINEAR", "VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11"]: + self.watermark_max_delay_interval(interval=random.randint(10, 15), watermark=None, max_delay=f"{random.randint(5, 6)}s", fill_value=fill_value) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/8-stream/max_delay_session.py b/tests/system-test/8-stream/max_delay_session.py new file mode 100644 index 0000000000..874665dcc9 --- /dev/null +++ b/tests/system-test/8-stream/max_delay_session.py @@ -0,0 +1,100 @@ +import sys +import threading +from util.log import * +from util.sql import * +from util.cases import * +from util.common import * + +class TDTestCase: + updatecfgDict = {'debugFlag': 135, 'asynclog': 0} + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self.tdCom = tdCom + + def watermark_max_delay_session(self, session, watermark, max_delay, fill_history_value=None): + tdLog.info(f"*** testing stream max_delay+session: session: {session}, watermark: {watermark}, max_delay: {max_delay}, fill_history_value: {fill_history_value} ***") + self.tdCom.case_name = sys._getframe().f_code.co_name + if watermark is not None: + self.tdCom.case_name = "watermark" + sys._getframe().f_code.co_name + self.tdCom.prepare_data(session=session, watermark=watermark, fill_history_value=fill_history_value) + self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "") + self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "") + self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "") + self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}' + self.tdCom.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}' + self.tdCom.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}' + self.tdCom.date_time = self.tdCom.dataDict["start_ts"] + + if watermark is not None: + watermark_value = f'{self.tdCom.dataDict["watermark"]}s' + else: + watermark_value = None + max_delay_value = f'{self.tdCom.trans_time_to_s(max_delay)}s' + self.tdCom.create_stream(stream_name=f'{self.ctb_name}{self.tdCom.stream_suffix}', des_table=self.tdCom.ctb_stream_des_table, source_sql=f'select _wstart AS wstart, _wend AS wend, {self.tdCom.stb_source_select_str} from {self.ctb_name} session(ts, {self.tdCom.dataDict["session"]}s)', trigger_mode="max_delay", watermark=watermark_value, max_delay=max_delay_value, fill_history_value=fill_history_value) + self.tdCom.create_stream(stream_name=f'{self.tb_name}{self.tdCom.stream_suffix}', des_table=self.tdCom.tb_stream_des_table, source_sql=f'select _wstart AS wstart, _wend AS wend, {self.tdCom.tb_source_select_str} from {self.tb_name} session(ts, {self.tdCom.dataDict["session"]}s)', trigger_mode="max_delay", watermark=watermark_value, max_delay=max_delay_value, fill_history_value=fill_history_value) + 
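+        # Roughly, the create_stream() calls above produce SQL of this shape (illustrative only;
+        # actual table names, watermark and max_delay values are generated at runtime):
+        #   create stream ctb_xxx_stream trigger max_delay 2s watermark 25s
+        #     into ctb_xxx_output as
+        #     select _wstart AS wstart, _wend AS wend, <aggregations> from ctb_xxx session(ts, 12s);
+        # The loop below inserts rows inside each session window, verifies the intermediate row
+        # count, then inserts the window-closing row and allows up to max_delay for the
+        # aggregated results to show up in the destination tables.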
init_num = 0 + for i in range(self.tdCom.range_count): + if i == 0: + window_close_ts = self.tdCom.cal_watermark_window_close_session_endts(self.tdCom.date_time, self.tdCom.dataDict['watermark'], self.tdCom.dataDict['session']) + else: + self.tdCom.date_time = window_close_ts + 1 + window_close_ts = self.tdCom.cal_watermark_window_close_session_endts(self.tdCom.date_time, self.tdCom.dataDict['watermark'], self.tdCom.dataDict['session']) + + if watermark_value is not None: + for ts_value in [self.tdCom.date_time, window_close_ts-1]: + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value) + if self.tdCom.update and i%2 == 0: + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value) + for tbname in [self.tdCom.ctb_stream_des_table, self.tdCom.tb_stream_des_table]: + if tbname != self.tdCom.tb_stream_des_table: + tdSql.query(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}') + else: + tdSql.query(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}') + if not fill_history_value: + tdSql.checkEqual(tdSql.queryRows, init_num) + + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts) + if self.tdCom.update and i%2 == 0: + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts) + if i == 0: + init_num = 2 + i + else: + init_num += 1 + if watermark_value is not None: + expected_value = init_num + else: + expected_value = i + 1 + + if not fill_history_value: + for tbname in [self.ctb_name, self.tb_name]: + if tbname != self.tb_name: + self.tdCom.check_stream(f'select wstart, wend-{self.tdCom.dataDict["session"]}s, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, _wend AS wend, {self.tdCom.stb_source_select_str} from {tbname} session(ts, {self.tdCom.dataDict["session"]}s)', expected_value, max_delay) + else: + self.tdCom.check_stream(f'select wstart, wend-{self.tdCom.dataDict["session"]}s, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, _wend AS wend, {self.tdCom.tb_source_select_str} from {tbname} session(ts, {self.tdCom.dataDict["session"]}s)', expected_value, max_delay) + else: + self.tdCom.update_delete_history_data(delete=True) + for tbname in [self.ctb_name, self.tb_name]: + if tbname != self.tb_name: + self.tdCom.check_query_data(f'select wstart, wend-{self.tdCom.dataDict["session"]}s, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, _wend AS wend, {self.tdCom.stb_source_select_str} from {tbname} session(ts, {self.tdCom.dataDict["session"]}s)') + else: + self.tdCom.check_query_data(f'select wstart, wend-{self.tdCom.dataDict["session"]}s, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, _wend AS wend, {self.tdCom.tb_source_select_str} from {tbname} session(ts, {self.tdCom.dataDict["session"]}s)') + + def run(self): + for fill_history_value in [None, 1]: + for watermark in [None, random.randint(20, 30)]: + self.watermark_max_delay_session(session=random.randint(10, 15), watermark=watermark, max_delay=f"{random.randint(1, 3)}s", fill_history_value=fill_history_value) + + def stop(self): + tdSql.close() + 
tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/8-stream/partition_interval.py b/tests/system-test/8-stream/partition_interval.py new file mode 100644 index 0000000000..f12cf038e0 --- /dev/null +++ b/tests/system-test/8-stream/partition_interval.py @@ -0,0 +1,105 @@ +import sys +import threading +from util.log import * +from util.sql import * +from util.cases import * +from util.common import * + +class TDTestCase: + updatecfgDict = {'debugFlag': 135, 'asynclog': 0} + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self.tdCom = tdCom + + def partitionby_interval(self, interval=None, partition_by_elm="tbname", ignore_expired=None): + tdLog.info(f"*** testing stream partition+interval: interval: {interval}, partition_by: {partition_by_elm}, ignore_expired: {ignore_expired} ***") + self.tdCom.case_name = sys._getframe().f_code.co_name + self.tdCom.prepare_data(interval=interval) + self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "") + self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "") + self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "") + self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}' + self.tdCom.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}' + self.tdCom.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}' + ctb_name_list = list() + for i in range(1, self.tdCom.range_count): + ctb_name = self.tdCom.get_long_name() + ctb_name_list.append(ctb_name) + self.tdCom.screate_ctable(stbname=self.stb_name, ctbname=ctb_name) + if interval is not None: + source_sql = f'select _wstart AS wstart, {self.tdCom.partition_by_stb_source_select_str} from {self.stb_name} partition by {partition_by_elm} interval({self.tdCom.dataDict["interval"]}s)' + else: + source_sql = f'select {self.tdCom.stb_filter_des_select_elm} from {self.stb_name} partition by {partition_by_elm}' + + # create stb/ctb/tb stream + self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.stb_stream_des_table, source_sql=source_sql, ignore_expired=ignore_expired) + # insert data + count = 1 + step_count = 1 + for i in range(1, self.tdCom.range_count): + if i == 1: + record_window_close_ts = self.tdCom.date_time - 15 * self.tdCom.offset + ctb_name = self.tdCom.get_long_name() + self.tdCom.screate_ctable(stbname=self.stb_name, ctbname=ctb_name) + if i % 2 == 0: + step_count += i + for j in range(count, step_count): + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=f'{self.tdCom.date_time}+{j}s') + for ctb_name in ctb_name_list: + self.tdCom.sinsert_rows(tbname=ctb_name, ts_value=f'{self.tdCom.date_time}+{j}s') + count += i + else: + step_count += 1 + for i in range(2): + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=f'{self.tdCom.date_time}+{count}s') + for ctb_name in ctb_name_list: + self.tdCom.sinsert_rows(tbname=ctb_name, ts_value=f'{self.tdCom.date_time}+{count}s') + count += 1 + # check result + for colname in self.tdCom.partition_by_downsampling_function_list: + if "first" not in colname and "last" not in colname: + if interval is not None: + self.tdCom.check_query_data(f'select `{colname}` from {self.stb_name}{self.tdCom.des_table_suffix} order by 
`{colname}`;', f'select {colname} from {self.stb_name} partition by {partition_by_elm} interval({self.tdCom.dataDict["interval"]}s) order by `{colname}`;') + else: + self.tdCom.check_query_data(f'select {self.tdCom.stb_filter_des_select_elm} from {self.stb_name}{self.tdCom.des_table_suffix} order by c1,c2,c3;', f'select {self.tdCom.stb_filter_des_select_elm} from {self.stb_name} partition by {partition_by_elm} order by c1,c2,c3;') + + if self.tdCom.disorder: + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=record_window_close_ts) + for ctb_name in ctb_name_list: + self.tdCom.sinsert_rows(tbname=ctb_name, ts_value=record_window_close_ts) + if ignore_expired: + if "first" not in colname and "last" not in colname: + for colname in self.tdCom.partition_by_downsampling_function_list: + if interval is not None: + tdSql.query(f'select `{colname}` from {self.stb_name}{self.tdCom.des_table_suffix} order by `{colname}`;') + res1 = tdSql.queryResult + tdSql.query(f'select {colname} from {self.stb_name} partition by {partition_by_elm} interval({self.tdCom.dataDict["interval"]}s) order by `{colname}`;') + res2 = tdSql.queryResult + tdSql.checkNotEqual(res1, res2) + else: + self.tdCom.check_query_data(f'select {self.tdCom.stb_filter_des_select_elm} from {self.stb_name}{self.tdCom.des_table_suffix} order by c1,c2,c3;', f'select {self.tdCom.stb_filter_des_select_elm} from {self.stb_name} partition by {partition_by_elm} order by c1,c2,c3;') + + else: + for colname in self.tdCom.partition_by_downsampling_function_list: + if "first" not in colname and "last" not in colname: + if interval is not None: + self.tdCom.check_query_data(f'select `{colname}` from {self.stb_name}{self.tdCom.des_table_suffix} order by `{colname}`;', f'select {colname} from {self.stb_name} partition by {partition_by_elm} interval({self.tdCom.dataDict["interval"]}s) order by `{colname}`;') + else: + self.tdCom.check_query_data(f'select {self.tdCom.stb_filter_des_select_elm} from {self.stb_name}{self.tdCom.des_table_suffix} order by c1,c2,c3;', f'select {self.tdCom.stb_filter_des_select_elm} from {self.stb_name} partition by {partition_by_elm} order by c1,c2,c3;') + + def run(self): + for interval in [None, 10]: + for ignore_expired in [0, 1]: + self.partitionby_interval(interval=interval, partition_by_elm="tbname", ignore_expired=ignore_expired) + self.partitionby_interval(interval=10, partition_by_elm="t1") + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/8-stream/window_close_session.py b/tests/system-test/8-stream/window_close_session.py index 8ee097ca10..ddd366b45a 100644 --- a/tests/system-test/8-stream/window_close_session.py +++ b/tests/system-test/8-stream/window_close_session.py @@ -12,34 +12,6 @@ class TDTestCase: tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor(), logSql) self.tdCom = tdCom - self.tdCom.subtable = True - self.tdCom.update = True - self.tdCom.disorder = True - if self.tdCom.disorder: - self.tdCom.update = False - self.tdCom.partition_tbname_alias = "ptn_alias" if self.tdCom.subtable else "" - self.tdCom.partition_col_alias = "pcol_alias" if self.tdCom.subtable else "" - self.tdCom.partition_tag_alias = "ptag_alias" if self.tdCom.subtable else "" - self.tdCom.partition_expression_alias = "pexp_alias" if self.tdCom.subtable else "" - self.stb_name = str() - self.ctb_name = 
str() - self.tb_name = str() - self.tdCom.des_table_suffix = "_output" - self.tdCom.stream_suffix = "_stream" - self.tdCom.stream_case_when_tbname = "tbname" - self.tdCom.subtable_prefix = "prefix_" if self.tdCom.subtable else "" - self.tdCom.subtable_suffix = "_suffix" if self.tdCom.subtable else "" - self.stb_stream_des_table = str() - self.ctb_stream_des_table = str() - self.tb_stream_des_table = str() - self.downsampling_function_list = ["min(c1)", "max(c2)", "sum(c3)", "first(c4)", "last(c5)", "apercentile(c6, 50)", "avg(c7)", "count(c8)", "spread(c1)", - "stddev(c2)", "hyperloglog(c11)", "timediff(1, 0, 1h)", "timezone()", "to_iso8601(1)", 'to_unixtimestamp("1970-01-01T08:00:00+08:00")', "min(t1)", "max(t2)", "sum(t3)", - "first(t4)", "last(t5)", "apercentile(t6, 50)", "avg(t7)", "count(t8)", "spread(t1)", "stddev(t2)", "hyperloglog(t11)"] - self.tdCom.stb_output_select_str = ','.join(list(map(lambda x:f'`{x}`', self.downsampling_function_list))) - self.tdCom.stb_source_select_str = ','.join(self.downsampling_function_list) - self.tdCom.tb_source_select_str = ','.join(self.downsampling_function_list[0:15]) - self.tdCom.partition_by_downsampling_function_list = ["min(c1)", "max(c2)", "sum(c3)", "first(c4)", "last(c5)", "count(c8)", "spread(c1)", - "stddev(c2)", "hyperloglog(c11)", "min(t1)", "max(t2)", "sum(t3)", "first(t4)", "last(t5)", "count(t8)", "spread(t1)", "stddev(t2)"] def watermark_window_close_session(self, session, watermark, fill_history_value=None, delete=True): tdLog.info(f"*** testing stream window_close+session: session: {session}, watermark: {watermark}, fill_history: {fill_history_value}, delete: {delete} ***") @@ -53,7 +25,6 @@ class TDTestCase: self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}' self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}' self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}' - self.tdCom.tb_output_select_str = ','.join(list(map(lambda x:f'`{x}`', self.downsampling_function_list[0:15]))) self.tdCom.date_time = self.tdCom.dataDict["start_ts"] if watermark is not None: watermark_value = f'{self.tdCom.dataDict["watermark"]}s' From da93dc45fc6689d434fe3f62d7e9d43a9ccb8f8c Mon Sep 17 00:00:00 2001 From: jiajingbin Date: Tue, 22 Aug 2023 15:49:45 +0800 Subject: [PATCH 05/16] test: update --- tests/pytest/util/common.py | 30 +-- .../8-stream/at_once_interval_ext.py | 212 ++++++++++++++++++ .../8-stream/max_delay_interval.py | 14 +- .../system-test/8-stream/max_delay_session.py | 12 +- .../8-stream/partition_interval.py | 4 +- .../8-stream/window_close_session_ext.py | 81 +++++++ .../8-stream/window_close_state_window.py | 8 +- 7 files changed, 328 insertions(+), 33 deletions(-) create mode 100644 tests/system-test/8-stream/at_once_interval_ext.py create mode 100644 tests/system-test/8-stream/window_close_session_ext.py diff --git a/tests/pytest/util/common.py b/tests/pytest/util/common.py index a512ae605f..80053e66d1 100644 --- a/tests/pytest/util/common.py +++ b/tests/pytest/util/common.py @@ -166,6 +166,7 @@ class TDCom: self.fill_stb_source_select_str = ','.join(self.fill_function_list) self.fill_tb_output_select_str = ','.join(list(map(lambda x:f'`{x}`', self.fill_function_list[0:13]))) self.fill_tb_source_select_str = ','.join(self.fill_function_list[0:13]) + self.ext_tb_source_select_str = ','.join(self.downsampling_function_list[0:13]) self.stream_case_when_tbname = "tbname" self.update = True @@ -1101,11 +1102,11 @@ class TDCom: self.sgen_column_type_str(column_elm_list) 
self.sgen_tag_type_str(tag_elm_list) if self.dbname is not None: - self.stb_name = f'{self.dbname}.{stbname}' + stb_name = f'{self.dbname}.{stbname}' else: - self.stb_name = stbname + stb_name = stbname if int(count) <= 1: - create_stable_sql = f'create {use_name} {self.stb_name} ({self.column_type_str}) tags ({self.tag_type_str}) {stb_params};' + create_stable_sql = f'create {use_name} {stb_name} ({self.column_type_str}) tags ({self.tag_type_str}) {stb_params};' tdSql.execute(create_stable_sql) else: for _ in range(count): @@ -1134,13 +1135,13 @@ class TDCom: tag_value_str = tag_value_str.rstrip()[:-1] if dbname is not None: self.dbname = dbname - self.ctb_name = f'{self.dbname}.{ctbname}' + ctb_name = f'{self.dbname}.{ctbname}' else: - self.ctb_name = ctbname + ctb_name = ctbname if stbname is not None: - self.stb_name = stbname + stb_name = stbname if int(count) <= 1: - create_ctable_sql = f'create {use_name} {self.ctb_name} using {self.stb_name} tags ({tag_value_str}) {ctb_params};' + create_ctable_sql = f'create {use_name} {ctb_name} using {stb_name} tags ({tag_value_str}) {ctb_params};' tdSql.execute(create_ctable_sql) else: for _ in range(count): @@ -1191,11 +1192,11 @@ class TDCom: tb_params += f'{param} "{value}" ' self.sgen_column_type_str(column_elm_list) if self.dbname is not None: - self.tb_name = f'{self.dbname}.{tbname}' + tb_name = f'{self.dbname}.{tbname}' else: - self.tb_name = tbname + tb_name = tbname if int(count) <= 1: - create_table_sql = f'create {use_name} {self.tb_name} ({self.column_type_str}) {tb_params};' + create_table_sql = f'create {use_name} {tb_name} ({self.column_type_str}) {tb_params};' tdSql.execute(create_table_sql) else: for _ in range(count): @@ -1580,6 +1581,10 @@ class TDCom: self.date_time = self.genTs(precision=self.precision)[0] self.screateDb(dbname=self.dbname, precision=self.precision) + if ext_stb: + self.screate_stable(dbname=self.dbname, stbname=self.ext_stb_stream_des_table) + self.screate_ctable(dbname=self.dbname, stbname=self.ext_stb_stream_des_table, ctbname=self.ext_ctb_stream_des_table) + self.screate_table(dbname=self.dbname, tbname=self.ext_tb_stream_des_table) self.screate_stable(dbname=self.dbname, stbname=self.stb_name) self.screate_ctable(dbname=self.dbname, stbname=self.stb_name, ctbname=self.ctb_name) self.screate_table(dbname=self.dbname, tbname=self.tb_name) @@ -1590,9 +1595,6 @@ class TDCom: self.sinsert_rows(tbname=self.tb_name, ts_value=ts_value) if i == 1: self.record_history_ts = ts_value - if ext_stb: - self.screate_stable(dbname=self.dbname, stbname=self.ext_stb_stream_des_table) - self.screate_ctable(dbname=self.dbname, stbname=self.ext_stb_stream_des_table, ctbname=self.ext_ctb_stream_des_table) - self.screate_table(dbname=self.dbname, tbname=self.ext_tb_stream_des_table) + tdCom = TDCom() diff --git a/tests/system-test/8-stream/at_once_interval_ext.py b/tests/system-test/8-stream/at_once_interval_ext.py new file mode 100644 index 0000000000..aa9e5029f9 --- /dev/null +++ b/tests/system-test/8-stream/at_once_interval_ext.py @@ -0,0 +1,212 @@ +import sys +import threading +from util.log import * +from util.sql import * +from util.cases import * +from util.common import * + +class TDTestCase: + updatecfgDict = {'debugFlag': 135, 'asynclog': 0} + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self.tdCom = tdCom + + def at_once_interval_ext(self, interval, partition="tbname", delete=False, 
fill_value=None, fill_history_value=None, interval_value=None, subtable=None, case_when=None, stb_field_name_value=None, tag_value=None, use_exist_stb=False, use_except=False): + if use_except: + if stb_field_name_value == self.tdCom.partitial_stb_filter_des_select_elm or stb_field_name_value == self.tdCom.exchange_stb_filter_des_select_elm or len(stb_field_name_value.split(",")) == len(self.tdCom.partitial_stb_filter_des_select_elm.split(",")): + partitial_tb_source_str = self.tdCom.partitial_ext_tb_source_select_str + else: + partitial_tb_source_str = self.tdCom.ext_tb_source_select_str + else: + if stb_field_name_value == self.tdCom.partitial_stb_filter_des_select_elm or stb_field_name_value == self.tdCom.exchange_stb_filter_des_select_elm: + partitial_tb_source_str = self.tdCom.partitial_ext_tb_source_select_str + else: + partitial_tb_source_str = self.tdCom.ext_tb_source_select_str + + if stb_field_name_value is not None: + if len(stb_field_name_value) == 0: + stb_field_name_value = ",".join(self.tdCom.tb_filter_des_select_elm.split(",")[:5]) + # else: + # stb_field_name_value = self.tdCom.tb_filter_des_select_elm + self.delete = delete + self.tdCom.case_name = sys._getframe().f_code.co_name + defined_tag_count = len(tag_value.split()) if tag_value is not None else 0 + # if interval_value is None: + # interval_value = f'{self.tdCom.dataDict["interval"]}s' + self.tdCom.prepare_data(interval=interval, fill_history_value=fill_history_value, ext_stb=use_exist_stb) + self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "") + self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "") + self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "") + self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}' + self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}' + self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}' + + if partition == "tbname": + if case_when: + stream_case_when_partition = case_when + else: + stream_case_when_partition = self.tdCom.partition_tbname_alias + + partition_elm_alias = self.tdCom.partition_tbname_alias + elif partition == "c1": + if case_when: + stream_case_when_partition = case_when + else: + stream_case_when_partition = self.tdCom.partition_col_alias + partition_elm_alias = self.tdCom.partition_col_alias + elif partition == "abs(c1)": + partition_elm_alias = self.tdCom.partition_expression_alias + elif partition == "tbname,t1,c1": + partition_elm_alias = f'{self.tdCom.partition_tbname_alias},t1,c1' + partiton_tb = "tbname,c1" + partition_elm_alias_tb = f'{self.tdCom.partition_tbname_alias},c1' + else: + partition_elm_alias = self.tdCom.partition_tag_alias + if subtable: + if partition == "tbname": + if case_when: + stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", {stream_case_when_partition}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + else: + stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + else: + if subtable == "constant": + # stb_subtable_value = f'"{self.tdCom.ext_ctb_stream_des_table}"' + stb_subtable_value = f'"constant_{self.tdCom.ext_ctb_stream_des_table}"' + else: + stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", cast(cast(cast({subtable} as int unsigned) as bigint) as varchar(100))), "{self.tdCom.subtable_suffix}")' if 
self.tdCom.subtable else None + else: + stb_subtable_value = None + if fill_value: + if "value" in fill_value.lower(): + fill_value='VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11' + # self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.tdCom.ext_stb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.ext_tb_source_select_str} from {self.stb_name} partition by {partition} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="at_once", fill_value=fill_value, fill_history_value=fill_history_value, stb_field_name_value=stb_field_name_value, tag_value=tag_value, use_exist_stb=use_exist_stb) + if partition: + stream_sql = self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.tdCom.ext_stb_stream_des_table, subtable_value=stb_subtable_value, source_sql=f'select _wstart AS wstart, {partitial_tb_source_str} from {self.stb_name} partition by {partition} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="at_once", fill_value=fill_value, fill_history_value=fill_history_value, stb_field_name_value=stb_field_name_value, tag_value=tag_value, use_exist_stb=use_exist_stb, use_except=use_except) + else: + stream_sql = self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.tdCom.ext_stb_stream_des_table, subtable_value=stb_subtable_value, source_sql=f'select _wstart AS wstart, {partitial_tb_source_str} from {self.stb_name} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="at_once", fill_value=fill_value, fill_history_value=fill_history_value, stb_field_name_value=stb_field_name_value, tag_value=tag_value, use_exist_stb=use_exist_stb, use_except=use_except) + if stream_sql: + tdSql.error(stream_sql) + return + start_time = self.tdCom.date_time + if subtable == "constant": + range_count = 1 + else: + range_count = self.tdCom.range_count + + for i in range(range_count): + latency = 0 + tag_value_list = list() + ts_value = str(self.tdCom.date_time+self.tdCom.dataDict["interval"])+f'+{i*10}s' + ts_cast_delete_value = self.tdCom.time_cast(ts_value) + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value) + if self.tdCom.update and i%2 == 0: + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value) + if self.delete and i%2 != 0: + self.tdCom.sdelete_rows(tbname=self.ctb_name, start_ts=ts_cast_delete_value) + self.tdCom.date_time += 1 + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value) + if self.tdCom.update and i%2 == 0: + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value) + if self.delete and i%2 != 0: + self.tdCom.sdelete_rows(tbname=self.tb_name, start_ts=ts_cast_delete_value) + self.tdCom.date_time += 1 + if tag_value: + if subtable == "constant": + tdSql.query(f'select {tag_value} from constant_{self.tdCom.ext_ctb_stream_des_table}') + else: + tdSql.query(f'select {tag_value} from {self.stb_name}') + tag_value_list = tdSql.queryResult + if not fill_value: + if stb_field_name_value == self.tdCom.partitial_stb_filter_des_select_elm: + self.tdCom.check_query_data(f'select {self.tdCom.partitial_stb_filter_des_select_elm } from ext_{self.stb_name}{self.tdCom.des_table_suffix} order by ts', f'select _wstart AS wstart, {partitial_tb_source_str} from {self.stb_name} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) order by wstart', sorted=True) + elif stb_field_name_value == self.tdCom.exchange_stb_filter_des_select_elm: + self.tdCom.check_query_data(f'select 
{self.tdCom.partitial_stb_filter_des_select_elm } from ext_{self.stb_name}{self.tdCom.des_table_suffix} order by ts', f'select _wstart AS wstart, cast(max(c2) as tinyint), cast(min(c1) as smallint) from {self.stb_name} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) order by wstart', sorted=True) + else: + if partition: + if tag_value == self.tdCom.exchange_tag_filter_des_select_elm: + self.tdCom.check_query_data(f'select {self.tdCom.partitial_tag_stb_filter_des_select_elm} from ext_{self.stb_name}{self.tdCom.des_table_suffix} order by ts', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) order by wstart', defined_tag_count=defined_tag_count, tag_value_list=tag_value_list) + elif tag_value == self.tdCom.cast_tag_filter_des_select_elm: + tdSql.query(f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) order by wstart') + limit_row = tdSql.queryRows + self.tdCom.check_query_data(f'select {self.tdCom.cast_tag_filter_des_select_elm} from ext_{self.stb_name}{self.tdCom.des_table_suffix} order by ts', f'select cast(t1 as TINYINT UNSIGNED),cast(t2 as varchar(256)),cast(t3 as bool) from {self.stb_name} order by ts limit {limit_row}') + tdSql.query(f'select t1,t2,t3,t4,t6,t7,t8,t9,t10,t12 from ext_{self.stb_name}{self.tdCom.des_table_suffix};') + while list(set(tdSql.queryResult)) != [(None, None, None, None, None, None, None, None, None, None)]: + tdSql.query(f'select t1,t2,t3,t4,t6,t7,t8,t9,t10,t12 from ext_{self.stb_name}{self.tdCom.des_table_suffix};') + if latency < self.tdCom.default_interval: + latency += 1 + time.sleep(1) + else: + return False + tdSql.checkEqual(list(set(tdSql.queryResult)), [(None, None, None, None, None, None, None, None, None, None)]) + else: + self.tdCom.check_query_data(f'select {self.tdCom.stb_filter_des_select_elm} from ext_{self.stb_name}{self.tdCom.des_table_suffix} order by ts', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) order by wstart', defined_tag_count=defined_tag_count, tag_value_list=tag_value_list) + else: + if use_exist_stb and not tag_value: + self.tdCom.check_query_data(f'select {self.tdCom.stb_filter_des_select_elm} from ext_{self.stb_name}{self.tdCom.des_table_suffix} order by ts', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} interval({self.tdCom.dataDict["interval"]}s) order by wstart', defined_tag_count=defined_tag_count, tag_value_list=tag_value_list, partition=partition, use_exist_stb=use_exist_stb) + else: + self.tdCom.check_query_data(f'select {self.tdCom.stb_filter_des_select_elm} from ext_{self.stb_name}{self.tdCom.des_table_suffix} order by ts', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} interval({self.tdCom.dataDict["interval"]}s) order by wstart', defined_tag_count=defined_tag_count, tag_value_list=tag_value_list, partition=partition, subtable=subtable) + + if subtable: + for tname in [self.stb_name]: + tdSql.query(f'select * from {self.ctb_name}') + ptn_counter = 0 + for c1_value in tdSql.queryResult: + if partition == "c1": + tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{abs(c1_value[1])}{self.tdCom.subtable_suffix}`;') + elif partition == "abs(c1)": + abs_c1_value = abs(c1_value[1]) + 
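+                        # the stream derives the subtable name from abs(c1), so the generated
+                        # table can be queried directly by that name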
tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;') + elif partition == "tbname" and ptn_counter == 0: + tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}`;') + ptn_counter += 1 + else: + tdSql.query(f'select cast(cast(cast({c1_value[1]} as int unsigned) as bigint) as varchar(100))') + subtable_value = tdSql.queryResult[0][0] + if subtable == "constant": + return + else: + tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{subtable_value}{self.tdCom.subtable_suffix}`;') + tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True) + + def run(self): + self.at_once_interval_ext(interval=random.randint(10, 15), delete=False, fill_history_value=1, partition=None, subtable="constant", stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm, use_exist_stb=True) + for delete in [True, False]: + for fill_history_value in [0, 1]: + self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=f'tbname,{self.tdCom.tag_filter_des_select_elm.split(",")[0]},c1', subtable="c1", stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True) + self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=f'tbname,{self.tdCom.tag_filter_des_select_elm},c1', subtable="c1", stb_field_name_value=None, tag_value=self.tdCom.tag_filter_des_select_elm, use_exist_stb=True) + self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=f'tbname,{self.tdCom.tag_filter_des_select_elm},c1', stb_field_name_value=None, tag_value=self.tdCom.tag_filter_des_select_elm, use_exist_stb=True) + self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=f'tbname,{self.tdCom.tag_filter_des_select_elm},c1', stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm, use_exist_stb=True) + self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=f'tbname,{self.tdCom.tag_filter_des_select_elm.split(",")[0]},c1', subtable="c1", stb_field_name_value=self.tdCom.partitial_stb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True) + self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=f'tbname,{self.tdCom.tag_filter_des_select_elm.split(",")[0]},c1', subtable="c1", stb_field_name_value=self.tdCom.exchange_stb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True) + self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=None, subtable=None, stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True) + # self-define tag + self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=f'{self.tdCom.tag_filter_des_select_elm}', subtable=None, stb_field_name_value=None, tag_value=self.tdCom.tag_filter_des_select_elm, use_exist_stb=True) + 
self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=f'{self.tdCom.partitial_tag_filter_des_select_elm}', subtable=None, stb_field_name_value=None, tag_value=self.tdCom.partitial_tag_filter_des_select_elm, use_exist_stb=True) + self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=f'{self.tdCom.partitial_tag_filter_des_select_elm}', subtable=None, stb_field_name_value=None, tag_value=self.tdCom.exchange_tag_filter_des_select_elm, use_exist_stb=True) + self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition="t1 as t5,t2 as t11,t3 as t13", subtable=None, stb_field_name_value=None, tag_value="t5,t11,t13", use_exist_stb=True) + self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=None, subtable=None, stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=None, use_exist_stb=True) + self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=None, subtable=None, stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value="t1", use_exist_stb=True) + # error cases + self.at_once_interval_ext(interval=random.randint(10, 15), partition=f'tbname,{self.tdCom.tag_filter_des_select_elm},c1', stb_field_name_value="", tag_value=self.tdCom.tag_filter_des_select_elm, use_exist_stb=True, use_except=True) + self.at_once_interval_ext(interval=random.randint(10, 15), partition=f'tbname,{self.tdCom.tag_filter_des_select_elm},c1', stb_field_name_value=self.tdCom.tb_filter_des_select_elm.replace("c1","c19"), tag_value=self.tdCom.tag_filter_des_select_elm, use_exist_stb=True, use_except=True) + self.at_once_interval_ext(interval=random.randint(10, 15), partition=f'tbname', subtable="c1", stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True, use_except=True) + self.at_once_interval_ext(interval=random.randint(10, 15), partition=f'tbname,{self.tdCom.tag_filter_des_select_elm},c1', subtable="ttt", stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True, use_except=True) + self.at_once_interval_ext(interval=random.randint(10, 15), partition=f'tbname,{self.tdCom.tag_filter_des_select_elm},c1', subtable="c1", stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=None, use_exist_stb=True, use_except=True) + self.at_once_interval_ext(interval=random.randint(10, 15), partition=f'tbname,{self.tdCom.tag_filter_des_select_elm},c1', subtable="c1", stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value="t15", use_exist_stb=True, use_except=True) + self.at_once_interval_ext(interval=random.randint(10, 15), partition=f'tbname,{self.tdCom.tag_filter_des_select_elm},c1', subtable="c1", stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value="c5", use_exist_stb=True, use_except=True) + self.at_once_interval_ext(interval=random.randint(10, 15), partition=f'tbname,{self.tdCom.tag_filter_des_select_elm.split(",")[0]},c1', subtable="c1", stb_field_name_value="ts,c1,c2,c3", tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True, use_except=True) + self.at_once_interval_ext(interval=random.randint(10, 15), 
partition=f'tbname,{self.tdCom.tag_filter_des_select_elm.split(",")[0]},c1', subtable="c1", stb_field_name_value="ts,c1", tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True, use_except=True) + self.at_once_interval_ext(interval=random.randint(10, 15), partition=f'tbname,{self.tdCom.tag_filter_des_select_elm.split(",")[0]},c1', subtable="c1", stb_field_name_value="c1,c2,c3", tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True, use_except=True) + self.at_once_interval_ext(interval=random.randint(10, 15), delete=False, fill_history_value=1, partition="t1 as t5,t2 as t11", subtable=None, stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value="t5,t11,t13", use_exist_stb=True, use_except=True) + self.at_once_interval_ext(interval=random.randint(10, 15), delete=False, fill_history_value=1, partition="t1 as t5,t2 as t11,t3 as t14", subtable=None, stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value="t5,t11,t13", use_exist_stb=True, use_except=True) + self.at_once_interval_ext(interval=random.randint(10, 15), delete=False, fill_history_value=1, partition="t1 as t5,t2 as t11,t3 as c13", subtable=None, stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value="t5,t11,c13", use_exist_stb=True, use_except=True) + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/8-stream/max_delay_interval.py b/tests/system-test/8-stream/max_delay_interval.py index 9306118e30..5efc4262a1 100644 --- a/tests/system-test/8-stream/max_delay_interval.py +++ b/tests/system-test/8-stream/max_delay_interval.py @@ -16,7 +16,7 @@ class TDTestCase: def watermark_max_delay_interval(self, interval, max_delay, watermark=None, fill_value=None, delete=False): tdLog.info(f"*** testing stream max_delay+interval: interval: {interval}, watermark: {watermark}, fill_value: {fill_value}, delete: {delete} ***") self.delete = delete - self.case_name = sys._getframe().f_code.co_name + self.tdCom.case_name = sys._getframe().f_code.co_name if watermark is not None: self.case_name = "watermark" + sys._getframe().f_code.co_name self.tdCom.prepare_data(interval=interval, watermark=watermark) @@ -24,8 +24,8 @@ class TDTestCase: self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "") self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "") self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}' - self.tdCom.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}' - self.tdCom.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}' + self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}' + self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}' self.tdCom.date_time = 1658921623245 if watermark is not None: watermark_value = f'{self.tdCom.dataDict["watermark"]}s' @@ -40,11 +40,11 @@ class TDTestCase: fill_value='VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11' # create stb/ctb/tb stream self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.stb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="max_delay", watermark=watermark_value, max_delay=max_delay_value, 
fill_value=fill_value) - self.tdCom.create_stream(stream_name=f'{self.ctb_name}{self.tdCom.stream_suffix}', des_table=self.tdCom.ctb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.ctb_name} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="max_delay", watermark=watermark_value, max_delay=max_delay_value, fill_value=fill_value) + self.tdCom.create_stream(stream_name=f'{self.ctb_name}{self.tdCom.stream_suffix}', des_table=self.ctb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.ctb_name} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="max_delay", watermark=watermark_value, max_delay=max_delay_value, fill_value=fill_value) if fill_value: if "value" in fill_value.lower(): fill_value='VALUE,1,2,3,4,5,6,7,8,9,10,11' - self.tdCom.create_stream(stream_name=f'{self.tb_name}{self.tdCom.stream_suffix}', des_table=self.tdCom.tb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {self.tb_name} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="max_delay", watermark=watermark_value, max_delay=max_delay_value, fill_value=fill_value) + self.tdCom.create_stream(stream_name=f'{self.tb_name}{self.tdCom.stream_suffix}', des_table=self.tb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {self.tb_name} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="max_delay", watermark=watermark_value, max_delay=max_delay_value, fill_value=fill_value) init_num = 0 start_time = self.tdCom.date_time for i in range(self.tdCom.range_count): @@ -63,8 +63,8 @@ class TDTestCase: self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=self.tdCom.date_time+num*self.tdCom.offset) self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=self.tdCom.date_time+num*self.tdCom.offset) if not fill_value: - for tbname in [self.stb_stream_des_table, self.tdCom.ctb_stream_des_table, self.tdCom.tb_stream_des_table]: - if tbname != self.tdCom.tb_stream_des_table: + for tbname in [self.stb_stream_des_table, self.ctb_stream_des_table, self.tb_stream_des_table]: + if tbname != self.tb_stream_des_table: tdSql.query(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}') else: tdSql.query(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}') diff --git a/tests/system-test/8-stream/max_delay_session.py b/tests/system-test/8-stream/max_delay_session.py index 874665dcc9..1a734e0e61 100644 --- a/tests/system-test/8-stream/max_delay_session.py +++ b/tests/system-test/8-stream/max_delay_session.py @@ -23,8 +23,8 @@ class TDTestCase: self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "") self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "") self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}' - self.tdCom.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}' - self.tdCom.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}' + self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}' + self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}' self.tdCom.date_time = self.tdCom.dataDict["start_ts"] if watermark is not None: @@ -32,8 +32,8 @@ class TDTestCase: else: watermark_value = None max_delay_value = f'{self.tdCom.trans_time_to_s(max_delay)}s' - self.tdCom.create_stream(stream_name=f'{self.ctb_name}{self.tdCom.stream_suffix}', des_table=self.tdCom.ctb_stream_des_table, 
source_sql=f'select _wstart AS wstart, _wend AS wend, {self.tdCom.stb_source_select_str} from {self.ctb_name} session(ts, {self.tdCom.dataDict["session"]}s)', trigger_mode="max_delay", watermark=watermark_value, max_delay=max_delay_value, fill_history_value=fill_history_value) - self.tdCom.create_stream(stream_name=f'{self.tb_name}{self.tdCom.stream_suffix}', des_table=self.tdCom.tb_stream_des_table, source_sql=f'select _wstart AS wstart, _wend AS wend, {self.tdCom.tb_source_select_str} from {self.tb_name} session(ts, {self.tdCom.dataDict["session"]}s)', trigger_mode="max_delay", watermark=watermark_value, max_delay=max_delay_value, fill_history_value=fill_history_value) + self.tdCom.create_stream(stream_name=f'{self.ctb_name}{self.tdCom.stream_suffix}', des_table=self.ctb_stream_des_table, source_sql=f'select _wstart AS wstart, _wend AS wend, {self.tdCom.stb_source_select_str} from {self.ctb_name} session(ts, {self.tdCom.dataDict["session"]}s)', trigger_mode="max_delay", watermark=watermark_value, max_delay=max_delay_value, fill_history_value=fill_history_value) + self.tdCom.create_stream(stream_name=f'{self.tb_name}{self.tdCom.stream_suffix}', des_table=self.tb_stream_des_table, source_sql=f'select _wstart AS wstart, _wend AS wend, {self.tdCom.tb_source_select_str} from {self.tb_name} session(ts, {self.tdCom.dataDict["session"]}s)', trigger_mode="max_delay", watermark=watermark_value, max_delay=max_delay_value, fill_history_value=fill_history_value) init_num = 0 for i in range(self.tdCom.range_count): if i == 0: @@ -49,8 +49,8 @@ class TDTestCase: if self.tdCom.update and i%2 == 0: self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value) self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value) - for tbname in [self.tdCom.ctb_stream_des_table, self.tdCom.tb_stream_des_table]: - if tbname != self.tdCom.tb_stream_des_table: + for tbname in [self.ctb_stream_des_table, self.tb_stream_des_table]: + if tbname != self.tb_stream_des_table: tdSql.query(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}') else: tdSql.query(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}') diff --git a/tests/system-test/8-stream/partition_interval.py b/tests/system-test/8-stream/partition_interval.py index f12cf038e0..0424932bf8 100644 --- a/tests/system-test/8-stream/partition_interval.py +++ b/tests/system-test/8-stream/partition_interval.py @@ -21,8 +21,8 @@ class TDTestCase: self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "") self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "") self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}' - self.tdCom.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}' - self.tdCom.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}' + self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}' + self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}' ctb_name_list = list() for i in range(1, self.tdCom.range_count): ctb_name = self.tdCom.get_long_name() diff --git a/tests/system-test/8-stream/window_close_session_ext.py b/tests/system-test/8-stream/window_close_session_ext.py new file mode 100644 index 0000000000..33990bd821 --- /dev/null +++ b/tests/system-test/8-stream/window_close_session_ext.py @@ -0,0 +1,81 @@ +import sys +import threading +from util.log import * +from util.sql import * +from util.cases import * +from util.common import * + +class TDTestCase: + updatecfgDict = {'debugFlag': 135, 'asynclog': 
0} + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self.tdCom = tdCom + + def watermark_window_close_session_ext(self, session, watermark, fill_history_value=None, partition=None, subtable=None, stb_field_name_value=None, tag_value=None, use_exist_stb=False): + if stb_field_name_value == self.tdCom.partitial_stb_filter_des_select_elm or stb_field_name_value == self.tdCom.exchange_stb_filter_des_select_elm: + partitial_tb_source_str = self.tdCom.partitial_ext_tb_source_select_str + else: + partitial_tb_source_str = self.tdCom.ext_tb_source_select_str + if not stb_field_name_value: + stb_field_name_value = self.tdCom.tb_filter_des_select_elm + self.tdCom.case_name = sys._getframe().f_code.co_name + defined_tag_count = len(tag_value.split()) + if watermark is not None: + self.case_name = "watermark" + sys._getframe().f_code.co_name + self.tdCom.prepare_data(session=session, watermark=watermark, fill_history_value=fill_history_value, ext_stb=use_exist_stb) + self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "") + self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "") + self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "") + self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}' + self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}' + self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}' + self.tdCom.date_time = self.tdCom.dataDict["start_ts"] + if subtable: + stb_subtable_value = f'concat(concat("{self.stb_name}_{self.subtable_prefix}", cast(cast(abs(cast({subtable} as int)) as bigint) as varchar(100))), "{self.subtable_suffix}")' if self.subtable else None + else: + stb_subtable_value = None + if watermark is not None: + watermark_value = f'{self.tdCom.dataDict["watermark"]}s' + else: + watermark_value = None + # create stb/ctb/tb stream + self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.tdCom.ext_stb_stream_des_table, source_sql=f'select _wstart AS wstart, {partitial_tb_source_str} from {self.stb_name} session(ts, {self.tdCom.dataDict["session"]}s)', trigger_mode="window_close", watermark=watermark_value, subtable_value=stb_subtable_value, fill_history_value=fill_history_value, stb_field_name_value=stb_field_name_value, tag_value=tag_value, use_exist_stb=use_exist_stb) + for i in range(self.tdCom.range_count): + if i == 0: + window_close_ts = self.tdCom.cal_watermark_window_close_session_endts(self.tdCom.date_time, self.tdCom.dataDict['watermark'], self.tdCom.dataDict['session']) + else: + self.tdCom.date_time = window_close_ts + 1 + window_close_ts = self.tdCom.cal_watermark_window_close_session_endts(self.tdCom.date_time, self.tdCom.dataDict['watermark'], self.tdCom.dataDict['session']) + if watermark_value is not None: + expected_value = i + 1 + for ts_value in [self.tdCom.date_time, window_close_ts-1]: + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value) + if self.tdCom.update and i%2 == 0: + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value) + else: + expected_value = i + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts) + if self.tdCom.update and i%2 == 0: + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts) + + if fill_history_value: + self.tdCom.update_delete_history_data(delete=True) + if tag_value: + tdSql.query(f'select 
{tag_value} from {self.stb_name}') + tag_value_list = tdSql.queryResult + self.tdCom.check_query_data(f'select {self.tdCom.stb_filter_des_select_elm} from ext_{self.stb_name}{self.tdCom.des_table_suffix} order by ts', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} session(ts, {self.tdCom.dataDict["session"]}s) order by wstart limit {expected_value};', sorted=True, defined_tag_count=defined_tag_count, tag_value_list=tag_value_list, partition=partition) + + def run(self): + for fill_history_value in [0, 1]: + self.watermark_window_close_session_ext(session=random.randint(10, 12), watermark=random.randint(20, 25), fill_history_value=fill_history_value, subtable=None, partition=None, stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/8-stream/window_close_state_window.py b/tests/system-test/8-stream/window_close_state_window.py index d6e6a2c093..4c978cb860 100644 --- a/tests/system-test/8-stream/window_close_state_window.py +++ b/tests/system-test/8-stream/window_close_state_window.py @@ -22,11 +22,11 @@ class TDTestCase: self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "") self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "") self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}' - self.tdCom.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}' - self.tdCom.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}' + self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}' + self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}' state_window_col_name = self.tdCom.dataDict["state_window"] - self.tdCom.create_stream(stream_name=f'{self.ctb_name}{self.tdCom.stream_suffix}', des_table=self.tdCom.ctb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.ctb_name} state_window({state_window_col_name})', trigger_mode="window_close") - self.tdCom.create_stream(stream_name=f'{self.tb_name}{self.tdCom.stream_suffix}', des_table=self.tdCom.tb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {self.tb_name} state_window({state_window_col_name})', trigger_mode="window_close") + self.tdCom.create_stream(stream_name=f'{self.ctb_name}{self.tdCom.stream_suffix}', des_table=self.ctb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.ctb_name} state_window({state_window_col_name})', trigger_mode="window_close") + self.tdCom.create_stream(stream_name=f'{self.tb_name}{self.tdCom.stream_suffix}', des_table=self.tb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {self.tb_name} state_window({state_window_col_name})', trigger_mode="window_close") state_window_max = self.tdCom.dataDict['state_window_max'] state_window_value_inmem = 0 sleep_step = 0 From 10cd04349d382988ba52d364143fe2e88a161069 Mon Sep 17 00:00:00 2001 From: jiajingbin Date: Tue, 22 Aug 2023 18:57:29 +0800 Subject: [PATCH 06/16] test: add stream cases --- tests/pytest/util/common.py | 301 ++++++++++++++++-- tests/pytest/util/sql.py | 19 +- 
.../8-stream/at_once_interval_ext.py | 7 +- .../8-stream/max_delay_interval_ext.py | 101 ++++++ .../system-test/8-stream/pause_resume_test.py | 154 +++++++++ .../8-stream/window_close_session_ext.py | 10 +- 6 files changed, 561 insertions(+), 31 deletions(-) create mode 100644 tests/system-test/8-stream/max_delay_interval_ext.py create mode 100644 tests/system-test/8-stream/pause_resume_test.py diff --git a/tests/pytest/util/common.py b/tests/pytest/util/common.py index 80053e66d1..f06e5d7e79 100644 --- a/tests/pytest/util/common.py +++ b/tests/pytest/util/common.py @@ -850,6 +850,28 @@ class TDCom: # stream def create_stream(self, stream_name, des_table, source_sql, trigger_mode=None, watermark=None, max_delay=None, ignore_expired=None, ignore_update=None, subtable_value=None, fill_value=None, fill_history_value=None, stb_field_name_value=None, tag_value=None, use_exist_stb=False, use_except=False): + """create_stream + + Args: + stream_name (str): stream_name + des_table (str): target stable + source_sql (str): stream sql + trigger_mode (str, optional): at_once/window_close/max_delay. Defaults to None. + watermark (str, optional): watermark time. Defaults to None. + max_delay (str, optional): max_delay time. Defaults to None. + ignore_expired (int, optional): ignore expired data. Defaults to None. + ignore_update (int, optional): ignore update data. Defaults to None. + subtable_value (str, optional): subtable. Defaults to None. + fill_value (str, optional): fill. Defaults to None. + fill_history_value (int, optional): 0/1. Defaults to None. + stb_field_name_value (str, optional): existed stb. Defaults to None. + tag_value (str, optional): custom tag. Defaults to None. + use_exist_stb (bool, optional): use existed stb tag. Defaults to False. + use_except (bool, optional): Exception tag. Defaults to False. + + Returns: + str: stream + """ if subtable_value is None: subtable = "" else: @@ -923,20 +945,54 @@ class TDCom: else: return f'create stream if not exists {stream_name} {stream_options} {fill_history} into {des_table}{stb_field_name} {tags} {subtable} as {source_sql} {fill};' + def pause_stream(self, stream_name, if_exist=True, if_not_exist=False): + """pause_stream + + Args: + stream_name (str): stream_name + if_exist (bool, optional): Defaults to True. + if_not_exist (bool, optional): Defaults to False. + """ + if_exist_value = "if exists" if if_exist else "" + if_not_exist_value = "if not exists" if if_not_exist else "" + tdSql.execute(f'pause stream {if_exist_value} {if_not_exist_value} {stream_name}') + + def resume_stream(self, stream_name, if_exist=True, if_not_exist=False, ignore_untreated=False): + """resume_stream + + Args: + stream_name (str): stream_name + if_exist (bool, optional): Defaults to True. + if_not_exist (bool, optional): Defaults to False. + ignore_untreated (bool, optional): Defaults to False. + """ + if_exist_value = "if exists" if if_exist else "" + if_not_exist_value = "if not exists" if if_not_exist else "" + ignore_untreated_value = "ignore untreated" if ignore_untreated else "" + tdSql.execute(f'resume stream {if_exist_value} {if_not_exist_value} {ignore_untreated_value} {stream_name}') def drop_all_streams(self): + """drop all streams + """ tdSql.query("show streams") stream_name_list = list(map(lambda x: x[0], tdSql.queryResult)) for stream_name in stream_name_list: tdSql.execute(f'drop stream if exists {stream_name};') def drop_db(self, dbname="test"): + """drop a db + + Args: + dbname (str, optional): Defaults to "test". 
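+
+        Example (illustrative):
+            tdCom.drop_db("2023_test")  # a name starting with a digit is wrapped in backticks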
+ """ if dbname[0].isdigit(): tdSql.execute(f'drop database if exists `{dbname}`') else: tdSql.execute(f'drop database if exists {dbname}') def drop_all_db(self): + """drop all databases + """ tdSql.query("show databases;") db_list = list(map(lambda x: x[0], tdSql.queryResult)) for dbname in db_list: @@ -944,6 +1000,15 @@ class TDCom: tdSql.execute(f'drop database if exists `{dbname}`') def time_cast(self, time_value, split_symbol="+"): + """cast bigint to timestamp + + Args: + time_value (bigint): ts + split_symbol (str, optional): split sympol. Defaults to "+". + + Returns: + _type_: timestamp + """ ts_value = str(time_value).split(split_symbol)[0] if split_symbol in str(time_value): ts_value_offset = str(time_value).split(split_symbol)[1] @@ -952,6 +1017,8 @@ class TDCom: return f'cast({ts_value} as timestamp){split_symbol}{ts_value_offset}' def clean_env(self): + """drop all streams and databases + """ self.drop_all_streams() self.drop_all_db() @@ -966,9 +1033,16 @@ class TDCom: pass def genTs(self, precision="ms", ts="", protype="taosc", ns_tag=None): - """ - protype = "taosc" or "restful" - gen ts and datetime + """generate ts + + Args: + precision (str, optional): db precision. Defaults to "ms". + ts (str, optional): input ts. Defaults to "". + protype (str, optional): "taosc" or "restful". Defaults to "taosc". + ns_tag (_type_, optional): use ns. Defaults to None. + + Returns: + timestamp, datetime: timestamp and datetime """ if precision == "ns": if ts == "" or ts is None: @@ -1004,6 +1078,11 @@ class TDCom: return ts, dt def sgen_column_type_str(self, column_elm_list): + """generage column type str + + Args: + column_elm_list (list): column_elm_list + """ self.column_type_str = "" if column_elm_list is None: self.column_type_str = self.gen_default_column_str() @@ -1024,6 +1103,11 @@ class TDCom: self.column_type_str = self.default_colts_name + " timestamp, " + self.column_type_str.rstrip()[:-1] def sgen_tag_type_str(self, tag_elm_list): + """generage tag type str + + Args: + tag_elm_list (list): tag_elm_list + """ self.tag_type_str = "" if tag_elm_list is None: self.tag_type_str = self.gen_default_tag_str() @@ -1044,7 +1128,14 @@ class TDCom: self.tag_type_str = self.tag_type_str.rstrip()[:-1] if self.need_tagts: self.tag_type_str = self.default_tagts_name + " timestamp, " + self.tag_type_str + def sgen_tag_value_list(self, tag_elm_list, ts_value=None): + """generage tag value str + + Args: + tag_elm_list (list): _description_ + ts_value (timestamp, optional): Defaults to None. + """ if self.need_tagts: self.ts_value = self.genTs()[0] if ts_value is not None: @@ -1071,6 +1162,12 @@ class TDCom: self.tag_value_list = [self.ts_value] + self.tag_value_list def screateDb(self, dbname="test", drop_db=True, **kwargs): + """create database + + Args: + dbname (str, optional): Defaults to "test". + drop_db (bool, optional): Defaults to True. + """ tdLog.info("creating db ...") db_params = "" if len(kwargs) > 0: @@ -1087,6 +1184,21 @@ class TDCom: def screate_stable(self, dbname=None, stbname="stb", use_name="table", column_elm_list=None, tag_elm_list=None, need_tagts=False, count=1, default_stbname_prefix="stb", default_stbname_index_start_num=1, default_column_index_start_num=1, default_tag_index_start_num=1, **kwargs): + """_summary_ + + Args: + dbname (str, optional): Defaults to None. + stbname (str, optional): Defaults to "stb". + use_name (str, optional): stable/table, Defaults to "table". + column_elm_list (list, optional): use for sgen_column_type_str(), Defaults to None. 
+ tag_elm_list (list, optional): use for sgen_tag_type_str(), Defaults to None. + need_tagts (bool, optional): tag use timestamp, Defaults to False. + count (int, optional): stable count, Defaults to 1. + default_stbname_prefix (str, optional): Defaults to "stb". + default_stbname_index_start_num (int, optional): Defaults to 1. + default_column_index_start_num (int, optional): Defaults to 1. + default_tag_index_start_num (int, optional): Defaults to 1. + """ tdLog.info("creating stable ...") if dbname is not None: self.dbname = dbname @@ -1115,6 +1227,21 @@ class TDCom: tdSql.execute(create_stable_sql) def screate_ctable(self, dbname=None, stbname=None, ctbname="ctb", use_name="table", tag_elm_list=None, ts_value=None, count=1, default_varchar_datatype="letters", default_nchar_datatype="letters", default_ctbname_prefix="ctb", default_ctbname_index_start_num=1, **kwargs): + """_summary_ + + Args: + dbname (str, optional): Defaults to None. + stbname (str, optional): Defaults to None. + ctbname (str, optional): Defaults to "ctb". + use_name (str, optional): Defaults to "table". + tag_elm_list (list, optional): use for sgen_tag_type_str(), Defaults to None. + ts_value (timestamp, optional): Defaults to None. + count (int, optional): ctb count, Defaults to 1. + default_varchar_datatype (str, optional): Defaults to "letters". + default_nchar_datatype (str, optional): Defaults to "letters". + default_ctbname_prefix (str, optional): Defaults to "ctb". + default_ctbname_index_start_num (int, optional): Defaults to 1. + """ tdLog.info("creating childtable ...") self.default_varchar_datatype = default_varchar_datatype self.default_nchar_datatype = default_nchar_datatype @@ -1150,6 +1277,13 @@ class TDCom: tdSql.execute(create_stable_sql) def sgen_column_value_list(self, column_elm_list, need_null, ts_value=None): + """_summary_ + + Args: + column_elm_list (list): gen_random_type_value() + need_null (bool): if insert null + ts_value (timestamp, optional): Defaults to None. + """ self.column_value_list = list() self.ts_value = self.genTs()[0] if ts_value is not None: @@ -1180,6 +1314,18 @@ class TDCom: def screate_table(self, dbname=None, tbname="tb", use_name="table", column_elm_list=None, count=1, default_tbname_prefix="tb", default_tbname_index_start_num=1, default_column_index_start_num=1, **kwargs): + """create ctable + + Args: + dbname (str, optional): Defaults to None. + tbname (str, optional): Defaults to "tb". + use_name (str, optional): Defaults to "table". + column_elm_list (list, optional): Defaults to None. + count (int, optional): Defaults to 1. + default_tbname_prefix (str, optional): Defaults to "tb". + default_tbname_index_start_num (int, optional): Defaults to 1. + default_column_index_start_num (int, optional): Defaults to 1. + """ tdLog.info("creating table ...") if dbname is not None: self.dbname = dbname @@ -1205,6 +1351,16 @@ class TDCom: tdSql.execute(create_table_sql) def sinsert_rows(self, dbname=None, tbname=None, column_ele_list=None, ts_value=None, count=1, need_null=False): + """insert rows + + Args: + dbname (str, optional): Defaults to None. + tbname (str, optional): Defaults to None. + column_ele_list (list, optional): Defaults to None. + ts_value (timestamp, optional): Defaults to None. + count (int, optional): Defaults to 1. + need_null (bool, optional): Defaults to False. 
+ """ tdLog.info("stream inserting ...") if dbname is not None: self.dbname = dbname @@ -1245,6 +1401,15 @@ class TDCom: tdSql.execute(insert_sql) def sdelete_rows(self, dbname=None, tbname=None, start_ts=None, end_ts=None, ts_key=None): + """delete rows + + Args: + dbname (str, optional): Defaults to None. + tbname (str, optional): Defaults to None. + start_ts (timestamp, optional): range start. Defaults to None. + end_ts (timestamp, optional): range end. Defaults to None. + ts_key (str, optional): timestamp column name. Defaults to None. + """ if dbname is not None: self.dbname = dbname if tbname is not None: @@ -1271,8 +1436,13 @@ class TDCom: base_del_sql += f'where {ts_col_name} = {start_ts};' tdSql.execute(base_del_sql) - def check_stream_field_type(self, sql, input_function): + """confirm stream field + + Args: + sql (str): input sql + input_function (str): scalar + """ tdSql.query(sql) res = tdSql.queryResult if input_function in ["acos", "asin", "atan", "cos", "log", "pow", "sin", "sqrt", "tan"]: @@ -1301,6 +1471,14 @@ class TDCom: tdSql.checkEqual(res[2][1], "DOUBLE") def round_handle(self, input_list): + """round list elem + + Args: + input_list (list): input value list + + Returns: + _type_: round list + """ tdLog.info("round rows ...") final_list = list() for i in input_list: @@ -1314,6 +1492,14 @@ class TDCom: return final_list def float_handle(self, input_list): + """float list elem + + Args: + input_list (list): input value list + + Returns: + _type_: float list + """ tdLog.info("float rows ...") final_list = list() for i in input_list: @@ -1327,10 +1513,26 @@ class TDCom: return final_list def str_ts_trans_bigint(self, str_ts): + """trans str ts to bigint + + Args: + str_ts (str): human-date + + Returns: + bigint: bigint-ts + """ tdSql.query(f'select cast({str_ts} as bigint)') return tdSql.queryResult[0][0] def cast_query_data(self, query_data): + """cast query-result for existed-stb + + Args: + query_data (list): query data list + + Returns: + list: new list after cast + """ tdLog.info("cast query data ...") col_type_list = self.column_type_str.split(',') tag_type_list = self.tag_type_str.split(',') @@ -1351,6 +1553,14 @@ class TDCom: return nl def trans_time_to_s(self, runtime): + """trans time to s + + Args: + runtime (str): 1d/1h/1m... + + Returns: + int: second + """ if "d" in str(runtime).lower(): d_num = re.findall("\d+\.?\d*", runtime.replace(" ", ""))[0] s_num = float(d_num) * 24 * 60 * 60 @@ -1367,6 +1577,23 @@ class TDCom: return int(s_num) def check_query_data(self, sql1, sql2, sorted=False, fill_value=None, tag_value_list=None, defined_tag_count=None, partition=True, use_exist_stb=False, subtable=None, reverse_check=False): + """confirm query result + + Args: + sql1 (str): select .... + sql2 (str): select .... + sorted (bool, optional): if sort result list. Defaults to False. + fill_value (str, optional): fill. Defaults to None. + tag_value_list (list, optional): Defaults to None. + defined_tag_count (int, optional): Defaults to None. + partition (bool, optional): Defaults to True. + use_exist_stb (bool, optional): Defaults to False. + subtable (str, optional): Defaults to None. + reverse_check (bool, optional): not equal. Defaults to False. 
+ + Returns: + bool: False if failed + """ tdLog.info("checking query data ...") if tag_value_list: dvalue = len(self.tag_type_str.split(',')) - defined_tag_count @@ -1485,6 +1712,16 @@ class TDCom: # tdSql.checkEqual(res1, res2) if not reverse_check else tdSql.checkNotEqual(res1, res2) def check_stream_res(self, sql, expected_res, max_delay): + """confirm stream result + + Args: + sql (str): select ... + expected_res (str): expected result + max_delay (int): max_delay value + + Returns: + bool: False if failed + """ tdSql.query(sql) latency = 0 @@ -1500,18 +1737,27 @@ class TDCom: tdSql.checkEqual(tdSql.queryRows, expected_res) def check_stream(self, sql1, sql2, expected_count, max_delay=None): + """confirm stream + + Args: + sql1 (str): select ... + sql2 (str): select ... + expected_count (int): expected_count + max_delay (int, optional): max_delay value. Defaults to None. + """ self.check_stream_res(sql1, expected_count, max_delay) self.check_query_data(sql1, sql2) def cal_watermark_window_close_session_endts(self, start_ts, watermark=None, session=None): """cal endts for close window - :param start_ts: [start timestamp: self.date_time] - :type start_ts: [epoch time] - :param watermark: [second level and > session] - :type watermark: [s] - :param precision: [default "ms" and only support "ms" now] - :type precision: str, optional + Args: + start_ts (epoch time): self.date_time + watermark (int, optional): > session. Defaults to None. + session (int, optional): Defaults to None. + + Returns: + int: as followed """ if watermark is not None: return start_ts + watermark*self.offset + 1 @@ -1521,14 +1767,13 @@ class TDCom: def cal_watermark_window_close_interval_endts(self, start_ts, interval, watermark=None): """cal endts for close window - :param start_ts: [start timestamp: self.date_time] - :type start_ts: [epoch time] - :param interval: [second level] - :type interval: [s] - :param watermark: [second level and > interval] - :type watermark: [s] - :param precision: [default "ms" and only support "ms" now] - :type precision: str, optional + Args: + start_ts (epoch time): self.date_time + interval (int): [s] + watermark (int, optional): [s]. Defaults to None. + + Returns: + _type_: _description_ """ if watermark is not None: return int(start_ts/self.offset)*self.offset + (interval - (int(start_ts/self.offset))%interval)*self.offset + watermark*self.offset @@ -1536,6 +1781,11 @@ class TDCom: return int(start_ts/self.offset)*self.offset + (interval - (int(start_ts/self.offset))%interval)*self.offset def update_delete_history_data(self, delete): + """update and delete history data + + Args: + delete (bool): True/False + """ self.sinsert_rows(tbname=self.ctb_name, ts_value=self.record_history_ts) self.sinsert_rows(tbname=self.tb_name, ts_value=self.record_history_ts) if delete: @@ -1543,6 +1793,20 @@ class TDCom: self.sdelete_rows(tbname=self.tb_name, start_ts=self.time_cast(self.record_history_ts, "-")) def prepare_data(self, interval=None, watermark=None, session=None, state_window=None, state_window_max=127, interation=3, range_count=None, precision="ms", fill_history_value=0, ext_stb=None): + """prepare stream data + + Args: + interval (int, optional): Defaults to None. + watermark (int, optional): Defaults to None. + session (int, optional): Defaults to None. + state_window (str, optional): Defaults to None. + state_window_max (int, optional): Defaults to 127. + interation (int, optional): Defaults to 3. + range_count (int, optional): Defaults to None. 
+ precision (str, optional): Defaults to "ms". + fill_history_value (int, optional): Defaults to 0. + ext_stb (bool, optional): Defaults to None. + """ self.clean_env() self.dataDict = { "stb_name" : f"{self.case_name}_stb", @@ -1595,6 +1859,5 @@ class TDCom: self.sinsert_rows(tbname=self.tb_name, ts_value=ts_value) if i == 1: self.record_history_ts = ts_value - tdCom = TDCom() diff --git a/tests/pytest/util/sql.py b/tests/pytest/util/sql.py index 2fa21b1983..91aac1929f 100644 --- a/tests/pytest/util/sql.py +++ b/tests/pytest/util/sql.py @@ -111,7 +111,7 @@ class TDSql: return self.error_info - def query(self, sql, row_tag=None,queryTimes=10): + def query(self, sql, row_tag=None, queryTimes=10, count_expected_res=None): self.sql = sql i=1 while i <= queryTimes: @@ -120,6 +120,17 @@ class TDSql: self.queryResult = self.cursor.fetchall() self.queryRows = len(self.queryResult) self.queryCols = len(self.cursor.description) + + if count_expected_res is not None: + counter = 0 + while count_expected_res != self.queryResult[0][0]: + self.cursor.execute(sql) + self.queryResult = self.cursor.fetchall() + if counter < queryTimes: + counter += 0.5 + time.sleep(0.5) + else: + return False if row_tag: return self.queryResult return self.queryRows @@ -501,7 +512,8 @@ class TDSql: caller = inspect.getframeinfo(inspect.stack()[1][0]) args = (caller.filename, caller.lineno, self.sql, elm, expect_elm) - tdLog.exit("%s(%d) failed: sql:%s, elm:%s != expect_elm:%s" % args) + # tdLog.info("%s(%d) failed: sql:%s, elm:%s != expect_elm:%s" % args) + raise Exception("%s(%d) failed: sql:%s, elm:%s != expect_elm:%s" % args) def checkNotEqual(self, elm, expect_elm): if elm != expect_elm: @@ -509,7 +521,8 @@ class TDSql: else: caller = inspect.getframeinfo(inspect.stack()[1][0]) args = (caller.filename, caller.lineno, self.sql, elm, expect_elm) - tdLog.exit("%s(%d) failed: sql:%s, elm:%s == expect_elm:%s" % args) + tdLog.info("%s(%d) failed: sql:%s, elm:%s == expect_elm:%s" % args) + raise Exception def get_times(self, time_str, precision="ms"): caller = inspect.getframeinfo(inspect.stack()[1][0]) diff --git a/tests/system-test/8-stream/at_once_interval_ext.py b/tests/system-test/8-stream/at_once_interval_ext.py index aa9e5029f9..838f1e7c53 100644 --- a/tests/system-test/8-stream/at_once_interval_ext.py +++ b/tests/system-test/8-stream/at_once_interval_ext.py @@ -13,7 +13,8 @@ class TDTestCase: tdSql.init(conn.cursor(), logSql) self.tdCom = tdCom - def at_once_interval_ext(self, interval, partition="tbname", delete=False, fill_value=None, fill_history_value=None, interval_value=None, subtable=None, case_when=None, stb_field_name_value=None, tag_value=None, use_exist_stb=False, use_except=False): + def at_once_interval_ext(self, interval, partition="tbname", delete=False, fill_value=None, fill_history_value=None, subtable=None, case_when=None, stb_field_name_value=None, tag_value=None, use_exist_stb=False, use_except=False): + tdLog.info(f"*** testing stream at_once+interval+exist_stb+custom_tag: interval: {interval}, partition: {partition}, fill_history: {fill_history_value}, delete: {delete}, subtable: {subtable}, stb_field_name_value: {stb_field_name_value}, tag_value: {tag_value} ***") if use_except: if stb_field_name_value == self.tdCom.partitial_stb_filter_des_select_elm or stb_field_name_value == self.tdCom.exchange_stb_filter_des_select_elm or len(stb_field_name_value.split(",")) == len(self.tdCom.partitial_stb_filter_des_select_elm.split(",")): partitial_tb_source_str = 
self.tdCom.partitial_ext_tb_source_select_str @@ -33,8 +34,6 @@ class TDTestCase: self.delete = delete self.tdCom.case_name = sys._getframe().f_code.co_name defined_tag_count = len(tag_value.split()) if tag_value is not None else 0 - # if interval_value is None: - # interval_value = f'{self.tdCom.dataDict["interval"]}s' self.tdCom.prepare_data(interval=interval, fill_history_value=fill_history_value, ext_stb=use_exist_stb) self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "") self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "") @@ -60,8 +59,6 @@ class TDTestCase: partition_elm_alias = self.tdCom.partition_expression_alias elif partition == "tbname,t1,c1": partition_elm_alias = f'{self.tdCom.partition_tbname_alias},t1,c1' - partiton_tb = "tbname,c1" - partition_elm_alias_tb = f'{self.tdCom.partition_tbname_alias},c1' else: partition_elm_alias = self.tdCom.partition_tag_alias if subtable: diff --git a/tests/system-test/8-stream/max_delay_interval_ext.py b/tests/system-test/8-stream/max_delay_interval_ext.py new file mode 100644 index 0000000000..653fcd997c --- /dev/null +++ b/tests/system-test/8-stream/max_delay_interval_ext.py @@ -0,0 +1,101 @@ +import sys +import threading +from util.log import * +from util.sql import * +from util.cases import * +from util.common import * + +class TDTestCase: + updatecfgDict = {'debugFlag': 135, 'asynclog': 0} + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self.tdCom = tdCom + + def watermark_max_delay_interval_ext(self, interval, max_delay, watermark=None, fill_value=None, partition="tbname", delete=False, fill_history_value=None, subtable=None, stb_field_name_value=None, tag_value=None, use_exist_stb=False): + tdLog.info(f"*** testing stream max_delay+interval+exist_stb+custom_tag: interval: {interval}, partition: {partition}, max_delay: {max_delay}, fill_history: {fill_history_value}, subtable: {subtable}, stb_field_name_value: {stb_field_name_value}, tag_value: {tag_value} ***") + if stb_field_name_value == self.tdCom.partitial_stb_filter_des_select_elm or stb_field_name_value == self.tdCom.exchange_stb_filter_des_select_elm: + partitial_tb_source_str = self.tdCom.partitial_ext_tb_source_select_str + else: + partitial_tb_source_str = self.tdCom.ext_tb_source_select_str + if not stb_field_name_value: + stb_field_name_value = self.tdCom.tb_filter_des_select_elm + self.delete = delete + self.tdCom.case_name = sys._getframe().f_code.co_name + defined_tag_count = len(tag_value.split()) + if watermark is not None: + self.tdCom.case_name = "watermark" + sys._getframe().f_code.co_name + self.tdCom.prepare_data(interval=interval, watermark=watermark, ext_stb=use_exist_stb) + self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "") + self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "") + self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "") + self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}' + self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}' + self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}' + if subtable: + stb_subtable_value = f'concat(concat("{self.stb_name}_{self.subtable_prefix}", cast(cast(abs(cast({subtable} as int)) as bigint) as varchar(100))), "{self.subtable_suffix}")' if self.subtable else None + else: + stb_subtable_value = None + self.tdCom.date_time = 
1658921623245 + if watermark is not None: + watermark_value = f'{self.tdCom.dataDict["watermark"]}s' + else: + watermark_value = None + + max_delay_value = f'{self.tdCom.trans_time_to_s(max_delay)}s' + if fill_value: + if "value" in fill_value.lower(): + fill_value='VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11' + # create stb/ctb/tb stream + self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.tdCom.ext_stb_stream_des_table, subtable_value=stb_subtable_value, source_sql=f'select _wstart AS wstart, {partitial_tb_source_str} from {self.stb_name} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="max_delay", watermark=watermark_value, max_delay=max_delay_value, fill_value=fill_value, fill_history_value=fill_history_value, stb_field_name_value=stb_field_name_value, tag_value=tag_value, use_exist_stb=use_exist_stb) + + init_num = 0 + for i in range(self.tdCom.range_count): + if i == 0: + if watermark is not None: + window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(self.tdCom.date_time, self.tdCom.dataDict['interval'], self.tdCom.dataDict['watermark']) + else: + window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(self.tdCom.date_time, self.tdCom.dataDict['interval']) + else: + self.tdCom.date_time = window_close_ts + self.tdCom.offset + window_close_ts += self.tdCom.dataDict['interval']*self.tdCom.offset + for num in range(int(window_close_ts/self.tdCom.offset-self.tdCom.date_time/self.tdCom.offset)): + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=self.tdCom.date_time+num*self.tdCom.offset) + if self.tdCom.update and i%2 == 0: + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=self.tdCom.date_time+num*self.tdCom.offset) + + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts-1) + if self.tdCom.update and i%2 == 0: + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts-1) + + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts) + if self.tdCom.update and i%2 == 0: + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts) + + if i == 0: + init_num = 2 + i + if watermark is not None: + init_num += 1 + else: + init_num += 1 + time.sleep(int(max_delay.replace("s", ""))) + if tag_value: + tdSql.query(f'select {tag_value} from {self.stb_name}') + tag_value_list = tdSql.queryResult + if not fill_value: + self.tdCom.check_query_data(f'select {self.tdCom.stb_filter_des_select_elm} from ext_{self.stb_name}{self.tdCom.des_table_suffix} order by ts;', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} interval({self.tdCom.dataDict["interval"]}s)', defined_tag_count=defined_tag_count, tag_value_list=tag_value_list, partition=partition) + + def run(self): + for delete in [True, False]: + for fill_history_value in [0, 1]: + self.watermark_max_delay_interval_ext(interval=random.choice([15]), watermark=random.randint(20, 25), max_delay=f"{random.randint(5, 6)}s", delete=delete, fill_history_value=fill_history_value, partition=None, subtable=None, stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/8-stream/pause_resume_test.py 
b/tests/system-test/8-stream/pause_resume_test.py new file mode 100644 index 0000000000..f5f1cf07fa --- /dev/null +++ b/tests/system-test/8-stream/pause_resume_test.py @@ -0,0 +1,154 @@ +import threading +from util.log import * +from util.sql import * +from util.cases import * +from util.common import * + +class TDTestCase: + updatecfgDict = {'debugFlag': 135, 'asynclog': 0} + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self.tdCom = tdCom + + def pause_resume_test(self, interval, partition="tbname", delete=False, fill_history_value=None, pause=True, resume=True, ignore_untreated=False): + tdLog.info(f"*** testing stream pause+resume: interval: {interval}, partition: {partition}, delete: {delete}, fill_history: {fill_history_value}, ignore_untreated: {ignore_untreated} ***") + if_exist_value_list = [None, True] + if_exist = random.choice(if_exist_value_list) + reverse_check = True if ignore_untreated else False + range_count = (self.tdCom.range_count + 3) * 3 + self.delete = delete + self.tdCom.case_name = sys._getframe().f_code.co_name + self.tdCom.prepare_data(interval=interval, fill_history_value=fill_history_value) + self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "") + self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "") + self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "") + self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}' + self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}' + self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}' + + if partition == "tbname": + partition_elm_alias = self.tdCom.partition_tbname_alias + elif partition == "c1": + partition_elm_alias = self.tdCom.partition_col_alias + elif partition == "abs(c1)": + partition_elm_alias = self.tdCom.partition_expression_alias + elif partition is None: + partition_elm_alias = '"no_partition"' + else: + partition_elm_alias = self.tdCom.partition_tag_alias + if partition == "tbname" or partition is None: + stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + else: + stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", cast(cast(abs(cast({partition_elm_alias} as int)) as bigint) as varchar(100))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", cast(cast(abs(cast({partition_elm_alias} as int)) as bigint) as varchar(100))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", cast(cast(abs(cast({partition_elm_alias} as int)) as bigint) as varchar(100))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + if partition: + partition_elm = f'partition by {partition} {partition_elm_alias}' + else: + partition_elm = "" + 
self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.stb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} {partition_elm} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="at_once", subtable_value=stb_subtable_value, fill_history_value=fill_history_value) + self.tdCom.create_stream(stream_name=f'{self.ctb_name}{self.tdCom.stream_suffix}', des_table=self.tdCom.ctb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.ctb_name} {partition_elm} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="at_once", subtable_value=ctb_subtable_value, fill_history_value=fill_history_value) + self.tdCom.create_stream(stream_name=f'{self.tb_name}{self.tdCom.stream_suffix}', des_table=self.tdCom.tb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {self.tb_name} {partition_elm} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="at_once", subtable_value=tb_subtable_value, fill_history_value=fill_history_value) + for i in range(range_count): + ts_value = str(self.tdCom.date_time+self.tdCom.dataDict["interval"])+f'+{i*10}s' + ts_cast_delete_value = self.tdCom.time_cast(ts_value) + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value) + if self.tdCom.update and i%2 == 0: + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value) + if self.delete and i%2 != 0: + self.tdCom.sdelete_rows(tbname=self.ctb_name, start_ts=ts_cast_delete_value) + self.tdCom.date_time += 1 + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value) + if self.tdCom.update and i%2 == 0: + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value) + if self.delete and i%2 != 0: + self.tdCom.sdelete_rows(tbname=self.tb_name, start_ts=ts_cast_delete_value) + self.tdCom.date_time += 1 + if partition: + partition_elm = f'partition by {partition}' + else: + partition_elm = "" + # if i == int(range_count/2): + if i > 2 and i % 3 == 0: + for stream_name in [f'{self.stb_name}{self.tdCom.stream_suffix}', f'{self.ctb_name}{self.tdCom.stream_suffix}', f'{self.tb_name}{self.tdCom.stream_suffix}']: + if if_exist is not None: + tdSql.execute(f'pause stream if exists {stream_name}_no_exist') + tdSql.error(f'pause stream if not exists {stream_name}') + tdSql.error(f'pause stream {stream_name}_no_exist') + self.tdCom.pause_stream(stream_name, if_exist) + if pause and not resume and range_count-i <= 3: + time.sleep(self.tdCom.default_interval) + tdSql.query(f'select wstart, {self.tdCom.stb_output_select_str} from {self.stb_name}{self.tdCom.des_table_suffix} order by wstart') + res_after_pause = tdSql.queryResult + if resume: + if i > 2 and i % 3 != 0: + for stream_name in [f'{self.stb_name}{self.tdCom.stream_suffix}', f'{self.ctb_name}{self.tdCom.stream_suffix}', f'{self.tb_name}{self.tdCom.stream_suffix}']: + if if_exist is not None: + tdSql.execute(f'resume stream if exists {stream_name}_no_exist') + tdSql.error(f'resume stream if not exists {stream_name}') + self.tdCom.resume_stream(stream_name, if_exist, None, ignore_untreated) + if pause and not resume: + tdSql.query(f'select wstart, {self.tdCom.stb_output_select_str} from {self.stb_name}{self.tdCom.des_table_suffix} order by wstart') + res_without_resume = tdSql.queryResult + tdSql.checkEqual(res_after_pause, res_without_resume) + else: + for tbname in [self.stb_name, self.ctb_name, self.tb_name]: + if tbname != self.tb_name: + 
self.tdCom.check_query_data(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {tbname} {partition_elm} interval({self.tdCom.dataDict["interval"]}s) order by wstart', sorted=True, reverse_check=reverse_check) + else: + self.tdCom.check_query_data(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {tbname} {partition_elm} interval({self.tdCom.dataDict["interval"]}s) order by wstart', sorted=True, reverse_check=reverse_check) + + if self.tdCom.subtable: + for tname in [self.stb_name, self.ctb_name]: + tdSql.query(f'select * from {self.ctb_name}') + ptn_counter = 0 + for c1_value in tdSql.queryResult: + if partition == "c1": + tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{abs(c1_value[1])}{self.tdCom.subtable_suffix}`;') + elif partition is None: + tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}no_partition{self.tdCom.subtable_suffix}`;') + elif partition == "abs(c1)": + abs_c1_value = abs(c1_value[1]) + tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;') + elif partition == "tbname" and ptn_counter == 0: + tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}`;') + ptn_counter += 1 + tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True) + + tdSql.query(f'select * from {self.tb_name}') + ptn_counter = 0 + for c1_value in tdSql.queryResult: + if partition == "c1": + tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{abs(c1_value[1])}{self.tdCom.subtable_suffix}`;') + elif partition is None: + tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}no_partition{self.tdCom.subtable_suffix}`;') + elif partition == "abs(c1)": + abs_c1_value = abs(c1_value[1]) + tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;') + elif partition == "tbname" and ptn_counter == 0: + tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{self.tb_name}{self.tdCom.subtable_suffix}`;') + ptn_counter += 1 + + tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True) + + + def run(self): + for delete in [True, False]: + for fill_history_value in [0, 1]: + # pause/resume + self.pause_resume_test(interval=random.randint(10, 15), partition="tbname", ignore_untreated=False, fill_history_value=fill_history_value, delete=delete) + self.pause_resume_test(interval=random.randint(10, 15), partition="tbname", ignore_untreated=True, fill_history_value=fill_history_value, delete=delete) + # self.pause_resume_test(interval=random.randint(10, 15), partition="tbname", resume=False, fill_history_value=fill_history_value, delete=delete) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/8-stream/window_close_session_ext.py b/tests/system-test/8-stream/window_close_session_ext.py index 33990bd821..0fc041e965 100644 --- a/tests/system-test/8-stream/window_close_session_ext.py +++ b/tests/system-test/8-stream/window_close_session_ext.py @@ -13,7 +13,8 @@ class 
TDTestCase: tdSql.init(conn.cursor(), logSql) self.tdCom = tdCom - def watermark_window_close_session_ext(self, session, watermark, fill_history_value=None, partition=None, subtable=None, stb_field_name_value=None, tag_value=None, use_exist_stb=False): + def watermark_window_close_session_ext(self, session, watermark, fill_history_value=None, partition=None, subtable=None, stb_field_name_value=None, tag_value=None, use_exist_stb=False, delete=False): + tdLog.info(f"*** testing stream window_close+session+exist_stb+custom_tag: session: {session}, partition: {partition}, fill_history: {fill_history_value}, subtable: {subtable}, stb_field_name_value: {stb_field_name_value}, tag_value: {tag_value} ***") if stb_field_name_value == self.tdCom.partitial_stb_filter_des_select_elm or stb_field_name_value == self.tdCom.exchange_stb_filter_des_select_elm: partitial_tb_source_str = self.tdCom.partitial_ext_tb_source_select_str else: @@ -61,15 +62,16 @@ class TDTestCase: self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts) if fill_history_value: - self.tdCom.update_delete_history_data(delete=True) + self.tdCom.update_delete_history_data(delete=delete) if tag_value: tdSql.query(f'select {tag_value} from {self.stb_name}') tag_value_list = tdSql.queryResult self.tdCom.check_query_data(f'select {self.tdCom.stb_filter_des_select_elm} from ext_{self.stb_name}{self.tdCom.des_table_suffix} order by ts', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} session(ts, {self.tdCom.dataDict["session"]}s) order by wstart limit {expected_value};', sorted=True, defined_tag_count=defined_tag_count, tag_value_list=tag_value_list, partition=partition) def run(self): - for fill_history_value in [0, 1]: - self.watermark_window_close_session_ext(session=random.randint(10, 12), watermark=random.randint(20, 25), fill_history_value=fill_history_value, subtable=None, partition=None, stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True) + #! 
TD-25893 + # self.watermark_window_close_session_ext(session=random.randint(10, 12), watermark=random.randint(20, 25), subtable=None, partition=None, stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True, delete=False, fill_history_value=1) + self.watermark_window_close_session_ext(session=random.randint(10, 12), watermark=random.randint(20, 25), subtable=None, partition=None, stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True, delete=True) def stop(self): tdSql.close() From e073e1c1ae325ec4cbd7e976e63c32c86f4bccb7 Mon Sep 17 00:00:00 2001 From: jiajingbin Date: Tue, 22 Aug 2023 19:02:17 +0800 Subject: [PATCH 07/16] test: add stream cases --- tests/pytest/util/common.py | 31 +++++++++++++++---------------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git a/tests/pytest/util/common.py b/tests/pytest/util/common.py index f06e5d7e79..aa92f4d0a5 100644 --- a/tests/pytest/util/common.py +++ b/tests/pytest/util/common.py @@ -824,23 +824,22 @@ class TDCom: return False def get_path(tool="taosd"): - selfPath = os.path.dirname(os.path.realpath(__file__)) + selfPath = os.path.dirname(os.path.realpath(__file__)) + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] - if ("community" in selfPath): - projPath = selfPath[:selfPath.find("community")] - else: - projPath = selfPath[:selfPath.find("tests")] - - paths = [] - for root, dirs, files in os.walk(projPath): - if ((tool) in files or ("%s.exe"%tool) in files): - rootRealPath = os.path.dirname(os.path.realpath(root)) - if ("packaging" not in rootRealPath): - paths.append(os.path.join(root, tool)) - break - if (len(paths) == 0): - return "" - return paths[0] + paths = [] + for root, dirs, files in os.walk(projPath): + if ((tool) in files or ("%s.exe"%tool) in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + paths.append(os.path.join(root, tool)) + break + if (len(paths) == 0): + return "" + return paths[0] def dict2toml(in_dict: dict, file:str): if not isinstance(in_dict, dict): From 99a0a1538521851f90a45e2b829d352d9ddb1708 Mon Sep 17 00:00:00 2001 From: jiajingbin Date: Wed, 23 Aug 2023 09:25:07 +0800 Subject: [PATCH 08/16] test: resume three functions in common.py --- tests/pytest/util/common.py | 68 ++++++++++++++++++------------------- 1 file changed, 34 insertions(+), 34 deletions(-) diff --git a/tests/pytest/util/common.py b/tests/pytest/util/common.py index aa92f4d0a5..ea042829d6 100644 --- a/tests/pytest/util/common.py +++ b/tests/pytest/util/common.py @@ -813,40 +813,6 @@ class TDCom: """ return ','.join(map(lambda i: f'{gen_type}{i} {data_type}', range(count))) - def is_json(msg): - if isinstance(msg, str): - try: - json.loads(msg) - return True - except: - return False - else: - return False - - def get_path(tool="taosd"): - selfPath = os.path.dirname(os.path.realpath(__file__)) - if ("community" in selfPath): - projPath = selfPath[:selfPath.find("community")] - else: - projPath = selfPath[:selfPath.find("tests")] - - paths = [] - for root, dirs, files in os.walk(projPath): - if ((tool) in files or ("%s.exe"%tool) in files): - rootRealPath = os.path.dirname(os.path.realpath(root)) - if ("packaging" not in rootRealPath): - paths.append(os.path.join(root, tool)) - break - if (len(paths) == 0): - return "" - return 
paths[0] - - def dict2toml(in_dict: dict, file:str): - if not isinstance(in_dict, dict): - return "" - with open(file, 'w') as f: - toml.dump(in_dict, f) - # stream def create_stream(self, stream_name, des_table, source_sql, trigger_mode=None, watermark=None, max_delay=None, ignore_expired=None, ignore_update=None, subtable_value=None, fill_value=None, fill_history_value=None, stb_field_name_value=None, tag_value=None, use_exist_stb=False, use_except=False): """create_stream @@ -1859,4 +1825,38 @@ class TDCom: if i == 1: self.record_history_ts = ts_value +def is_json(msg): + if isinstance(msg, str): + try: + json.loads(msg) + return True + except: + return False + else: + return False + +def get_path(tool="taosd"): + selfPath = os.path.dirname(os.path.realpath(__file__)) + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + paths = [] + for root, dirs, files in os.walk(projPath): + if ((tool) in files or ("%s.exe"%tool) in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + paths.append(os.path.join(root, tool)) + break + if (len(paths) == 0): + return "" + return paths[0] + +def dict2toml(in_dict: dict, file:str): + if not isinstance(in_dict, dict): + return "" + with open(file, 'w') as f: + toml.dump(in_dict, f) + tdCom = TDCom() From e44206aaca606a6b30bda8937ae466ab0ef8b4ea Mon Sep 17 00:00:00 2001 From: jiajingbin Date: Wed, 23 Aug 2023 16:50:38 +0800 Subject: [PATCH 09/16] test: update cases.task --- tests/parallel_test/cases.task | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index c3d87315f5..71d5f597ed 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -6,6 +6,21 @@ ,,y,unit-test,bash test.sh #system test +,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/scalar_function.py +,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/at_once_interval.py +,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/at_once_session.py +,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/at_once_state_window.py +,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/window_close_interval.py +,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/window_close_session.py +,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/window_close_state_window.py +,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/max_delay_interval.py +,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/max_delay_session.py +,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/at_once_interval_ext.py +,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/max_delay_interval_ext.py +,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/window_close_session_ext.py +,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/partition_interval.py +,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/pause_resume_test.py + ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/nestedQuery.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/nestedQuery_str.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/nestedQuery_math.py @@ -16,7 +31,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/nestedQuery_time.py -Q 2 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/nestedQuery.py -Q 2 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/nestedQuery_26.py -Q 2 
-,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/columnLenUpdated.py +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/columnLenUpdated.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/columnLenUpdated.py -Q 2 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/columnLenUpdated.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/columnLenUpdated.py -Q 4 From 3ae19e8e1bb5a9850e69bd08fcbe45727c6050ae Mon Sep 17 00:00:00 2001 From: jiajingbin Date: Thu, 24 Aug 2023 15:41:16 +0800 Subject: [PATCH 10/16] test: update max_delay_interval.py --- tests/system-test/8-stream/max_delay_interval.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/tests/system-test/8-stream/max_delay_interval.py b/tests/system-test/8-stream/max_delay_interval.py index 5efc4262a1..944c946554 100644 --- a/tests/system-test/8-stream/max_delay_interval.py +++ b/tests/system-test/8-stream/max_delay_interval.py @@ -62,13 +62,13 @@ class TDTestCase: if self.tdCom.update and i%2 == 0: self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=self.tdCom.date_time+num*self.tdCom.offset) self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=self.tdCom.date_time+num*self.tdCom.offset) - if not fill_value: - for tbname in [self.stb_stream_des_table, self.ctb_stream_des_table, self.tb_stream_des_table]: - if tbname != self.tb_stream_des_table: - tdSql.query(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}') - else: - tdSql.query(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}') - tdSql.checkEqual(tdSql.queryRows, init_num) + # if not fill_value: + # for tbname in [self.stb_stream_des_table, self.ctb_stream_des_table, self.tb_stream_des_table]: + # if tbname != self.tb_stream_des_table: + # tdSql.query(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}') + # else: + # tdSql.query(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}') + # tdSql.checkEqual(tdSql.queryRows, init_num) self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts-1) self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts-1) @@ -149,7 +149,7 @@ class TDTestCase: for watermark in [None, random.randint(20, 25)]: self.watermark_max_delay_interval(interval=random.choice([15]), watermark=watermark, max_delay=f"{random.randint(5, 6)}s") for fill_value in ["NULL", "PREV", "NEXT", "LINEAR", "VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11"]: - self.watermark_max_delay_interval(interval=random.randint(10, 15), watermark=None, max_delay=f"{random.randint(5, 6)}s", fill_value=fill_value) + self.watermark_max_delay_interval(interval=random.randint(10, 15), watermark=None, max_delay=f"{random.randint(5, 6)}s", fill_value=fill_value) def stop(self): tdSql.close() From 4af0c984beb9274a3a075268d432088bbaf584f3 Mon Sep 17 00:00:00 2001 From: jiajingbin Date: Fri, 25 Aug 2023 16:37:34 +0800 Subject: [PATCH 11/16] test: update --- tests/system-test/8-stream/at_once_interval_ext.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/system-test/8-stream/at_once_interval_ext.py b/tests/system-test/8-stream/at_once_interval_ext.py index 838f1e7c53..8674e7f853 100644 --- a/tests/system-test/8-stream/at_once_interval_ext.py +++ b/tests/system-test/8-stream/at_once_interval_ext.py @@ -168,7 +168,7 @@ class TDTestCase: tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True) def run(self): - self.at_once_interval_ext(interval=random.randint(10, 15), delete=False, fill_history_value=1, partition=None, 
subtable="constant", stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm, use_exist_stb=True) + # self.at_once_interval_ext(interval=random.randint(10, 15), delete=False, fill_history_value=1, partition=None, subtable="constant", stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm, use_exist_stb=True) for delete in [True, False]: for fill_history_value in [0, 1]: self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=f'tbname,{self.tdCom.tag_filter_des_select_elm.split(",")[0]},c1', subtable="c1", stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True) From 2f583718c02812c83aba584f87827bfa56ce5e04 Mon Sep 17 00:00:00 2001 From: jiajingbin Date: Fri, 25 Aug 2023 16:57:05 +0800 Subject: [PATCH 12/16] test: update --- tests/system-test/8-stream/at_once_interval.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/system-test/8-stream/at_once_interval.py b/tests/system-test/8-stream/at_once_interval.py index 7e082b9ef1..020b5f2a17 100644 --- a/tests/system-test/8-stream/at_once_interval.py +++ b/tests/system-test/8-stream/at_once_interval.py @@ -205,7 +205,8 @@ class TDTestCase: self.at_once_interval(interval=random.randint(10, 15), partition=None, delete=True) self.at_once_interval(interval=random.randint(10, 15), partition=self.tdCom.stream_case_when_tbname, case_when=f'case when {self.tdCom.stream_case_when_tbname} = tbname then {self.tdCom.partition_tbname_alias} else tbname end') self.at_once_interval(interval=random.randint(10, 15), partition="tbname", fill_history_value=1, fill_value="NULL") - for fill_value in ["NULL", "PREV", "NEXT", "LINEAR", "VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11"]: + # for fill_value in ["NULL", "PREV", "NEXT", "LINEAR", "VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11"]: + for fill_value in ["PREV", "NEXT", "LINEAR", "VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11"]: self.at_once_interval(interval=random.randint(10, 15), partition="tbname", fill_value=fill_value) self.at_once_interval(interval=random.randint(10, 15), partition="tbname", fill_value=fill_value, delete=True) From 11e64b329882b75d2ad0b6d2de0955245262c378 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 2 Sep 2023 16:35:32 +0800 Subject: [PATCH 13/16] fix(stream): add null ptr check. 
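The hunk below guards the result of taosHashGet() before it is dereferenced, so a heartbeat entry whose task is missing from execNodeList.pTaskMap is skipped instead of crashing the mnode. For reference, here is a minimal, self-contained sketch of that pattern; it is editorial only, and lookup(), keys/values and main() are hypothetical stand-ins, not TDengine APIs.

#include <stddef.h>
#include <stdio.h>

static int keys[]   = {101, 102, 103};
static int values[] = {7, 8, 9};

/* hypothetical lookup: returns a pointer to the value, or NULL when the key is absent */
static int *lookup(int key) {
    for (size_t i = 0; i < sizeof(keys) / sizeof(keys[0]); ++i) {
        if (keys[i] == key) {
            return &values[i];
        }
    }
    return NULL;
}

int main(void) {
    int incoming[] = {102, 999};        /* 999 plays the role of a task id the map does not know */
    for (size_t i = 0; i < 2; ++i) {
        int *v = lookup(incoming[i]);
        if (v == NULL) {                /* the guard this patch adds: skip, do not dereference */
            continue;
        }
        printf("key %d -> %d\n", incoming[i], *v);
    }
    return 0;
}

Skipping unknown entries keeps the handler tolerant of heartbeats that arrive for tasks the receiver no longer tracks.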
--- source/dnode/mnode/impl/src/mndStream.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index 9455aae8e3..15ae93e4fd 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -2315,9 +2315,13 @@ int32_t mndProcessStreamHb(SRpcMsg *pReq) { for (int32_t i = 0; i < req.numOfTasks; ++i) { STaskStatusEntry *p = taosArrayGet(req.pTaskStatus, i); int64_t k[2] = {p->streamId, p->taskId}; - int32_t index = *(int32_t *)taosHashGet(execNodeList.pTaskMap, &k, sizeof(k)); - STaskStatusEntry *pStatusEntry = taosArrayGet(execNodeList.pTaskList, index); + int32_t* index = taosHashGet(execNodeList.pTaskMap, &k, sizeof(k)); + if (index == NULL) { + continue; + } + + STaskStatusEntry *pStatusEntry = taosArrayGet(execNodeList.pTaskList, *index); pStatusEntry->status = p->status; if (p->status != TASK_STATUS__NORMAL) { mDebug("received s-task:0x%x not in ready status:%s", p->taskId, streamGetTaskStatusStr(p->status)); From 0b67ce0bfb258595460baf445f1326ed460de262 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 4 Sep 2023 18:34:05 +0800 Subject: [PATCH 14/16] fix(stream): release stream task with max_delay option. --- source/libs/stream/src/streamMeta.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index ce62552478..8db6a81864 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -521,6 +521,13 @@ int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t t ASSERT(pTask->status.timerActive == 0); doRemoveIdFromList(pMeta, (int32_t)taosArrayGetSize(pMeta->pTaskList), &pTask->id); + if (pTask->info.triggerParam != 0) { + qDebug("s-task:%s stop schedTimer, and (before) desc ref:%d", pTask->id.idStr, pTask->refCnt); + taosTmrStop(pTask->schedInfo.pTimer); + pTask->info.triggerParam = 0; + streamMetaReleaseTask(pMeta, pTask); + } + streamMetaRemoveTask(pMeta, keys); streamMetaReleaseTask(pMeta, pTask); } else { From 1886bb03305dcbefb8172124055137a2f31a05f9 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 4 Sep 2023 19:36:33 +0800 Subject: [PATCH 15/16] fix(stream): remove invalid free. --- source/libs/stream/src/streamDispatch.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index cf04bcc1b8..39becca781 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -982,8 +982,6 @@ int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, i if (code != TSDB_CODE_SUCCESS) { // todo: do nothing if error happens } - streamFreeQitem(pTask->msgInfo.pData); - pTask->msgInfo.pData = NULL; return TSDB_CODE_SUCCESS; } From db8073daddff6f7281c9028659b4864689802657 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 4 Sep 2023 23:13:57 +0800 Subject: [PATCH 16/16] fix(stream): fix memory leak. 
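The first hunk below frees the dequeued item before the early continue in doScanWalForAllTasks(), which is where the leak occurred; the second hunk restricts the schedTimer teardown to tasks with fillHistory == 0. As a reference, here is a minimal editorial sketch of the ownership rule behind the first hunk; dequeue() and task_ready() are hypothetical stand-ins, not TDengine functions.

#include <stdio.h>
#include <stdlib.h>

typedef struct { int payload; } QItem;

/* hypothetical producer: the caller owns the returned item and must free it */
static QItem *dequeue(int i) {
    QItem *it = (QItem *)malloc(sizeof(QItem));
    if (it != NULL) {
        it->payload = i;
    }
    return it;
}

/* pretend every other task is not ready to accept input */
static int task_ready(int i) { return i % 2 == 0; }

int main(void) {
    for (int i = 0; i < 4; ++i) {
        QItem *pItem = dequeue(i);
        if (!task_ready(i)) {
            if (pItem != NULL) {
                free(pItem);   /* what the fix adds: release before skipping this task */
            }
            continue;          /* previously the item leaked on this path */
        }
        if (pItem != NULL) {
            printf("submitted item %d\n", pItem->payload);
            free(pItem);       /* the normal path already released it */
        }
    }
    return 0;
}

Whichever branch is taken, exactly one owner frees the item.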
--- source/dnode/vnode/src/tq/tqStreamTask.c | 3 +++ source/libs/stream/src/streamMeta.c | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/source/dnode/vnode/src/tq/tqStreamTask.c b/source/dnode/vnode/src/tq/tqStreamTask.c index 3c0321f300..58c7686aeb 100644 --- a/source/dnode/vnode/src/tq/tqStreamTask.c +++ b/source/dnode/vnode/src/tq/tqStreamTask.c @@ -383,6 +383,9 @@ int32_t doScanWalForAllTasks(SStreamMeta* pStreamMeta, bool* pScanIdle) { tqDebug("s-task:%s not ready for submit block from wal, status:%s", pTask->id.idStr, pStatus); taosThreadMutexUnlock(&pTask->lock); streamMetaReleaseTask(pStreamMeta, pTask); + if (pItem != NULL) { + streamFreeQitem(pItem); + } continue; } diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index 8db6a81864..635915aeb6 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -521,7 +521,7 @@ int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t t ASSERT(pTask->status.timerActive == 0); doRemoveIdFromList(pMeta, (int32_t)taosArrayGetSize(pMeta->pTaskList), &pTask->id); - if (pTask->info.triggerParam != 0) { + if (pTask->info.triggerParam != 0 && pTask->info.fillHistory == 0) { qDebug("s-task:%s stop schedTimer, and (before) desc ref:%d", pTask->id.idStr, pTask->refCnt); taosTmrStop(pTask->schedInfo.pTimer); pTask->info.triggerParam = 0;