fix issue

54liuyao 2024-11-05 18:14:35 +08:00
parent 8d19b071ba
commit 48d5f0e7cd
7 changed files with 663 additions and 12 deletions

View File

@@ -40,6 +40,8 @@ extern "C" {
#define IS_CONTINUE_INTERVAL_OP(op) ((op)->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_CONTINUE_INTERVAL)
#define IS_FILL_CONST_VALUE(type) ((type == TSDB_FILL_NULL || type == TSDB_FILL_NULL_F || type == TSDB_FILL_SET_VALUE || type == TSDB_FILL_SET_VALUE_F))
typedef struct SSliceRowData {
TSKEY key;
char pRowVal[];
@@ -101,7 +103,6 @@ int32_t createStreamIntervalSliceOperatorInfo(struct SOperatorInfo* downstream,
SExecTaskInfo* pTaskInfo, SReadHandle* pHandle,
struct SOperatorInfo** ppOptInfo);
int32_t buildAllResultKey(SStreamAggSupporter* pAggSup, TSKEY ts, SArray* pUpdated);
void removeDuplicateTs(SArray* pTsArrray);
#ifdef __cplusplus
}

View File

@@ -1263,6 +1263,21 @@ static int32_t doStreamForceFillImpl(SOperatorInfo* pOperator) {
for (int32_t i = 0; i < pBlock->info.rows; i++){
code = keepBlockRowInDiscBuf(pOperator, pFillInfo, pBlock, tsCol, i, groupId, pFillSup->rowSize);
QUERY_CHECK_CODE(code, lino, _end);
int32_t size = taosArrayGetSize(pInfo->pCloseTs);
if (size > 0) {
TSKEY* pTs = (TSKEY*) taosArrayGet(pInfo->pCloseTs, 0);
TSKEY resTs = tsCol[i];
while (resTs < (*pTs)) {
SWinKey key = {.groupId = groupId, .ts = resTs};
taosArrayPush(pInfo->pUpdated, &key);
if (IS_FILL_CONST_VALUE(pFillSup->type)) {
break;
}
resTs = taosTimeAdd(resTs, pFillSup->interval.sliding, pFillSup->interval.slidingUnit,
pFillSup->interval.precision);
}
}
}
code = pAggSup->stateStore.streamStateGroupPut(pAggSup->pState, groupId, NULL, 0);
QUERY_CHECK_CODE(code, lino, _end);
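The loop added above walks each incoming row's timestamp forward one sliding step at a time and records a pending result key per step until it reaches the first close timestamp in pCloseTs; constant-value fills (NULL/VALUE) stop after the first key. A minimal Python sketch of that logic, assuming a plain integer sliding interval (the real code advances resTs with taosTimeAdd() using the interval's unit and precision):

    def keys_to_emit(row_ts, close_ts, group_id, sliding, is_const_fill):
        """Illustrative mirror of the while (resTs < *pTs) loop above."""
        keys = []
        ts = row_ts
        while ts < close_ts:
            keys.append((group_id, ts))  # SWinKey{groupId, ts} pushed to pUpdated
            if is_const_fill:            # IS_FILL_CONST_VALUE: one key is enough
                break
            ts += sliding                # taosTimeAdd(resTs, sliding, unit, precision)
        return keys

For example, row_ts=0, close_ts=30, sliding=10 yields keys at 0, 10 and 20 for PREV/NEXT/LINEAR fills, but only the key at 0 for NULL/VALUE fills.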
@@ -1302,6 +1317,11 @@ _end:
return code;
}
static void removeDuplicateResult(SArray* pTsArrray, __compar_fn_t fn) {
taosArraySort(pTsArrray, fn);
taosArrayRemoveDuplicate(pTsArrray, fn, NULL);
}
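removeDuplicateResult() above is a sort-then-adjacent-dedup over the pending window keys, delegating to taosArraySort and taosArrayRemoveDuplicate with the caller-supplied comparator (winKeyCmprImpl at the call site below). A hedged Python equivalent for illustration:

    def remove_duplicate_result(keys):
        """Sort the pending (group_id, ts) keys, then drop adjacent duplicates."""
        keys.sort()
        deduped = []
        for key in keys:
            if not deduped or deduped[-1] != key:
                deduped.append(key)
        return deduped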
// force window close
static int32_t doStreamForceFillNext(SOperatorInfo* pOperator, SSDataBlock** ppRes) {
int32_t code = TSDB_CODE_SUCCESS;
@@ -1369,14 +1389,13 @@ static int32_t doStreamForceFillNext(SOperatorInfo* pOperator, SSDataBlock** ppR
QUERY_CHECK_CODE(code, lino, _end);
}
removeDuplicateTs(pInfo->pCloseTs);
for (int32_t i = 0; i < taosArrayGetSize(pInfo->pCloseTs); i++) {
TSKEY ts = *(TSKEY*) taosArrayGet(pInfo->pCloseTs, i);
code = buildAllResultKey(pInfo->pStreamAggSup, ts, pInfo->pUpdated);
QUERY_CHECK_CODE(code, lino, _end);
}
taosArrayClear(pInfo->pCloseTs);
taosArraySort(pInfo->pUpdated, winKeyCmprImpl);
removeDuplicateResult(pInfo->pUpdated, winKeyCmprImpl);
initMultiResInfoFromArrayList(&pInfo->groupResInfo, pInfo->pUpdated);
pInfo->groupResInfo.freeItem = false;

View File

@@ -129,7 +129,7 @@ static int32_t getIntervalSliceCurStateBuf(SStreamAggSupporter* pAggSup, SInterv
&curVLen, pWinCode);
QUERY_CHECK_CODE(code, lino, _end);
qDebug("===stream=== set stream twa next point buf.ts:%" PRId64 ", groupId:%" PRIu64 ", res:%d",
qDebug("===stream=== set stream twa cur point buf.ts:%" PRId64 ", groupId:%" PRIu64 ", res:%d",
curKey.ts, curKey.groupId, *pWinCode);
initIntervalSlicePoint(pAggSup, pTWin, groupId, pCurPoint);
@@ -147,6 +147,8 @@ static int32_t getIntervalSliceCurStateBuf(SStreamAggSupporter* pAggSup, SInterv
STimeWindow prevSTW = {.skey = prevKey.ts};
prevSTW.ekey = taosTimeGetIntervalEnd(prevSTW.skey, pInterval);
initIntervalSlicePoint(pAggSup, &prevSTW, groupId, pPrevPoint);
qDebug("===stream=== set stream twa prev point buf.ts:%" PRId64 ", groupId:%" PRIu64 ", res:%d", pPrevPoint->winKey.win.skey,
pPrevPoint->winKey.groupId, prevWinCode);
} else {
SET_WIN_KEY_INVALID(pPrevPoint->winKey.win.skey);
SET_WIN_KEY_INVALID(pPrevPoint->winKey.win.ekey);
@@ -195,6 +197,16 @@ void doStreamSliceInterpolation(SSliceRowData* pPrevWinVal, TSKEY winKey, TSKEY
}
}
void doSetElapsedEndKey(TSKEY winKey, SExprSupp* pSup) {
SqlFunctionCtx* pCtx = pSup->pCtx;
for (int32_t k = 0; k < pSup->numOfExprs; ++k) {
if (fmIsElapsedFunc(pCtx[k].functionId)) {
pCtx[k].end.key = winKey;
pCtx[k].end.val = 0;
}
}
}
static void resetIntervalSliceFunctionKey(SqlFunctionCtx* pCtx, int32_t numOfOutput) {
for (int32_t k = 0; k < numOfOutput; ++k) {
pCtx[k].start.key = INT64_MIN;
@@ -265,6 +277,7 @@ static int32_t doStreamIntervalSliceAggImpl(SOperatorInfo* pOperator, SSDataBloc
QUERY_CHECK_CODE(code, lino, _end);
resetIntervalSliceFunctionKey(pSup->pCtx, numOfOutput);
doSetElapsedEndKey(prevPoint.winKey.win.ekey, &pOperator->exprSupp);
doStreamSliceInterpolation(prevPoint.pLastRow, prevPoint.winKey.win.ekey, curTs, pBlock, startPos, &pOperator->exprSupp, INTERVAL_SLICE_END);
updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &prevPoint.winKey.win, 1);
code = applyAggFunctionOnPartialTuples(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos,
@@ -628,9 +641,3 @@ _error:
(*ppOptInfo) = NULL;
return code;
}
void removeDuplicateTs(SArray* pTsArrray) {
__compar_fn_t fn = getKeyComparFunc(TSDB_DATA_TYPE_TIMESTAMP, TSDB_ORDER_ASC);
taosArraySort(pTsArrray, fn);
taosArrayRemoveDuplicate(pTsArrray, fn, NULL);
}

View File

@@ -30,7 +30,6 @@
#define STREAM_TIME_SLICE_OP_STATE_NAME "StreamTimeSliceHistoryState"
#define STREAM_TIME_SLICE_OP_CHECKPOINT_NAME "StreamTimeSliceOperator_Checkpoint"
#define IS_FILL_CONST_VALUE(type) ((type == TSDB_FILL_NULL || type == TSDB_FILL_NULL_F || type == TSDB_FILL_SET_VALUE || type == TSDB_FILL_SET_VALUE_F))
int32_t saveTimeSliceWinResult(SWinKey* pKey, SSHashObj* pUpdatedMap) {
return tSimpleHashPut(pUpdatedMap, pKey, sizeof(SWinKey), NULL, 0);
@@ -1743,6 +1742,12 @@ _end:
return code;
}
static void removeDuplicateTs(SArray* pTsArrray) {
__compar_fn_t fn = getKeyComparFunc(TSDB_DATA_TYPE_TIMESTAMP, TSDB_ORDER_ASC);
taosArraySort(pTsArrray, fn);
taosArrayRemoveDuplicate(pTsArrray, fn, NULL);
}
static int32_t doStreamTimeSliceNext(SOperatorInfo* pOperator, SSDataBlock** ppRes) {
int32_t code = TSDB_CODE_SUCCESS;
int32_t lino = 0;

View File

@@ -4266,6 +4266,10 @@ int32_t elapsedFunction(SqlFunctionCtx* pCtx) {
numOfElems = pInput->numOfRows; // since this is the primary timestamp, no need to exclude NULL values
if (numOfElems == 0) {
// for stream
if (pCtx->end.key != INT64_MIN) {
pInfo->max = pCtx->end.key + 1;
}
goto _elapsed_over;
}
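The branch added above covers a force-closed window that received no rows: doSetElapsedEndKey() (earlier in this commit) stores the window end in pCtx->end.key, and elapsedFunction() then uses end.key + 1 as the maximum timestamp so an empty window still reports its span. A rough Python sketch, assuming the window start is already recorded as the minimum and elapsed ultimately reports (max - min) scaled by the time unit (names here are illustrative):

    INT64_MIN = -(1 << 63)

    def elapsed_empty_window(win_start_ts, forced_end_key, time_unit=1):
        """Empty stream window with an end key forced by doSetElapsedEndKey()."""
        if forced_end_key == INT64_MIN:
            return 0.0                   # no forced end key: nothing to report
        max_ts = forced_end_key + 1      # pInfo->max = pCtx->end.key + 1
        return (max_ts - win_start_ts) / time_unit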

View File

@@ -1371,7 +1371,7 @@ int32_t getRowStatePrevRow(SStreamFileState* pFileState, const SWinKey* pKey, SW
} else {
SWinKey* pPrevKey = taosArrayGet(pWinStates, index);
*pResKey = *pPrevKey;
return getHashSortRowBuff(pFileState, pResKey, ppVal, pVLen, pWinCode);
return addRowBuffIfNotExist(pFileState, (void*)pPrevKey, sizeof(SWinKey), ppVal, pVLen, pWinCode);
}
(*pWinCode) = TSDB_CODE_FAILED;
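The one-line change above switches the previous-row lookup from a plain buffer fetch (getHashSortRowBuff) to get-or-create semantics (addRowBuffIfNotExist), so a missing row buffer for the previous window key is allocated rather than failing the lookup. A dict-based Python sketch of that semantics, illustrative only:

    def add_row_buff_if_not_exist(buffers, key, row_size):
        """Return (buffer, win_code); allocate an empty row buffer for new keys."""
        if key in buffers:
            return buffers[key], 0           # key already has a buffer
        buffers[key] = bytearray(row_size)   # newly created, zero-filled row buffer
        return buffers[key], -1              # tell the caller the key was missing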

View File

@@ -0,0 +1,615 @@
import sys
import threading
from util.log import *
from util.sql import *
from util.cases import *
from util.common import *
class TDTestCase:
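# New test: create interp() streams with trigger force_window_close on the super
# table, child table and a normal table, insert data (including duplicate rows and
# history/future timestamps), then compare the stream output tables against the
# equivalent interp ... range/every/fill queries on the source tables.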
updatecfgDict = {"debugFlag": 135, "asynclog": 0}
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.tdCom = tdCom
def get_stream_first_ts(self, table_name1, table_name2):
tdSql.query(
f'select * from {table_name1}{table_name2} order by 1 '
)
res_ts = tdSql.getData(0, 0)
return res_ts
def force_window_close(
self,
interval,
partition="tbname",
funciton_name="",
funciton_name_alias="",
delete=False,
fill_value=None,
fill_history_value=None,
case_when=None,
ignore_expired=1,
ignore_update=1,
):
# partition must be tbname and must not be None.
tdLog.info(
f"*** testing stream force_window_close+interp+every: every: {interval}, partition: {partition}, fill_history: {fill_history_value}, fill: {fill_value}, delete: {delete}, case_when: {case_when} ***"
)
self.tdCom.subtable = False
col_value_type = "Incremental" if partition == "c1" else "random"
custom_col_index = 1 if partition == "c1" else None
self.tdCom.custom_col_val = 0
self.delete = delete
self.tdCom.case_name = sys._getframe().f_code.co_name
self.tdCom.prepare_data(
interval=interval,
fill_history_value=fill_history_value,
custom_col_index=custom_col_index,
col_value_type=col_value_type,
)
self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "")
self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "")
self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "")
self.stb_stream_des_table = f"{self.stb_name}{self.tdCom.des_table_suffix}"
self.ctb_stream_des_table = f"{self.ctb_name}{self.tdCom.des_table_suffix}"
self.tb_stream_des_table = f"{self.tb_name}{self.tdCom.des_table_suffix}"
if partition == "tbname":
partition_elm_alias = self.tdCom.partition_tbname_alias
elif partition == "c1":
partition_elm_alias = self.tdCom.partition_col_alias
elif partition == "abs(c1)":
partition_elm_alias = self.tdCom.partition_expression_alias
elif partition is None:
partition_elm_alias = '"no_partition"'
else:
partition_elm_alias = self.tdCom.partition_tag_alias
if partition:
partition_elm = f"partition by {partition} {partition_elm_alias}"
else:
partition_elm = ""
if fill_value:
if "value" in fill_value.lower():
fill_value = "VALUE,1"
# create error stream
tdLog.info("create error stream")
time.sleep(10)
tdSql.error(
f"create stream itp_force_error_1 trigger force_window_close IGNORE EXPIRED 1 IGNORE UPDATE 0 into itp_force_error_1 as select _irowts,tbname,_isfilled,interp(c1,1) from {self.stb_name} partition by tbname every(5s) fill(prev) ;"
)
tdSql.error(
f"create stream itp_force_error_1 trigger force_window_close IGNORE EXPIRED 0 IGNORE UPDATE 1 into itp_force_error_1 as select _irowts,tbname,_isfilled,interp(c1,1) from {self.stb_name} partition by tbname every(5s) fill(prev) ;"
)
tdSql.error(
f"create stream itp_force_error_1 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 1 into itp_force_error_1 as select _irowts,tbname,_isfilled,interp(c1,1) from {self.stb_name} partition by tbname every(5s) fill(prev) ;"
)
tdSql.error(
f"create stream itp_force_error_1 trigger force_window_close IGNORE EXPIRED 1 IGNORE UPDATE 0 into itp_force_error_1 as select _irowts,tbname,_isfilled,interp(c11,1) from {self.stb_name} partition by tbname every(5s) fill(prev) ;"
)
# function name : interp
trigger_mode = "force_window_close"
# subtable is not used here; to enable it, pass subtable_value=stb_subtable_value
# or subtable_value=ctb_subtable_value when creating the stream
# create stream super table and child table
tdLog.info("create stream super table and child table")
self.tdCom.create_stream(
stream_name=f"{self.stb_name}{self.tdCom.stream_suffix}",
des_table=self.stb_stream_des_table,
source_sql=f'select _irowts as irowts,tbname as table_name, _isfilled as isfilled, {funciton_name} as {funciton_name_alias} from {self.stb_name} {partition_elm} every({self.tdCom.dataDict["interval"]}s)',
trigger_mode=trigger_mode,
fill_value=fill_value,
fill_history_value=fill_history_value,
ignore_expired=ignore_expired,
ignore_update=ignore_update,
)
self.tdCom.create_stream(
stream_name=f"{self.ctb_name}{self.tdCom.stream_suffix}",
des_table=self.ctb_stream_des_table,
source_sql=f'select _irowts as irowts, tbname as table_name, _isfilled as isfilled, {funciton_name} as {funciton_name_alias} from {self.ctb_name} {partition_elm} every({self.tdCom.dataDict["interval"]}s)',
trigger_mode=trigger_mode,
fill_value=fill_value,
fill_history_value=fill_history_value,
ignore_expired=ignore_expired,
ignore_update=ignore_update,
)
# create streams with tag and tbname filters
tdLog.info("create stream with tag and tbname filter")
tag_t1_value = self.tdCom.tag_value_list[0]
where_tag = f"where t1 = {tag_t1_value}"
where_tbname = f'where tbname="{self.ctb_name}"'
# print(f"tag: {tag_t1_value}")
self.stb_stream_des_where_tag_table = (
f"{self.stb_name}_where_tag{self.tdCom.des_table_suffix}"
)
self.tdCom.create_stream(
stream_name=f"{self.stb_name}_where_tag{self.tdCom.stream_suffix}",
des_table=self.stb_stream_des_where_tag_table,
source_sql=f'select _irowts as irowts,tbname as table_name, _isfilled as isfilled, {funciton_name} as {funciton_name_alias} from {self.stb_name} {where_tag} {partition_elm} every({self.tdCom.dataDict["interval"]}s)',
trigger_mode=trigger_mode,
fill_value=fill_value,
fill_history_value=fill_history_value,
ignore_expired=ignore_expired,
ignore_update=ignore_update,
)
self.stb_stream_des_where_tbname_table = (
f"{self.stb_name}_where_tbname{self.tdCom.des_table_suffix}"
)
self.tdCom.create_stream(
stream_name=f"{self.stb_name}_where_tbname{self.tdCom.stream_suffix}",
des_table=self.stb_stream_des_where_tbname_table,
source_sql=f'select _irowts as irowts,tbname as table_name, _isfilled as isfilled, {funciton_name} as {funciton_name_alias} from {self.stb_name} {where_tbname} {partition_elm} every({self.tdCom.dataDict["interval"]}s)',
trigger_mode=trigger_mode,
fill_value=fill_value,
fill_history_value=fill_history_value,
ignore_expired=ignore_expired,
ignore_update=ignore_update,
)
# set partition by tag and column
self.stb_stream_des_partition_tag_table = (
f"{self.stb_name}_partition_tag{self.tdCom.des_table_suffix}"
)
self.stb_stream_des_partition_column1_table = (
f"{self.stb_name}_partition_column1{self.tdCom.des_table_suffix}"
)
self.stb_stream_des_partition_column2_table = (
f"{self.stb_name}_partition_column2{self.tdCom.des_table_suffix}"
)
if partition:
tdLog.info("create stream with partition by tag and tbname ")
partition_elm_new = f"partition by {partition}, t1"
self.tdCom.create_stream(
stream_name=f"{self.stb_name}_partition_tag{self.tdCom.stream_suffix}",
des_table=self.stb_stream_des_partition_tag_table,
source_sql=f'select _irowts as irowts, tbname as table_name, t1 as t_t1, _isfilled as isfilled, {funciton_name} as {funciton_name_alias} from {self.stb_name} {partition_elm_new} every({self.tdCom.dataDict["interval"]}s)',
trigger_mode=trigger_mode,
fill_value=fill_value,
fill_history_value=fill_history_value,
ignore_expired=ignore_expired,
ignore_update=ignore_update,
)
partition_elm_new = f"partition by {partition}, c1"
self.tdCom.create_stream(
stream_name=f"{self.stb_name}_partition_column1{self.tdCom.stream_suffix}",
des_table=f"{self.stb_name}_partition_column1{self.tdCom.des_table_suffix}",
source_sql=f'select _irowts as irowts, tbname as table_name, c1 as c_c1, _isfilled as isfilled, {funciton_name} as {funciton_name_alias} from {self.stb_name} {partition_elm_new} every({self.tdCom.dataDict["interval"]}s)',
trigger_mode=trigger_mode,
fill_value=fill_value,
fill_history_value=fill_history_value,
ignore_expired=ignore_expired,
ignore_update=ignore_update,
)
partition_elm_new = f"partition by {partition}, c2"
self.tdCom.create_stream(
stream_name=f"{self.stb_name}_partition_column2{self.tdCom.stream_suffix}",
des_table=f"{self.stb_name}_partition_column2{self.tdCom.des_table_suffix}",
source_sql=f'select _irowts as irowts, tbname as table_name, c2 as c_c2, _isfilled as isfilled, {funciton_name} as {funciton_name_alias} from {self.stb_name} {partition_elm_new} every({self.tdCom.dataDict["interval"]}s)',
trigger_mode=trigger_mode,
fill_value=fill_value,
fill_history_value=fill_history_value,
ignore_expired=ignore_expired,
ignore_update=ignore_update,
)
if fill_value:
if "value" in fill_value.lower():
fill_value = "VALUE,1"
# create stream on the general (normal) table
tdLog.info("create stream general table")
self.tdCom.create_stream(
stream_name=f"{self.tb_name}{self.tdCom.stream_suffix}",
des_table=self.tb_stream_des_table,
source_sql=f'select _irowts as irowts,tbname as table_name, _isfilled as isfilled, {funciton_name} as {funciton_name_alias} from {self.tb_name} every({self.tdCom.dataDict["interval"]}s)',
trigger_mode=trigger_mode,
fill_value=fill_value,
fill_history_value=fill_history_value,
ignore_expired=ignore_expired,
ignore_update=ignore_update,
)
# wait for the stream tasks and check that their status is ready
time.sleep(self.tdCom.dataDict["interval"])
tdSql.query("show streams")
tdLog.info(f"tdSql.queryResult:{tdSql.queryResult},tdSql.queryRows:{tdSql.queryRows}")
localQueryResult = tdSql.queryResult
for stream_number in range(tdSql.queryRows):
stream_name = localQueryResult[stream_number][0]
tdCom.check_stream_task_status(
stream_name=stream_name, vgroups=2, stream_timeout=20,check_wal_info=False
)
time.sleep(self.tdCom.dataDict["interval"])
time.sleep(30)
# insert data
self.tdCom.date_time = self.tdCom.genTs(precision=self.tdCom.precision)[0]
start_time = self.tdCom.date_time
time.sleep(1)
tdSql.query("select 1;")
start_force_ts = str(0)
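# insert range_count rounds of rows into the child table and the normal table;
# even rounds write the same timestamp twice (update path), odd rounds delete the
# row again when self.delete is set, and the first generated timestamp is kept in
# start_force_ts as the lower bound for later result checks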
for i in range(self.tdCom.range_count):
cur_time = str(self.tdCom.date_time + self.tdCom.dataDict["interval"])
ts_value = (
cur_time + f"+{i*10 + 30}s"
)
# print(ts_value)
if start_force_ts == "0":
start_force_ts = cur_time
ts_cast_delete_value = self.tdCom.time_cast(ts_value)
self.tdCom.sinsert_rows(
tbname=self.tdCom.ctb_name,
ts_value=ts_value,
custom_col_index=custom_col_index,
col_value_type=col_value_type,
)
if i % 2 == 0:
self.tdCom.sinsert_rows(
tbname=self.tdCom.ctb_name,
ts_value=ts_value,
custom_col_index=custom_col_index,
col_value_type=col_value_type,
)
if self.delete and i % 2 != 0:
self.tdCom.sdelete_rows(
tbname=self.tdCom.ctb_name, start_ts=ts_cast_delete_value
)
self.tdCom.date_time += 1
self.tdCom.sinsert_rows(
tbname=self.tdCom.tb_name,
ts_value=ts_value,
custom_col_index=custom_col_index,
col_value_type=col_value_type,
)
if i % 2 == 0:
self.tdCom.sinsert_rows(
tbname=self.tdCom.tb_name,
ts_value=ts_value,
custom_col_index=custom_col_index,
col_value_type=col_value_type,
)
if self.delete and i % 2 != 0:
self.tdCom.sdelete_rows(
tbname=self.tdCom.tb_name, start_ts=ts_cast_delete_value
)
self.tdCom.date_time += 1
if self.tdCom.subtable:
for tname in [self.stb_name, self.ctb_name]:
group_id = self.tdCom.get_group_id_from_stb(f"{tname}_output")
tdSql.query(f"select * from {self.ctb_name}")
ptn_counter = 0
for c1_value in tdSql.queryResult:
if partition == "c1":
tbname = self.tdCom.get_subtable_wait(
f"{tname}_{self.tdCom.subtable_prefix}{abs(c1_value[1])}{self.tdCom.subtable_suffix}"
)
tdSql.query(f"select count(*) from `{tbname}`")
elif partition is None:
tbname = self.tdCom.get_subtable_wait(
f"{tname}_{self.tdCom.subtable_prefix}no_partition{self.tdCom.subtable_suffix}"
)
tdSql.query(f"select count(*) from `{tbname}`")
elif partition == "abs(c1)":
abs_c1_value = abs(c1_value[1])
tbname = self.tdCom.get_subtable_wait(
f"{tname}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}"
)
tdSql.query(f"select count(*) from `{tbname}`")
elif partition == "tbname" and ptn_counter == 0:
tbname = self.tdCom.get_subtable_wait(
f"{tname}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}_{tname}_output_{group_id}"
)
tdSql.query(f"select count(*) from `{tbname}`")
ptn_counter += 1
tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True)
group_id = self.tdCom.get_group_id_from_stb(f"{self.tb_name}_output")
tdSql.query(f"select * from {self.tb_name}")
ptn_counter = 0
for c1_value in tdSql.queryResult:
if partition == "c1":
tbname = self.tdCom.get_subtable_wait(
f"{self.tb_name}_{self.tdCom.subtable_prefix}{abs(c1_value[1])}{self.tdCom.subtable_suffix}"
)
tdSql.query(f"select count(*) from `{tbname}`")
elif partition is None:
tbname = self.tdCom.get_subtable_wait(
f"{self.tb_name}_{self.tdCom.subtable_prefix}no_partition{self.tdCom.subtable_suffix}"
)
tdSql.query(f"select count(*) from `{tbname}`")
elif partition == "abs(c1)":
abs_c1_value = abs(c1_value[1])
tbname = self.tdCom.get_subtable_wait(
f"{self.tb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}"
)
tdSql.query(f"select count(*) from `{tbname}`")
elif partition == "tbname" and ptn_counter == 0:
tbname = self.tdCom.get_subtable_wait(
f"{self.tb_name}_{self.tdCom.subtable_prefix}{self.tb_name}{self.tdCom.subtable_suffix}_{self.tb_name}_output_{group_id}"
)
tdSql.query(f"select count(*) from `{tbname}`")
ptn_counter += 1
tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True)
if fill_value:
end_date_time = self.tdCom.date_time
final_range_count = self.tdCom.range_count
history_ts = (
str(start_time)
+ f'-{self.tdCom.dataDict["interval"]*(final_range_count+2)}s'
)
start_ts = self.tdCom.time_cast(history_ts, "-")
future_ts = (
str(end_date_time)
+ f'+{self.tdCom.dataDict["interval"]*(final_range_count+2)}s'
)
end_ts = self.tdCom.time_cast(future_ts)
tdSql.query("select 2;")
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=history_ts)
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=history_ts)
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=future_ts)
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=future_ts)
self.tdCom.date_time = start_time
# update
history_ts = (
str(start_time)
+ f'-{self.tdCom.dataDict["interval"]*(final_range_count+2)}s'
)
start_ts = self.tdCom.time_cast(history_ts, "-")
future_ts = (
str(end_date_time)
+ f'+{self.tdCom.dataDict["interval"]*(final_range_count+2)}s'
)
end_ts = self.tdCom.time_cast(future_ts)
tdSql.query("select 3;")
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=history_ts)
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=history_ts)
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=future_ts)
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=future_ts)
# get query time range using interval count windows
tdSql.query(
f'select _wstart, _wend ,last(ts) from {self.stb_name} where ts >= {start_force_ts} and ts <= {end_ts} partition by tbname interval({self.tdCom.dataDict["interval"]}s)fill ({fill_value}) '
)
# getData() does not support negative indexes
end_new_ts = tdSql.getData(tdSql.queryRows - 1, 1)
end_last_but_one_ts = tdSql.getData(tdSql.queryRows - 2, 1)
# insert source rows (one with a NULL fill value) so that the "_isfilled" column of the stream output is false
tdSql.execute(
f'insert into {self.ctb_name} (ts,c1) values("{end_new_ts}",-102) '
)
tdSql.execute(
f'insert into {self.tb_name} (ts,c1) values("{end_new_ts}",-51) '
)
tdSql.execute(
f'insert into {self.ctb_name} (ts,c1) values("{end_last_but_one_ts}",NULL) '
)
tdSql.query("select 4;")
for i in range(self.tdCom.range_count):
ts_value = (
str(self.tdCom.date_time + self.tdCom.dataDict["interval"])
+ f"+{i*10+30}s"
)
ts_cast_delete_value = self.tdCom.time_cast(ts_value)
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value)
self.tdCom.date_time += 1
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value)
self.tdCom.date_time += 1
if self.delete:
self.tdCom.sdelete_rows(
tbname=self.ctb_name,
start_ts=self.tdCom.time_cast(start_time),
end_ts=ts_cast_delete_value,
)
self.tdCom.sdelete_rows(
tbname=self.tb_name,
start_ts=self.tdCom.time_cast(start_time),
end_ts=ts_cast_delete_value,
)
# wait for the stream to process the data
# print(self.tdCom.dataDict["interval"]*(final_range_count+2))
time.sleep(self.tdCom.dataDict["interval"] * (final_range_count + 2))
# check the data
for tbname in [self.stb_name, self.ctb_name, self.tb_name]:
tdLog.info(f"tbname:{tbname}")
tdSql.query(
f'select _wstart, _wend ,last(ts) from {tbname} where ts >= {start_force_ts} and ts <= {end_ts} partition by tbname interval({self.tdCom.dataDict["interval"]}s)fill ({fill_value}) '
)
start_new_ts = tdSql.getData(0, 1)
ragne_start_ts = start_new_ts
if tbname == self.ctb_name:
if partition == "tbname":
# check data for child table
tdLog.info("check data for child table ")
if fill_value != "PREV":
ragne_start_ts = self.get_stream_first_ts(tbname, self.tdCom.des_table_suffix)
self.tdCom.check_query_data(
f'select irowts, table_name, isfilled, {funciton_name_alias} from {tbname}{self.tdCom.des_table_suffix} where irowts >= {start_force_ts} and irowts <= "{end_new_ts}" order by irowts',
f'select _irowts as irowts ,tb1 as table_name, _isfilled as isfilled , {funciton_name} as {funciton_name_alias} from ( select *, tbname as tb1 from {tbname} where ts >= {start_force_ts} ) partition by tb1 range("{ragne_start_ts}","{end_new_ts}") every({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by irowts',
fill_value=fill_value,
)
elif tbname == self.stb_name:
if partition == "tbname":
# check data for super table
tdLog.info("check data for super table")
if fill_value != "PREV":
ragne_start_ts = self.get_stream_first_ts(tbname, self.tdCom.des_table_suffix)
self.tdCom.check_query_data(
f'select irowts, table_name, isfilled, {funciton_name_alias} from {tbname}{self.tdCom.des_table_suffix} where irowts >= {start_force_ts} and irowts <= "{end_new_ts}" order by irowts',
f'select _irowts as irowts ,tbname as table_name, _isfilled as isfilled , {funciton_name} as {funciton_name_alias} from {tbname} where ts >= {start_force_ts} partition by {partition} range("{ragne_start_ts}","{end_new_ts}") every({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by irowts',
fill_value=fill_value,
)
# tag and tbname filter
tdLog.info("check data for tag and tbname filter")
if fill_value != "PREV":
ragne_start_ts = self.get_stream_first_ts(self.stb_stream_des_where_tag_table, '')
self.tdCom.check_query_data(
f'select irowts, table_name, isfilled, {funciton_name_alias} from {self.stb_stream_des_where_tag_table} where irowts >= {start_force_ts} and irowts <= "{end_new_ts}" order by irowts',
f'select _irowts as irowts ,tbname as table_name, _isfilled as isfilled , {funciton_name} as {funciton_name_alias} from {tbname} {where_tag} and ts >= {start_force_ts} partition by {partition} range("{ragne_start_ts}","{end_new_ts}") every({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by irowts',
fill_value=fill_value,
)
if fill_value != "PREV":
ragne_start_ts = self.get_stream_first_ts(self.stb_stream_des_where_tbname_table, '')
self.tdCom.check_query_data(
f'select irowts, table_name, isfilled, {funciton_name_alias} from {self.stb_stream_des_where_tbname_table} where irowts >= {start_force_ts} and irowts <= "{end_new_ts}" order by irowts',
f'select _irowts as irowts ,tb1 as table_name, _isfilled as isfilled , {funciton_name} as {funciton_name_alias} from ( select *, tbname as tb1 from {tbname} {where_tbname} and ts >= {start_force_ts} ) partition by tb1 range("{ragne_start_ts}","{end_new_ts}") every({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by irowts',
fill_value=fill_value,
)
# check partition by tag and column(c1 or c2)
tdLog.info("check data for partition by tag and column")
if fill_value != "PREV":
ragne_start_ts = self.get_stream_first_ts(self.stb_stream_des_partition_tag_table, '')
self.tdCom.check_query_data(
f'select irowts, table_name, t_t1, isfilled, {funciton_name_alias} from {self.stb_stream_des_partition_tag_table} where irowts >= {start_force_ts} and irowts <= "{end_new_ts}" order by t_t1, irowts',
f'select _irowts as irowts ,tb1 as table_name, t1 as t_t1, _isfilled as isfilled , {funciton_name} as {funciton_name_alias} from ( select *, tbname as tb1 from {tbname} {where_tbname} and ts >= {start_force_ts} ) partition by tb1,t1 range("{ragne_start_ts}","{end_new_ts}") every({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by t_t1, irowts',
fill_value=fill_value,
)
if fill_value == "PREV":
self.tdCom.check_query_data(
f'select irowts, c_c1, isfilled, {funciton_name_alias} from {self.stb_stream_des_partition_column1_table} where irowts >= {start_force_ts} and irowts <= "{end_new_ts}" order by c_c1, irowts',
f'select _irowts as irowts , c1 as c_c1, _isfilled as isfilled , {funciton_name} as {funciton_name_alias} from {tbname} {where_tbname} and ts >= {start_force_ts} partition by {partition},c1 range("{start_new_ts}","{end_new_ts}") every({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by c_c1, irowts',
fill_value=fill_value,
)
self.tdCom.check_query_data(
f'select irowts, c_c2, isfilled, {funciton_name_alias} from {self.stb_stream_des_partition_column2_table} where irowts >= {start_force_ts} and irowts <= "{end_new_ts}" order by c_c2, irowts',
f'select _irowts as irowts , c2 as c_c2, _isfilled as isfilled , {funciton_name} as {funciton_name_alias} from {tbname} {where_tbname} and ts >= {start_force_ts} partition by {partition},c2 range("{start_new_ts}","{end_new_ts}") every({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by c_c2, irowts',
fill_value=fill_value,
)
else:
if partition == "tbname":
# check data for general table
if fill_value != "PREV":
ragne_start_ts = self.get_stream_first_ts(self.stb_stream_des_partition_tag_table, '')
self.tdCom.check_query_data(
f'select irowts, isfilled, {funciton_name_alias} from {tbname}{self.tdCom.des_table_suffix} where irowts >= {start_force_ts} and irowts <= "{end_new_ts}" order by irowts',
f'select _irowts as irowts , _isfilled as isfilled , {funciton_name} as {funciton_name_alias} from ( select *, tbname as tb1 from {tbname} where ts >= {start_force_ts} ) partition by tb1 range("{ragne_start_ts}","{end_new_ts}") every({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by irowts',
fill_value=fill_value,
)
# Recreate a sub-table that matches the "where_tag" filter and check that its data is automatically included in the stream results.
where_tag_ctbname = f"{self.ctb_name}_where_tag"
where_tag_ctbname_other_tag = f"{self.ctb_name}_where_tag_1"
tag_t1_value_other = abs(tag_t1_value)-1
tdSql.execute(
f"create table {where_tag_ctbname} using {self.stb_name} (t1) tags({tag_t1_value}) "
)
tdSql.execute(
f"create table {where_tag_ctbname_other_tag} using {self.stb_name} (t1) tags({tag_t1_value_other}) "
)
where_tag_timestamp = self.tdCom.genTs(precision=self.tdCom.precision)[0]
where_tag_ts_start_value = str(where_tag_timestamp) + "+2s"
tdSql.query("select 5;")
self.tdCom.sinsert_rows(
tbname=where_tag_ctbname, ts_value=where_tag_ts_start_value
)
self.tdCom.sinsert_rows(
tbname=where_tag_ctbname_other_tag, ts_value=where_tag_ts_start_value
)
time.sleep(self.tdCom.dataDict["interval"])
for _ in range(self.tdCom.dataDict["interval"]):
tdSql.query(
f"select distinct(table_name) from {self.stb_stream_des_where_tag_table} where table_name=\"{where_tag_ctbname}\""
)
if tdSql.queryRows > 0:
if tdSql.checkDataNotExit(0,0, where_tag_ctbname):
break
else:
time.sleep(1)
if self.delete:
self.tdCom.sdelete_rows(
tbname=self.ctb_name, start_ts=start_ts, end_ts=ts_cast_delete_value
)
self.tdCom.sdelete_rows(
tbname=self.tb_name, start_ts=start_ts, end_ts=ts_cast_delete_value
)
for tbname in [self.stb_name, self.ctb_name, self.tb_name]:
if tbname != self.tb_name:
if "value" in fill_value.lower():
fill_value = (
"VALUE,1,2,3,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11"
)
if partition == "tbname":
self.tdCom.check_query_data(
f"select wstart, {self.tdCom.fill_stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart",
f'select _wstart AS wstart, {self.tdCom.fill_stb_source_select_str} from {tbname} where ts >= {start_ts.replace("-", "+")} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart',
fill_value=fill_value,
)
else:
self.tdCom.check_query_data(
f"select wstart, {self.tdCom.fill_stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart,`min(c1)`",
f'select * from (select _wstart AS wstart, {self.tdCom.fill_stb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart) where `min(c1)` is not Null order by wstart,`min(c1)`',
fill_value=fill_value,
)
else:
if "value" in fill_value.lower():
fill_value = "VALUE,1,2,3,6,7,8,9,10,11"
if partition == "tbname":
self.tdCom.check_query_data(
f"select wstart, {self.tdCom.fill_tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart",
f'select _wstart AS wstart, {self.tdCom.fill_tb_source_select_str} from {tbname} where ts >= {start_ts.replace("-", "+")} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart',
fill_value=fill_value,
)
else:
self.tdCom.check_query_data(
f"select wstart, {self.tdCom.fill_tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart,`min(c1)`",
f'select * from (select _wstart AS wstart, {self.tdCom.fill_tb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart) where `min(c1)` is not Null order by wstart,`min(c1)`',
fill_value=fill_value,
)
def run(self):
for fill_value in ["PREV", "VALUE","NULL"]:
self.force_window_close(
interval=10,
partition="tbname",
funciton_name="interp(c1)",
funciton_name_alias="intp_c1",
delete=False,
ignore_update=1,
fill_value=fill_value,
)
self.force_window_close(
interval=8,
partition="tbname",
funciton_name="interp(c1)",
funciton_name_alias="intp_c1",
delete=False,
ignore_update=1,
fill_value="PREV",
)
# self.force_window_close(interval=random.randint(10, 15), partition="c1", ignore_update=1)
# self.force_window_close(interval=random.randint(10, 15), partition="abs(c1)", ignore_update=1)
# self.force_window_close(interval=random.randint(10, 15), partition=None, delete=True)
# self.force_window_close(interval=random.randint(10, 15), partition=self.tdCom.stream_case_when_tbname, case_when=f'case when {self.tdCom.stream_case_when_tbname} = tbname then {self.tdCom.partition_tbname_alias} else tbname end')
# self.force_window_close(interval=random.randint(10, 15), partition="tbname", fill_history_value=1, fill_value="NULL")
# for fill_value in ["NULL", "PREV", "NEXT", "LINEAR", "VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11"]:
# # for fill_value in ["PREV", "NEXT", "LINEAR", "VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11"]:
# self.at_once_interval(interval=random.randint(10, 15), partition="tbname", fill_value=fill_value)
# self.at_once_interval(interval=random.randint(10, 15), partition="tbname", fill_value=fill_value, delete=True)
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
event = threading.Event()
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())