From e1e5a7780cc500ed18c8ee696ce97fd6c954634d Mon Sep 17 00:00:00 2001 From: wangjiaming0909 <604227650@qq.com> Date: Fri, 5 Jul 2024 10:49:44 +0800 Subject: [PATCH 01/60] revise fill start and end timestamp --- source/libs/executor/src/filloperator.c | 36 +++++++++++++- tests/system-test/2-query/fill.py | 62 +++++++++++++++++++++++++ 2 files changed, 96 insertions(+), 2 deletions(-) diff --git a/source/libs/executor/src/filloperator.c b/source/libs/executor/src/filloperator.c index 4b71c5ee3f..1f03f27a0f 100644 --- a/source/libs/executor/src/filloperator.c +++ b/source/libs/executor/src/filloperator.c @@ -59,6 +59,7 @@ static void revisedFillStartKey(SFillOperatorInfo* pInfo, SSDataBlock* pBlock, i static void destroyFillOperatorInfo(void* param); static void doApplyScalarCalculation(SOperatorInfo* pOperator, SSDataBlock* pBlock, int32_t order, int32_t scanFlag); static void fillResetPrevForNewGroup(SFillInfo* pFillInfo); +static void reviseFillStartAndEndKey(SFillOperatorInfo* pInfo, int32_t order); static void doHandleRemainBlockForNewGroupImpl(SOperatorInfo* pOperator, SFillOperatorInfo* pInfo, SResultInfo* pResultInfo, int32_t order) { @@ -74,7 +75,8 @@ static void doHandleRemainBlockForNewGroupImpl(SOperatorInfo* pOperator, SFillOp blockDataCleanup(pInfo->pRes); doApplyScalarCalculation(pOperator, pInfo->existNewGroupBlock, order, scanFlag); - revisedFillStartKey(pInfo, pInfo->existNewGroupBlock, order); + //revisedFillStartKey(pInfo, pInfo->existNewGroupBlock, order); + reviseFillStartAndEndKey(pOperator->info, order); int64_t ts = (order == TSDB_ORDER_ASC) ? 
pInfo->existNewGroupBlock->info.window.ekey : pInfo->existNewGroupBlock->info.window.skey; @@ -258,7 +260,8 @@ static SSDataBlock* doFillImpl(SOperatorInfo* pOperator) { if (pInfo->curGroupId == 0 || (pInfo->curGroupId == pInfo->pRes->info.id.groupId)) { if (pInfo->curGroupId == 0 && taosFillNotStarted(pInfo->pFillInfo)) { - revisedFillStartKey(pInfo, pBlock, order); + //revisedFillStartKey(pInfo, pBlock, order); + reviseFillStartAndEndKey(pInfo, order); } pInfo->curGroupId = pInfo->pRes->info.id.groupId; // the first data block @@ -549,3 +552,32 @@ _error: taosMemoryFreeClear(pOperator); return code; } + +static void reviseFillStartAndEndKey(SFillOperatorInfo* pInfo, int32_t order) { + int64_t skey, ekey, next; + if (order == TSDB_ORDER_ASC) { + skey = taosTimeTruncate(pInfo->win.skey, &pInfo->pFillInfo->interval); + taosFillUpdateStartTimestampInfo(pInfo->pFillInfo, skey); + + ekey = taosTimeTruncate(pInfo->win.ekey, &pInfo->pFillInfo->interval); + next = ekey; + while (next < pInfo->win.ekey) { + next = taosTimeAdd(ekey, pInfo->pFillInfo->interval.sliding, pInfo->pFillInfo->interval.slidingUnit, + pInfo->pFillInfo->interval.precision); + ekey = next > pInfo->win.ekey ? ekey : next; + } + pInfo->win.ekey = ekey; + } else { + assert(order == TSDB_ORDER_DESC); + skey = taosTimeTruncate(pInfo->win.skey, &pInfo->pFillInfo->interval); + taosFillUpdateStartTimestampInfo(pInfo->pFillInfo, skey); + + next = skey; + while (next < pInfo->win.skey) { + next = taosTimeAdd(skey, pInfo->pFillInfo->interval.sliding, pInfo->pFillInfo->interval.slidingUnit, + pInfo->pFillInfo->interval.precision); + skey = next > pInfo->win.skey ? 
skey : next; + } + pInfo->win.ekey = taosTimeTruncate(pInfo->win.ekey, &pInfo->pFillInfo->interval); + } +} diff --git a/tests/system-test/2-query/fill.py b/tests/system-test/2-query/fill.py index f5cd2d5855..274e0710bd 100644 --- a/tests/system-test/2-query/fill.py +++ b/tests/system-test/2-query/fill.py @@ -1,3 +1,5 @@ +import random +from pandas._libs import interval import taos import sys @@ -15,7 +17,67 @@ class TDTestCase: #tdSql.init(conn.cursor()) tdSql.init(conn.cursor(), logSql) # output sql.txt file + def generate_fill_range(self, data_start: int, data_end: int, interval: int, step: int)-> list: + ret = [] + begin = data_start - 10 * interval + end = data_end + 10 * interval + for i in range(begin, end, step): + for j in range(begin, end, step): + ret.append((i,j)) + return ret + + def check_fill_range(self, where_start, where_end, res, sql: str): + if len(res) == 0: + tdLog.debug(f'fill sql got no rows {sql}') + return + if len(res) == 1: + tdLog.debug(f'fill sql got one row {sql}: {res}') + else: + first = res[0] + last = res[-1] + tdLog.debug(f'fill sql got rows {sql}: {res}') + + def generate_partition_by(self): + val = random.random() + if val < 0.6: + return "" + elif val < 0.8: + return "partition by location" + else: + return "partition by tbname" + + def generate_fill_interval(self)-> list[tuple]: + ret = [] + intervals = [1, 30, 60, 90, 120, 300, 3600] + for i in range(0, len(intervals)): + for j in range(0, i+1): + ret.append((intervals[i], intervals[j])) + return ret + + def generate_fill_sql(self, where_start, where_end, fill_interval: tuple) -> str: + partition_by = self.generate_partition_by() + where = f'ts >= {where_start} and ts < {where_end}' + return f'select _wstart, _wend, count(*) from meters {where} {partition_by} interval({fill_interval[0]}s) sliding({fill_interval[1]}s) fill(NULL)' + + def test_fill_range(self): + os.system('taosBenchmark -t 1000 -n 1000 -v 2 -S 32000 -y') + data_start = 1500000000 + data_end = 1500031968 + step 
= 100 + + fill_intervals: list[tuple] = self.generate_fill_interval() + fill_interval = random.choice(fill_intervals) + ranges = self.generate_fill_range(data_start, data_end, fill_interval[0], step) + range = random.choice(ranges) + sql = self.generate_fill_sql(range[0], range[1], fill_interval) + tdSql.query(sql) + res = tdSql.queryResult + self.check_fill_range(range[0], range[1], res, sql) + + ## tdSql.execute('drop database test') + def run(self): + self.test_fill_range() dbname = "db" tbname = "tb" From 0e868735beb99b8f3d9c40bf47b0a4934895a701 Mon Sep 17 00:00:00 2001 From: wangjiaming0909 <604227650@qq.com> Date: Wed, 10 Jul 2024 19:10:17 +0800 Subject: [PATCH 02/60] fill add test --- tests/system-test/2-query/fill.py | 74 +++++++++++++++++++++---------- 1 file changed, 51 insertions(+), 23 deletions(-) diff --git a/tests/system-test/2-query/fill.py b/tests/system-test/2-query/fill.py index 274e0710bd..8ea4622830 100644 --- a/tests/system-test/2-query/fill.py +++ b/tests/system-test/2-query/fill.py @@ -1,8 +1,10 @@ import random +from fabric2.runners import threading from pandas._libs import interval import taos import sys +from util.common import TDCom from util.log import * from util.sql import * from util.cases import * @@ -26,16 +28,20 @@ class TDTestCase: ret.append((i,j)) return ret - def check_fill_range(self, where_start, where_end, res, sql: str): - if len(res) == 0: - tdLog.debug(f'fill sql got no rows {sql}') + def check_fill_range(self, where_start, where_end, res_asc, res_desc, sql: str, interval): + if len(res_asc) != len(res_desc): + tdLog.exit(f"err, asc desc with different rows, asc: {len(res_asc)}, desc: {len(res_desc)} sql: {sql}") + if len(res_asc) == 0: + tdLog.info(f'from {where_start} to {where_end} no rows returned') return - if len(res) == 1: - tdLog.debug(f'fill sql got one row {sql}: {res}') + asc_first = res_asc[0] + asc_last = res_asc[-1] + desc_first = res_desc[0] + desc_last = res_desc[-1] + if asc_first[0] != desc_last[0] or 
asc_last[0] != desc_first[0]: + tdLog.exit(f'fill sql different row data {sql}: asc<{asc_first[0].timestamp()}, {asc_last[0].timestamp()}>, desc<{desc_last[0].timestamp()}, {desc_first[0].timestamp()}>') else: - first = res[0] - last = res[-1] - tdLog.debug(f'fill sql got rows {sql}: {res}') + tdLog.info(f'from {where_start} to {where_end} same time returned asc<{asc_first[0].timestamp()}, {asc_last[0].timestamp()}>, desc<{desc_last[0].timestamp()}, {desc_first[0].timestamp()}> interval: {interval}') def generate_partition_by(self): val = random.random() @@ -48,36 +54,58 @@ class TDTestCase: def generate_fill_interval(self)-> list[tuple]: ret = [] - intervals = [1, 30, 60, 90, 120, 300, 3600] + intervals = [60, 90, 120, 300, 3600] for i in range(0, len(intervals)): for j in range(0, i+1): ret.append((intervals[i], intervals[j])) return ret - def generate_fill_sql(self, where_start, where_end, fill_interval: tuple) -> str: + def generate_fill_sql(self, where_start, where_end, fill_interval: tuple): partition_by = self.generate_partition_by() - where = f'ts >= {where_start} and ts < {where_end}' - return f'select _wstart, _wend, count(*) from meters {where} {partition_by} interval({fill_interval[0]}s) sliding({fill_interval[1]}s) fill(NULL)' + where = f'where ts >= {where_start} and ts < {where_end}' + sql = f'select _wstart, _wend, count(*) from test.meters {where} {partition_by} interval({fill_interval[0]}s) sliding({fill_interval[1]}s) fill(NULL)' + sql_asc = sql + " order by _wstart asc" + sql_desc = sql + " order by _wstart desc" + return sql_asc, sql_desc + + def fill_test_thread_routine(self, cli: TDSql, interval, data_start, data_end, step): + ranges = self.generate_fill_range(data_start, data_end, interval[0], step) + for range in ranges: + sql_asc, sql_desc = self.generate_fill_sql(range[0], range[1], interval) + cli.query(sql_asc, queryTimes=1) + asc_res = cli.queryResult + cli.query(sql_desc, queryTimes=1) + desc_res = cli.queryResult + 
self.check_fill_range(range[0], range[1], asc_res,desc_res , sql_asc, interval) def test_fill_range(self): - os.system('taosBenchmark -t 1000 -n 1000 -v 2 -S 32000 -y') - data_start = 1500000000 - data_end = 1500031968 - step = 100 + os.system('taosBenchmark -t 10 -n 10000 -v 8 -S 32000 -y') + data_start = 1500000000000 + data_end = 1500319968000 + step = 2000000 + tdCom = TDCom() + inses: list[TDSql] = [] + threads: list[threading.Thread] = [] fill_intervals: list[tuple] = self.generate_fill_interval() - fill_interval = random.choice(fill_intervals) - ranges = self.generate_fill_range(data_start, data_end, fill_interval[0], step) - range = random.choice(ranges) - sql = self.generate_fill_sql(range[0], range[1], fill_interval) - tdSql.query(sql) - res = tdSql.queryResult - self.check_fill_range(range[0], range[1], res, sql) + for interval in fill_intervals: + ins = tdCom.newTdSql() + t = threading.Thread(target=self.fill_test_thread_routine, args=(ins, interval, data_start, data_end, step)) + t.start() + inses.append(ins) + threads.append(t) + + for t in threads: + t.join() + + for ins in inses: + ins.close() ## tdSql.execute('drop database test') def run(self): self.test_fill_range() + return dbname = "db" tbname = "tb" From 2c3cddb8b4f564202963091241c42ca1431157d0 Mon Sep 17 00:00:00 2001 From: wangjiaming0909 <604227650@qq.com> Date: Thu, 11 Jul 2024 15:59:53 +0800 Subject: [PATCH 03/60] add tests for fix fill asc/desc --- docs/en/12-taos-sql/12-distinguished.md | 1 + docs/zh/12-taos-sql/12-distinguished.md | 1 + source/libs/executor/src/filloperator.c | 5 +- tests/system-test/2-query/fill.py | 68 +++++++++++++++-------- tests/system-test/2-query/test_td28163.py | 2 +- 5 files changed, 50 insertions(+), 27 deletions(-) diff --git a/docs/en/12-taos-sql/12-distinguished.md b/docs/en/12-taos-sql/12-distinguished.md index bfc9ca32c0..8eecb706c0 100644 --- a/docs/en/12-taos-sql/12-distinguished.md +++ b/docs/en/12-taos-sql/12-distinguished.md @@ -102,6 +102,7 @@ The 
detailed beaviors of `NULL`, `NULL_F`, `VALUE`, and VALUE_F are described be 1. A huge volume of interpolation output may be returned using `FILL`, so it's recommended to specify the time range when using `FILL`. The maximum number of interpolation values that can be returned in a single query is 10,000,000. 2. The result set is in ascending order of timestamp when you aggregate by time window. 3. If aggregate by window is used on STable, the aggregate function is performed on all the rows matching the filter conditions. If `PARTITION BY` is not used in the query, the result set will be returned in strict ascending order of timestamp; otherwise the result set will be returned in the order of ascending timestamp in each group. +4. The output windows of Fill are related to the time range of the WHERE clause. For asc fill, the first output window is the first window that contains the start time of the WHERE clause. The last output window is the last window that contains the end time of the WHERE clause. ::: diff --git a/docs/zh/12-taos-sql/12-distinguished.md b/docs/zh/12-taos-sql/12-distinguished.md index 0eaeb0dfa7..50bf36d2e1 100755 --- a/docs/zh/12-taos-sql/12-distinguished.md +++ b/docs/zh/12-taos-sql/12-distinguished.md @@ -97,6 +97,7 @@ NULL, NULL_F, VALUE, VALUE_F 这几种填充模式针对不同场景区别如下 1. 使用 FILL 语句的时候可能生成大量的填充输出，务必指定查询的时间区间。针对每次查询，系统可返回不超过 1 千万条具有插值的结果。 2. 在时间维度聚合中，返回的结果中时间序列严格单调递增。 3. 如果查询对象是超级表，则聚合函数会作用于该超级表下满足值过滤条件的所有表的数据。如果查询中没有使用 PARTITION BY 语句，则返回的结果按照时间序列严格单调递增;如果查询中使用了 PARTITION BY 语句分组，则返回结果中每个 PARTITION 内按照时间序列严格单调递增。 +4.
Fill输出的起始和结束窗口与WHERE条件的时间范围有关, 如增序Fill时, 第一个窗口是包含WHERE条件开始时间的第一个窗口, 最后一个窗口是包含WHERE条件结束时间的最后一个窗口。 ::: diff --git a/source/libs/executor/src/filloperator.c b/source/libs/executor/src/filloperator.c index 1f03f27a0f..c4ef74608a 100644 --- a/source/libs/executor/src/filloperator.c +++ b/source/libs/executor/src/filloperator.c @@ -75,7 +75,6 @@ static void doHandleRemainBlockForNewGroupImpl(SOperatorInfo* pOperator, SFillOp blockDataCleanup(pInfo->pRes); doApplyScalarCalculation(pOperator, pInfo->existNewGroupBlock, order, scanFlag); - //revisedFillStartKey(pInfo, pInfo->existNewGroupBlock, order); reviseFillStartAndEndKey(pOperator->info, order); int64_t ts = (order == TSDB_ORDER_ASC) ? pInfo->existNewGroupBlock->info.window.ekey @@ -260,7 +259,6 @@ static SSDataBlock* doFillImpl(SOperatorInfo* pOperator) { if (pInfo->curGroupId == 0 || (pInfo->curGroupId == pInfo->pRes->info.id.groupId)) { if (pInfo->curGroupId == 0 && taosFillNotStarted(pInfo->pFillInfo)) { - //revisedFillStartKey(pInfo, pBlock, order); reviseFillStartAndEndKey(pInfo, order); } @@ -570,14 +568,13 @@ static void reviseFillStartAndEndKey(SFillOperatorInfo* pInfo, int32_t order) { } else { assert(order == TSDB_ORDER_DESC); skey = taosTimeTruncate(pInfo->win.skey, &pInfo->pFillInfo->interval); - taosFillUpdateStartTimestampInfo(pInfo->pFillInfo, skey); - next = skey; while (next < pInfo->win.skey) { next = taosTimeAdd(skey, pInfo->pFillInfo->interval.sliding, pInfo->pFillInfo->interval.slidingUnit, pInfo->pFillInfo->interval.precision); skey = next > pInfo->win.skey ? 
skey : next; } + taosFillUpdateStartTimestampInfo(pInfo->pFillInfo, skey); pInfo->win.ekey = taosTimeTruncate(pInfo->win.ekey, &pInfo->pFillInfo->interval); } } diff --git a/tests/system-test/2-query/fill.py b/tests/system-test/2-query/fill.py index 8ea4622830..64a43bd80a 100644 --- a/tests/system-test/2-query/fill.py +++ b/tests/system-test/2-query/fill.py @@ -1,3 +1,4 @@ +import queue import random from fabric2.runners import threading from pandas._libs import interval @@ -12,6 +13,7 @@ from util.cases import * class TDTestCase: + updatecfgDict = {'asynclog': 0, 'ttlUnit': 1, 'ttlPushInterval': 5, 'ratioOfVnodeStreamThrea': 4, 'numOfVnodeQueryThreads': 80} def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) @@ -19,7 +21,7 @@ class TDTestCase: #tdSql.init(conn.cursor()) tdSql.init(conn.cursor(), logSql) # output sql.txt file - def generate_fill_range(self, data_start: int, data_end: int, interval: int, step: int)-> list: + def generate_fill_range(self, data_start: int, data_end: int, interval: int, step: int): ret = [] begin = data_start - 10 * interval end = data_end + 10 * interval @@ -52,9 +54,10 @@ class TDTestCase: else: return "partition by tbname" - def generate_fill_interval(self)-> list[tuple]: + def generate_fill_interval(self): ret = [] - intervals = [60, 90, 120, 300, 3600] + #intervals = [60, 90, 120, 300, 3600] + intervals = [120, 300, 3600] for i in range(0, len(intervals)): for j in range(0, i+1): ret.append((intervals[i], intervals[j])) @@ -63,9 +66,9 @@ class TDTestCase: def generate_fill_sql(self, where_start, where_end, fill_interval: tuple): partition_by = self.generate_partition_by() where = f'where ts >= {where_start} and ts < {where_end}' - sql = f'select _wstart, _wend, count(*) from test.meters {where} {partition_by} interval({fill_interval[0]}s) sliding({fill_interval[1]}s) fill(NULL)' - sql_asc = sql + " order by _wstart asc" - sql_desc = sql + " order by _wstart desc" + sql = f'select first(_wstart), 
last(_wstart) from (select _wstart, _wend, count(*) from test.meters {where} {partition_by} interval({fill_interval[0]}s) sliding({fill_interval[1]}s) fill(NULL)' + sql_asc = sql + " order by _wstart asc) t" + sql_desc = sql + " order by _wstart desc) t" return sql_asc, sql_desc def fill_test_thread_routine(self, cli: TDSql, interval, data_start, data_end, step): @@ -78,34 +81,55 @@ class TDTestCase: desc_res = cli.queryResult self.check_fill_range(range[0], range[1], asc_res,desc_res , sql_asc, interval) - def test_fill_range(self): - os.system('taosBenchmark -t 10 -n 10000 -v 8 -S 32000 -y') + def fill_test_task_routine(self, tdCom: TDCom, queue: queue.Queue): + cli = tdCom.newTdSql() + while True: + m: list = queue.get() + if len(m) == 0: + break + interval = m[0] + range = m[1] + sql_asc, sql_desc = self.generate_fill_sql(range[0], range[1], interval) + cli.query(sql_asc, queryTimes=1) + asc_res = cli.queryResult + cli.query(sql_desc, queryTimes=1) + desc_res = cli.queryResult + self.check_fill_range(range[0], range[1], asc_res,desc_res , sql_asc, interval) + cli.close() + + def schedule_fill_test_tasks(self): + num: int = 20 + threads = [] + tdCom = TDCom() + q: queue.Queue = queue.Queue() + for _ in range(num): + t = threading.Thread(target=self.fill_test_task_routine, args=(tdCom, q)) + t.start() + threads.append(t) + data_start = 1500000000000 data_end = 1500319968000 - step = 2000000 - tdCom = TDCom() - inses: list[TDSql] = [] - threads: list[threading.Thread] = [] + step = 30000000 fill_intervals: list[tuple] = self.generate_fill_interval() for interval in fill_intervals: - ins = tdCom.newTdSql() - t = threading.Thread(target=self.fill_test_thread_routine, args=(ins, interval, data_start, data_end, step)) - t.start() - inses.append(ins) - threads.append(t) + ranges = self.generate_fill_range(data_start, data_end, interval[0], step) + for r in ranges: + q.put([interval, r]) + + for _ in range(num): + q.put([]) for t in threads: t.join() - for ins in inses: 
- ins.close() - - ## tdSql.execute('drop database test') + def test_fill_range(self): + os.system('taosBenchmark -t 10 -n 10000 -v 8 -S 32000 -y') + self.schedule_fill_test_tasks() + tdSql.execute('drop database test') def run(self): self.test_fill_range() - return dbname = "db" tbname = "tb" diff --git a/tests/system-test/2-query/test_td28163.py b/tests/system-test/2-query/test_td28163.py index a101549b66..005d78d075 100644 --- a/tests/system-test/2-query/test_td28163.py +++ b/tests/system-test/2-query/test_td28163.py @@ -176,7 +176,7 @@ class TDTestCase: def test_query_with_window(self): # time window tdSql.query("select sum(c_int_empty) from st where ts > '2024-01-01 00:00:00.000' and ts <= '2024-01-01 14:00:00.000' interval(5m) sliding(1m) fill(value, 10);") - tdSql.checkRows(841) + tdSql.checkRows(845) tdSql.checkData(0, 0, 10) tdSql.query("select _wstart, _wend, sum(c_int) from st where ts > '2024-01-01 00:00:00.000' and ts <= '2024-01-01 14:00:00.000' interval(5m) sliding(1m);") From 86b06d0a7a91a61ff4564ae01b2670cab00b5bf2 Mon Sep 17 00:00:00 2001 From: xsren <285808407@qq.com> Date: Thu, 1 Aug 2024 09:42:55 +0800 Subject: [PATCH 04/60] fix: PkOrder Mem --- source/libs/executor/src/tsort.c | 1 + 1 file changed, 1 insertion(+) diff --git a/source/libs/executor/src/tsort.c b/source/libs/executor/src/tsort.c index fa7d59e137..896b4db7cd 100644 --- a/source/libs/executor/src/tsort.c +++ b/source/libs/executor/src/tsort.c @@ -1699,6 +1699,7 @@ static int32_t initRowIdSort(SSortHandle* pHandle) { taosArrayDestroy(pHandle->pSortInfo); pHandle->pSortInfo = pOrderInfoList; + pHandle->cmpParam.pPkOrder = (pHandle->bSortPk) ? 
taosArrayGet(pHandle->pSortInfo, 1) : NULL; return TSDB_CODE_SUCCESS; } From ddbf300f238104c0f3318397f80ff76b5da76e11 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Thu, 1 Aug 2024 12:37:36 +0800 Subject: [PATCH 05/60] test: scan returned values in ci --- Jenkinsfile2 | 11 +- source/libs/parser/src/parAstCreater.c | 2 + tests/ci/filter_for_return_values | 24 +++ tests/ci/scan_file_path.py | 199 +++++++++++++++++++++++++ tests/parallel_test/container_build.sh | 2 +- 5 files changed, 234 insertions(+), 4 deletions(-) create mode 100644 tests/ci/filter_for_return_values create mode 100644 tests/ci/scan_file_path.py diff --git a/Jenkinsfile2 b/Jenkinsfile2 index 904c8b1651..b6275d0f6d 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -69,7 +69,9 @@ def check_docs() { echo "docs PR" docs_only=1 } else { - echo file_changed + echo file_changed + mkdir -p ${WK}/../log/${BRANCH_NAME}_${BUILD_ID} + echo file_changed > ${WK}/../log/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt } } } @@ -350,7 +352,6 @@ pipeline { when { allOf { not { expression { env.CHANGE_BRANCH =~ /docs\// }} - not { expression { env.CHANGE_URL =~ /\/TDinternal\// }} } } parallel { @@ -401,7 +402,7 @@ pipeline { } } stage('linux test') { - agent{label "slave1_47 || slave1_48 || slave1_49 || slave1_50 || slave1_52 || slave1_59 || slave1_63 || worker03 || slave215 || slave217 || slave219 "} + agent{label "slave1_47 "} options { skipDefaultCheckout() } when { changeRequest() @@ -425,6 +426,10 @@ pipeline { cd ${WKC}/tests/parallel_test time ./container_build.sh -w ${WKDIR} -e ''' + sh ''' + cd ${WKC}/tests/ci + python3 scan_file_path.py -b ${BRANCH_NAME}_${BUILD_ID} -f ${WK}/../log/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt + ''' def extra_param = "" def log_server_file = "/home/log_server.json" def timeout_cmd = "" diff --git a/source/libs/parser/src/parAstCreater.c b/source/libs/parser/src/parAstCreater.c index cd7cda01e0..a8979bbe1d 100644 --- a/source/libs/parser/src/parAstCreater.c +++ 
b/source/libs/parser/src/parAstCreater.c @@ -207,6 +207,8 @@ static bool checkIndexName(SAstCreateContext* pCxt, SToken* pIndexName) { return false; } return true; + + } static bool checkTopicName(SAstCreateContext* pCxt, SToken* pTopicName) { diff --git a/tests/ci/filter_for_return_values b/tests/ci/filter_for_return_values new file mode 100644 index 0000000000..734619a8af --- /dev/null +++ b/tests/ci/filter_for_return_values @@ -0,0 +1,24 @@ +match callExpr( + hasParent(anyOf( + compoundStmt(), + doStmt(hasCondition(expr().bind("cond")))) + ), + unless(hasType(voidType())), + unless(callee(functionDecl(hasName("memcpy")))), + unless(callee(functionDecl(hasName("strcpy")))), + unless(callee(functionDecl(hasName("strcat")))), + unless(callee(functionDecl(hasName("strncpy")))), + unless(callee(functionDecl(hasName("memset")))), + unless(callee(functionDecl(hasName("memmove")))), + unless(callee(functionDecl(hasName("sprintf")))), + unless(callee(functionDecl(hasName("snprintf")))), + unless(callee(functionDecl(hasName("scanf")))), + unless(callee(functionDecl(hasName("sncanf")))), + unless(callee(functionDecl(hasName("printf")))), + unless(callee(functionDecl(hasName("printRow")))), + unless(callee(functionDecl(hasName("puts")))), + unless(callee(functionDecl(hasName("sleep")))), + unless(callee(functionDecl(hasName("printResult")))), + unless(callee(functionDecl(hasName("getchar")))), + unless(callee(functionDecl(hasName("taos_print_row")))), + unless(callee(functionDecl(hasName("fprintf"))))) \ No newline at end of file diff --git a/tests/ci/scan_file_path.py b/tests/ci/scan_file_path.py new file mode 100644 index 0000000000..92f287e797 --- /dev/null +++ b/tests/ci/scan_file_path.py @@ -0,0 +1,199 @@ +import os +import sys +import subprocess +import csv +from datetime import datetime +from loguru import logger +import getopt + +opts, args = getopt.gnu_getopt(sys.argv[1:], 'b:f:', [ + 'branch_name=']) +for key, value in opts: + if key in ['-h', '--help']: + print( 
+ 'Usage: python3 scan.py -b -f ') + print('-b branch name or PR ID to scan') + print('-f change files list') + + sys.exit(0) + + if key in ['-b', '--branchName']: + branch_name = value + if key in ['-f', '--filesName']: + change_file_list = value + + +# the base source code file path +self_path = os.path.dirname(os.path.realpath(__file__)) + +if ("community" in self_path): + source_path = self_path[:self_path.find("community")] + work_path = source_path[:source_path.find("TDinternal")] + +else: + source_path = self_path[:self_path.find("tests")] + work_path = source_path[:source_path.find("TDengine")] + +# Check if "community" or "tests" is in self_path +index_community = self_path.find("community") +if index_community != -1: + source_path = self_path[:index_community] + index_TDinternal = source_path.find("TDinternal") + # Check if index_TDinternal is valid and set work_path accordingly + if index_TDinternal != -1: + work_path = source_path[:index_TDinternal] +else: + index_tests = self_path.find("tests") + if index_tests != -1: + source_path = self_path[:index_tests] + # Check if index_TDengine is valid and set work_path accordingly + index_TDengine = source_path.find("TDengine") + if index_TDengine != -1: + work_path = source_path[:index_TDengine] + + +# log file path +log_file_path = f"{source_path}/../{branch_name}/" +os.makedirs(log_file_path, exist_ok=True) + +scan_log_file = f"{log_file_path}/scan.log" +logger.add(scan_log_file, rotation="10MB", retention="7 days", level="DEBUG") +print(self_path,work_path,source_path,log_file_path) + +# scan result base path +scan_result_base_path = f"{log_file_path}/clang_scan_result/" + + +# the compile commands json file path +# compile_commands_path = f"{source_path}/../debugNoSan/compile_commands.json" +compile_commands_path = f"{source_path}/debug/compile_commands.json" + +# the ast parser rule for c file +clang_scan_rules_path = f"{self_path}/filter_for_return_values" + +# all the c files path will be checked 
+all_file_path = [] + +class CommandExecutor: + def __init__(self): + self._process = None + + def execute(self, command, timeout=None): + try: + self._process = subprocess.Popen(command, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + stdout, stderr = self._process.communicate(timeout=timeout) + return stdout.decode('utf-8'), stderr.decode('utf-8') + except subprocess.TimeoutExpired: + self._process.kill() + self._process.communicate() + raise Exception("Command execution timeout") + except Exception as e: + raise Exception("Command execution failed: %s" % e) + +def scan_files_path(source_file_path): + # scan_dir_list = ["source", "include", "docs/examples", "tests/script/api", "src/plugins"] + scan_dir_list = ["source", "include", "docs/examples", "src/plugins"] + scan_skip_file_list = ["/root/charles/TDinternal/community/tools/taosws-rs/target/release/build/openssl-sys-7811e597b848e397/out/openssl-build/install/include/openssl", + "/test/", "contrib", "debug", "deps", "/root/charles/TDinternal/community/source/libs/parser/src/sql.c", "/root/charles/TDinternal/community/source/client/jni/windows/win32/bridge/AccessBridgeCalls.c"] + for root, dirs, files in os.walk(source_file_path): + for file in files: + if any(item in root for item in scan_dir_list): + file_path = os.path.join(root, file) + if (file_path.endswith(".c") or file_path.endswith(".h") or file_path.endswith(".cpp")) and all(item not in file_path for item in scan_skip_file_list): + all_file_path.append(file_path) + logger.info("Found %s files" % len(all_file_path)) + +def input_files(change_files): + # scan_dir_list = ["source", "include", "docs/examples", "tests/script/api", "src/plugins"] + scan_dir_list = ["source", "include", "docs/examples", "src/plugins"] + scan_skip_file_list = [f"{source_path}/TDinternal/community/tools/taosws-rs/target/release/build/openssl-sys-7811e597b848e397/out/openssl-build/install/include/openssl", "/test/", "contrib", "debug", "deps", 
f"{source_path}/TDinternal/community/source/libs/parser/src/sql.c", f"{source_path}/TDinternal/community/source/client/jni/windows/win32/bridge/AccessBridgeCalls.c"] + with open(change_files, 'r') as file: + for line in file: + file_name = line.strip() + if any(dir_name in file_name for dir_name in scan_dir_list): + if (file_name.endswith(".c") or file_name.endswith(".h") or line.endswith(".cpp")) and all(dir_name not in file_name for dir_name in scan_skip_file_list): + if "enterprise" in file_name: + file_name = os.path.join(source_path, file_name) + else: + tdc_file_path = os.path.join(source_path, "community/") + file_name = os.path.join(tdc_file_path, file_name) + all_file_path.append(file_name) + print(f"all_file_path:{all_file_path}") + # for file_path in change_files: + # if (file_path.endswith(".c") or file_path.endswith(".h") or file_path.endswith(".cpp")) and all(item not in file_path for item in scan_skip_file_list): + # all_file_path.append(file_path) + logger.info("Found %s files" % len(all_file_path)) +file_res_path = "" + +def save_scan_res(res_base_path, file_path, out, err): + global file_res_path + file_res_path = os.path.join(res_base_path, file_path.replace(f"{work_path}", "").split(".")[0] + ".res") + print(f"file_res_path:{file_res_path},res_base_path:{res_base_path},file_path:{file_path}") + if not os.path.exists(os.path.dirname(file_res_path)): + os.makedirs(os.path.dirname(file_res_path)) + logger.info("Save scan result to: %s" % file_res_path) + + # save scan result + with open(file_res_path, "w") as f: + f.write(err) + f.write(out) + logger.debug(f"file_res_file: {file_res_path}") + +def write_csv(file_path, data): + try: + with open(file_path, 'w') as f: + writer = csv.writer(f) + writer.writerows(data) + except Exception as ex: + raise Exception("Failed to write the csv file: {} with msg: {}".format(file_path, repr(ex))) + +if __name__ == "__main__": + command_executor = CommandExecutor() + # get all the c files path + # 
scan_files_path(source_path) + input_files(change_file_list) + print(f"all_file_path:{all_file_path}") + res = [] + res.append(["scan_source_file", "scan_result_file", "match_num", "check_result"]) + # create dir + current_time = datetime.now().strftime("%Y%m%d%H%M%S") + scan_result_path = os.path.join(scan_result_base_path, current_time) + if not os.path.exists(scan_result_path): + os.makedirs(scan_result_path) + for file in all_file_path: + cmd = f"clang-query-10 -p {compile_commands_path} {file} -f {clang_scan_rules_path}" + print(f"cmd:{cmd}") + try: + stdout, stderr = command_executor.execute(cmd) + lines = stdout.split("\n") + if lines[-2].endswith("matches.") or lines[-2].endswith("match."): + match_num = int(lines[-2].split(" ")[0]) + logger.info("The match lines of file %s: %s" % (file, match_num)) + if match_num > 0: + logger.info(f"scan_result_path: {scan_result_path} ,file:{file}") + save_scan_res(scan_result_path, file, stdout, stderr) + res.append([file, file_res_path, match_num, 'Pass' if match_num == 0 else 'Fail']) + else: + logger.warning("The result of scan is invalid for: %s" % file) + except Exception as e: + logger.error("Execute command failed: %s" % e) + # data = "" + # for item in res: + # data += item[0] + "," + str(item[1]) + "\n" + # logger.info("Csv data: %s" % data) + write_csv(os.path.join(scan_result_path, "scan_res.csv"), res) + scan_result_log = f"{scan_result_path}/scan_res.csv" + # delete the first element of res + res= res[1:] + logger.info("The result of scan: \n") + logger.info("Total scan files: %s" % len(res)) + logger.info("Total match lines: %s" % sum([item[2] for item in res])) + logger.info(f"scan log file : {scan_result_log}") + logger.info("Pass files: %s" % len([item for item in res if item[3] == 'Pass'])) + logger.info("Fail files: %s" % len([item for item in res if item[3] == 'Fail'])) + if len([item for item in res if item[3] == 'Fail']) > 0: + logger.error(f"Scan failed,please check the log 
file:{scan_result_log}") + exit(1) \ No newline at end of file diff --git a/tests/parallel_test/container_build.sh b/tests/parallel_test/container_build.sh index 85e3d2ab73..26cabad107 100755 --- a/tests/parallel_test/container_build.sh +++ b/tests/parallel_test/container_build.sh @@ -83,7 +83,7 @@ docker run \ -v ${REP_REAL_PATH}/community/contrib/xml2/:${REP_DIR}/community/contrib/xml2 \ -v ${REP_REAL_PATH}/community/contrib/zlib/:${REP_DIR}/community/contrib/zlib \ -v ${REP_REAL_PATH}/community/contrib/zstd/:${REP_DIR}/community/contrib/zstd \ - --rm --ulimit core=-1 taos_test:v1.0 sh -c "pip uninstall taospy -y;pip3 install taospy==2.7.2;cd $REP_DIR;rm -rf debug;mkdir -p debug;cd debug;cmake .. -DBUILD_HTTP=false -DBUILD_TOOLS=true -DBUILD_TEST=true -DWEBSOCKET=true -DBUILD_TAOSX=false -DJEMALLOC_ENABLED=0;make -j 10|| exit 1" + --rm --ulimit core=-1 taos_test:v1.0 sh -c "pip uninstall taospy -y;pip3 install taospy==2.7.2;cd $REP_DIR;rm -rf debug;mkdir -p debug;cd debug;cmake .. -DBUILD_HTTP=false -DBUILD_TOOLS=true -DBUILD_TEST=true -DWEBSOCKET=true -DBUILD_TAOSX=false -DJEMALLOC_ENABLED=0 -DCMAKE_EXPORT_COMPILE_COMMANDS=1 ;make -j 10|| exit 1" # -v ${REP_REAL_PATH}/community/contrib/jemalloc/:${REP_DIR}/community/contrib/jemalloc \ if [[ -d ${WORKDIR}/debugNoSan ]] ;then From e94374f0fa626e74930d83d51026ec743c283f75 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Thu, 1 Aug 2024 14:46:32 +0800 Subject: [PATCH 06/60] test: scan returned values in ci --- Jenkinsfile2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile2 b/Jenkinsfile2 index b6275d0f6d..3904a62656 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -62,6 +62,7 @@ def check_docs() { script: ''' cd ${WKC} git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${CHANGE_TARGET}`|grep -v "^docs/en/"|grep -v "^docs/zh/" || : + mkdir -p ${WK}/../log/${BRANCH_NAME}_${BUILD_ID} ''', returnStdout: true ).trim() @@ -70,7 +71,6 @@ def check_docs() { docs_only=1 } else { 
echo file_changed - mkdir -p ${WK}/../log/${BRANCH_NAME}_${BUILD_ID} echo file_changed > ${WK}/../log/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt } } From 52ead8ad20acd0022aa0c387715b1429374129f0 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Thu, 1 Aug 2024 14:46:43 +0800 Subject: [PATCH 07/60] test: scan returned values in ci --- Jenkinsfile2 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Jenkinsfile2 b/Jenkinsfile2 index 3904a62656..d5a56f09fa 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -70,8 +70,8 @@ def check_docs() { echo "docs PR" docs_only=1 } else { - echo file_changed - echo file_changed > ${WK}/../log/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt + echo file_changed + new File("${WK}/../log/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt").write(file_changed) } } } From 5190b7c949543625c1f3ac8dd0bce498fc08e08e Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Thu, 1 Aug 2024 14:57:37 +0800 Subject: [PATCH 08/60] test: scan returned values in ci --- Jenkinsfile2 | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/Jenkinsfile2 b/Jenkinsfile2 index d5a56f09fa..4a70a3e80c 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -71,8 +71,12 @@ def check_docs() { docs_only=1 } else { echo file_changed - new File("${WK}/../log/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt").write(file_changed) } + script { + sh ''' + echo ${file_changed} > ${WK}/../log/${BRANCH_NAME}_${BUILD_ID} + ''' + } } } def pre_test(){ From d82fc89b90c34850ba502bc39c6268d021f3f85f Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Thu, 1 Aug 2024 15:02:04 +0800 Subject: [PATCH 09/60] test: scan returned values in ci --- Jenkinsfile2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile2 b/Jenkinsfile2 index 4a70a3e80c..3229ffcbca 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -74,7 +74,7 @@ def check_docs() { } script { sh ''' - echo ${file_changed} > ${WK}/../log/${BRANCH_NAME}_${BUILD_ID} + echo ${file_changed} > 
${WK}/../log/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt ''' } } From add5be6cea5c9f30a9c8cf31fe1d7a21567d5403 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Thu, 1 Aug 2024 15:17:25 +0800 Subject: [PATCH 10/60] test: scan returned values in ci --- Jenkinsfile2 | 8 +++- tests/parallel_test/container_build.sh | 64 +++++++++++++------------- 2 files changed, 38 insertions(+), 34 deletions(-) diff --git a/Jenkinsfile2 b/Jenkinsfile2 index 3229ffcbca..fea43ac978 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -4,6 +4,7 @@ import jenkins.model.CauseOfInterruption docs_only=0 node { } +def file_changed def abortPreviousBuilds() { def currentJobName = env.JOB_NAME @@ -58,7 +59,7 @@ def check_docs() { git fetch origin +refs/pull/${CHANGE_ID}/merge git checkout -qf FETCH_HEAD ''' - def file_changed = sh ( + file_changed = sh ( script: ''' cd ${WKC} git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${CHANGE_TARGET}`|grep -v "^docs/en/"|grep -v "^docs/zh/" || : @@ -76,7 +77,7 @@ def check_docs() { sh ''' echo ${file_changed} > ${WK}/../log/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt ''' - } + } } } def pre_test(){ @@ -430,6 +431,9 @@ pipeline { cd ${WKC}/tests/parallel_test time ./container_build.sh -w ${WKDIR} -e ''' + sh ''' + echo ${file_changed} > ${WK}/../log/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt + ''' sh ''' cd ${WKC}/tests/ci python3 scan_file_path.py -b ${BRANCH_NAME}_${BUILD_ID} -f ${WK}/../log/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt diff --git a/tests/parallel_test/container_build.sh b/tests/parallel_test/container_build.sh index 26cabad107..effe8f0d5e 100755 --- a/tests/parallel_test/container_build.sh +++ b/tests/parallel_test/container_build.sh @@ -103,39 +103,39 @@ fi mv ${REP_REAL_PATH}/debug ${WORKDIR}/debugNoSan date -docker run \ - -v $REP_MOUNT_PARAM \ - -v /root/.cargo/registry:/root/.cargo/registry \ - -v /root/.cargo/git:/root/.cargo/git \ - -v /root/go/pkg/mod:/root/go/pkg/mod \ - -v 
/root/.cache/go-build:/root/.cache/go-build \ - -v /root/.cos-local.1:/root/.cos-local.2 \ - -v ${REP_REAL_PATH}/enterprise/contrib/grant-lib:${REP_DIR}/enterprise/contrib/grant-lib \ - -v ${REP_REAL_PATH}/community/tools/taosadapter:${REP_DIR}/community/tools/taosadapter \ - -v ${REP_REAL_PATH}/community/tools/taos-tools:${REP_DIR}/community/tools/taos-tools \ - -v ${REP_REAL_PATH}/community/tools/taosws-rs:${REP_DIR}/community/tools/taosws-rs \ - -v ${REP_REAL_PATH}/community/tools/taosws-rs/target:${REP_DIR}/community/tools/taosws-rs/target \ - -v ${REP_REAL_PATH}/community/contrib/apr/:${REP_DIR}/community/contrib/apr \ - -v ${REP_REAL_PATH}/community/contrib/apr-util/:${REP_DIR}/community/contrib/apr-util \ - -v ${REP_REAL_PATH}/community/contrib/cJson/:${REP_DIR}/community/contrib/cJson \ - -v ${REP_REAL_PATH}/community/contrib/cpp-stub/:${REP_DIR}/community/contrib/cpp-stub \ - -v ${REP_REAL_PATH}/community/contrib/curl/:${REP_DIR}/community/contrib/curl \ - -v ${REP_REAL_PATH}/community/contrib/curl2/:${REP_DIR}/community/contrib/curl2 \ - -v ${REP_REAL_PATH}/community/contrib/geos/:${REP_DIR}/community/contrib/geos \ - -v ${REP_REAL_PATH}/community/contrib/googletest/:${REP_DIR}/community/contrib/googletest \ - -v ${REP_REAL_PATH}/community/contrib/libs3/:${REP_DIR}/community/contrib/libs3 \ - -v ${REP_REAL_PATH}/community/contrib/libuv/:${REP_DIR}/community/contrib/libuv \ - -v ${REP_REAL_PATH}/community/contrib/lz4/:${REP_DIR}/community/contrib/lz4 \ - -v ${REP_REAL_PATH}/community/contrib/lzma2/:${REP_DIR}/community/contrib/lzma2 \ - -v ${REP_REAL_PATH}/community/contrib/mxml/:${REP_DIR}/community/contrib/mxml \ - -v ${REP_REAL_PATH}/community/contrib/openssl/:${REP_DIR}/community/contrib/openssl \ - -v ${REP_REAL_PATH}/community/contrib/pcre2/:${REP_DIR}/community/contrib/pcre2 \ - -v ${REP_REAL_PATH}/community/contrib/xml2/:${REP_DIR}/community/contrib/xml2 \ - -v ${REP_REAL_PATH}/community/contrib/zlib/:${REP_DIR}/community/contrib/zlib \ - -v 
${REP_REAL_PATH}/community/contrib/zstd/:${REP_DIR}/community/contrib/zstd \ - --rm --ulimit core=-1 taos_test:v1.0 sh -c "pip uninstall taospy -y;pip3 install taospy==2.7.2;cd $REP_DIR;rm -rf debug;mkdir -p debug;cd debug;cmake .. -DBUILD_HTTP=false -DBUILD_TOOLS=true -DBUILD_TEST=false -DWEBSOCKET=true -DBUILD_SANITIZER=1 -DTOOLS_SANITIZE=true $CMAKE_BUILD_TYPE -DTOOLS_BUILD_TYPE=Debug -DBUILD_TAOSX=false -DJEMALLOC_ENABLED=0;make -j 10|| exit 1 " +# docker run \ +# -v $REP_MOUNT_PARAM \ +# -v /root/.cargo/registry:/root/.cargo/registry \ +# -v /root/.cargo/git:/root/.cargo/git \ +# -v /root/go/pkg/mod:/root/go/pkg/mod \ +# -v /root/.cache/go-build:/root/.cache/go-build \ +# -v /root/.cos-local.1:/root/.cos-local.2 \ +# -v ${REP_REAL_PATH}/enterprise/contrib/grant-lib:${REP_DIR}/enterprise/contrib/grant-lib \ +# -v ${REP_REAL_PATH}/community/tools/taosadapter:${REP_DIR}/community/tools/taosadapter \ +# -v ${REP_REAL_PATH}/community/tools/taos-tools:${REP_DIR}/community/tools/taos-tools \ +# -v ${REP_REAL_PATH}/community/tools/taosws-rs:${REP_DIR}/community/tools/taosws-rs \ +# -v ${REP_REAL_PATH}/community/tools/taosws-rs/target:${REP_DIR}/community/tools/taosws-rs/target \ +# -v ${REP_REAL_PATH}/community/contrib/apr/:${REP_DIR}/community/contrib/apr \ +# -v ${REP_REAL_PATH}/community/contrib/apr-util/:${REP_DIR}/community/contrib/apr-util \ +# -v ${REP_REAL_PATH}/community/contrib/cJson/:${REP_DIR}/community/contrib/cJson \ +# -v ${REP_REAL_PATH}/community/contrib/cpp-stub/:${REP_DIR}/community/contrib/cpp-stub \ +# -v ${REP_REAL_PATH}/community/contrib/curl/:${REP_DIR}/community/contrib/curl \ +# -v ${REP_REAL_PATH}/community/contrib/curl2/:${REP_DIR}/community/contrib/curl2 \ +# -v ${REP_REAL_PATH}/community/contrib/geos/:${REP_DIR}/community/contrib/geos \ +# -v ${REP_REAL_PATH}/community/contrib/googletest/:${REP_DIR}/community/contrib/googletest \ +# -v ${REP_REAL_PATH}/community/contrib/libs3/:${REP_DIR}/community/contrib/libs3 \ +# -v 
${REP_REAL_PATH}/community/contrib/libuv/:${REP_DIR}/community/contrib/libuv \ +# -v ${REP_REAL_PATH}/community/contrib/lz4/:${REP_DIR}/community/contrib/lz4 \ +# -v ${REP_REAL_PATH}/community/contrib/lzma2/:${REP_DIR}/community/contrib/lzma2 \ +# -v ${REP_REAL_PATH}/community/contrib/mxml/:${REP_DIR}/community/contrib/mxml \ +# -v ${REP_REAL_PATH}/community/contrib/openssl/:${REP_DIR}/community/contrib/openssl \ +# -v ${REP_REAL_PATH}/community/contrib/pcre2/:${REP_DIR}/community/contrib/pcre2 \ +# -v ${REP_REAL_PATH}/community/contrib/xml2/:${REP_DIR}/community/contrib/xml2 \ +# -v ${REP_REAL_PATH}/community/contrib/zlib/:${REP_DIR}/community/contrib/zlib \ +# -v ${REP_REAL_PATH}/community/contrib/zstd/:${REP_DIR}/community/contrib/zstd \ +# --rm --ulimit core=-1 taos_test:v1.0 sh -c "pip uninstall taospy -y;pip3 install taospy==2.7.2;cd $REP_DIR;rm -rf debug;mkdir -p debug;cd debug;cmake .. -DBUILD_HTTP=false -DBUILD_TOOLS=true -DBUILD_TEST=false -DWEBSOCKET=true -DBUILD_SANITIZER=1 -DTOOLS_SANITIZE=true $CMAKE_BUILD_TYPE -DTOOLS_BUILD_TYPE=Debug -DBUILD_TAOSX=false -DJEMALLOC_ENABLED=0;make -j 10|| exit 1 " -mv ${REP_REAL_PATH}/debug ${WORKDIR}/debugSan +# mv ${REP_REAL_PATH}/debug ${WORKDIR}/debugSan ret=$? 
exit $ret From 951299efb487f099dcddb25a977e1fa7147f8803 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Thu, 1 Aug 2024 15:36:30 +0800 Subject: [PATCH 11/60] test: scan returned values in ci --- Jenkinsfile2 | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/Jenkinsfile2 b/Jenkinsfile2 index fea43ac978..ef9391a5da 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -4,8 +4,6 @@ import jenkins.model.CauseOfInterruption docs_only=0 node { } -def file_changed - def abortPreviousBuilds() { def currentJobName = env.JOB_NAME def currentBuildNumber = env.BUILD_NUMBER.toInteger() @@ -59,7 +57,7 @@ def check_docs() { git fetch origin +refs/pull/${CHANGE_ID}/merge git checkout -qf FETCH_HEAD ''' - file_changed = sh ( + def file_changed = sh ( script: ''' cd ${WKC} git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${CHANGE_TARGET}`|grep -v "^docs/en/"|grep -v "^docs/zh/" || : @@ -73,9 +71,10 @@ def check_docs() { } else { echo file_changed } + env.FILE_CHANGED = file_changed script { sh ''' - echo ${file_changed} > ${WK}/../log/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt + echo ''' + env.FILE_CHANGED + ''' > ${WK}/../log/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt ''' } } From 81ed8c758fc9be1d2087cf3e290f2078c494d3ba Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Thu, 1 Aug 2024 15:43:22 +0800 Subject: [PATCH 12/60] test: scan returned values in ci --- Jenkinsfile2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile2 b/Jenkinsfile2 index ef9391a5da..8706db3baa 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -74,7 +74,7 @@ def check_docs() { env.FILE_CHANGED = file_changed script { sh ''' - echo ''' + env.FILE_CHANGED + ''' > ${WK}/../log/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt + echo " ''' + env.FILE_CHANGED + '''" > ${WK}/../log/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt ''' } } From c482e83fbe77842343b176cc88e40173614993a0 Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao> Date: Thu, 1 Aug 2024 15:50:21 
+0800 Subject: [PATCH 13/60] fix issue --- source/libs/executor/src/streamfilloperator.c | 1 + 1 file changed, 1 insertion(+) diff --git a/source/libs/executor/src/streamfilloperator.c b/source/libs/executor/src/streamfilloperator.c index c6bf13dabd..480814f6a0 100644 --- a/source/libs/executor/src/streamfilloperator.c +++ b/source/libs/executor/src/streamfilloperator.c @@ -470,6 +470,7 @@ static int32_t checkResult(SStreamFillSupporter* pFillSup, TSKEY ts, uint64_t gr SWinKey key = {.groupId = groupId, .ts = ts}; if (tSimpleHashGet(pFillSup->pResMap, &key, sizeof(SWinKey)) != NULL) { (*pRes) = false; + goto _end; } code = tSimpleHashPut(pFillSup->pResMap, &key, sizeof(SWinKey), NULL, 0); QUERY_CHECK_CODE(code, lino, _end); From c73de30afb331edebdbe5fb60643dde9e236b836 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Thu, 1 Aug 2024 15:56:29 +0800 Subject: [PATCH 14/60] test: scan returned values in ci --- Jenkinsfile2 | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/Jenkinsfile2 b/Jenkinsfile2 index 8706db3baa..6d6e444dee 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -74,7 +74,7 @@ def check_docs() { env.FILE_CHANGED = file_changed script { sh ''' - echo " ''' + env.FILE_CHANGED + '''" > ${WK}/../log/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt + echo "''' + env.FILE_CHANGED + '''" > ${WK}/../log/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt ''' } } @@ -431,7 +431,8 @@ pipeline { time ./container_build.sh -w ${WKDIR} -e ''' sh ''' - echo ${file_changed} > ${WK}/../log/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt + mkdir -p ${WK}/../log/${BRANCH_NAME}_${BUILD_ID} + echo "''' + env.FILE_CHANGED + '''" > ${WK}/../log/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt ''' sh ''' cd ${WKC}/tests/ci From 6826fc5f84f66f9fbac7dcb914f5974a5fd827f7 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Thu, 1 Aug 2024 16:50:24 +0800 Subject: [PATCH 15/60] test: scan returned values in ci --- Jenkinsfile2 | 6 ------ 1 file changed, 6 deletions(-) diff --git 
a/Jenkinsfile2 b/Jenkinsfile2 index 6d6e444dee..fc8d59ba89 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -61,7 +61,6 @@ def check_docs() { script: ''' cd ${WKC} git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${CHANGE_TARGET}`|grep -v "^docs/en/"|grep -v "^docs/zh/" || : - mkdir -p ${WK}/../log/${BRANCH_NAME}_${BUILD_ID} ''', returnStdout: true ).trim() @@ -72,11 +71,6 @@ def check_docs() { echo file_changed } env.FILE_CHANGED = file_changed - script { - sh ''' - echo "''' + env.FILE_CHANGED + '''" > ${WK}/../log/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt - ''' - } } } def pre_test(){ From 0940299cf5f5835d18d0b56c0397dd9609d3c138 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Thu, 1 Aug 2024 17:53:56 +0800 Subject: [PATCH 16/60] test: scan returned values in ci --- tests/ci/scan_file_path.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/tests/ci/scan_file_path.py b/tests/ci/scan_file_path.py index 92f287e797..91f0adc213 100644 --- a/tests/ci/scan_file_path.py +++ b/tests/ci/scan_file_path.py @@ -53,7 +53,7 @@ else: # log file path -log_file_path = f"{source_path}/../{branch_name}/" +log_file_path = f"{source_path}/../log/{branch_name}/" os.makedirs(log_file_path, exist_ok=True) scan_log_file = f"{log_file_path}/scan.log" @@ -65,8 +65,11 @@ scan_result_base_path = f"{log_file_path}/clang_scan_result/" # the compile commands json file path -# compile_commands_path = f"{source_path}/../debugNoSan/compile_commands.json" -compile_commands_path = f"{source_path}/debug/compile_commands.json" +compile_commands_path = f"{source_path}/../debugNoSan/compile_commands.json" +sed_command = r"sed -i 's/home/var\\lib\\jenkins\\workspace/g' compile_commands.json" +result = subprocess.run(sed_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) +logger.debug(f"STDOUT: {result.stdout} STDERR: {result.stderr}") +# compile_commands_path = f"{source_path}/debug/compile_commands.json" # the ast parser 
rule for c file clang_scan_rules_path = f"{self_path}/filter_for_return_values" @@ -164,6 +167,7 @@ if __name__ == "__main__": if not os.path.exists(scan_result_path): os.makedirs(scan_result_path) for file in all_file_path: + cmd = f"clang-query-10 -p {compile_commands_path} {file} -f {clang_scan_rules_path}" print(f"cmd:{cmd}") try: From 7cf149ff6b2b07adb6e5ff1818b98a5176c36a48 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Thu, 1 Aug 2024 18:09:55 +0800 Subject: [PATCH 17/60] test: scan returned values in ci --- tests/ci/scan_file_path.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ci/scan_file_path.py b/tests/ci/scan_file_path.py index 91f0adc213..24a2651058 100644 --- a/tests/ci/scan_file_path.py +++ b/tests/ci/scan_file_path.py @@ -65,7 +65,7 @@ scan_result_base_path = f"{log_file_path}/clang_scan_result/" # the compile commands json file path -compile_commands_path = f"{source_path}/../debugNoSan/compile_commands.json" +compile_commands_path = f"{source_path}/debugNoSan/compile_commands.json" sed_command = r"sed -i 's/home/var\\lib\\jenkins\\workspace/g' compile_commands.json" result = subprocess.run(sed_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) logger.debug(f"STDOUT: {result.stdout} STDERR: {result.stderr}") From a8c99adf8798d6f0c06ca9ac61b6254b716d7710 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Thu, 1 Aug 2024 19:30:00 +0800 Subject: [PATCH 18/60] test: scan returned values in ci --- tests/ci/scan_file_path.py | 45 ++++++++++++++++++++++---------------- 1 file changed, 26 insertions(+), 19 deletions(-) diff --git a/tests/ci/scan_file_path.py b/tests/ci/scan_file_path.py index 24a2651058..43935aa5ec 100644 --- a/tests/ci/scan_file_path.py +++ b/tests/ci/scan_file_path.py @@ -27,49 +27,56 @@ for key, value in opts: self_path = os.path.dirname(os.path.realpath(__file__)) if ("community" in self_path): - source_path = self_path[:self_path.find("community")] - work_path = 
source_path[:source_path.find("TDinternal")] + TD_project_path = self_path[:self_path.find("community")] + work_path = TD_project_path[:TD_project_path.find("TDinternal")] else: - source_path = self_path[:self_path.find("tests")] - work_path = source_path[:source_path.find("TDengine")] + TD_project_path = self_path[:self_path.find("tests")] + work_path = TD_project_path[:TD_project_path.find("TDengine")] # Check if "community" or "tests" is in self_path index_community = self_path.find("community") if index_community != -1: - source_path = self_path[:index_community] - index_TDinternal = source_path.find("TDinternal") + TD_project_path = self_path[:index_community] + index_TDinternal = TD_project_path.find("TDinternal") # Check if index_TDinternal is valid and set work_path accordingly if index_TDinternal != -1: - work_path = source_path[:index_TDinternal] + work_path = TD_project_path[:index_TDinternal] else: index_tests = self_path.find("tests") if index_tests != -1: - source_path = self_path[:index_tests] + TD_project_path = self_path[:index_tests] # Check if index_TDengine is valid and set work_path accordingly - index_TDengine = source_path.find("TDengine") + index_TDengine = TD_project_path.find("TDengine") if index_TDengine != -1: - work_path = source_path[:index_TDengine] + work_path = TD_project_path[:index_TDengine] # log file path -log_file_path = f"{source_path}/../log/{branch_name}/" +log_file_path = f"{work_path}/log/{branch_name}/" os.makedirs(log_file_path, exist_ok=True) scan_log_file = f"{log_file_path}/scan.log" logger.add(scan_log_file, rotation="10MB", retention="7 days", level="DEBUG") -print(self_path,work_path,source_path,log_file_path) +print(self_path,work_path,TD_project_path,log_file_path) # scan result base path scan_result_base_path = f"{log_file_path}/clang_scan_result/" # the compile commands json file path -compile_commands_path = f"{source_path}/debugNoSan/compile_commands.json" -sed_command = r"sed -i 
's/home/var\\lib\\jenkins\\workspace/g' compile_commands.json" +compile_commands_path = f"{work_path}/debugNoSan/compile_commands.json" +# compile_commands_path = f"{TD_project_path}/debug/compile_commands.json" +print(f"compile_commands_path:{compile_commands_path}") + +# replace the docerk worf path with real work path in compile_commands.json +docker_work_path = "home" +replace_path= work_path[1:-1] +replace_path = replace_path.replace("/", "\/") +sed_command = f"sed -i 's/{docker_work_path}/{replace_path}/g' {compile_commands_path}" +print(sed_command) result = subprocess.run(sed_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) logger.debug(f"STDOUT: {result.stdout} STDERR: {result.stderr}") -# compile_commands_path = f"{source_path}/debug/compile_commands.json" # the ast parser rule for c file clang_scan_rules_path = f"{self_path}/filter_for_return_values" @@ -112,16 +119,16 @@ def scan_files_path(source_file_path): def input_files(change_files): # scan_dir_list = ["source", "include", "docs/examples", "tests/script/api", "src/plugins"] scan_dir_list = ["source", "include", "docs/examples", "src/plugins"] - scan_skip_file_list = [f"{source_path}/TDinternal/community/tools/taosws-rs/target/release/build/openssl-sys-7811e597b848e397/out/openssl-build/install/include/openssl", "/test/", "contrib", "debug", "deps", f"{source_path}/TDinternal/community/source/libs/parser/src/sql.c", f"{source_path}/TDinternal/community/source/client/jni/windows/win32/bridge/AccessBridgeCalls.c"] + scan_skip_file_list = [f"{TD_project_path}/TDinternal/community/tools/taosws-rs/target/release/build/openssl-sys-7811e597b848e397/out/openssl-build/install/include/openssl", "/test/", "contrib", "debug", "deps", f"{TD_project_path}/TDinternal/community/source/libs/parser/src/sql.c", f"{TD_project_path}/TDinternal/community/source/client/jni/windows/win32/bridge/AccessBridgeCalls.c"] with open(change_files, 'r') as file: for line in file: file_name = 
line.strip() if any(dir_name in file_name for dir_name in scan_dir_list): if (file_name.endswith(".c") or file_name.endswith(".h") or line.endswith(".cpp")) and all(dir_name not in file_name for dir_name in scan_skip_file_list): if "enterprise" in file_name: - file_name = os.path.join(source_path, file_name) + file_name = os.path.join(TD_project_path, file_name) else: - tdc_file_path = os.path.join(source_path, "community/") + tdc_file_path = os.path.join(TD_project_path, "community/") file_name = os.path.join(tdc_file_path, file_name) all_file_path.append(file_name) print(f"all_file_path:{all_file_path}") @@ -156,7 +163,7 @@ def write_csv(file_path, data): if __name__ == "__main__": command_executor = CommandExecutor() # get all the c files path - # scan_files_path(source_path) + # scan_files_path(TD_project_path) input_files(change_file_list) print(f"all_file_path:{all_file_path}") res = [] From e26b9a9f1b4945a25b7e5eb2b0034bfdd2a66f48 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Thu, 1 Aug 2024 20:46:49 +0800 Subject: [PATCH 19/60] test: scan returned values in ci --- Jenkinsfile2 | 15 +++--- tests/ci/scan_file_path.py | 20 ++++---- tests/parallel_test/container_build.sh | 64 +++++++++++++------------- 3 files changed, 51 insertions(+), 48 deletions(-) diff --git a/Jenkinsfile2 b/Jenkinsfile2 index fc8d59ba89..95416f4363 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -418,20 +418,23 @@ pipeline { timeout(time: 200, unit: 'MINUTES'){ pre_test() script { - sh ''' - date - rm -rf ${WKC}/debug - cd ${WKC}/tests/parallel_test - time ./container_build.sh -w ${WKDIR} -e - ''' sh ''' mkdir -p ${WK}/../log/${BRANCH_NAME}_${BUILD_ID} echo "''' + env.FILE_CHANGED + '''" > ${WK}/../log/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt ''' + sh''' + cd $${WK}/;rm -rf debug;mkdir -p debug;cd debug;cmake .. 
-DBUILD_HTTP=false -DBUILD_TOOLS=true -DBUILD_TEST=true -DWEBSOCKET=true -DBUILD_TAOSX=false -DJEMALLOC_ENABLED=0 -DCMAKE_EXPORT_COMPILE_COMMANDS=1 ;make -j 10|| exit 1 + ''' sh ''' cd ${WKC}/tests/ci python3 scan_file_path.py -b ${BRANCH_NAME}_${BUILD_ID} -f ${WK}/../log/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt ''' + sh ''' + date + rm -rf ${WKC}/debug + cd ${WKC}/tests/parallel_test + time ./container_build.sh -w ${WKDIR} -e + ''' def extra_param = "" def log_server_file = "/home/log_server.json" def timeout_cmd = "" diff --git a/tests/ci/scan_file_path.py b/tests/ci/scan_file_path.py index 43935aa5ec..258c760391 100644 --- a/tests/ci/scan_file_path.py +++ b/tests/ci/scan_file_path.py @@ -65,18 +65,18 @@ scan_result_base_path = f"{log_file_path}/clang_scan_result/" # the compile commands json file path -compile_commands_path = f"{work_path}/debugNoSan/compile_commands.json" -# compile_commands_path = f"{TD_project_path}/debug/compile_commands.json" +# compile_commands_path = f"{work_path}/debugNoSan/compile_commands.json" +compile_commands_path = f"{TD_project_path}/debug/compile_commands.json" print(f"compile_commands_path:{compile_commands_path}") -# replace the docerk worf path with real work path in compile_commands.json -docker_work_path = "home" -replace_path= work_path[1:-1] -replace_path = replace_path.replace("/", "\/") -sed_command = f"sed -i 's/{docker_work_path}/{replace_path}/g' {compile_commands_path}" -print(sed_command) -result = subprocess.run(sed_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) -logger.debug(f"STDOUT: {result.stdout} STDERR: {result.stderr}") +# # replace the docerk worf path with real work path in compile_commands.json +# docker_work_path = "home" +# replace_path= work_path[1:-1] +# replace_path = replace_path.replace("/", "\/") +# sed_command = f"sed -i 's/{docker_work_path}/{replace_path}/g' {compile_commands_path}" +# print(sed_command) +# result = subprocess.run(sed_command, shell=True, 
stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) +# logger.debug(f"STDOUT: {result.stdout} STDERR: {result.stderr}") # the ast parser rule for c file clang_scan_rules_path = f"{self_path}/filter_for_return_values" diff --git a/tests/parallel_test/container_build.sh b/tests/parallel_test/container_build.sh index effe8f0d5e..26cabad107 100755 --- a/tests/parallel_test/container_build.sh +++ b/tests/parallel_test/container_build.sh @@ -103,39 +103,39 @@ fi mv ${REP_REAL_PATH}/debug ${WORKDIR}/debugNoSan date -# docker run \ -# -v $REP_MOUNT_PARAM \ -# -v /root/.cargo/registry:/root/.cargo/registry \ -# -v /root/.cargo/git:/root/.cargo/git \ -# -v /root/go/pkg/mod:/root/go/pkg/mod \ -# -v /root/.cache/go-build:/root/.cache/go-build \ -# -v /root/.cos-local.1:/root/.cos-local.2 \ -# -v ${REP_REAL_PATH}/enterprise/contrib/grant-lib:${REP_DIR}/enterprise/contrib/grant-lib \ -# -v ${REP_REAL_PATH}/community/tools/taosadapter:${REP_DIR}/community/tools/taosadapter \ -# -v ${REP_REAL_PATH}/community/tools/taos-tools:${REP_DIR}/community/tools/taos-tools \ -# -v ${REP_REAL_PATH}/community/tools/taosws-rs:${REP_DIR}/community/tools/taosws-rs \ -# -v ${REP_REAL_PATH}/community/tools/taosws-rs/target:${REP_DIR}/community/tools/taosws-rs/target \ -# -v ${REP_REAL_PATH}/community/contrib/apr/:${REP_DIR}/community/contrib/apr \ -# -v ${REP_REAL_PATH}/community/contrib/apr-util/:${REP_DIR}/community/contrib/apr-util \ -# -v ${REP_REAL_PATH}/community/contrib/cJson/:${REP_DIR}/community/contrib/cJson \ -# -v ${REP_REAL_PATH}/community/contrib/cpp-stub/:${REP_DIR}/community/contrib/cpp-stub \ -# -v ${REP_REAL_PATH}/community/contrib/curl/:${REP_DIR}/community/contrib/curl \ -# -v ${REP_REAL_PATH}/community/contrib/curl2/:${REP_DIR}/community/contrib/curl2 \ -# -v ${REP_REAL_PATH}/community/contrib/geos/:${REP_DIR}/community/contrib/geos \ -# -v ${REP_REAL_PATH}/community/contrib/googletest/:${REP_DIR}/community/contrib/googletest \ -# -v 
${REP_REAL_PATH}/community/contrib/libs3/:${REP_DIR}/community/contrib/libs3 \ -# -v ${REP_REAL_PATH}/community/contrib/libuv/:${REP_DIR}/community/contrib/libuv \ -# -v ${REP_REAL_PATH}/community/contrib/lz4/:${REP_DIR}/community/contrib/lz4 \ -# -v ${REP_REAL_PATH}/community/contrib/lzma2/:${REP_DIR}/community/contrib/lzma2 \ -# -v ${REP_REAL_PATH}/community/contrib/mxml/:${REP_DIR}/community/contrib/mxml \ -# -v ${REP_REAL_PATH}/community/contrib/openssl/:${REP_DIR}/community/contrib/openssl \ -# -v ${REP_REAL_PATH}/community/contrib/pcre2/:${REP_DIR}/community/contrib/pcre2 \ -# -v ${REP_REAL_PATH}/community/contrib/xml2/:${REP_DIR}/community/contrib/xml2 \ -# -v ${REP_REAL_PATH}/community/contrib/zlib/:${REP_DIR}/community/contrib/zlib \ -# -v ${REP_REAL_PATH}/community/contrib/zstd/:${REP_DIR}/community/contrib/zstd \ -# --rm --ulimit core=-1 taos_test:v1.0 sh -c "pip uninstall taospy -y;pip3 install taospy==2.7.2;cd $REP_DIR;rm -rf debug;mkdir -p debug;cd debug;cmake .. -DBUILD_HTTP=false -DBUILD_TOOLS=true -DBUILD_TEST=false -DWEBSOCKET=true -DBUILD_SANITIZER=1 -DTOOLS_SANITIZE=true $CMAKE_BUILD_TYPE -DTOOLS_BUILD_TYPE=Debug -DBUILD_TAOSX=false -DJEMALLOC_ENABLED=0;make -j 10|| exit 1 " +docker run \ + -v $REP_MOUNT_PARAM \ + -v /root/.cargo/registry:/root/.cargo/registry \ + -v /root/.cargo/git:/root/.cargo/git \ + -v /root/go/pkg/mod:/root/go/pkg/mod \ + -v /root/.cache/go-build:/root/.cache/go-build \ + -v /root/.cos-local.1:/root/.cos-local.2 \ + -v ${REP_REAL_PATH}/enterprise/contrib/grant-lib:${REP_DIR}/enterprise/contrib/grant-lib \ + -v ${REP_REAL_PATH}/community/tools/taosadapter:${REP_DIR}/community/tools/taosadapter \ + -v ${REP_REAL_PATH}/community/tools/taos-tools:${REP_DIR}/community/tools/taos-tools \ + -v ${REP_REAL_PATH}/community/tools/taosws-rs:${REP_DIR}/community/tools/taosws-rs \ + -v ${REP_REAL_PATH}/community/tools/taosws-rs/target:${REP_DIR}/community/tools/taosws-rs/target \ + -v 
${REP_REAL_PATH}/community/contrib/apr/:${REP_DIR}/community/contrib/apr \ + -v ${REP_REAL_PATH}/community/contrib/apr-util/:${REP_DIR}/community/contrib/apr-util \ + -v ${REP_REAL_PATH}/community/contrib/cJson/:${REP_DIR}/community/contrib/cJson \ + -v ${REP_REAL_PATH}/community/contrib/cpp-stub/:${REP_DIR}/community/contrib/cpp-stub \ + -v ${REP_REAL_PATH}/community/contrib/curl/:${REP_DIR}/community/contrib/curl \ + -v ${REP_REAL_PATH}/community/contrib/curl2/:${REP_DIR}/community/contrib/curl2 \ + -v ${REP_REAL_PATH}/community/contrib/geos/:${REP_DIR}/community/contrib/geos \ + -v ${REP_REAL_PATH}/community/contrib/googletest/:${REP_DIR}/community/contrib/googletest \ + -v ${REP_REAL_PATH}/community/contrib/libs3/:${REP_DIR}/community/contrib/libs3 \ + -v ${REP_REAL_PATH}/community/contrib/libuv/:${REP_DIR}/community/contrib/libuv \ + -v ${REP_REAL_PATH}/community/contrib/lz4/:${REP_DIR}/community/contrib/lz4 \ + -v ${REP_REAL_PATH}/community/contrib/lzma2/:${REP_DIR}/community/contrib/lzma2 \ + -v ${REP_REAL_PATH}/community/contrib/mxml/:${REP_DIR}/community/contrib/mxml \ + -v ${REP_REAL_PATH}/community/contrib/openssl/:${REP_DIR}/community/contrib/openssl \ + -v ${REP_REAL_PATH}/community/contrib/pcre2/:${REP_DIR}/community/contrib/pcre2 \ + -v ${REP_REAL_PATH}/community/contrib/xml2/:${REP_DIR}/community/contrib/xml2 \ + -v ${REP_REAL_PATH}/community/contrib/zlib/:${REP_DIR}/community/contrib/zlib \ + -v ${REP_REAL_PATH}/community/contrib/zstd/:${REP_DIR}/community/contrib/zstd \ + --rm --ulimit core=-1 taos_test:v1.0 sh -c "pip uninstall taospy -y;pip3 install taospy==2.7.2;cd $REP_DIR;rm -rf debug;mkdir -p debug;cd debug;cmake .. -DBUILD_HTTP=false -DBUILD_TOOLS=true -DBUILD_TEST=false -DWEBSOCKET=true -DBUILD_SANITIZER=1 -DTOOLS_SANITIZE=true $CMAKE_BUILD_TYPE -DTOOLS_BUILD_TYPE=Debug -DBUILD_TAOSX=false -DJEMALLOC_ENABLED=0;make -j 10|| exit 1 " -# mv ${REP_REAL_PATH}/debug ${WORKDIR}/debugSan +mv ${REP_REAL_PATH}/debug ${WORKDIR}/debugSan ret=$? 
exit $ret From 13294c9d17334feda30671f3c874cf671cbeadcb Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Thu, 1 Aug 2024 21:06:48 +0800 Subject: [PATCH 20/60] test: scan returned values in ci --- Jenkinsfile2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile2 b/Jenkinsfile2 index 95416f4363..1b710a967a 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -423,7 +423,7 @@ pipeline { echo "''' + env.FILE_CHANGED + '''" > ${WK}/../log/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt ''' sh''' - cd $${WK}/;rm -rf debug;mkdir -p debug;cd debug;cmake .. -DBUILD_HTTP=false -DBUILD_TOOLS=true -DBUILD_TEST=true -DWEBSOCKET=true -DBUILD_TAOSX=false -DJEMALLOC_ENABLED=0 -DCMAKE_EXPORT_COMPILE_COMMANDS=1 ;make -j 10|| exit 1 + cd ${WK} || exit 0 ; rm -rf debug;mkdir -p debug;cd debug;cmake .. -DBUILD_HTTP=false -DBUILD_TOOLS=true -DBUILD_TEST=true -DWEBSOCKET=true -DBUILD_TAOSX=false -DJEMALLOC_ENABLED=0 -DCMAKE_EXPORT_COMPILE_COMMANDS=1 ;make -j 10|| exit 1 ''' sh ''' cd ${WKC}/tests/ci From 3262b61df0ebc3f6a3081203cb21417da85ade3b Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Fri, 2 Aug 2024 00:48:49 +0800 Subject: [PATCH 21/60] test: scan returned values in ci --- Jenkinsfile2 | 11 +-- tests/ci/scan_file_path.py | 39 +++++++--- tests/parallel_test/run_container_scan.sh | 92 +++++++++++++++++++++++ 3 files changed, 125 insertions(+), 17 deletions(-) create mode 100755 tests/parallel_test/run_container_scan.sh diff --git a/Jenkinsfile2 b/Jenkinsfile2 index 1b710a967a..3204fc56fe 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -422,13 +422,6 @@ pipeline { mkdir -p ${WK}/../log/${BRANCH_NAME}_${BUILD_ID} echo "''' + env.FILE_CHANGED + '''" > ${WK}/../log/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt ''' - sh''' - cd ${WK} || exit 0 ; rm -rf debug;mkdir -p debug;cd debug;cmake .. 
-DBUILD_HTTP=false -DBUILD_TOOLS=true -DBUILD_TEST=true -DWEBSOCKET=true -DBUILD_TAOSX=false -DJEMALLOC_ENABLED=0 -DCMAKE_EXPORT_COMPILE_COMMANDS=1 ;make -j 10|| exit 1 - ''' - sh ''' - cd ${WKC}/tests/ci - python3 scan_file_path.py -b ${BRANCH_NAME}_${BUILD_ID} -f ${WK}/../log/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt - ''' sh ''' date rm -rf ${WKC}/debug @@ -460,6 +453,10 @@ pipeline { } } } + sh ''' + cd ${WKC}/tests/ci + run_scan_container.sh -d ${WK} -b ${BRANCH_NAME}_${BUILD_ID} -f ${WK}/../log/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt ''' + extra_param + ''' + ''' sh ''' cd ${WKC}/tests/parallel_test export DEFAULT_RETRY_TIME=2 diff --git a/tests/ci/scan_file_path.py b/tests/ci/scan_file_path.py index 258c760391..b22adde427 100644 --- a/tests/ci/scan_file_path.py +++ b/tests/ci/scan_file_path.py @@ -6,7 +6,8 @@ from datetime import datetime from loguru import logger import getopt -opts, args = getopt.gnu_getopt(sys.argv[1:], 'b:f:', [ + +opts, args = getopt.gnu_getopt(sys.argv[1:], 'b:f:w:', [ 'branch_name=']) for key, value in opts: if key in ['-h', '--help']: @@ -14,6 +15,7 @@ for key, value in opts: 'Usage: python3 scan.py -b -f ') print('-b branch name or PR ID to scan') print('-f change files list') + print('-w web server') sys.exit(0) @@ -21,18 +23,20 @@ for key, value in opts: branch_name = value if key in ['-f', '--filesName']: change_file_list = value + if key in ['-w', '--webServer']: + web_server = value # the base source code file path self_path = os.path.dirname(os.path.realpath(__file__)) -if ("community" in self_path): - TD_project_path = self_path[:self_path.find("community")] - work_path = TD_project_path[:TD_project_path.find("TDinternal")] +# if ("community" in self_path): +# TD_project_path = self_path[:self_path.find("community")] +# work_path = TD_project_path[:TD_project_path.find("TDinternal")] -else: - TD_project_path = self_path[:self_path.find("tests")] - work_path = TD_project_path[:TD_project_path.find("TDengine")] +# else: 
+# TD_project_path = self_path[:self_path.find("tests")] +# work_path = TD_project_path[:TD_project_path.find("TDengine")] # Check if "community" or "tests" is in self_path index_community = self_path.find("community") @@ -53,12 +57,16 @@ else: # log file path -log_file_path = f"{work_path}/log/{branch_name}/" +log_file_path = f"{work_path}/{branch_name}/" os.makedirs(log_file_path, exist_ok=True) scan_log_file = f"{log_file_path}/scan.log" logger.add(scan_log_file, rotation="10MB", retention="7 days", level="DEBUG") -print(self_path,work_path,TD_project_path,log_file_path) +# logging.basicConfig(level=logging.INFO, +# format='%(asctime)s | %(levelname)s | %(name)s:%(lineno)d - %(message)s') +# logger = logging.getLogger(__name__) + +print(self_path,work_path,TD_project_path,log_file_path,change_file_list) # scan result base path scan_result_base_path = f"{log_file_path}/clang_scan_result/" @@ -81,6 +89,7 @@ print(f"compile_commands_path:{compile_commands_path}") # the ast parser rule for c file clang_scan_rules_path = f"{self_path}/filter_for_return_values" +# # all the c files path will be checked all_file_path = [] @@ -140,7 +149,7 @@ file_res_path = "" def save_scan_res(res_base_path, file_path, out, err): global file_res_path - file_res_path = os.path.join(res_base_path, file_path.replace(f"{work_path}", "").split(".")[0] + ".res") + file_res_path = os.path.join(res_base_path, file_path.replace(f"{work_path}", "").split(".")[0] + ".txt") print(f"file_res_path:{file_res_path},res_base_path:{res_base_path},file_path:{file_path}") if not os.path.exists(os.path.dirname(file_res_path)): os.makedirs(os.path.dirname(file_res_path)) @@ -167,6 +176,7 @@ if __name__ == "__main__": input_files(change_file_list) print(f"all_file_path:{all_file_path}") res = [] + web_path = [] res.append(["scan_source_file", "scan_result_file", "match_num", "check_result"]) # create dir current_time = datetime.now().strftime("%Y%m%d%H%M%S") @@ -179,6 +189,7 @@ if __name__ == "__main__": 
print(f"cmd:{cmd}") try: stdout, stderr = command_executor.execute(cmd) + print(stderr) lines = stdout.split("\n") if lines[-2].endswith("matches.") or lines[-2].endswith("match."): match_num = int(lines[-2].split(" ")[0]) @@ -186,7 +197,13 @@ if __name__ == "__main__": if match_num > 0: logger.info(f"scan_result_path: {scan_result_path} ,file:{file}") save_scan_res(scan_result_path, file, stdout, stderr) + index_tests = file_res_path.find(branch_name) + if index_tests != -1: + web_path_file = file_res_path[index_tests:] + web_path_file = os.path.join(web_server, web_path_file) + web_path.append(web_path_file) res.append([file, file_res_path, match_num, 'Pass' if match_num == 0 else 'Fail']) + else: logger.warning("The result of scan is invalid for: %s" % file) except Exception as e: @@ -207,4 +224,6 @@ if __name__ == "__main__": logger.info("Fail files: %s" % len([item for item in res if item[3] == 'Fail'])) if len([item for item in res if item[3] == 'Fail']) > 0: logger.error(f"Scan failed,please check the log file:{scan_result_log}") + for index, failed_result_file in enumerate(web_path): + logger.error(f"failed number: {index}, failed_result_file: {failed_result_file}") exit(1) \ No newline at end of file diff --git a/tests/parallel_test/run_container_scan.sh b/tests/parallel_test/run_container_scan.sh new file mode 100755 index 0000000000..078757a57a --- /dev/null +++ b/tests/parallel_test/run_container_scan.sh @@ -0,0 +1,92 @@ +#!/bin/bash + +function usage() { + echo "$0" + echo -e "\t -d work dir" + echo -e "\t -b pr and id" + echo -e "\t -w web server " + echo -e "\t -f scan file " + echo -e "\t -h help" +} + +while getopts "d:b:w:f:h" opt; do + case $opt in + d) + WORKDIR=$OPTARG + ;; + b) + branch_name_id=$OPTARG + ;; + f) + scan_file_name=$OPTARG + ;; + w) + web_server=$OPTARG + ;; + h) + usage + exit 0 + ;; + \?) 
+ echo "Invalid option: -$OPTARG" + usage + exit 0 + ;; + esac +done + +if [ -z "$branch_name_id" ]; then + usage + exit 1 +fi + +if [ -z "$scan_file_name" ]; then + usage + exit 1 +fi +if [ -z "$WORKDIR" ]; then + usage + exit 1 +fi +if [ -z "$web_server" ]; then + usage + exit 1 +fi + + # enterprise edition +INTERNAL_REPDIR=$WORKDIR/TDinternal +REPDIR_DEBUG=$WORKDIR/debugNoSan/ + +REP_MOUNT_DEBUG="${REPDIR_DEBUG}:/home/TDinternal/debug/" +REP_MOUNT_PARAM="$INTERNAL_REPDIR:/home/TDinternal" + +CONTAINER_TESTDIR=/home/TDinternal/community + +#scan file log path +scan_temp="$WORKDIR/log/${branch_name_id}/" +docker_scan_temp="/home/${branch_name_id}/" +mkdir -p $scan_temp +mkdir -p $docker_scan_temp + + +scan_scripts="$CONTAINER_TESTDIR/tests/ci/scan_file_path.py" +scan_file_name="$docker_scan_temp/docs_changed.txt" + +ulimit -c unlimited +cat << EOF +docker run \ + -v $REP_MOUNT_PARAM \ + -v $REP_MOUNT_DEBUG \ + -v $scan_temp:$docker_scan_temp \ + --rm --ulimit core=-1 taos_test:v1.0 python3 $scan_scripts -b "${branch_name_id}" -f "${scan_file_name}" -w ${web_server} +EOF +docker run \ + -v $REP_MOUNT_PARAM \ + -v $REP_MOUNT_DEBUG \ + -v $scan_temp:$docker_scan_temp \ + --rm --ulimit core=-1 taos_test:v1.0 python3 $scan_scripts -b "${branch_name_id}" -f "${scan_file_name}" -w ${web_server} + + +ret=$? 
+exit $ret + From 7065daab70371ff1a2a2ed1acb66a025f098823b Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Fri, 2 Aug 2024 01:02:49 +0800 Subject: [PATCH 22/60] test: scan returned values in ci --- tests/parallel_test/container_build.sh | 64 +++++++++---------- ...ontainer_scan.sh => run_scan_container.sh} | 0 2 files changed, 32 insertions(+), 32 deletions(-) rename tests/parallel_test/{run_container_scan.sh => run_scan_container.sh} (100%) diff --git a/tests/parallel_test/container_build.sh b/tests/parallel_test/container_build.sh index 26cabad107..effe8f0d5e 100755 --- a/tests/parallel_test/container_build.sh +++ b/tests/parallel_test/container_build.sh @@ -103,39 +103,39 @@ fi mv ${REP_REAL_PATH}/debug ${WORKDIR}/debugNoSan date -docker run \ - -v $REP_MOUNT_PARAM \ - -v /root/.cargo/registry:/root/.cargo/registry \ - -v /root/.cargo/git:/root/.cargo/git \ - -v /root/go/pkg/mod:/root/go/pkg/mod \ - -v /root/.cache/go-build:/root/.cache/go-build \ - -v /root/.cos-local.1:/root/.cos-local.2 \ - -v ${REP_REAL_PATH}/enterprise/contrib/grant-lib:${REP_DIR}/enterprise/contrib/grant-lib \ - -v ${REP_REAL_PATH}/community/tools/taosadapter:${REP_DIR}/community/tools/taosadapter \ - -v ${REP_REAL_PATH}/community/tools/taos-tools:${REP_DIR}/community/tools/taos-tools \ - -v ${REP_REAL_PATH}/community/tools/taosws-rs:${REP_DIR}/community/tools/taosws-rs \ - -v ${REP_REAL_PATH}/community/tools/taosws-rs/target:${REP_DIR}/community/tools/taosws-rs/target \ - -v ${REP_REAL_PATH}/community/contrib/apr/:${REP_DIR}/community/contrib/apr \ - -v ${REP_REAL_PATH}/community/contrib/apr-util/:${REP_DIR}/community/contrib/apr-util \ - -v ${REP_REAL_PATH}/community/contrib/cJson/:${REP_DIR}/community/contrib/cJson \ - -v ${REP_REAL_PATH}/community/contrib/cpp-stub/:${REP_DIR}/community/contrib/cpp-stub \ - -v ${REP_REAL_PATH}/community/contrib/curl/:${REP_DIR}/community/contrib/curl \ - -v ${REP_REAL_PATH}/community/contrib/curl2/:${REP_DIR}/community/contrib/curl2 \ - -v 
${REP_REAL_PATH}/community/contrib/geos/:${REP_DIR}/community/contrib/geos \ - -v ${REP_REAL_PATH}/community/contrib/googletest/:${REP_DIR}/community/contrib/googletest \ - -v ${REP_REAL_PATH}/community/contrib/libs3/:${REP_DIR}/community/contrib/libs3 \ - -v ${REP_REAL_PATH}/community/contrib/libuv/:${REP_DIR}/community/contrib/libuv \ - -v ${REP_REAL_PATH}/community/contrib/lz4/:${REP_DIR}/community/contrib/lz4 \ - -v ${REP_REAL_PATH}/community/contrib/lzma2/:${REP_DIR}/community/contrib/lzma2 \ - -v ${REP_REAL_PATH}/community/contrib/mxml/:${REP_DIR}/community/contrib/mxml \ - -v ${REP_REAL_PATH}/community/contrib/openssl/:${REP_DIR}/community/contrib/openssl \ - -v ${REP_REAL_PATH}/community/contrib/pcre2/:${REP_DIR}/community/contrib/pcre2 \ - -v ${REP_REAL_PATH}/community/contrib/xml2/:${REP_DIR}/community/contrib/xml2 \ - -v ${REP_REAL_PATH}/community/contrib/zlib/:${REP_DIR}/community/contrib/zlib \ - -v ${REP_REAL_PATH}/community/contrib/zstd/:${REP_DIR}/community/contrib/zstd \ - --rm --ulimit core=-1 taos_test:v1.0 sh -c "pip uninstall taospy -y;pip3 install taospy==2.7.2;cd $REP_DIR;rm -rf debug;mkdir -p debug;cd debug;cmake .. 
-DBUILD_HTTP=false -DBUILD_TOOLS=true -DBUILD_TEST=false -DWEBSOCKET=true -DBUILD_SANITIZER=1 -DTOOLS_SANITIZE=true $CMAKE_BUILD_TYPE -DTOOLS_BUILD_TYPE=Debug -DBUILD_TAOSX=false -DJEMALLOC_ENABLED=0;make -j 10|| exit 1 " +# docker run \ +# -v $REP_MOUNT_PARAM \ +# -v /root/.cargo/registry:/root/.cargo/registry \ +# -v /root/.cargo/git:/root/.cargo/git \ +# -v /root/go/pkg/mod:/root/go/pkg/mod \ +# -v /root/.cache/go-build:/root/.cache/go-build \ +# -v /root/.cos-local.1:/root/.cos-local.2 \ +# -v ${REP_REAL_PATH}/enterprise/contrib/grant-lib:${REP_DIR}/enterprise/contrib/grant-lib \ +# -v ${REP_REAL_PATH}/community/tools/taosadapter:${REP_DIR}/community/tools/taosadapter \ +# -v ${REP_REAL_PATH}/community/tools/taos-tools:${REP_DIR}/community/tools/taos-tools \ +# -v ${REP_REAL_PATH}/community/tools/taosws-rs:${REP_DIR}/community/tools/taosws-rs \ +# -v ${REP_REAL_PATH}/community/tools/taosws-rs/target:${REP_DIR}/community/tools/taosws-rs/target \ +# -v ${REP_REAL_PATH}/community/contrib/apr/:${REP_DIR}/community/contrib/apr \ +# -v ${REP_REAL_PATH}/community/contrib/apr-util/:${REP_DIR}/community/contrib/apr-util \ +# -v ${REP_REAL_PATH}/community/contrib/cJson/:${REP_DIR}/community/contrib/cJson \ +# -v ${REP_REAL_PATH}/community/contrib/cpp-stub/:${REP_DIR}/community/contrib/cpp-stub \ +# -v ${REP_REAL_PATH}/community/contrib/curl/:${REP_DIR}/community/contrib/curl \ +# -v ${REP_REAL_PATH}/community/contrib/curl2/:${REP_DIR}/community/contrib/curl2 \ +# -v ${REP_REAL_PATH}/community/contrib/geos/:${REP_DIR}/community/contrib/geos \ +# -v ${REP_REAL_PATH}/community/contrib/googletest/:${REP_DIR}/community/contrib/googletest \ +# -v ${REP_REAL_PATH}/community/contrib/libs3/:${REP_DIR}/community/contrib/libs3 \ +# -v ${REP_REAL_PATH}/community/contrib/libuv/:${REP_DIR}/community/contrib/libuv \ +# -v ${REP_REAL_PATH}/community/contrib/lz4/:${REP_DIR}/community/contrib/lz4 \ +# -v ${REP_REAL_PATH}/community/contrib/lzma2/:${REP_DIR}/community/contrib/lzma2 \ +# -v 
${REP_REAL_PATH}/community/contrib/mxml/:${REP_DIR}/community/contrib/mxml \ +# -v ${REP_REAL_PATH}/community/contrib/openssl/:${REP_DIR}/community/contrib/openssl \ +# -v ${REP_REAL_PATH}/community/contrib/pcre2/:${REP_DIR}/community/contrib/pcre2 \ +# -v ${REP_REAL_PATH}/community/contrib/xml2/:${REP_DIR}/community/contrib/xml2 \ +# -v ${REP_REAL_PATH}/community/contrib/zlib/:${REP_DIR}/community/contrib/zlib \ +# -v ${REP_REAL_PATH}/community/contrib/zstd/:${REP_DIR}/community/contrib/zstd \ +# --rm --ulimit core=-1 taos_test:v1.0 sh -c "pip uninstall taospy -y;pip3 install taospy==2.7.2;cd $REP_DIR;rm -rf debug;mkdir -p debug;cd debug;cmake .. -DBUILD_HTTP=false -DBUILD_TOOLS=true -DBUILD_TEST=false -DWEBSOCKET=true -DBUILD_SANITIZER=1 -DTOOLS_SANITIZE=true $CMAKE_BUILD_TYPE -DTOOLS_BUILD_TYPE=Debug -DBUILD_TAOSX=false -DJEMALLOC_ENABLED=0;make -j 10|| exit 1 " -mv ${REP_REAL_PATH}/debug ${WORKDIR}/debugSan +# mv ${REP_REAL_PATH}/debug ${WORKDIR}/debugSan ret=$? exit $ret diff --git a/tests/parallel_test/run_container_scan.sh b/tests/parallel_test/run_scan_container.sh similarity index 100% rename from tests/parallel_test/run_container_scan.sh rename to tests/parallel_test/run_scan_container.sh From f81cc9353247654f75f285263dafa6fa05a83846 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Fri, 2 Aug 2024 01:12:47 +0800 Subject: [PATCH 23/60] test: scan returned values in ci --- Jenkinsfile2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile2 b/Jenkinsfile2 index 3204fc56fe..5a19ad5441 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -454,7 +454,7 @@ pipeline { } } sh ''' - cd ${WKC}/tests/ci + cd ${WKC}/tests/parallel_test run_scan_container.sh -d ${WK} -b ${BRANCH_NAME}_${BUILD_ID} -f ${WK}/../log/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt ''' + extra_param + ''' ''' sh ''' From 744de4f305516c9cee8e6491e627ce9eedee3fd8 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Fri, 2 Aug 2024 01:36:11 +0800 Subject: [PATCH 24/60] test: scan 
returned values in ci --- Jenkinsfile2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile2 b/Jenkinsfile2 index 5a19ad5441..4636702340 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -455,7 +455,7 @@ pipeline { } sh ''' cd ${WKC}/tests/parallel_test - run_scan_container.sh -d ${WK} -b ${BRANCH_NAME}_${BUILD_ID} -f ${WK}/../log/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt ''' + extra_param + ''' + run_scan_container.sh -d ${WKDIR} -b ${BRANCH_NAME}_${BUILD_ID} -f ${WK}/../log/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt ''' + extra_param + ''' ''' sh ''' cd ${WKC}/tests/parallel_test From cc74abad596da282e78b7b6fde13d9063f53375a Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Fri, 2 Aug 2024 01:53:22 +0800 Subject: [PATCH 25/60] test: scan returned values in ci --- Jenkinsfile2 | 2 +- tests/ci/filter_for_return_values | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/Jenkinsfile2 b/Jenkinsfile2 index 4636702340..15e311b41e 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -455,7 +455,7 @@ pipeline { } sh ''' cd ${WKC}/tests/parallel_test - run_scan_container.sh -d ${WKDIR} -b ${BRANCH_NAME}_${BUILD_ID} -f ${WK}/../log/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt ''' + extra_param + ''' + ./run_scan_container.sh -d ${WKDIR} -b ${BRANCH_NAME}_${BUILD_ID} -f ${WK}/../log/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt ''' + extra_param + ''' ''' sh ''' cd ${WKC}/tests/parallel_test diff --git a/tests/ci/filter_for_return_values b/tests/ci/filter_for_return_values index 734619a8af..050ceda29b 100644 --- a/tests/ci/filter_for_return_values +++ b/tests/ci/filter_for_return_values @@ -4,7 +4,6 @@ match callExpr( doStmt(hasCondition(expr().bind("cond")))) ), unless(hasType(voidType())), - unless(callee(functionDecl(hasName("memcpy")))), unless(callee(functionDecl(hasName("strcpy")))), unless(callee(functionDecl(hasName("strcat")))), unless(callee(functionDecl(hasName("strncpy")))), From 5da0d33bf20935e177661c1ac6e345c57cb2523d Mon Sep 17 
00:00:00 2001 From: chenhaoran Date: Fri, 2 Aug 2024 02:12:46 +0800 Subject: [PATCH 26/60] test: scan returned values in ci --- Jenkinsfile2 | 2 +- tests/ci/filter_for_return_values | 1 + tests/parallel_test/container_build.sh | 64 +++++++++++++------------- 3 files changed, 34 insertions(+), 33 deletions(-) diff --git a/Jenkinsfile2 b/Jenkinsfile2 index 15e311b41e..8d4652943d 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -400,7 +400,7 @@ pipeline { } } stage('linux test') { - agent{label "slave1_47 "} + agent{label "slave1_47 || slave1_48 || slave1_49 || slave1_50 || slave1_52 || slave1_59 || slave1_63 || worker03 || slave215 || slave217 || slave219 "} options { skipDefaultCheckout() } when { changeRequest() diff --git a/tests/ci/filter_for_return_values b/tests/ci/filter_for_return_values index 050ceda29b..734619a8af 100644 --- a/tests/ci/filter_for_return_values +++ b/tests/ci/filter_for_return_values @@ -4,6 +4,7 @@ match callExpr( doStmt(hasCondition(expr().bind("cond")))) ), unless(hasType(voidType())), + unless(callee(functionDecl(hasName("memcpy")))), unless(callee(functionDecl(hasName("strcpy")))), unless(callee(functionDecl(hasName("strcat")))), unless(callee(functionDecl(hasName("strncpy")))), diff --git a/tests/parallel_test/container_build.sh b/tests/parallel_test/container_build.sh index effe8f0d5e..26cabad107 100755 --- a/tests/parallel_test/container_build.sh +++ b/tests/parallel_test/container_build.sh @@ -103,39 +103,39 @@ fi mv ${REP_REAL_PATH}/debug ${WORKDIR}/debugNoSan date -# docker run \ -# -v $REP_MOUNT_PARAM \ -# -v /root/.cargo/registry:/root/.cargo/registry \ -# -v /root/.cargo/git:/root/.cargo/git \ -# -v /root/go/pkg/mod:/root/go/pkg/mod \ -# -v /root/.cache/go-build:/root/.cache/go-build \ -# -v /root/.cos-local.1:/root/.cos-local.2 \ -# -v ${REP_REAL_PATH}/enterprise/contrib/grant-lib:${REP_DIR}/enterprise/contrib/grant-lib \ -# -v ${REP_REAL_PATH}/community/tools/taosadapter:${REP_DIR}/community/tools/taosadapter \ -# -v 
${REP_REAL_PATH}/community/tools/taos-tools:${REP_DIR}/community/tools/taos-tools \ -# -v ${REP_REAL_PATH}/community/tools/taosws-rs:${REP_DIR}/community/tools/taosws-rs \ -# -v ${REP_REAL_PATH}/community/tools/taosws-rs/target:${REP_DIR}/community/tools/taosws-rs/target \ -# -v ${REP_REAL_PATH}/community/contrib/apr/:${REP_DIR}/community/contrib/apr \ -# -v ${REP_REAL_PATH}/community/contrib/apr-util/:${REP_DIR}/community/contrib/apr-util \ -# -v ${REP_REAL_PATH}/community/contrib/cJson/:${REP_DIR}/community/contrib/cJson \ -# -v ${REP_REAL_PATH}/community/contrib/cpp-stub/:${REP_DIR}/community/contrib/cpp-stub \ -# -v ${REP_REAL_PATH}/community/contrib/curl/:${REP_DIR}/community/contrib/curl \ -# -v ${REP_REAL_PATH}/community/contrib/curl2/:${REP_DIR}/community/contrib/curl2 \ -# -v ${REP_REAL_PATH}/community/contrib/geos/:${REP_DIR}/community/contrib/geos \ -# -v ${REP_REAL_PATH}/community/contrib/googletest/:${REP_DIR}/community/contrib/googletest \ -# -v ${REP_REAL_PATH}/community/contrib/libs3/:${REP_DIR}/community/contrib/libs3 \ -# -v ${REP_REAL_PATH}/community/contrib/libuv/:${REP_DIR}/community/contrib/libuv \ -# -v ${REP_REAL_PATH}/community/contrib/lz4/:${REP_DIR}/community/contrib/lz4 \ -# -v ${REP_REAL_PATH}/community/contrib/lzma2/:${REP_DIR}/community/contrib/lzma2 \ -# -v ${REP_REAL_PATH}/community/contrib/mxml/:${REP_DIR}/community/contrib/mxml \ -# -v ${REP_REAL_PATH}/community/contrib/openssl/:${REP_DIR}/community/contrib/openssl \ -# -v ${REP_REAL_PATH}/community/contrib/pcre2/:${REP_DIR}/community/contrib/pcre2 \ -# -v ${REP_REAL_PATH}/community/contrib/xml2/:${REP_DIR}/community/contrib/xml2 \ -# -v ${REP_REAL_PATH}/community/contrib/zlib/:${REP_DIR}/community/contrib/zlib \ -# -v ${REP_REAL_PATH}/community/contrib/zstd/:${REP_DIR}/community/contrib/zstd \ -# --rm --ulimit core=-1 taos_test:v1.0 sh -c "pip uninstall taospy -y;pip3 install taospy==2.7.2;cd $REP_DIR;rm -rf debug;mkdir -p debug;cd debug;cmake .. 
-DBUILD_HTTP=false -DBUILD_TOOLS=true -DBUILD_TEST=false -DWEBSOCKET=true -DBUILD_SANITIZER=1 -DTOOLS_SANITIZE=true $CMAKE_BUILD_TYPE -DTOOLS_BUILD_TYPE=Debug -DBUILD_TAOSX=false -DJEMALLOC_ENABLED=0;make -j 10|| exit 1 " +docker run \ + -v $REP_MOUNT_PARAM \ + -v /root/.cargo/registry:/root/.cargo/registry \ + -v /root/.cargo/git:/root/.cargo/git \ + -v /root/go/pkg/mod:/root/go/pkg/mod \ + -v /root/.cache/go-build:/root/.cache/go-build \ + -v /root/.cos-local.1:/root/.cos-local.2 \ + -v ${REP_REAL_PATH}/enterprise/contrib/grant-lib:${REP_DIR}/enterprise/contrib/grant-lib \ + -v ${REP_REAL_PATH}/community/tools/taosadapter:${REP_DIR}/community/tools/taosadapter \ + -v ${REP_REAL_PATH}/community/tools/taos-tools:${REP_DIR}/community/tools/taos-tools \ + -v ${REP_REAL_PATH}/community/tools/taosws-rs:${REP_DIR}/community/tools/taosws-rs \ + -v ${REP_REAL_PATH}/community/tools/taosws-rs/target:${REP_DIR}/community/tools/taosws-rs/target \ + -v ${REP_REAL_PATH}/community/contrib/apr/:${REP_DIR}/community/contrib/apr \ + -v ${REP_REAL_PATH}/community/contrib/apr-util/:${REP_DIR}/community/contrib/apr-util \ + -v ${REP_REAL_PATH}/community/contrib/cJson/:${REP_DIR}/community/contrib/cJson \ + -v ${REP_REAL_PATH}/community/contrib/cpp-stub/:${REP_DIR}/community/contrib/cpp-stub \ + -v ${REP_REAL_PATH}/community/contrib/curl/:${REP_DIR}/community/contrib/curl \ + -v ${REP_REAL_PATH}/community/contrib/curl2/:${REP_DIR}/community/contrib/curl2 \ + -v ${REP_REAL_PATH}/community/contrib/geos/:${REP_DIR}/community/contrib/geos \ + -v ${REP_REAL_PATH}/community/contrib/googletest/:${REP_DIR}/community/contrib/googletest \ + -v ${REP_REAL_PATH}/community/contrib/libs3/:${REP_DIR}/community/contrib/libs3 \ + -v ${REP_REAL_PATH}/community/contrib/libuv/:${REP_DIR}/community/contrib/libuv \ + -v ${REP_REAL_PATH}/community/contrib/lz4/:${REP_DIR}/community/contrib/lz4 \ + -v ${REP_REAL_PATH}/community/contrib/lzma2/:${REP_DIR}/community/contrib/lzma2 \ + -v 
${REP_REAL_PATH}/community/contrib/mxml/:${REP_DIR}/community/contrib/mxml \ + -v ${REP_REAL_PATH}/community/contrib/openssl/:${REP_DIR}/community/contrib/openssl \ + -v ${REP_REAL_PATH}/community/contrib/pcre2/:${REP_DIR}/community/contrib/pcre2 \ + -v ${REP_REAL_PATH}/community/contrib/xml2/:${REP_DIR}/community/contrib/xml2 \ + -v ${REP_REAL_PATH}/community/contrib/zlib/:${REP_DIR}/community/contrib/zlib \ + -v ${REP_REAL_PATH}/community/contrib/zstd/:${REP_DIR}/community/contrib/zstd \ + --rm --ulimit core=-1 taos_test:v1.0 sh -c "pip uninstall taospy -y;pip3 install taospy==2.7.2;cd $REP_DIR;rm -rf debug;mkdir -p debug;cd debug;cmake .. -DBUILD_HTTP=false -DBUILD_TOOLS=true -DBUILD_TEST=false -DWEBSOCKET=true -DBUILD_SANITIZER=1 -DTOOLS_SANITIZE=true $CMAKE_BUILD_TYPE -DTOOLS_BUILD_TYPE=Debug -DBUILD_TAOSX=false -DJEMALLOC_ENABLED=0;make -j 10|| exit 1 " -# mv ${REP_REAL_PATH}/debug ${WORKDIR}/debugSan +mv ${REP_REAL_PATH}/debug ${WORKDIR}/debugSan ret=$? exit $ret From cef1920251080a2ae66f28643e9de26f3cf5aa10 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Fri, 2 Aug 2024 09:13:33 +0800 Subject: [PATCH 27/60] test: scan returned values in ci --- tests/ci/scan_file_path.py | 23 ++++++++++------------- 1 file changed, 10 insertions(+), 13 deletions(-) diff --git a/tests/ci/scan_file_path.py b/tests/ci/scan_file_path.py index b22adde427..df4dc16aa4 100644 --- a/tests/ci/scan_file_path.py +++ b/tests/ci/scan_file_path.py @@ -62,11 +62,8 @@ os.makedirs(log_file_path, exist_ok=True) scan_log_file = f"{log_file_path}/scan.log" logger.add(scan_log_file, rotation="10MB", retention="7 days", level="DEBUG") -# logging.basicConfig(level=logging.INFO, -# format='%(asctime)s | %(levelname)s | %(name)s:%(lineno)d - %(message)s') -# logger = logging.getLogger(__name__) - -print(self_path,work_path,TD_project_path,log_file_path,change_file_list) +#if error happens, open this to debug +# print(self_path,work_path,TD_project_path,log_file_path,change_file_list) # scan result 
base path scan_result_base_path = f"{log_file_path}/clang_scan_result/" @@ -75,7 +72,9 @@ scan_result_base_path = f"{log_file_path}/clang_scan_result/" # the compile commands json file path # compile_commands_path = f"{work_path}/debugNoSan/compile_commands.json" compile_commands_path = f"{TD_project_path}/debug/compile_commands.json" -print(f"compile_commands_path:{compile_commands_path}") + +#if error happens, open this to debug +# print(f"compile_commands_path:{compile_commands_path}") # # replace the docerk worf path with real work path in compile_commands.json # docker_work_path = "home" @@ -140,17 +139,14 @@ def input_files(change_files): tdc_file_path = os.path.join(TD_project_path, "community/") file_name = os.path.join(tdc_file_path, file_name) all_file_path.append(file_name) - print(f"all_file_path:{all_file_path}") - # for file_path in change_files: - # if (file_path.endswith(".c") or file_path.endswith(".h") or file_path.endswith(".cpp")) and all(item not in file_path for item in scan_skip_file_list): - # all_file_path.append(file_path) + # print(f"all_file_path:{all_file_path}") logger.info("Found %s files" % len(all_file_path)) file_res_path = "" def save_scan_res(res_base_path, file_path, out, err): global file_res_path file_res_path = os.path.join(res_base_path, file_path.replace(f"{work_path}", "").split(".")[0] + ".txt") - print(f"file_res_path:{file_res_path},res_base_path:{res_base_path},file_path:{file_path}") + # print(f"file_res_path:{file_res_path},res_base_path:{res_base_path},file_path:{file_path}") if not os.path.exists(os.path.dirname(file_res_path)): os.makedirs(os.path.dirname(file_res_path)) logger.info("Save scan result to: %s" % file_res_path) @@ -174,7 +170,7 @@ if __name__ == "__main__": # get all the c files path # scan_files_path(TD_project_path) input_files(change_file_list) - print(f"all_file_path:{all_file_path}") + # print(f"all_file_path:{all_file_path}") res = [] web_path = [] res.append(["scan_source_file", 
"scan_result_file", "match_num", "check_result"]) @@ -189,7 +185,8 @@ if __name__ == "__main__": print(f"cmd:{cmd}") try: stdout, stderr = command_executor.execute(cmd) - print(stderr) + #if "error" in stderr: + # print(stderr) lines = stdout.split("\n") if lines[-2].endswith("matches.") or lines[-2].endswith("match."): match_num = int(lines[-2].split(" ")[0]) From ac706ee37a8835af0433d28c9cd0b61add0dd829 Mon Sep 17 00:00:00 2001 From: dmchen Date: Fri, 2 Aug 2024 02:11:59 +0000 Subject: [PATCH 28/60] fix/TD-31176 --- include/util/taoserror.h | 1 + source/dnode/mnode/impl/src/mndCompact.c | 4 ++++ source/util/src/terror.c | 1 + 3 files changed, 6 insertions(+) diff --git a/include/util/taoserror.h b/include/util/taoserror.h index 7ff3e500cf..f0cb30e7e0 100644 --- a/include/util/taoserror.h +++ b/include/util/taoserror.h @@ -488,6 +488,7 @@ int32_t taosGetErrSize(); //mnode-compact #define TSDB_CODE_MND_INVALID_COMPACT_ID TAOS_DEF_ERROR_CODE(0, 0x04B1) #define TSDB_CODE_MND_COMPACT_DETAIL_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x04B2) +#define TSDB_CODE_MND_COMPACT_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x04B3) // vnode // #define TSDB_CODE_VND_ACTION_IN_PROGRESS TAOS_DEF_ERROR_CODE(0, 0x0500) // 2.x diff --git a/source/dnode/mnode/impl/src/mndCompact.c b/source/dnode/mnode/impl/src/mndCompact.c index da01a4b2f6..5a789ee369 100644 --- a/source/dnode/mnode/impl/src/mndCompact.c +++ b/source/dnode/mnode/impl/src/mndCompact.c @@ -523,6 +523,10 @@ static int32_t mndUpdateCompactProgress(SMnode *pMnode, SRpcMsg *pReq, int32_t c int32_t mndProcessQueryCompactRsp(SRpcMsg *pReq) { int32_t code = 0; SQueryCompactProgressRsp req = {0}; + if (pReq->code != 0) { + mError("received wrong compact response, req code is %d", pReq->code); + TAOS_RETURN(pReq->code); + } code = tDeserializeSQueryCompactProgressRsp(pReq->pCont, pReq->contLen, &req); if (code != 0) { mError("failed to deserialize vnode-query-compact-progress-rsp, ret:%d, pCont:%p, len:%d", code, pReq->pCont, diff --git 
a/source/util/src/terror.c b/source/util/src/terror.c index a0c3b3f766..e9bdafcd5a 100644 --- a/source/util/src/terror.c +++ b/source/util/src/terror.c @@ -357,6 +357,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_VIEW_NOT_EXIST, "view not exists in db //mnode-compact TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_COMPACT_ID, "Invalid compact id") TAOS_DEFINE_ERROR(TSDB_CODE_MND_COMPACT_DETAIL_NOT_EXIST, "compact detail doesn't exist") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_COMPACT_ALREADY_EXIST, "compact already exist") // dnode TAOS_DEFINE_ERROR(TSDB_CODE_DNODE_OFFLINE, "Dnode is offline") From ce006e0b91c607fb33fee67cf31c4b6ddfa550e0 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Fri, 2 Aug 2024 10:30:49 +0800 Subject: [PATCH 29/60] test: scan returned values in ci --- source/libs/parser/src/parAstCreater.c | 2 -- tests/ci/scan_file_path.py | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/source/libs/parser/src/parAstCreater.c b/source/libs/parser/src/parAstCreater.c index a8979bbe1d..cd7cda01e0 100644 --- a/source/libs/parser/src/parAstCreater.c +++ b/source/libs/parser/src/parAstCreater.c @@ -207,8 +207,6 @@ static bool checkIndexName(SAstCreateContext* pCxt, SToken* pIndexName) { return false; } return true; - - } static bool checkTopicName(SAstCreateContext* pCxt, SToken* pTopicName) { diff --git a/tests/ci/scan_file_path.py b/tests/ci/scan_file_path.py index df4dc16aa4..6058cbb72f 100644 --- a/tests/ci/scan_file_path.py +++ b/tests/ci/scan_file_path.py @@ -182,7 +182,7 @@ if __name__ == "__main__": for file in all_file_path: cmd = f"clang-query-10 -p {compile_commands_path} {file} -f {clang_scan_rules_path}" - print(f"cmd:{cmd}") + logger.debug(f"cmd:{cmd}") try: stdout, stderr = command_executor.execute(cmd) #if "error" in stderr: From 7f04e2cfb9c94444dad4611eca6ade1392792679 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 2 Aug 2024 10:47:06 +0800 Subject: [PATCH 30/60] fix(stream): fix memory leak. 
--- include/common/tmsg.h | 1 + source/common/src/tmsg.c | 8 +++++++ source/dnode/mnode/impl/src/mndStream.c | 32 +++++++++++++------------ 3 files changed, 26 insertions(+), 15 deletions(-) diff --git a/include/common/tmsg.h b/include/common/tmsg.h index b06adf3f2d..41e423ff6e 100644 --- a/include/common/tmsg.h +++ b/include/common/tmsg.h @@ -1847,6 +1847,7 @@ typedef struct SMStreamDropOrphanMsg { int32_t tSerializeDropOrphanTaskMsg(void* buf, int32_t bufLen, SMStreamDropOrphanMsg* pMsg); int32_t tDeserializeDropOrphanTaskMsg(void* buf, int32_t bufLen, SMStreamDropOrphanMsg* pMsg); +void tDestroyDropOrphanTaskMsg(SMStreamDropOrphanMsg* pMsg); typedef struct { int32_t id; diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c index 32915ba884..9529a75ba8 100644 --- a/source/common/src/tmsg.c +++ b/source/common/src/tmsg.c @@ -5360,6 +5360,14 @@ int32_t tDeserializeDropOrphanTaskMsg(void* buf, int32_t bufLen, SMStreamDropOrp return 0; } +void tDestroyDropOrphanTaskMsg(SMStreamDropOrphanMsg *pMsg) { + if (pMsg == NULL) { + return; + } + + taosArrayDestroy(pMsg->pList); +} + int32_t tEncodeSReplica(SEncoder *pEncoder, SReplica *pReplica) { if (tEncodeI32(pEncoder, pReplica->id) < 0) return -1; if (tEncodeU16(pEncoder, pReplica->port) < 0) return -1; diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index b7ab76984a..2235b88c90 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -2749,6 +2749,8 @@ static int32_t mndProcessDropOrphanTaskReq(SRpcMsg *pReq) { int32_t code = 0; SOrphanTask *pTask = NULL; int32_t i = 0; + STrans *pTrans = NULL; + int32_t numOfTasks = 0; SMStreamDropOrphanMsg msg = {0}; code = tDeserializeDropOrphanTaskMsg(pReq->pCont, pReq->contLen, &msg); @@ -2756,10 +2758,10 @@ static int32_t mndProcessDropOrphanTaskReq(SRpcMsg *pReq) { return code; } - int32_t numOfTasks = taosArrayGetSize(msg.pList); + numOfTasks = taosArrayGetSize(msg.pList); if 
(numOfTasks == 0) { mDebug("no orphan tasks to drop, no need to create trans"); - return code; + goto _err; } mDebug("create trans to drop %d orphan tasks", numOfTasks); @@ -2771,52 +2773,52 @@ static int32_t mndProcessDropOrphanTaskReq(SRpcMsg *pReq) { if (pTask == NULL) { mError("failed to extract entry in drop orphan task list, not create trans to drop orphan-task"); - return TSDB_CODE_SUCCESS; + goto _err; } // check if it is conflict with other trans in both sourceDb and targetDb. bool conflict = mndStreamTransConflictCheck(pMnode, pTask->streamId, MND_STREAM_DROP_NAME, false); if (conflict) { - return -1; + code = TSDB_CODE_MND_TRANS_CONFLICT; + goto _err; } SStreamObj dummyObj = {.uid = pTask->streamId, .sourceDb = "", .targetSTbName = ""}; - STrans *pTrans = NULL; + code = doCreateTrans(pMnode, &dummyObj, NULL, TRN_CONFLICT_NOTHING, MND_STREAM_DROP_NAME, "drop stream", &pTrans); if (pTrans == NULL || code != 0) { mError("failed to create trans to drop orphan tasks since %s", terrstr()); - return code; + goto _err; } code = mndStreamRegisterTrans(pTrans, MND_STREAM_DROP_NAME, pTask->streamId); if (code) { - return code; + goto _err; } // drop all tasks if ((code = mndStreamSetDropActionFromList(pMnode, pTrans, msg.pList)) < 0) { mError("failed to create trans to drop orphan tasks since %s", terrstr()); - mndTransDrop(pTrans); - return code; + goto _err; } // drop stream if ((code = mndPersistTransLog(&dummyObj, pTrans, SDB_STATUS_DROPPED)) < 0) { - mndTransDrop(pTrans); - return code; + goto _err; } code = mndTransPrepare(pMnode, pTrans); if (code != TSDB_CODE_SUCCESS && code != TSDB_CODE_ACTION_IN_PROGRESS) { mError("trans:%d, failed to prepare drop stream trans since %s", pTrans->id, terrstr()); - mndTransDrop(pTrans); - return code; + goto _err; } +_err: + tDestroyDropOrphanTaskMsg(&msg); + mndTransDrop(pTrans); + if (code == TSDB_CODE_SUCCESS) { mDebug("create drop %d orphan tasks trans succ", numOfTasks); } - - mndTransDrop(pTrans); return code; } \ No 
newline at end of file From 728900886954a3835396684720d9011fe3a6b29a Mon Sep 17 00:00:00 2001 From: dmchen Date: Fri, 2 Aug 2024 02:51:29 +0000 Subject: [PATCH 31/60] fix/TD-31176-error-str --- source/dnode/mnode/impl/src/mndCompact.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/dnode/mnode/impl/src/mndCompact.c b/source/dnode/mnode/impl/src/mndCompact.c index 5a789ee369..8b45b13dd1 100644 --- a/source/dnode/mnode/impl/src/mndCompact.c +++ b/source/dnode/mnode/impl/src/mndCompact.c @@ -524,7 +524,7 @@ int32_t mndProcessQueryCompactRsp(SRpcMsg *pReq) { int32_t code = 0; SQueryCompactProgressRsp req = {0}; if (pReq->code != 0) { - mError("received wrong compact response, req code is %d", pReq->code); + mError("received wrong compact response, req code is %s", tstrerror(pReq->code)); TAOS_RETURN(pReq->code); } code = tDeserializeSQueryCompactProgressRsp(pReq->pCont, pReq->contLen, &req); From 965a4e56ea3dcf26e6245a505a758445c8d18a6f Mon Sep 17 00:00:00 2001 From: Shungang Li Date: Tue, 30 Jul 2024 19:25:23 +0800 Subject: [PATCH 32/60] fix: check taosSetSystemLocale errcode --- source/common/src/tglobal.c | 64 ++++++++++++++++++++++++------------- source/os/src/osLocale.c | 2 +- 2 files changed, 43 insertions(+), 23 deletions(-) diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 46219fe34c..51ff2bb506 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -1212,7 +1212,17 @@ static int32_t taosSetSystemCfg(SConfig *pCfg) { TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "charset"); const char *charset = pItem->str; - (void)taosSetSystemLocale(locale, charset); // ignore this error temporarily + int32_t code = taosSetSystemLocale(locale, charset); + if (TSDB_CODE_SUCCESS != code) { + uInfo("failed to set locale %s, since: %s", locale, tstrerror(code)); + char curLocale[TD_LOCALE_LEN] = {0}; + char curCharset[TD_CHARSET_LEN] = {0}; + taosGetSystemLocale(curLocale, curCharset); + if (0 != strlen(curLocale) 
&& 0 != strlen(curCharset)) { + uInfo("current locale: %s, charset: %s", curLocale, curCharset); + } + } + osSetSystemLocale(locale, charset); TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "enableCoreFile"); @@ -1669,25 +1679,28 @@ static int32_t cfgInitWrapper(SConfig **pCfg) { } TAOS_RETURN(TSDB_CODE_SUCCESS); } + int32_t taosInitCfg(const char *cfgDir, const char **envCmd, const char *envFile, char *apolloUrl, SArray *pArgs, bool tsc) { if (tsCfg != NULL) TAOS_RETURN(TSDB_CODE_SUCCESS); - TAOS_CHECK_RETURN(cfgInitWrapper(&tsCfg)); + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = -1; + + TAOS_CHECK_GOTO(cfgInitWrapper(&tsCfg), &lino, _exit); if (tsc) { - TAOS_CHECK_RETURN(taosAddClientCfg(tsCfg)); - TAOS_CHECK_RETURN(taosAddClientLogCfg(tsCfg)); + TAOS_CHECK_GOTO(taosAddClientCfg(tsCfg), &lino, _exit); + TAOS_CHECK_GOTO(taosAddClientLogCfg(tsCfg), &lino, _exit); } else { - TAOS_CHECK_RETURN(taosAddClientCfg(tsCfg)); - TAOS_CHECK_RETURN(taosAddServerCfg(tsCfg)); - TAOS_CHECK_RETURN(taosAddClientLogCfg(tsCfg)); - TAOS_CHECK_RETURN(taosAddServerLogCfg(tsCfg)); + TAOS_CHECK_GOTO(taosAddClientCfg(tsCfg), &lino, _exit); + TAOS_CHECK_GOTO(taosAddServerCfg(tsCfg), &lino, _exit); + TAOS_CHECK_GOTO(taosAddClientLogCfg(tsCfg), &lino, _exit); + TAOS_CHECK_GOTO(taosAddServerLogCfg(tsCfg), &lino, _exit); } - TAOS_CHECK_RETURN(taosAddSystemCfg(tsCfg)); + TAOS_CHECK_GOTO(taosAddSystemCfg(tsCfg), &lino, _exit); - int32_t code = TSDB_CODE_SUCCESS; if ((code = taosLoadCfg(tsCfg, envCmd, cfgDir, envFile, apolloUrl)) != 0) { uError("failed to load cfg since %s", tstrerror(code)); cfgCleanup(tsCfg); @@ -1703,31 +1716,38 @@ int32_t taosInitCfg(const char *cfgDir, const char **envCmd, const char *envFile } if (tsc) { - TAOS_CHECK_RETURN(taosSetClientCfg(tsCfg)); + TAOS_CHECK_GOTO(taosSetClientCfg(tsCfg), &lino, _exit); } else { - TAOS_CHECK_RETURN(taosSetClientCfg(tsCfg)); - TAOS_CHECK_RETURN(taosUpdateServerCfg(tsCfg)); - TAOS_CHECK_RETURN(taosSetServerCfg(tsCfg)); - 
TAOS_CHECK_RETURN(taosSetReleaseCfg(tsCfg)); - TAOS_CHECK_RETURN(taosSetTfsCfg(tsCfg)); - TAOS_CHECK_RETURN(taosSetS3Cfg(tsCfg)); + TAOS_CHECK_GOTO(taosSetClientCfg(tsCfg), &lino, _exit); + TAOS_CHECK_GOTO(taosUpdateServerCfg(tsCfg), &lino, _exit); + TAOS_CHECK_GOTO(taosSetServerCfg(tsCfg), &lino, _exit); + TAOS_CHECK_GOTO(taosSetReleaseCfg(tsCfg), &lino, _exit); + TAOS_CHECK_GOTO(taosSetTfsCfg(tsCfg), &lino, _exit); + TAOS_CHECK_GOTO(taosSetS3Cfg(tsCfg), &lino, _exit); } - TAOS_CHECK_RETURN(taosSetSystemCfg(tsCfg)); - TAOS_CHECK_RETURN(taosSetFileHandlesLimit()); + TAOS_CHECK_GOTO(taosSetSystemCfg(tsCfg), &lino, _exit); + TAOS_CHECK_GOTO(taosSetFileHandlesLimit(), &lino, _exit); SConfigItem *pItem = cfgGetItem(tsCfg, "debugFlag"); if (NULL == pItem) { uError("debugFlag not found in cfg"); TAOS_RETURN(TSDB_CODE_CFG_NOT_FOUND); } - TAOS_CHECK_RETURN(taosSetAllDebugFlag(tsCfg, pItem->i32)); + TAOS_CHECK_GOTO(taosSetAllDebugFlag(tsCfg, pItem->i32), &lino, _exit); cfgDumpCfg(tsCfg, tsc, false); - TAOS_CHECK_RETURN(taosCheckGlobalCfg()); + TAOS_CHECK_GOTO(taosCheckGlobalCfg(), &lino, _exit); - TAOS_RETURN(TSDB_CODE_SUCCESS); +_exit: + if (TSDB_CODE_SUCCESS != code) { + cfgCleanup(tsCfg); + tsCfg = NULL; + uError("failed to init cfg at %d since %s", lino, tstrerror(code)); + } + + TAOS_RETURN(code); } void taosCleanupCfg() { diff --git a/source/os/src/osLocale.c b/source/os/src/osLocale.c index c846ca82a3..2f835a7a27 100644 --- a/source/os/src/osLocale.c +++ b/source/os/src/osLocale.c @@ -75,7 +75,7 @@ char *taosCharsetReplace(char *charsetstr) { * * In case that the setLocale failed to be executed, the right charset needs to be set. 
*/ -int32_t taosSetSystemLocale(const char *inLocale, const char *inCharSet) {\ +int32_t taosSetSystemLocale(const char *inLocale, const char *inCharSet) { if (!taosValidateEncodec(inCharSet)) { return terrno; } From d7ee1ec49bd246ff31718f18162e56fb6564859a Mon Sep 17 00:00:00 2001 From: wangjiaming0909 <604227650@qq.com> Date: Fri, 2 Aug 2024 11:16:13 +0800 Subject: [PATCH 33/60] fix taos -s caused memory leak and crash --- source/client/inc/clientInt.h | 1 + source/client/src/clientEnv.c | 8 +------- source/client/src/clientImpl.c | 4 ++++ source/client/src/clientMain.c | 3 ++- tools/shell/src/shellEngine.c | 1 + 5 files changed, 9 insertions(+), 8 deletions(-) diff --git a/source/client/inc/clientInt.h b/source/client/inc/clientInt.h index 928afdaecf..30424adecd 100644 --- a/source/client/inc/clientInt.h +++ b/source/client/inc/clientInt.h @@ -373,6 +373,7 @@ int taos_options_imp(TSDB_OPTION option, const char* str); int32_t openTransporter(const char* user, const char* auth, int32_t numOfThreads, void **pDnodeConn); void tscStopCrashReport(); +void cleanupAppInfo(); typedef struct AsyncArg { SRpcMsg msg; diff --git a/source/client/src/clientEnv.c b/source/client/src/clientEnv.c index 2258a4bc8c..35e6651c41 100644 --- a/source/client/src/clientEnv.c +++ b/source/client/src/clientEnv.c @@ -688,7 +688,6 @@ void doDestroyRequest(void *p) { taosArrayDestroy(pRequest->tableList); taosArrayDestroy(pRequest->targetTableList); - destroyQueryExecRes(&pRequest->body.resInfo.execRes); if (pRequest->self) { @@ -702,12 +701,7 @@ void doDestroyRequest(void *p) { } taosMemoryFree(pRequest->body.interParam); - if (TSDB_CODE_SUCCESS == nodesSimAcquireAllocator(pRequest->allocatorRefId)) { - qDestroyQuery(pRequest->pQuery); - if (TSDB_CODE_SUCCESS != nodesSimReleaseAllocator(pRequest->allocatorRefId)) { - tscError("failed to release allocator"); - } - } + qDestroyQuery(pRequest->pQuery); nodesDestroyAllocator(pRequest->allocatorRefId); 
taosMemoryFreeClear(pRequest->effectiveUser); diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index 75c1eabe7e..4aa78caa15 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -68,6 +68,10 @@ bool chkRequestKilled(void* param) { return killed; } +void cleanupAppInfo() { + taosHashCleanup(appInfo.pInstMap); +} + static int32_t taosConnectImpl(const char* user, const char* auth, const char* db, __taos_async_fn_t fp, void* param, SAppInstInfo* pAppInfo, int connType, STscObj** pTscObj); diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c index 0a5fb1a7b4..12702a93f3 100644 --- a/source/client/src/clientMain.c +++ b/source/client/src/clientMain.c @@ -67,7 +67,6 @@ void taos_cleanup(void) { fmFuncMgtDestroy(); qCleanupKeywordsTable(); - nodesDestroyAllocatorSet(); if (TSDB_CODE_SUCCESS != cleanupTaskQueue()) { tscWarn("failed to cleanup task queue"); @@ -85,6 +84,8 @@ void taos_cleanup(void) { tscWarn("failed to close clientReqRefPool"); } + nodesDestroyAllocatorSet(); + cleanupAppInfo(); rpcCleanup(); tscDebug("rpc cleanup"); diff --git a/tools/shell/src/shellEngine.c b/tools/shell/src/shellEngine.c index dbaf108e6a..0ccbd683dc 100644 --- a/tools/shell/src/shellEngine.c +++ b/tools/shell/src/shellEngine.c @@ -1170,6 +1170,7 @@ bool shellGetGrantInfo(char* buf) { code != TSDB_CODE_PAR_PERMISSION_DENIED) { fprintf(stderr, "Failed to check Server Edition, Reason:0x%04x:%s\r\n\r\n", code, taos_errstr(tres)); } + taos_free_result(tres); return community; } From 42c11e9e36424c60d88912ee4390680aca9f1053 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 2 Aug 2024 11:38:30 +0800 Subject: [PATCH 34/60] refactor: do some internal refactor. 
--- source/dnode/mnode/impl/src/mndStream.c | 145 +++++++++++++++--------- 1 file changed, 91 insertions(+), 54 deletions(-) diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index de10e991d3..25c94fda56 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -293,10 +293,13 @@ static int32_t createSchemaByFields(const SArray *pFields, SSchemaWrapper *pWrap return terrno; } - SNode *pNode; int32_t index = 0; for (int32_t i = 0; i < pWrapper->nCols; i++) { SField *pField = (SField *)taosArrayGet(pFields, i); + if (pField == NULL) { + return terrno; + } + if (TSDB_DATA_TYPE_NULL == pField->type) { pWrapper->pSchema[index].type = TSDB_DATA_TYPE_VARCHAR; pWrapper->pSchema[index].bytes = VARSTR_HEADER_SIZE; @@ -609,6 +612,8 @@ static int32_t mndCreateStbForStream(SMnode *pMnode, STrans *pTrans, const SStre // build fields for (int32_t i = 0; i < createReq.numOfColumns; i++) { SFieldWithOptions *pField = taosArrayGet(createReq.pColumns, i); + TSDB_CHECK_NULL(pField, code, lino, _OVER, terrno); + tstrncpy(pField->name, pStream->outputSchema.pSchema[i].name, TSDB_COL_NAME_LEN); pField->flags = pStream->outputSchema.pSchema[i].flags; pField->type = pStream->outputSchema.pSchema[i].type; @@ -623,6 +628,8 @@ static int32_t mndCreateStbForStream(SMnode *pMnode, STrans *pTrans, const SStre // build tags SField *pField = taosArrayGet(createReq.pTags, 0); + TSDB_CHECK_NULL(pField, code, lino, _OVER, terrno); + strcpy(pField->name, "group_id"); pField->type = TSDB_DATA_TYPE_UBIGINT; pField->flags = 0; @@ -634,6 +641,10 @@ static int32_t mndCreateStbForStream(SMnode *pMnode, STrans *pTrans, const SStre for (int32_t i = 0; i < createReq.numOfTags; i++) { SField *pField = taosArrayGet(createReq.pTags, i); + if (pField == NULL) { + continue; + } + pField->bytes = pStream->tagSchema.pSchema[i].bytes; pField->flags = pStream->tagSchema.pSchema[i].flags; pField->type = 
pStream->tagSchema.pSchema[i].type; @@ -920,10 +931,9 @@ int64_t mndStreamGenChkptId(SMnode *pMnode, bool lock) { } for (int32_t i = 0; i < taosArrayGetSize(execInfo.pTaskList); ++i) { - STaskId *p = taosArrayGet(execInfo.pTaskList, i); - + STaskId *p = taosArrayGet(execInfo.pTaskList, i); STaskStatusEntry *pEntry = taosHashGet(execInfo.pTaskMap, p, sizeof(*p)); - if (pEntry == NULL) { + if (p == NULL || pEntry == NULL) { continue; } @@ -967,8 +977,7 @@ static int32_t mndBuildStreamCheckpointSourceReq(void **pBuf, int32_t *pLen, int tEncodeSize(tEncodeStreamCheckpointSourceReq, &req, blen, code); if (code < 0) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - return -1; + TAOS_RETURN(TSDB_CODE_OUT_OF_MEMORY); } int32_t tlen = sizeof(SMsgHead) + blen; @@ -1002,18 +1011,20 @@ static int32_t doSetCheckpointAction(SMnode *pMnode, STrans *pTrans, SStreamTask int8_t mndTrigger) { void *buf; int32_t tlen; - if (mndBuildStreamCheckpointSourceReq(&buf, &tlen, pTask->info.nodeId, checkpointId, pTask->id.streamId, - pTask->id.taskId, pTrans->id, mndTrigger) < 0) { - taosMemoryFree(buf); - return -1; - } - + int32_t code = 0; SEpSet epset = {0}; bool hasEpset = false; - int32_t code = extractNodeEpset(pMnode, &epset, &hasEpset, pTask->id.taskId, pTask->info.nodeId); + + if ((code = mndBuildStreamCheckpointSourceReq(&buf, &tlen, pTask->info.nodeId, checkpointId, pTask->id.streamId, + pTask->id.taskId, pTrans->id, mndTrigger)) < 0) { + taosMemoryFree(buf); + return code; + } + + code = extractNodeEpset(pMnode, &epset, &hasEpset, pTask->id.taskId, pTask->info.nodeId); if (code != TSDB_CODE_SUCCESS || !hasEpset) { taosMemoryFree(buf); - return -1; + return code; } code = setTransAction(pTrans, buf, tlen, TDMT_VND_STREAM_CHECK_POINT_SOURCE, &epset, TSDB_CODE_SYN_PROPOSE_NOT_READY, @@ -1130,6 +1141,10 @@ static bool taskNodeIsUpdated(SMnode *pMnode) { for (int32_t i = 0; i < numOfNodes; ++i) { SNodeEntry *pNodeEntry = taosArrayGet(execInfo.pNodeList, i); + if (pNodeEntry == NULL) { + 
continue; + } + if (pNodeEntry->stageUpdated) { mDebug("stream task not ready due to node update detected, checkpoint not issued"); streamMutexUnlock(&execInfo.lock); @@ -1170,7 +1185,7 @@ static bool taskNodeIsUpdated(SMnode *pMnode) { static int32_t mndCheckTaskAndNodeStatus(SMnode *pMnode) { bool ready = true; if (taskNodeIsUpdated(pMnode)) { - return -1; + TAOS_RETURN(TSDB_CODE_STREAM_TASK_IVLD_STATUS); } streamMutexLock(&execInfo.lock); @@ -1240,7 +1255,7 @@ int64_t getStreamTaskLastReadyState(SArray *pTaskList, int64_t streamId) { for (int32_t i = 0; i < taosArrayGetSize(pTaskList); ++i) { STaskId *p = taosArrayGet(pTaskList, i); STaskStatusEntry *pEntry = taosHashGet(execInfo.pTaskMap, p, sizeof(*p)); - if (pEntry == NULL || pEntry->id.streamId != streamId) { + if (p == NULL || pEntry == NULL || pEntry->id.streamId != streamId) { continue; } @@ -1278,13 +1293,12 @@ static int32_t mndProcessStreamCheckpoint(SRpcMsg *pReq) { int32_t numOfCheckpointTrans = 0; if ((code = mndCheckTaskAndNodeStatus(pMnode)) != 0) { - terrno = TSDB_CODE_STREAM_TASK_IVLD_STATUS; - return -1; + TAOS_RETURN(TSDB_CODE_STREAM_TASK_IVLD_STATUS); } SArray *pList = taosArrayInit(4, sizeof(SCheckpointInterval)); if (pList == NULL) { - return -1; + return terrno; } int64_t now = taosGetTimestampMs(); @@ -1353,6 +1367,9 @@ static int32_t mndProcessStreamCheckpoint(SRpcMsg *pReq) { for (int32_t i = 0; i < numOfQual; ++i) { SCheckpointInterval *pCheckpointInfo = taosArrayGet(pList, i); + if (pCheckpointInfo == NULL) { + continue; + } SStreamObj *p = NULL; code = mndGetStreamObj(pMnode, pCheckpointInfo->streamId, &p); @@ -1521,8 +1538,9 @@ static int32_t mndProcessDropStreamReq(SRpcMsg *pReq) { } int32_t mndDropStreamByDb(SMnode *pMnode, STrans *pTrans, SDbObj *pDb) { - SSdb *pSdb = pMnode->pSdb; - void *pIter = NULL; + SSdb *pSdb = pMnode->pSdb; + void *pIter = NULL; + int32_t code = 0; while (1) { SStreamObj *pStream = NULL; @@ -1535,18 +1553,8 @@ int32_t mndDropStreamByDb(SMnode *pMnode, 
STrans *pTrans, SDbObj *pDb) { sdbCancelFetch(pSdb, pIter); mError("db:%s, failed to drop stream:%s since sourceDbUid:%" PRId64 " not match with targetDbUid:%" PRId64, pDb->name, pStream->name, pStream->sourceDbUid, pStream->targetDbUid); - terrno = TSDB_CODE_MND_STREAM_MUST_BE_DELETED; - return -1; + TAOS_RETURN(TSDB_CODE_MND_STREAM_MUST_BE_DELETED); } else { -#if 0 - if (mndStreamSetDropAction(pMnode, pTrans, pStream) < 0) { - mError("stream:%s, failed to drop task since %s", pStream->name, terrstr()); - sdbRelease(pMnode->pSdb, pStream); - sdbCancelFetch(pSdb, pIter); - return -1; - } -#endif - // kill the related checkpoint trans int32_t transId = mndStreamGetRelTrans(pMnode, pStream->uid); if (transId != 0) { @@ -1557,10 +1565,11 @@ int32_t mndDropStreamByDb(SMnode *pMnode, STrans *pTrans, SDbObj *pDb) { // drop the stream obj in execInfo removeStreamTasksInBuf(pStream, &execInfo); - if (mndPersistTransLog(pStream, pTrans, SDB_STATUS_DROPPED) < 0) { + code = mndPersistTransLog(pStream, pTrans, SDB_STATUS_DROPPED); + if (code != TSDB_CODE_SUCCESS && code != TSDB_CODE_ACTION_IN_PROGRESS) { sdbRelease(pSdb, pStream); sdbCancelFetch(pSdb, pIter); - return -1; + return code; } } } @@ -1575,8 +1584,7 @@ int32_t mndGetNumOfStreams(SMnode *pMnode, char *dbName, int32_t *pNumOfStreams) SSdb *pSdb = pMnode->pSdb; SDbObj *pDb = mndAcquireDb(pMnode, dbName); if (pDb == NULL) { - terrno = TSDB_CODE_MND_DB_NOT_SELECTED; - return -1; + TAOS_RETURN(TSDB_CODE_MND_DB_NOT_SELECTED); } int32_t numOfStreams = 0; @@ -1704,8 +1712,7 @@ static int32_t mndProcessPauseStreamReq(SRpcMsg *pReq) { SMPauseStreamReq pauseReq = {0}; if (tDeserializeSMPauseStreamReq(pReq->pCont, pReq->contLen, &pauseReq) < 0) { - terrno = TSDB_CODE_INVALID_MSG; - return -1; + TAOS_RETURN(TSDB_CODE_INVALID_MSG); } code = mndAcquireStream(pMnode, pauseReq.name, &pStream); @@ -1715,8 +1722,7 @@ static int32_t mndProcessPauseStreamReq(SRpcMsg *pReq) { return 0; } else { mError("stream:%s not exist, failed to 
pause stream", pauseReq.name); - terrno = TSDB_CODE_MND_STREAM_NOT_EXIST; - return -1; + TAOS_RETURN(TSDB_CODE_MND_STREAM_NOT_EXIST); } } @@ -1736,14 +1742,14 @@ static int32_t mndProcessPauseStreamReq(SRpcMsg *pReq) { bool conflict = mndStreamTransConflictCheck(pMnode, pStream->uid, MND_STREAM_PAUSE_NAME, true); if (conflict) { sdbRelease(pMnode->pSdb, pStream); - return -1; + TAOS_RETURN(TSDB_CODE_MND_TRANS_CONFLICT); } bool updated = taskNodeIsUpdated(pMnode); if (updated) { mError("tasks are not ready for pause, node update detected"); sdbRelease(pMnode->pSdb, pStream); - return -1; + TAOS_RETURN(TSDB_CODE_STREAM_TASK_IVLD_STATUS); } { // check for tasks, if tasks are not ready, not allowed to pause @@ -1753,6 +1759,9 @@ static int32_t mndProcessPauseStreamReq(SRpcMsg *pReq) { for (int32_t i = 0; i < taosArrayGetSize(execInfo.pTaskList); ++i) { STaskId *p = taosArrayGet(execInfo.pTaskList, i); + if (p == NULL) { + continue; + } STaskStatusEntry *pEntry = taosHashGet(execInfo.pTaskMap, p, sizeof(*p)); if (pEntry == NULL) { @@ -1776,13 +1785,13 @@ static int32_t mndProcessPauseStreamReq(SRpcMsg *pReq) { if (!found) { mError("stream:%s task not report status yet, not ready for pause", pauseReq.name); sdbRelease(pMnode->pSdb, pStream); - return -1; + TAOS_RETURN(TSDB_CODE_STREAM_TASK_IVLD_STATUS); } if (!readyToPause) { mError("stream:%s task not ready for pause yet", pauseReq.name); sdbRelease(pMnode->pSdb, pStream); - return -1; + TAOS_RETURN(TSDB_CODE_STREAM_TASK_IVLD_STATUS); } } @@ -1843,13 +1852,12 @@ static int32_t mndProcessResumeStreamReq(SRpcMsg *pReq) { int32_t code = 0; if ((terrno = grantCheckExpire(TSDB_GRANT_STREAMS)) < 0) { - return -1; + return terrno; } SMResumeStreamReq resumeReq = {0}; if (tDeserializeSMResumeStreamReq(pReq->pCont, pReq->contLen, &resumeReq) < 0) { - terrno = TSDB_CODE_INVALID_MSG; - return -1; + TAOS_RETURN(TSDB_CODE_INVALID_MSG); } code = mndAcquireStream(pMnode, resumeReq.name, &pStream); @@ -1860,8 +1868,7 @@ static int32_t 
mndProcessResumeStreamReq(SRpcMsg *pReq) { return 0; } else { mError("stream:%s not exist, failed to resume stream", resumeReq.name); - terrno = TSDB_CODE_MND_STREAM_NOT_EXIST; - return -1; + TAOS_RETURN(TSDB_CODE_MND_STREAM_NOT_EXIST); } } @@ -1956,10 +1963,16 @@ static SVgroupChangeInfo mndFindChangedNodeInfo(SMnode *pMnode, const SArray *pP int32_t numOfNodes = taosArrayGetSize(pPrevNodeList); for (int32_t i = 0; i < numOfNodes; ++i) { SNodeEntry *pPrevEntry = taosArrayGet(pPrevNodeList, i); + if (pPrevEntry == NULL) { + continue; + } int32_t num = taosArrayGetSize(pNodeList); for (int32_t j = 0; j < num; ++j) { SNodeEntry *pCurrent = taosArrayGet(pNodeList, j); + if(pCurrent == NULL) { + continue; + } if (pCurrent->nodeId == pPrevEntry->nodeId) { if (pPrevEntry->stageUpdated || isNodeEpsetChanged(&pPrevEntry->epset, &pCurrent->epset)) { @@ -2302,7 +2315,7 @@ void saveTaskAndNodeInfoIntoBuf(SStreamObj *pStream, SStreamExecInfo *pExecNode) bool exist = false; for (int32_t j = 0; j < taosArrayGetSize(pExecNode->pNodeList); ++j) { SNodeEntry *pEntry = taosArrayGet(pExecNode->pNodeList, j); - if (pEntry->nodeId == pTask->info.nodeId) { + if ((pEntry != NULL) && (pEntry->nodeId == pTask->info.nodeId)) { exist = true; break; } @@ -2329,14 +2342,17 @@ static void doAddTaskId(SArray *pList, int32_t taskId, int64_t uid, int32_t numO int32_t num = taosArrayGetSize(pList); for (int32_t i = 0; i < num; ++i) { int32_t *pId = taosArrayGet(pList, i); + if (pId == NULL) { + continue; + } + if (taskId == *pId) { return; } } - void* p = taosArrayPush(pList, &taskId); int32_t numOfTasks = taosArrayGetSize(pList); - + void *p = taosArrayPush(pList, &taskId); if (p) { mDebug("stream:0x%" PRIx64 " receive %d reqs for checkpoint, remain:%d", uid, numOfTasks, numOfTotal - numOfTasks); } else { @@ -2445,6 +2461,10 @@ static void doAddReportStreamTask(SArray* pList, const SCheckpointReport* pRepor bool existed = false; for (int32_t i = 0; i < taosArrayGetSize(pList); ++i) { 
STaskChkptInfo *p = taosArrayGet(pList, i); + if (p == NULL) { + continue; + } + if (p->taskId == pReport->taskId) { existed = true; break; @@ -2554,6 +2574,10 @@ static int64_t getConsensusId(int64_t streamId, int32_t numOfTasks, int32_t* pEx for(int32_t i = 0; i < taosArrayGetSize(execInfo.pTaskList); ++i) { STaskId* p = taosArrayGet(execInfo.pTaskList, i); + if (p == NULL) { + continue; + } + if (p->streamId != streamId) { continue; } @@ -2634,6 +2658,10 @@ int32_t mndProcessConsensusInTmr(SRpcMsg *pMsg) { for (int32_t j = 0; j < num; ++j) { SCheckpointConsensusEntry *pe = taosArrayGet(pInfo->pTaskList, j); + if (pe == NULL) { + continue; + } + streamId = pe->req.streamId; int32_t existed = 0; @@ -2670,9 +2698,13 @@ int32_t mndProcessConsensusInTmr(SRpcMsg *pMsg) { if (taosArrayGetSize(pList) > 0) { for (int32_t i = 0; i < taosArrayGetSize(pList); ++i) { int32_t *taskId = taosArrayGet(pList, i); + if (taskId == NULL) { + continue; + } + for (int32_t k = 0; k < taosArrayGetSize(pInfo->pTaskList); ++k) { SCheckpointConsensusEntry *pe = taosArrayGet(pInfo->pTaskList, k); - if (pe->req.taskId == *taskId) { + if ((pe != NULL) && (pe->req.taskId == *taskId)) { taosArrayRemove(pInfo->pTaskList, k); break; } @@ -2694,6 +2726,10 @@ int32_t mndProcessConsensusInTmr(SRpcMsg *pMsg) { for (int32_t i = 0; i < taosArrayGetSize(pStreamList); ++i) { int64_t *pStreamId = (int64_t *)taosArrayGet(pStreamList, i); + if (pStreamId == NULL) { + continue; + } + code = mndClearConsensusCheckpointId(execInfo.pStreamConsensus, *pStreamId); } @@ -2828,7 +2864,7 @@ static int32_t mndProcessDropOrphanTaskReq(SRpcMsg *pReq) { // check if it is conflict with other trans in both sourceDb and targetDb. 
bool conflict = mndStreamTransConflictCheck(pMnode, pTask->streamId, MND_STREAM_DROP_NAME, false); if (conflict) { - return -1; + TAOS_RETURN(TSDB_CODE_MND_TRANS_CONFLICT); } SStreamObj dummyObj = {.uid = pTask->streamId, .sourceDb = "", .targetSTbName = ""}; @@ -2841,6 +2877,7 @@ static int32_t mndProcessDropOrphanTaskReq(SRpcMsg *pReq) { code = mndStreamRegisterTrans(pTrans, MND_STREAM_DROP_NAME, pTask->streamId); if (code) { + mndTransDrop(pTrans); return code; } From 079f6358aa38cf14ce9d0bbd7c4aa670c9278aca Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 2 Aug 2024 11:49:21 +0800 Subject: [PATCH 35/60] refactor: do some internal refactor. --- source/dnode/mnode/impl/src/mndStream.c | 7 ++----- source/dnode/mnode/impl/src/mndStreamTrans.c | 13 ++++++------- 2 files changed, 8 insertions(+), 12 deletions(-) diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index 25c94fda56..ccf22cfe85 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -749,6 +749,7 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) { int32_t sqlLen = 0; const char *pMsg = "create stream tasks on dnodes"; int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; terrno = TSDB_CODE_SUCCESS; SCMCreateStreamReq createReq = {0}; @@ -788,11 +789,7 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) { if (createReq.sql != NULL) { sqlLen = strlen(createReq.sql); sql = taosMemoryMalloc(sqlLen + 1); - if (sql == NULL) { - code = terrno; - goto _OVER; - } - + TSDB_CHECK_NULL(sql, code, lino, _OVER, terrno); memset(sql, 0, sqlLen + 1); memcpy(sql, createReq.sql, sqlLen); } diff --git a/source/dnode/mnode/impl/src/mndStreamTrans.c b/source/dnode/mnode/impl/src/mndStreamTrans.c index 54f4189194..40f3d34055 100644 --- a/source/dnode/mnode/impl/src/mndStreamTrans.c +++ b/source/dnode/mnode/impl/src/mndStreamTrans.c @@ -167,29 +167,28 @@ int32_t mndStreamGetRelTrans(SMnode *pMnode, int64_t streamId) { 
} int32_t doCreateTrans(SMnode *pMnode, SStreamObj *pStream, SRpcMsg *pReq, ETrnConflct conflict, const char *name, - const char *pMsg, STrans ** pTrans1) { + const char *pMsg, STrans **pTrans1) { *pTrans1 = NULL; terrno = 0; + int32_t code = 0; STrans *p = mndTransCreate(pMnode, TRN_POLICY_RETRY, conflict, pReq, name); if (p == NULL) { - mError("failed to build trans:%s, reason: %s", name, tstrerror(TSDB_CODE_OUT_OF_MEMORY)); - terrno = TSDB_CODE_OUT_OF_MEMORY; + mError("failed to build trans:%s, reason: %s", name, tstrerror(terrno)); return terrno; } mInfo("stream:0x%" PRIx64 " start to build trans %s, transId:%d", pStream->uid, pMsg, p->id); mndTransSetDbName(p, pStream->sourceDb, pStream->targetSTbName); - if (mndTransCheckConflict(pMnode, p) != 0) { - terrno = TSDB_CODE_MND_TRANS_CONFLICT; + if ((code = mndTransCheckConflict(pMnode, p)) != 0) { mError("failed to build trans:%s for stream:0x%" PRIx64 " code:%s", name, pStream->uid, tstrerror(terrno)); mndTransDrop(p); - return terrno; + return code; } *pTrans1 = p; - return 0; + return code; } SSdbRaw *mndStreamActionEncode(SStreamObj *pStream) { From 618c2a42e6dbda1d6003b31fc6e939cd839f00af Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Fri, 2 Aug 2024 12:32:32 +0800 Subject: [PATCH 36/60] fix(cmake/s3): switch s3 off with community version --- cmake/cmake.options | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/cmake/cmake.options b/cmake/cmake.options index fc17ddecf6..2158157780 100644 --- a/cmake/cmake.options +++ b/cmake/cmake.options @@ -146,6 +146,13 @@ option( ENDIF () +IF(NOT TD_ENTERPRISE) +MESSAGE("switch s3 off with community version") +set(BUILD_S3 OFF) +set(BUILD_WITH_S3 OFF) +set(BUILD_WITH_COS OFF) +ENDIF () + IF(${BUILD_S3}) IF(${BUILD_WITH_S3}) From 361d3634ffa85d2c5e5c6ad73568bc6278e3bdd1 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Fri, 2 Aug 2024 13:21:46 +0800 Subject: [PATCH 37/60] test: scan returned values in ci --- source/libs/parser/src/parAstCreater.c | 1 + 
source/libs/parser/src/parAstParser.c | 2 +- tests/ci/scan.py | 106 ++++++++++++++++++++++ tests/ci/scan_file_path.py | 9 +- tests/parallel_test/run_scan_container.sh | 22 +++-- 5 files changed, 128 insertions(+), 12 deletions(-) create mode 100644 tests/ci/scan.py diff --git a/source/libs/parser/src/parAstCreater.c b/source/libs/parser/src/parAstCreater.c index cd7cda01e0..f8b3cebaa9 100644 --- a/source/libs/parser/src/parAstCreater.c +++ b/source/libs/parser/src/parAstCreater.c @@ -207,6 +207,7 @@ static bool checkIndexName(SAstCreateContext* pCxt, SToken* pIndexName) { return false; } return true; + } static bool checkTopicName(SAstCreateContext* pCxt, SToken* pTopicName) { diff --git a/source/libs/parser/src/parAstParser.c b/source/libs/parser/src/parAstParser.c index 49be0b8d90..f0ecc14588 100644 --- a/source/libs/parser/src/parAstParser.c +++ b/source/libs/parser/src/parAstParser.c @@ -725,7 +725,7 @@ static int32_t collectMetaKeyFromShowCreateView(SCollectMetaKeyCxt* pCxt, SShowC strcpy(name.dbname, pStmt->dbName); strcpy(name.tname, pStmt->viewName); char dbFName[TSDB_DB_FNAME_LEN]; - (void)tNameGetFullDbName(&name, dbFName); + tNameGetFullDbName(&name, dbFName); int32_t code = catalogRemoveViewMeta(pCxt->pParseCxt->pCatalog, dbFName, 0, pStmt->viewName, 0); if (TSDB_CODE_SUCCESS == code) { code = reserveViewUserAuthInCache(pCxt->pParseCxt->acctId, pCxt->pParseCxt->pUser, pStmt->dbName, pStmt->viewName, diff --git a/tests/ci/scan.py b/tests/ci/scan.py new file mode 100644 index 0000000000..c542fdf448 --- /dev/null +++ b/tests/ci/scan.py @@ -0,0 +1,106 @@ +import os +import subprocess +import csv +from datetime import datetime +from loguru import logger + +# log file path +log_file_path = "/root/charles/scan.log" +logger.add(log_file_path, rotation="10MB", retention="7 days", level="DEBUG") +# scan result base path +scan_result_base_path = "/root/charles/clang_scan_result/" +# the base source code file path +source_path = "/root/charles/TDinternal/" +# the 
compile commands json file path +compile_commands_path = "/root/charles/TDinternal/debug/compile_commands.json" +# the ast parser rule for c file +clang_scan_rules_path = "/root/charles/clang_scan_rules" +# all the c files path will be checked +all_file_path = [] + +class CommandExecutor: + def __init__(self): + self._process = None + + def execute(self, command, timeout=None): + try: + self._process = subprocess.Popen(command, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + stdout, stderr = self._process.communicate(timeout=timeout) + return stdout.decode('utf-8'), stderr.decode('utf-8') + except subprocess.TimeoutExpired: + self._process.kill() + self._process.communicate() + raise Exception("Command execution timeout") + except Exception as e: + raise Exception("Command execution failed: %s" % e) + +def scan_files_path(source_file_path): + # scan_dir_list = ["source", "include", "docs/examples", "tests/script/api", "src/plugins"] + scan_dir_list = ["source", "include", "docs/examples", "src/plugins"] + scan_skip_file_list = ["/root/charles/TDinternal/community/tools/taosws-rs/target/release/build/openssl-sys-7811e597b848e397/out/openssl-build/install/include/openssl", + "/test/", "contrib", "debug", "deps", "/root/charles/TDinternal/community/source/libs/parser/src/sql.c", "/root/charles/TDinternal/community/source/client/jni/windows/win32/bridge/AccessBridgeCalls.c"] + for root, dirs, files in os.walk(source_file_path): + for file in files: + if any(item in root for item in scan_dir_list): + file_path = os.path.join(root, file) + if (file_path.endswith(".c") or file_path.endswith(".h") or file_path.endswith(".cpp")) and all(item not in file_path for item in scan_skip_file_list): + all_file_path.append(file_path) + logger.info("Found %s files" % len(all_file_path)) + +def save_scan_res(res_base_path, file_path, out, err): + file_res_path = os.path.join(res_base_path, file_path.replace("/root/charles/", "").split(".")[0] + ".res") + if not 
os.path.exists(os.path.dirname(file_res_path)): + os.makedirs(os.path.dirname(file_res_path)) + logger.info("Save scan result to: %s" % file_res_path) + # save scan result + with open(file_res_path, "w") as f: + f.write(out) + f.write(err) + +def write_csv(file_path, data): + try: + with open(file_path, 'w') as f: + writer = csv.writer(f) + writer.writerows(data) + except Exception as ex: + raise Exception("Failed to write the csv file: {} with msg: {}".format(file_path, repr(ex))) + +if __name__ == "__main__": + command_executor = CommandExecutor() + # get all the c files path + scan_files_path(source_path) + res = [] + # create dir + current_time = datetime.now().strftime("%Y%m%d%H%M%S") + scan_result_path = os.path.join(scan_result_base_path, current_time) + if not os.path.exists(scan_result_path): + os.makedirs(scan_result_path) + for file in all_file_path: + cmd = "clang-query -p %s %s -f %s" % (compile_commands_path, file, clang_scan_rules_path) + try: + stdout, stderr = command_executor.execute(cmd) + lines = stdout.split("\n") + if lines[-2].endswith("matches.") or lines[-2].endswith("match."): + match_num = int(lines[-2].split(" ")[0]) + logger.info("The match lines of file %s: %s" % (file, match_num)) + if match_num > 0: + save_scan_res(scan_result_path, file, stdout, stderr) + res.append([file, match_num, 'Pass' if match_num == 0 else 'Fail']) + else: + logger.warning("The result of scan is invalid for: %s" % file) + except Exception as e: + logger.error("Execute command failed: %s" % e) + # data = "" + # for item in res: + # data += item[0] + "," + str(item[1]) + "\n" + # logger.info("Csv data: %s" % data) + write_csv(os.path.join(scan_result_path, "scan_res.csv"), res) + logger.info("The result of scan: \n") + logger.info("Total files: %s" % len(res)) + logger.info("Total match lines: %s" % sum([item[1] for item in res])) + logger.info("Pass files: %s" % len([item for item in res if item[2] == 'Pass'])) + logger.info("Fail files: %s" % len([item for 
item in res if item[2] == 'Fail'])) + diff --git a/tests/ci/scan_file_path.py b/tests/ci/scan_file_path.py index 6058cbb72f..0e47387f67 100644 --- a/tests/ci/scan_file_path.py +++ b/tests/ci/scan_file_path.py @@ -57,7 +57,9 @@ else: # log file path -log_file_path = f"{work_path}/{branch_name}/" +current_time = datetime.now().strftime("%Y%m%d-%H%M%S") +log_file_path = f"{work_path}/scan_log/scan_{branch_name}_{current_time}/" + os.makedirs(log_file_path, exist_ok=True) scan_log_file = f"{log_file_path}/scan.log" @@ -175,8 +177,9 @@ if __name__ == "__main__": web_path = [] res.append(["scan_source_file", "scan_result_file", "match_num", "check_result"]) # create dir - current_time = datetime.now().strftime("%Y%m%d%H%M%S") - scan_result_path = os.path.join(scan_result_base_path, current_time) + # current_time = datetime.now().strftime("%Y%m%d%H%M%S") + # scan_result_path = os.path.join(scan_result_base_path, current_time) + scan_result_path = scan_result_base_path if not os.path.exists(scan_result_path): os.makedirs(scan_result_path) for file in all_file_path: diff --git a/tests/parallel_test/run_scan_container.sh b/tests/parallel_test/run_scan_container.sh index 078757a57a..3d6d1d7c30 100755 --- a/tests/parallel_test/run_scan_container.sh +++ b/tests/parallel_test/run_scan_container.sh @@ -62,28 +62,34 @@ REP_MOUNT_PARAM="$INTERNAL_REPDIR:/home/TDinternal" CONTAINER_TESTDIR=/home/TDinternal/community -#scan file log path -scan_temp="$WORKDIR/log/${branch_name_id}/" -docker_scan_temp="/home/${branch_name_id}/" -mkdir -p $scan_temp -mkdir -p $docker_scan_temp +#scan change file path +scan_changefile_temp_path="$WORKDIR/tmp/${branch_name_id}/" +docker_can_changefile_temp_path="/home/tmp/${branch_name_id}/" +mkdir -p $scan_changefile_temp_path +scan_file_name="$docker_can_changefile_temp_path/docs_changed.txt" + +#scan log file path +scan_log_temp_path="$WORKDIR/scan_log/" +docker_scan_log_temp_path="/home/scan_log/" +mkdir -p $scan_log_temp_path 
scan_scripts="$CONTAINER_TESTDIR/tests/ci/scan_file_path.py" -scan_file_name="$docker_scan_temp/docs_changed.txt" ulimit -c unlimited cat << EOF docker run \ -v $REP_MOUNT_PARAM \ -v $REP_MOUNT_DEBUG \ - -v $scan_temp:$docker_scan_temp \ + -v $scan_changefile_temp_path:$docker_can_changefile_temp_path \ + -v $scan_log_temp_path:$docker_scan_log_temp_path \ --rm --ulimit core=-1 taos_test:v1.0 python3 $scan_scripts -b "${branch_name_id}" -f "${scan_file_name}" -w ${web_server} EOF docker run \ -v $REP_MOUNT_PARAM \ -v $REP_MOUNT_DEBUG \ - -v $scan_temp:$docker_scan_temp \ + -v $scan_changefile_temp_path:$docker_can_changefile_temp_path \ + -v $scan_log_temp_path:$docker_scan_log_temp_path \ --rm --ulimit core=-1 taos_test:v1.0 python3 $scan_scripts -b "${branch_name_id}" -f "${scan_file_name}" -w ${web_server} From 156b3d992a4acf6dd47d1c4847346ef1ccd2b937 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Fri, 2 Aug 2024 13:22:10 +0800 Subject: [PATCH 38/60] test: scan returned values in ci --- Jenkinsfile2 | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Jenkinsfile2 b/Jenkinsfile2 index 8d4652943d..de96a23d51 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -419,8 +419,8 @@ pipeline { pre_test() script { sh ''' - mkdir -p ${WK}/../log/${BRANCH_NAME}_${BUILD_ID} - echo "''' + env.FILE_CHANGED + '''" > ${WK}/../log/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt + mkdir -p ${WKDIR}/tmp/${BRANCH_NAME}_${BUILD_ID} + echo "''' + env.FILE_CHANGED + '''" > ${WKDIR}/tmp/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt ''' sh ''' date @@ -455,7 +455,7 @@ pipeline { } sh ''' cd ${WKC}/tests/parallel_test - ./run_scan_container.sh -d ${WKDIR} -b ${BRANCH_NAME}_${BUILD_ID} -f ${WK}/../log/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt ''' + extra_param + ''' + ./run_scan_container.sh -d ${WKDIR} -b ${BRANCH_NAME}_${BUILD_ID} -f ${WKDIR}/tmp/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt ''' + extra_param + ''' ''' sh ''' cd ${WKC}/tests/parallel_test From 
857d9e825b5e9962a25b90838057debcd0583a68 Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Fri, 2 Aug 2024 13:29:47 +0800 Subject: [PATCH 39/60] enh: add rand error test --- cmake/cmake.define | 6 ++++ include/os/os.h | 3 ++ source/common/src/tglobal.c | 5 +++ source/libs/qworker/src/qwUtil.c | 2 ++ source/libs/qworker/src/qworker.c | 8 +++++ source/os/CMakeLists.txt | 3 ++ source/os/src/osMemory.c | 54 +++++++++++++++++++++++++++++++ 7 files changed, 81 insertions(+) diff --git a/cmake/cmake.define b/cmake/cmake.define index f1a5cef67e..eb78b54cae 100644 --- a/cmake/cmake.define +++ b/cmake/cmake.define @@ -16,6 +16,12 @@ if (NOT DEFINED TD_GRANT) SET(TD_GRANT FALSE) endif() +IF (NOT DEFINED BUILD_WITH_RAND_ERR) + SET(BUILD_WITH_RAND_ERR FALSE) +ELSE () + SET(BUILD_WITH_RAND_ERR TRUE) +endif() + IF ("${WEBSOCKET}" MATCHES "true") SET(TD_WEBSOCKET TRUE) MESSAGE("Enable websocket") diff --git a/include/os/os.h b/include/os/os.h index 1749687d97..e71c76bdb3 100644 --- a/include/os/os.h +++ b/include/os/os.h @@ -124,6 +124,9 @@ extern "C" { #include "taoserror.h" #include "tlog.h" +extern int32_t tsRandErrChance; +extern threadlocal bool tsEnableRandErr; + #ifdef __cplusplus } #endif diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 46219fe34c..c3101d574a 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -319,6 +319,7 @@ int32_t tsMaxTsmaNum = 3; int32_t tsMaxTsmaCalcDelay = 600; int64_t tsmaDataDeleteMark = 1000 * 60 * 60 * 24; // in ms, default to 1d + #define TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, pName) \ if ((pItem = cfgGetItem(pCfg, pName)) == NULL) { \ TAOS_RETURN(TSDB_CODE_CFG_NOT_FOUND); \ @@ -732,6 +733,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "transPullupInterval", tsTransPullupInterval, 1, 10000, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "compactPullupInterval", tsCompactPullupInterval, 1, 10000, CFG_SCOPE_SERVER, 
CFG_DYN_ENT_SERVER)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "mqRebalanceInterval", tsMqRebalanceInterval, 1, 10000, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER)); + TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "randErrorChance", tsRandErrChance, 0, 10000, CFG_SCOPE_BOTH, CFG_DYN_NONE)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "ttlUnit", tsTtlUnit, 1, 86400 * 365, CFG_SCOPE_SERVER, CFG_DYN_NONE)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "ttlPushInterval", tsTtlPushIntervalSec, 1, 100000, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER)); @@ -1408,6 +1410,9 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "mqRebalanceInterval"); tsMqRebalanceInterval = pItem->i32; + TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "randErrorChance"); + tsRandErrChance = pItem->i32; + TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "ttlUnit"); tsTtlUnit = pItem->i32; diff --git a/source/libs/qworker/src/qwUtil.c b/source/libs/qworker/src/qwUtil.c index 0b404ec176..441714313c 100644 --- a/source/libs/qworker/src/qwUtil.c +++ b/source/libs/qworker/src/qwUtil.c @@ -276,7 +276,9 @@ void qwFreeTaskHandle(qTaskInfo_t *taskHandle) { // Note: free/kill may in RC qTaskInfo_t otaskHandle = atomic_load_ptr(taskHandle); if (otaskHandle && atomic_val_compare_exchange_ptr(taskHandle, otaskHandle, NULL)) { + tsEnableRandErr = true; qDestroyTask(otaskHandle); + tsEnableRandErr = false; qDebug("task handle destroyed"); } } diff --git a/source/libs/qworker/src/qworker.c b/source/libs/qworker/src/qworker.c index 6e684545f7..1311179e92 100644 --- a/source/libs/qworker/src/qworker.c +++ b/source/libs/qworker/src/qworker.c @@ -161,7 +161,9 @@ int32_t qwExecTask(QW_FPARAMS_DEF, SQWTaskCtx *ctx, bool *queryStop) { if (taskHandle) { qwDbgSimulateSleep(); + tsEnableRandErr = true; code = qExecTaskOpt(taskHandle, pResList, &useconds, &hasMore, &localFetch); + tsEnableRandErr = false; if (code) { if (code != TSDB_CODE_OPS_NOT_SUPPORT) { QW_TASK_ELOG("qExecTask failed, code:%x - %s", code, tstrerror(code)); @@ -768,8 +770,11 @@ 
int32_t qwProcessQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg, char *sql) { QW_ERR_JRET(code); } + tsEnableRandErr = true; code = qCreateExecTask(qwMsg->node, mgmt->nodeId, tId, plan, &pTaskInfo, &sinkHandle, qwMsg->msgInfo.compressMsg, sql, OPTR_EXEC_MODEL_BATCH); + tsEnableRandErr = false; + sql = NULL; if (code) { QW_TASK_ELOG("qCreateExecTask failed, code:%x - %s", code, tstrerror(code)); @@ -1266,7 +1271,10 @@ int32_t qwProcessDelete(QW_FPARAMS_DEF, SQWMsg *qwMsg, SDeleteRes *pRes) { QW_ERR_JRET(code); } + tsEnableRandErr = true; code = qCreateExecTask(qwMsg->node, mgmt->nodeId, tId, plan, &pTaskInfo, &sinkHandle, 0, NULL, OPTR_EXEC_MODEL_BATCH); + tsEnableRandErr = false; + if (code) { QW_TASK_ELOG("qCreateExecTask failed, code:%x - %s", code, tstrerror(code)); QW_ERR_JRET(code); diff --git a/source/os/CMakeLists.txt b/source/os/CMakeLists.txt index 6438ce7ed0..32609301a9 100644 --- a/source/os/CMakeLists.txt +++ b/source/os/CMakeLists.txt @@ -21,6 +21,9 @@ endif () if(USE_TD_MEMORY) add_definitions(-DUSE_TD_MEMORY) endif () +if(BUILD_WITH_RAND_ERR) + add_definitions(-DBUILD_WITH_RAND_ERR) +endif () if(BUILD_ADDR2LINE) if(NOT TD_WINDOWS) target_include_directories( diff --git a/source/os/src/osMemory.c b/source/os/src/osMemory.c index 761815aa2c..6d29fdeccd 100644 --- a/source/os/src/osMemory.c +++ b/source/os/src/osMemory.c @@ -21,6 +21,10 @@ #endif #include "os.h" +int32_t tsRandErrChance = 1; +threadlocal bool tsEnableRandErr = 0; + + #if defined(USE_TD_MEMORY) || defined(USE_ADDR2LINE) #define TD_MEMORY_SYMBOL ('T' << 24 | 'A' << 16 | 'O' << 8 | 'S') @@ -266,6 +270,16 @@ void *taosMemoryMalloc(int64_t size) { return (char *)tmp + sizeof(TdMemoryInfo); #else + +#ifdef BUILD_WITH_RAND_ERR + if (tsEnableRandErr) { + uint32_t r = taosRand() % 10001; + if ((r + 1) <= tsRandErrChance) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + return NULL; + } + } +#endif void *p = malloc(size); if (NULL == p) { terrno = TSDB_CODE_OUT_OF_MEMORY; @@ -287,6 +301,16 @@ void 
*taosMemoryCalloc(int64_t num, int64_t size) { return (char *)tmp + sizeof(TdMemoryInfo); #else +#ifdef BUILD_WITH_RAND_ERR + if (tsEnableRandErr) { + uint32_t r = taosRand() % 10001; + if ((r + 1) <= tsRandErrChance) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + return NULL; + } + } +#endif + void *p = calloc(num, size); if (NULL == p) { terrno = TSDB_CODE_OUT_OF_MEMORY; @@ -317,6 +341,16 @@ void *taosMemoryRealloc(void *ptr, int64_t size) { return (char *)tmp + sizeof(TdMemoryInfo); #else +#ifdef BUILD_WITH_RAND_ERR + if (tsEnableRandErr) { + uint32_t r = taosRand() % 10001; + if ((r + 1) <= tsRandErrChance) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + return NULL; + } + } +#endif + void *p = realloc(ptr, size); if (size > 0 && NULL == p) { terrno = TSDB_CODE_OUT_OF_MEMORY; @@ -342,6 +376,16 @@ char *taosStrdup(const char *ptr) { return (char *)tmp + sizeof(TdMemoryInfo); #else +#ifdef BUILD_WITH_RAND_ERR + if (tsEnableRandErr) { + uint32_t r = taosRand() % 10001; + if ((r + 1) <= tsRandErrChance) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + return NULL; + } + } +#endif + return tstrdup(ptr); #endif } @@ -398,6 +442,16 @@ void *taosMemoryMallocAlign(uint32_t alignment, int64_t size) { ASSERT(0); #else #if defined(LINUX) +#ifdef BUILD_WITH_RAND_ERR + if (tsEnableRandErr) { + uint32_t r = taosRand() % 10001; + if ((r + 1) <= tsRandErrChance) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + return NULL; + } + } +#endif + void *p = memalign(alignment, size); if (NULL == p) { if (ENOMEM == errno) { From 9feacd983fffd5dae64c8b288c1d12f5a28fef84 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 2 Aug 2024 13:46:24 +0800 Subject: [PATCH 40/60] refactor: do some internal refactor. 
--- source/dnode/vnode/src/tq/tqSink.c | 30 ++++++++++++++++++++---------- 1 file changed, 20 insertions(+), 10 deletions(-) diff --git a/source/dnode/vnode/src/tq/tqSink.c b/source/dnode/vnode/src/tq/tqSink.c index 3b375f7f82..bd6db483f0 100644 --- a/source/dnode/vnode/src/tq/tqSink.c +++ b/source/dnode/vnode/src/tq/tqSink.c @@ -106,10 +106,14 @@ int32_t tqBuildDeleteReq(STQ* pTq, const char* stbFullName, const SSDataBlock* p strncpy(req.tbname, name, TSDB_TABLE_NAME_LEN - 1); void* p = taosArrayPush(deleteReq->deleteReqs, &req); if (p == NULL) { - return TSDB_CODE_OUT_OF_MEMORY; + return terrno; } } - if (originName) name = originName; + + if (originName) { + name = originName; + } + taosMemoryFreeClear(name); } @@ -190,6 +194,7 @@ int32_t initCreateTableMsg(SVCreateTbReq* pCreateTableReq, uint64_t suid, const pCreateTableReq->ctb.stbName = taosStrdup((char*)tNameGetTableName(&name)); if (pCreateTableReq->ctb.stbName == NULL) { // ignore this error code tqError("failed to duplicate the stb name:%s, failed to init create-table msg and create req table", stbFullName); + code = TSDB_CODE_OUT_OF_MEMORY; } } @@ -202,14 +207,14 @@ int32_t createDefaultTagColName(SArray** pColNameList) { SArray* pTagColNameList = taosArrayInit(1, TSDB_COL_NAME_LEN); if (pTagColNameList == NULL) { - return TSDB_CODE_OUT_OF_MEMORY; + return terrno; } char tagNameStr[TSDB_COL_NAME_LEN] = "group_id"; void* p = taosArrayPush(pTagColNameList, tagNameStr); if (p == NULL) { taosArrayDestroy(pTagColNameList); - return TSDB_CODE_OUT_OF_MEMORY; + return terrno; } *pColNameList = pTagColNameList; @@ -252,15 +257,14 @@ static int32_t doBuildAndSendCreateTableMsg(SVnode* pVnode, char* stbFullName, S SArray* tagArray = taosArrayInit(4, sizeof(STagVal)); const char* id = pTask->id.idStr; int32_t vgId = pTask->pMeta->vgId; + int32_t code = 0; tqDebug("s-task:%s build create %d table(s) msg", id, rows); - - int32_t code = 0; - SVCreateTbBatchReq reqs = {0}; SArray* crTblArray = reqs.pArray = 
taosArrayInit(1, sizeof(SVCreateTbReq)); - if (NULL == reqs.pArray) { + if ((NULL == reqs.pArray) || (tagArray == NULL)) { tqError("s-task:%s failed to init create table msg, code:%s", id, tstrerror(terrno)); + code = terrno; goto _end; } @@ -418,8 +422,8 @@ int32_t doMergeExistedRows(SSubmitTbData* pExisted, const SSubmitTbData* pNew, c int32_t j = 0, k = 0; SArray* pFinal = taosArrayInit(oldLen + newLen, POINTER_BYTES); if (pFinal == NULL) { - tqError("s-task:%s failed to prepare merge result datablock, code:%s", id, tstrerror(TSDB_CODE_OUT_OF_MEMORY)); - return TSDB_CODE_OUT_OF_MEMORY; + tqError("s-task:%s failed to prepare merge result datablock, code:%s", id, tstrerror(terrno)); + return terrno; } while (j < newLen && k < oldLen) { @@ -872,6 +876,9 @@ int32_t setDstTableDataUid(SVnode* pVnode, SStreamTask* pTask, SSDataBlock* pDat tqDebug("s-task:%s stream write into table:%s, table auto created", id, dstTableName); SArray* pTagArray = taosArrayInit(pTSchema->numOfCols + 1, sizeof(STagVal)); + if (pTagArray == NULL) { + return terrno; + } pTableData->flags = SUBMIT_REQ_AUTO_CREATE_TABLE; code = @@ -1167,6 +1174,9 @@ int32_t doRemoveFromCache(SSHashObj* pSinkTableMap, uint64_t groupId, const char int32_t doBuildAndSendDeleteMsg(SVnode* pVnode, char* stbFullName, SSDataBlock* pDataBlock, SStreamTask* pTask, int64_t suid) { SBatchDeleteReq deleteReq = {.suid = suid, .deleteReqs = taosArrayInit(0, sizeof(SSingleDeleteReq))}; + if (deleteReq.deleteReqs == NULL) { + return terrno; + } int32_t code = tqBuildDeleteReq(pVnode->pTq, stbFullName, pDataBlock, &deleteReq, pTask->id.idStr, pTask->ver >= SSTREAM_TASK_SUBTABLE_CHANGED_VER && pTask->subtableWithoutMd5 != 1); From ed1e3c304a627a3da0257691dda8ee7c18cc4808 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Fri, 2 Aug 2024 14:03:07 +0800 Subject: [PATCH 41/60] test: scan returned values in ci --- tests/ci/scan_file_path.py | 18 +++++++++--------- tests/script/tsim/mnode/basic3.sim | 6 +++--- 
tests/system-test/2-query/tsma.py | 4 ++-- 3 files changed, 14 insertions(+), 14 deletions(-) diff --git a/tests/ci/scan_file_path.py b/tests/ci/scan_file_path.py index 0e47387f67..f4cf25b8f7 100644 --- a/tests/ci/scan_file_path.py +++ b/tests/ci/scan_file_path.py @@ -179,11 +179,11 @@ if __name__ == "__main__": # create dir # current_time = datetime.now().strftime("%Y%m%d%H%M%S") # scan_result_path = os.path.join(scan_result_base_path, current_time) - scan_result_path = scan_result_base_path - if not os.path.exists(scan_result_path): - os.makedirs(scan_result_path) - for file in all_file_path: + # scan_result_path = scan_result_base_path + # if not os.path.exists(scan_result_path): + # os.makedirs(scan_result_path) + for file in all_file_path: cmd = f"clang-query-10 -p {compile_commands_path} {file} -f {clang_scan_rules_path}" logger.debug(f"cmd:{cmd}") try: @@ -195,9 +195,9 @@ if __name__ == "__main__": match_num = int(lines[-2].split(" ")[0]) logger.info("The match lines of file %s: %s" % (file, match_num)) if match_num > 0: - logger.info(f"scan_result_path: {scan_result_path} ,file:{file}") - save_scan_res(scan_result_path, file, stdout, stderr) - index_tests = file_res_path.find(branch_name) + logger.info(f"log_file_path: {log_file_path} ,file:{file}") + save_scan_res(log_file_path, file, stdout, stderr) + index_tests = file_res_path.find("scan_log") if index_tests != -1: web_path_file = file_res_path[index_tests:] web_path_file = os.path.join(web_server, web_path_file) @@ -212,8 +212,8 @@ if __name__ == "__main__": # for item in res: # data += item[0] + "," + str(item[1]) + "\n" # logger.info("Csv data: %s" % data) - write_csv(os.path.join(scan_result_path, "scan_res.csv"), res) - scan_result_log = f"{scan_result_path}/scan_res.csv" + write_csv(os.path.join(log_file_path, "scan_res.csv"), res) + scan_result_log = f"{log_file_path}/scan_res.csv" # delete the first element of res res= res[1:] logger.info("The result of scan: \n") diff --git 
a/tests/script/tsim/mnode/basic3.sim b/tests/script/tsim/mnode/basic3.sim index 02650ba10d..ff7c44b67d 100644 --- a/tests/script/tsim/mnode/basic3.sim +++ b/tests/script/tsim/mnode/basic3.sim @@ -36,7 +36,7 @@ if $data(3)[4] != ready then goto step1 endi -print =============== step2: create mnode 2 +print =============== step2: create mnode 2 3 sql create mnode on dnode 2 sql create mnode on dnode 3 sql_error create mnode on dnode 4 @@ -115,7 +115,7 @@ if $data(3)[4] != ready then goto step41 endi -print =============== step5: stop dnode1 +print =============== step5: stop dnode2 system sh/exec.sh -n dnode1 -s start system sh/exec.sh -n dnode2 -s stop @@ -154,7 +154,7 @@ if $data(3)[4] != ready then goto step51 endi -print =============== step6: stop dnode1 +print =============== step6: stop dnode3 system sh/exec.sh -n dnode2 -s start system sh/exec.sh -n dnode3 -s stop diff --git a/tests/system-test/2-query/tsma.py b/tests/system-test/2-query/tsma.py index 29a7562b45..fccf6291b5 100644 --- a/tests/system-test/2-query/tsma.py +++ b/tests/system-test/2-query/tsma.py @@ -1504,9 +1504,9 @@ class TDTestCase: # max number of list is 4093: 4096 - 3 - 2(原始表tag个数) - 1(tbname) tdSql.execute('use db4096') - self.create_tsma('tsma_4050', 'db4096', 'stb0', self.generate_tsma_function_list_columns(4050), '5m',check_tsma_calculation=False) + self.create_tsma('tsma_4050', 'db4096', 'stb0', self.generate_tsma_function_list_columns(4050), '5m',check_tsma_calculation=True) - self.create_tsma('tsma_4090', 'db4096', 'stb0', self.generate_tsma_function_list_columns(4090), '6m',check_tsma_calculation=False) + self.create_tsma('tsma_4090', 'db4096', 'stb0', self.generate_tsma_function_list_columns(4090), '6m',check_tsma_calculation=True) self.create_error_tsma('tsma_4091', 'db4096', 'stb0', self.generate_tsma_function_list_columns(4091), '5m', -2147473856) #Too many columns From 5e2c5b88941976c7e8297e3c83c7e876a90e663d Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Fri, 2 Aug 2024 
14:38:55 +0800 Subject: [PATCH 42/60] fix: calander interval and sliding issue --- source/libs/parser/src/parTranslater.c | 22 ++++++++++++++++++---- tests/script/tsim/query/interval.sim | 4 ++++ 2 files changed, 22 insertions(+), 4 deletions(-) diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 85d9ddde85..7a2a73d013 100755 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -5401,7 +5401,7 @@ static int32_t translateFill(STranslateContext* pCxt, SSelectStmt* pSelect, SInt return checkFill(pCxt, (SFillNode*)pInterval->pFill, (SValueNode*)pInterval->pInterval, false); } -static int32_t getMonthsFromTimeVal(int64_t val, int32_t fromPrecision, char unit, int64_t* pMonth) { +static int32_t getMonthsFromTimeVal(int64_t val, int32_t fromPrecision, char unit, double* pMonth) { int64_t days = -1; int32_t code = convertTimeFromPrecisionToUnit(val, fromPrecision, 'd', &days); if (TSDB_CODE_SUCCESS != code) { @@ -5416,7 +5416,7 @@ static int32_t getMonthsFromTimeVal(int64_t val, int32_t fromPrecision, char uni case 'h': case 'd': case 'w': - *pMonth = days / 28; + *pMonth = days / 28.0; return code; case 'n': *pMonth = val; @@ -5499,7 +5499,7 @@ static int32_t checkIntervalWindow(STranslateContext* pCxt, SIntervalWindowNode* return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INTER_OFFSET_TOO_BIG); } if (!fixed) { - int64_t offsetMonth = 0, intervalMonth = 0; + double offsetMonth = 0, intervalMonth = 0; int32_t code = getMonthsFromTimeVal(pOffset->datum.i, precision, pOffset->unit, &offsetMonth); if (TSDB_CODE_SUCCESS != code) { return code; @@ -5530,7 +5530,21 @@ static int32_t checkIntervalWindow(STranslateContext* pCxt, SIntervalWindowNode* (pInter->datum.i / pSliding->datum.i > INTERVAL_SLIDING_FACTOR)) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INTER_SLIDING_TOO_SMALL); } - if (pSliding->datum.i > pInter->datum.i) { + if (valInter) { + double slidingMonth = 0, 
intervalMonth = 0; + int32_t code = getMonthsFromTimeVal(pSliding->datum.i, precision, pSliding->unit, &slidingMonth); + if (TSDB_CODE_SUCCESS != code) { + return code; + } + code = getMonthsFromTimeVal(pInter->datum.i, precision, pInter->unit, &intervalMonth); + if (TSDB_CODE_SUCCESS != code) { + return code; + } + if (slidingMonth > intervalMonth) { + return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INTER_SLIDING_TOO_BIG); + } + } + if (!valInter && pSliding->datum.i > pInter->datum.i) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INTER_SLIDING_TOO_BIG); } } diff --git a/tests/script/tsim/query/interval.sim b/tests/script/tsim/query/interval.sim index 7f950ea69c..bce55eda27 100644 --- a/tests/script/tsim/query/interval.sim +++ b/tests/script/tsim/query/interval.sim @@ -276,6 +276,10 @@ sql insert into t6 values ("2024-03-01 14:34:07.051", 66); sleep 300 +sql select _wstart, count(*) from stb interval(1n) sliding(1d); +sql select _wstart, count(*) from stb interval(1n) sliding(28d); +sql_error select _wstart, count(*) from stb interval(1n) sliding(29d); + sql select _wstart, count(*) from (select * from stb partition by tbname) interval(2s); print $data00,$data01 From 6f5d893cf668b114a68eeb53034aee886d361c95 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Fri, 2 Aug 2024 14:44:08 +0800 Subject: [PATCH 43/60] test: scan returned values in ci --- tests/parallel_test/run_scan_container.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/parallel_test/run_scan_container.sh b/tests/parallel_test/run_scan_container.sh index 3d6d1d7c30..d16d1c3017 100755 --- a/tests/parallel_test/run_scan_container.sh +++ b/tests/parallel_test/run_scan_container.sh @@ -69,7 +69,7 @@ mkdir -p $scan_changefile_temp_path scan_file_name="$docker_can_changefile_temp_path/docs_changed.txt" #scan log file path -scan_log_temp_path="$WORKDIR/scan_log/" +scan_log_temp_path="$WORKDIR/log/scan_log/" docker_scan_log_temp_path="/home/scan_log/" mkdir -p 
$scan_log_temp_path From 9a8e74e0ed7e5f0d82b98cd4e861f5a80ddcb219 Mon Sep 17 00:00:00 2001 From: xsren <285808407@qq.com> Date: Fri, 2 Aug 2024 15:02:12 +0800 Subject: [PATCH 44/60] fix: mem leak --- source/libs/executor/src/operator.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/source/libs/executor/src/operator.c b/source/libs/executor/src/operator.c index f9ef57ec5e..701ed0ddbc 100644 --- a/source/libs/executor/src/operator.c +++ b/source/libs/executor/src/operator.c @@ -479,6 +479,9 @@ int32_t createOperator(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SReadHand SPhysiNode* pChildNode = (SPhysiNode*)nodesListGetNode(pPhyNode->pChildren, i); code = createOperator(pChildNode, pTaskInfo, pHandle, pTagCond, pTagIndexCond, pUser, dbname, &ops[i]); if (ops[i] == NULL || code != 0) { + for (int32_t j = 0; j < i; ++j) { + destroyOperator(ops[j]); + } taosMemoryFree(ops); return code; } From 791c0c878fd8c463295c37fd40fff20af70f0fc7 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Fri, 2 Aug 2024 15:04:56 +0800 Subject: [PATCH 45/60] test: scan returned values in ci --- source/libs/parser/src/parAstCreater.c | 1 - source/libs/parser/src/parAstParser.c | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/source/libs/parser/src/parAstCreater.c b/source/libs/parser/src/parAstCreater.c index f8b3cebaa9..cd7cda01e0 100644 --- a/source/libs/parser/src/parAstCreater.c +++ b/source/libs/parser/src/parAstCreater.c @@ -207,7 +207,6 @@ static bool checkIndexName(SAstCreateContext* pCxt, SToken* pIndexName) { return false; } return true; - } static bool checkTopicName(SAstCreateContext* pCxt, SToken* pTopicName) { diff --git a/source/libs/parser/src/parAstParser.c b/source/libs/parser/src/parAstParser.c index f0ecc14588..49be0b8d90 100644 --- a/source/libs/parser/src/parAstParser.c +++ b/source/libs/parser/src/parAstParser.c @@ -725,7 +725,7 @@ static int32_t collectMetaKeyFromShowCreateView(SCollectMetaKeyCxt* pCxt, SShowC strcpy(name.dbname, 
pStmt->dbName); strcpy(name.tname, pStmt->viewName); char dbFName[TSDB_DB_FNAME_LEN]; - tNameGetFullDbName(&name, dbFName); + (void)tNameGetFullDbName(&name, dbFName); int32_t code = catalogRemoveViewMeta(pCxt->pParseCxt->pCatalog, dbFName, 0, pStmt->viewName, 0); if (TSDB_CODE_SUCCESS == code) { code = reserveViewUserAuthInCache(pCxt->pParseCxt->acctId, pCxt->pParseCxt->pUser, pStmt->dbName, pStmt->viewName, From 79cf8b82d0b34bc19487867a787b574b41850476 Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Fri, 2 Aug 2024 15:50:17 +0800 Subject: [PATCH 46/60] fix: threadlocal definition issue --- include/os/osDef.h | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/include/os/osDef.h b/include/os/osDef.h index 75c6a0dc73..439f4b5c6a 100644 --- a/include/os/osDef.h +++ b/include/os/osDef.h @@ -188,9 +188,10 @@ void syslog(int unused, const char *format, ...); #define ALIGN8(n) ALIGN_NUM(n, 8) #undef threadlocal -#ifdef _ISOC11_SOURCE -#define threadlocal _Thread_local -#elif defined(__APPLE__) +//#ifdef _ISOC11_SOURCE +//#define threadlocal _Thread_local +//#elif defined(__APPLE__) +#if defined(__APPLE__) #define threadlocal __thread #elif defined(__GNUC__) && !defined(threadlocal) #define threadlocal __thread From a443fafad2f5193e0eef0af5d5bed1e453ecf1bc Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Fri, 2 Aug 2024 16:29:23 +0800 Subject: [PATCH 47/60] fix: interval case issue --- tests/script/tsim/query/interval-offset.sim | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/script/tsim/query/interval-offset.sim b/tests/script/tsim/query/interval-offset.sim index 0d796af0a0..fe3e4c9844 100644 --- a/tests/script/tsim/query/interval-offset.sim +++ b/tests/script/tsim/query/interval-offset.sim @@ -200,8 +200,9 @@ if $data02 != 2678400000 then return -1 endi -sql_error select _wstart, count(tbcol), _wduration, _wstart, count(*) from ct3 interval(1n, 1w) sliding(2w) -sql_error select _wstart, count(tbcol), _wduration, _wstart, 
count(*) from ct3 interval(1n, 1w) sliding(4w) +sql select _wstart, count(tbcol), _wduration, _wstart, count(*) from ct3 interval(1n, 1w) sliding(2w) +sql select _wstart, count(tbcol), _wduration, _wstart, count(*) from ct3 interval(1n, 1w) sliding(4w) +sql_error select _wstart, count(tbcol), _wduration, _wstart, count(*) from ct3 interval(1n, 1w) sliding(5w) sql select _wstart, count(tbcol), _wduration, _wstart, count(*) from ct4 interval(1y, 6n) print ===> select _wstart, count(tbcol), _wduration, _wstart, count(*) from ct4 interval(1y, 6n) From a2e0532c5441d90adf32c95aae58b2771687c4c6 Mon Sep 17 00:00:00 2001 From: xsren <285808407@qq.com> Date: Fri, 2 Aug 2024 16:50:01 +0800 Subject: [PATCH 48/60] mem leak --- source/libs/executor/src/executil.c | 1 + source/libs/executor/src/groupoperator.c | 5 +++++ 2 files changed, 6 insertions(+) diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index 853b2865bb..5957d08a18 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -2001,6 +2001,7 @@ _end: taosMemoryFree(pFuncCtx[i].input.pData); taosMemoryFree(pFuncCtx[i].input.pColumnDataAgg); } + taosMemoryFreeClear(*rowEntryInfoOffset); taosMemoryFreeClear(pFuncCtx); return NULL; } diff --git a/source/libs/executor/src/groupoperator.c b/source/libs/executor/src/groupoperator.c index 706cf50270..d88aef8fb7 100644 --- a/source/libs/executor/src/groupoperator.c +++ b/source/libs/executor/src/groupoperator.c @@ -551,6 +551,10 @@ int32_t createGroupOperatorInfo(SOperatorInfo* downstream, SAggPhysiNode* pAggNo pOperator->exprSupp.hasWindowOrGroup = true; SSDataBlock* pResBlock = createDataBlockFromDescNode(pAggNode->node.pOutputDataBlockDesc); + if (pResBlock == NULL) { + code = terrno; + goto _error; + } initBasicInfo(&pInfo->binfo, pResBlock); int32_t numOfScalarExpr = 0; @@ -602,6 +606,7 @@ _error: if (pInfo != NULL) { destroyGroupOperatorInfo(pInfo); } + destroyOperator(pOperator); 
taosMemoryFreeClear(pOperator); return code; } From 895a9a1f3d2c9823590c2b9b9710a3aa02085110 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 2 Aug 2024 16:54:08 +0800 Subject: [PATCH 49/60] fix(stream): check status before start init, do some internal refactor. --- include/libs/stream/tstream.h | 13 +-- source/libs/stream/src/streamCheckpoint.c | 97 +++++++++++++---------- source/libs/stream/src/streamMeta.c | 29 +++++-- source/libs/stream/src/streamSched.c | 9 ++- source/libs/stream/src/streamTask.c | 21 +++++ source/libs/stream/src/streamTaskSm.c | 8 +- 6 files changed, 118 insertions(+), 59 deletions(-) diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index 0d1e62cdbe..90cb06ff42 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -641,12 +641,13 @@ bool streamTaskShouldPause(const SStreamTask* pStatus); bool streamTaskIsIdle(const SStreamTask* pTask); bool streamTaskReadyToRun(const SStreamTask* pTask, char** pStatus); -int32_t createStreamTaskIdStr(int64_t streamId, int32_t taskId, const char** pId); -SStreamTaskState streamTaskGetStatus(const SStreamTask* pTask); -const char* streamTaskGetStatusStr(ETaskStatus status); -void streamTaskResetStatus(SStreamTask* pTask); -void streamTaskSetStatusReady(SStreamTask* pTask); -ETaskStatus streamTaskGetPrevStatus(const SStreamTask* pTask); +int32_t createStreamTaskIdStr(int64_t streamId, int32_t taskId, const char** pId); +SStreamTaskState streamTaskGetStatus(const SStreamTask* pTask); +const char* streamTaskGetStatusStr(ETaskStatus status); +void streamTaskResetStatus(SStreamTask* pTask); +void streamTaskSetStatusReady(SStreamTask* pTask); +ETaskStatus streamTaskGetPrevStatus(const SStreamTask* pTask); +const char* streamTaskGetExecType(int32_t type); bool streamTaskUpdateEpsetInfo(SStreamTask* pTask, SArray* pNodeList); void streamTaskResetUpstreamStageInfo(SStreamTask* pTask); diff --git a/source/libs/stream/src/streamCheckpoint.c 
b/source/libs/stream/src/streamCheckpoint.c index f817447099..f7c61b48e3 100644 --- a/source/libs/stream/src/streamCheckpoint.c +++ b/source/libs/stream/src/streamCheckpoint.c @@ -355,43 +355,15 @@ int32_t streamProcessCheckpointTriggerBlock(SStreamTask* pTask, SStreamDataBlock return code; } -/** - * All down stream tasks have successfully completed the check point task. - * Current stream task is allowed to start to do checkpoint things in ASYNC model. - */ -int32_t streamProcessCheckpointReadyMsg(SStreamTask* pTask, int64_t checkpointId, int32_t downstreamNodeId, - int32_t downstreamTaskId) { - ASSERT(pTask->info.taskLevel == TASK_LEVEL__SOURCE || pTask->info.taskLevel == TASK_LEVEL__AGG); - SActiveCheckpointInfo* pInfo = pTask->chkInfo.pActiveInfo; - - const char* id = pTask->id.idStr; - bool received = false; - int32_t total = streamTaskGetNumOfDownstream(pTask); - ASSERT(total > 0); - - // 1. not in checkpoint status now - SStreamTaskState pStat = streamTaskGetStatus(pTask); - if (pStat.state != TASK_STATUS__CK) { - stError("s-task:%s status:%s discard checkpoint-ready msg from task:0x%x", id, pStat.name, downstreamTaskId); - return TSDB_CODE_STREAM_TASK_IVLD_STATUS; - } - - // 2. 
expired checkpoint-ready msg, invalid checkpoint-ready msg - if (pTask->chkInfo.checkpointId > checkpointId || pInfo->activeId != checkpointId) { - stError("s-task:%s status:%s checkpointId:%" PRId64 " new arrival checkpoint-ready msg (checkpointId:%" PRId64 - ") from task:0x%x, expired and discard ", - id, pStat.name, pTask->chkInfo.checkpointId, checkpointId, downstreamTaskId); - return -1; - } - - streamMutexLock(&pInfo->lock); - - // only when all downstream tasks are send checkpoint rsp, we can start the checkpoint procedure for the agg task +// only when all downstream tasks are send checkpoint rsp, we can start the checkpoint procedure for the agg task +static int32_t processCheckpointReadyHelp(SActiveCheckpointInfo* pInfo, int32_t numOfDownstream, + int32_t downstreamNodeId, int64_t streamId, int32_t downstreamTaskId, + const char* id, int32_t* pNotReady, int32_t* pTransId) { + bool received = false; int32_t size = taosArrayGetSize(pInfo->pCheckpointReadyRecvList); for (int32_t i = 0; i < size; ++i) { STaskDownstreamReadyInfo* p = taosArrayGet(pInfo->pCheckpointReadyRecvList, i); if (p == NULL) { - streamMutexUnlock(&pInfo->lock); return TSDB_CODE_INVALID_PARA; } @@ -403,27 +375,69 @@ int32_t streamProcessCheckpointReadyMsg(SStreamTask* pTask, int64_t checkpointId if (received) { stDebug("s-task:%s already recv checkpoint-ready msg from downstream:0x%x, ignore. 
%d/%d downstream not ready", id, - downstreamTaskId, (int32_t)(total - taosArrayGetSize(pInfo->pCheckpointReadyRecvList)), total); + downstreamTaskId, (int32_t)(numOfDownstream - taosArrayGetSize(pInfo->pCheckpointReadyRecvList)), + numOfDownstream); } else { STaskDownstreamReadyInfo info = {.recvTs = taosGetTimestampMs(), .downstreamTaskId = downstreamTaskId, .checkpointId = pInfo->activeId, .transId = pInfo->transId, - .streamId = pTask->id.streamId, + .streamId = streamId, .downstreamNodeId = downstreamNodeId}; - (void)taosArrayPush(pInfo->pCheckpointReadyRecvList, &info); + void* p = taosArrayPush(pInfo->pCheckpointReadyRecvList, &info); + if (p == NULL) { + stError("s-task:%s failed to set checkpoint ready recv msg, code:%s", id, tstrerror(terrno)); + return terrno; + } } - int32_t notReady = total - taosArrayGetSize(pInfo->pCheckpointReadyRecvList); - int32_t transId = pInfo->transId; + *pNotReady = numOfDownstream - taosArrayGetSize(pInfo->pCheckpointReadyRecvList); + *pTransId = pInfo->transId; + return 0; +} + +/** + * All down stream tasks have successfully completed the check point task. + * Current stream task is allowed to start to do checkpoint things in ASYNC model. + */ +int32_t streamProcessCheckpointReadyMsg(SStreamTask* pTask, int64_t checkpointId, int32_t downstreamNodeId, + int32_t downstreamTaskId) { + SActiveCheckpointInfo* pInfo = pTask->chkInfo.pActiveInfo; + + const char* id = pTask->id.idStr; + int32_t total = streamTaskGetNumOfDownstream(pTask); + int32_t code = 0; + int32_t notReady = 0; + int32_t transId = 0; + + ASSERT(total > 0 && (pTask->info.taskLevel == TASK_LEVEL__SOURCE || pTask->info.taskLevel == TASK_LEVEL__AGG)); + + // 1. not in checkpoint status now + SStreamTaskState pStat = streamTaskGetStatus(pTask); + if (pStat.state != TASK_STATUS__CK) { + stError("s-task:%s status:%s discard checkpoint-ready msg from task:0x%x", id, pStat.name, downstreamTaskId); + return TSDB_CODE_STREAM_TASK_IVLD_STATUS; + } + + // 2. 
expired checkpoint-ready msg, invalid checkpoint-ready msg + if (pTask->chkInfo.checkpointId > checkpointId || pInfo->activeId != checkpointId) { + stError("s-task:%s status:%s checkpointId:%" PRId64 " new arrival checkpoint-ready msg (checkpointId:%" PRId64 + ") from task:0x%x, expired and discard", + id, pStat.name, pTask->chkInfo.checkpointId, checkpointId, downstreamTaskId); + return TSDB_CODE_INVALID_MSG; + } + + streamMutexLock(&pInfo->lock); + code = processCheckpointReadyHelp(pInfo, total, downstreamNodeId, pTask->id.streamId, downstreamTaskId, id, ¬Ready, + &transId); streamMutexUnlock(&pInfo->lock); - if (notReady == 0) { + if ((notReady == 0) && (code == 0)) { stDebug("s-task:%s all downstream tasks have completed build checkpoint, do checkpoint for current task", id); (void)appendCheckpointIntoInputQ(pTask, STREAM_INPUT__CHECKPOINT, checkpointId, transId, -1); } - return 0; + return code; } int32_t streamTaskProcessCheckpointReadyRsp(SStreamTask* pTask, int32_t upstreamTaskId, int64_t checkpointId) { @@ -1034,8 +1048,7 @@ int32_t streamTaskGetNumOfConfirmed(SStreamTask* pTask) { for (int32_t i = 0; i < taosArrayGetSize(pInfo->pDispatchTriggerList); ++i) { STaskTriggerSendInfo* p = taosArrayGet(pInfo->pDispatchTriggerList, i); if (p == NULL) { - streamMutexUnlock(&pInfo->lock); - return num; + continue; } if (p->recved) { diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index 08cf490b94..8ecd62d1eb 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -1464,14 +1464,16 @@ bool streamMetaAllTasksReady(const SStreamMeta* pMeta) { } int32_t streamMetaStartOneTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId) { - int32_t code = 0; - int32_t vgId = pMeta->vgId; + int32_t code = 0; + int32_t vgId = pMeta->vgId; + SStreamTask* pTask = NULL; + bool continueExec = true; + stInfo("vgId:%d start task:0x%x by checking it's downstream status", vgId, taskId); - SStreamTask* pTask = 
NULL; code = streamMetaAcquireTask(pMeta, streamId, taskId, &pTask); if (pTask == NULL) { - stError("vgId:%d failed to acquire task:0x%x when starting task", pMeta->vgId, taskId); + stError("vgId:%d failed to acquire task:0x%x when starting task", vgId, taskId); (void)streamMetaAddFailedTask(pMeta, streamId, taskId); return TSDB_CODE_STREAM_TASK_IVLD_STATUS; } @@ -1479,10 +1481,26 @@ int32_t streamMetaStartOneTask(SStreamMeta* pMeta, int64_t streamId, int32_t tas // fill-history task can only be launched by related stream tasks. STaskExecStatisInfo* pInfo = &pTask->execInfo; if (pTask->info.fillHistory == 1) { + stError("s-task:0x%x vgId:%d fill-histroy task, not start here", taskId, vgId); streamMetaReleaseTask(pMeta, pTask); return TSDB_CODE_SUCCESS; } + streamMutexLock(&pTask->lock); + SStreamTaskState status = streamTaskGetStatus(pTask); + if (status.state != TASK_STATUS__UNINIT) { + stError("s-task:0x%x vgId:%d status:%s not uninit status, not start stream task", taskId, vgId, status.name); + continueExec = false; + } else { + continueExec = true; + } + streamMutexUnlock(&pTask->lock); + + if (!continueExec) { + streamMetaReleaseTask(pMeta, pTask); + return TSDB_CODE_STREAM_TASK_IVLD_STATUS; + } + ASSERT(pTask->status.downstreamReady == 0); // avoid initialization and destroy running concurrently. 
@@ -1501,7 +1519,8 @@ int32_t streamMetaStartOneTask(SStreamMeta* pMeta, int64_t streamId, int32_t tas if (code == TSDB_CODE_SUCCESS) { code = streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_INIT); if (code != TSDB_CODE_SUCCESS) { - stError("s-task:%s vgId:%d failed to handle event:%d", pTask->id.idStr, pMeta->vgId, TASK_EVENT_INIT); + stError("s-task:%s vgId:%d failed to handle event:%d, code:%s", pTask->id.idStr, pMeta->vgId, TASK_EVENT_INIT, + tstrerror(code)); streamMetaAddFailedTaskSelf(pTask, pInfo->readyTs); } } diff --git a/source/libs/stream/src/streamSched.c b/source/libs/stream/src/streamSched.c index f9fcf36668..a83a0e4cc8 100644 --- a/source/libs/stream/src/streamSched.c +++ b/source/libs/stream/src/streamSched.c @@ -48,14 +48,15 @@ int32_t streamTaskSchedTask(SMsgCb* pMsgCb, int32_t vgId, int64_t streamId, int3 SStreamTaskRunReq* pRunReq = rpcMallocCont(sizeof(SStreamTaskRunReq)); if (pRunReq == NULL) { stError("vgId:%d failed to create msg to start stream task:0x%x exec, type:%d, code:%s", vgId, taskId, execType, - terrstr()); - return TSDB_CODE_OUT_OF_MEMORY; + terrstr(terrno)); + return terrno; } if (streamId != 0) { - stDebug("vgId:%d create msg to start stream task:0x%x, exec type:%d", vgId, taskId, execType); + stDebug("vgId:%d create msg to for task:0x%x, exec type:%d, %s", vgId, taskId, execType, + streamTaskGetExecType(execType)); } else { - stDebug("vgId:%d create msg to exec, type:%d", vgId, execType); + stDebug("vgId:%d create msg to exec, type:%d, %s", vgId, execType, streamTaskGetExecType(execType)); } pRunReq->head.vgId = vgId; diff --git a/source/libs/stream/src/streamTask.c b/source/libs/stream/src/streamTask.c index a249bad724..f07fd81953 100644 --- a/source/libs/stream/src/streamTask.c +++ b/source/libs/stream/src/streamTask.c @@ -1149,4 +1149,25 @@ void streamTaskClearActiveInfo(SActiveCheckpointInfo* pInfo) { taosArrayClear(pInfo->pDispatchTriggerList); taosArrayClear(pInfo->pCheckpointReadyRecvList); +} + +const char* 
streamTaskGetExecType(int32_t type) { + switch (type) { + case STREAM_EXEC_T_EXTRACT_WAL_DATA: + return "scan-wal-file"; + case STREAM_EXEC_T_START_ALL_TASKS: + return "start-all-tasks"; + case STREAM_EXEC_T_START_ONE_TASK: + return "start-one-task"; + case STREAM_EXEC_T_RESTART_ALL_TASKS: + return "restart-all-tasks"; + case STREAM_EXEC_T_STOP_ALL_TASKS: + return "stop-all-tasks"; + case STREAM_EXEC_T_RESUME_TASK: + return "resume-task-from-idle"; + case STREAM_EXEC_T_ADD_FAILED_TASK: + return "record-start-failed-task"; + default: + return "invalid-exec-type"; + } } \ No newline at end of file diff --git a/source/libs/stream/src/streamTaskSm.c b/source/libs/stream/src/streamTaskSm.c index 275c9255d2..0779eede9f 100644 --- a/source/libs/stream/src/streamTaskSm.c +++ b/source/libs/stream/src/streamTaskSm.c @@ -322,12 +322,11 @@ static int32_t doHandleEvent(SStreamTaskSM* pSM, EStreamTaskEvent event, STaskSt if (pTrans->attachEvent.event != 0) { code = attachWaitedEvent(pTask, &pTrans->attachEvent); + streamMutexUnlock(&pTask->lock); if (code) { return code; } - streamMutexUnlock(&pTask->lock); - while (1) { // wait for the task to be here streamMutexLock(&pTask->lock); @@ -557,6 +556,11 @@ ETaskStatus streamTaskGetPrevStatus(const SStreamTask* pTask) { } const char* streamTaskGetStatusStr(ETaskStatus status) { + int32_t index = status; + if (index < 0 || index > tListLen(StreamTaskStatusList)) { + return ""; + } + return StreamTaskStatusList[status].name; } From 9e30aef1921dd05100e2226d96858e23c61ccd2a Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Fri, 2 Aug 2024 16:54:54 +0800 Subject: [PATCH 50/60] test: scan returned values in ci --- Jenkinsfile2 | 6 +++--- tests/ci/scan_file_path.py | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Jenkinsfile2 b/Jenkinsfile2 index de96a23d51..a4f765c9fb 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -137,7 +137,7 @@ def pre_test(){ ''' } else { sh ''' - echo "unmatched reposiotry ${CHANGE_URL}" + echo 
"unmatched repository ${CHANGE_URL}" ''' } sh ''' @@ -247,7 +247,7 @@ def pre_test_win(){ ''' } else { bat ''' - echo "unmatched reposiotry %CHANGE_URL%" + echo "unmatched repository %CHANGE_URL%" ''' } } @@ -309,7 +309,7 @@ def pre_test_build_win() { python -m pip install taospy==2.7.13 python -m pip uninstall taos-ws-py -y python -m pip install taos-ws-py==0.3.1 - xcopy /e/y/i/f %WIN_INTERNAL_ROOT%\\debug\\build\\lib\\taos.dll C:\\Windows\\System32 + xcopy /e/y/i/f %WIN_INTERNAL_ROOT%\\debug\\build\\lib\\taos.dll C:\\Windows\\System32 ''' return 1 } diff --git a/tests/ci/scan_file_path.py b/tests/ci/scan_file_path.py index f4cf25b8f7..03f2d6ee4f 100644 --- a/tests/ci/scan_file_path.py +++ b/tests/ci/scan_file_path.py @@ -62,7 +62,7 @@ log_file_path = f"{work_path}/scan_log/scan_{branch_name}_{current_time}/" os.makedirs(log_file_path, exist_ok=True) -scan_log_file = f"{log_file_path}/scan.log" +scan_log_file = f"{log_file_path}/scan_log.txt" logger.add(scan_log_file, rotation="10MB", retention="7 days", level="DEBUG") #if error happens, open this to debug # print(self_path,work_path,TD_project_path,log_file_path,change_file_list) @@ -212,8 +212,8 @@ if __name__ == "__main__": # for item in res: # data += item[0] + "," + str(item[1]) + "\n" # logger.info("Csv data: %s" % data) - write_csv(os.path.join(log_file_path, "scan_res.csv"), res) - scan_result_log = f"{log_file_path}/scan_res.csv" + write_csv(os.path.join(log_file_path, "scan_res.txt"), res) + scan_result_log = f"{log_file_path}/scan_res.txt" # delete the first element of res res= res[1:] logger.info("The result of scan: \n") From c1ca6ce46484420cc1de10104774143e80ceac0b Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 2 Aug 2024 17:24:29 +0800 Subject: [PATCH 51/60] fix(stream): set the concurrently handle init failed. 
--- source/libs/stream/src/streamMeta.c | 22 ++++++++++++++++++++-- source/libs/stream/src/streamTaskSm.c | 9 ++++++++- 2 files changed, 28 insertions(+), 3 deletions(-) diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index 8ecd62d1eb..670f692c59 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -1379,7 +1379,10 @@ int32_t streamMetaStartAllTasks(SStreamMeta* pMeta) { if (ret != TSDB_CODE_SUCCESS) { stError("vgId:%d failed to handle event:%d", pMeta->vgId, TASK_EVENT_INIT); code = ret; - streamMetaAddFailedTaskSelf(pTask, pInfo->readyTs); + + if (code != TSDB_CODE_STREAM_INVALID_STATETRANS) { + streamMetaAddFailedTaskSelf(pTask, pInfo->readyTs); + } } streamMetaReleaseTask(pMeta, pTask); @@ -1486,6 +1489,8 @@ int32_t streamMetaStartOneTask(SStreamMeta* pMeta, int64_t streamId, int32_t tas return TSDB_CODE_SUCCESS; } + // the start all tasks procedure may happen to start the newly deployed stream task, and results in the + // concurrently start this task by two threads. streamMutexLock(&pTask->lock); SStreamTaskState status = streamTaskGetStatus(pTask); if (status.state != TASK_STATUS__UNINIT) { @@ -1516,12 +1521,17 @@ int32_t streamMetaStartOneTask(SStreamMeta* pMeta, int64_t streamId, int32_t tas streamMutexUnlock(&pTask->lock); } + // concurrently start task may cause the later started task be failed, and also failed to added into meta result. if (code == TSDB_CODE_SUCCESS) { code = streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_INIT); if (code != TSDB_CODE_SUCCESS) { stError("s-task:%s vgId:%d failed to handle event:%d, code:%s", pTask->id.idStr, pMeta->vgId, TASK_EVENT_INIT, tstrerror(code)); - streamMetaAddFailedTaskSelf(pTask, pInfo->readyTs); + + // do no added into result hashmap if it is failed due to concurrently starting of this stream task. 
+ if (code != TSDB_CODE_STREAM_INVALID_STATETRANS) { + streamMetaAddFailedTaskSelf(pTask, pInfo->readyTs); + } } } @@ -1584,6 +1594,14 @@ int32_t streamMetaAddTaskLaunchResult(SStreamMeta* pMeta, int64_t streamId, int3 STaskInitTs initTs = {.start = startTs, .end = endTs, .success = ready}; int32_t code = taosHashPut(pDst, &id, sizeof(id), &initTs, sizeof(STaskInitTs)); if (code) { + if (code == TSDB_CODE_DUP_KEY) { + stError("vgId:%d record start task result failed, s-task:0x%x already exist start results in meta dst hashmap", + pMeta->vgId, id.taskId); + } else { + stError("vgId:%d failed to record start task:0x%x results, start all tasks failed", pMeta->vgId, id.taskId); + } + streamMetaWUnLock(pMeta); + return code; } int32_t numOfTotal = streamMetaGetNumOfTasks(pMeta); diff --git a/source/libs/stream/src/streamTaskSm.c b/source/libs/stream/src/streamTaskSm.c index 0779eede9f..c63df059af 100644 --- a/source/libs/stream/src/streamTaskSm.c +++ b/source/libs/stream/src/streamTaskSm.c @@ -405,7 +405,7 @@ int32_t streamTaskHandleEvent(SStreamTaskSM* pSM, EStreamTaskEvent event) { EStreamTaskEvent evt = pSM->pActiveTrans->event; streamMutexUnlock(&pTask->lock); - stDebug("s-task:%s status:%s handling event:%s by some other thread, wait for 100ms and check if completed", + stDebug("s-task:%s status:%s handling event:%s by another thread, wait for 100ms and check if completed", pTask->id.idStr, pSM->current.name, GET_EVT_NAME(evt)); taosMsleep(100); } else { @@ -418,6 +418,13 @@ int32_t streamTaskHandleEvent(SStreamTaskSM* pSM, EStreamTaskEvent event) { } if (pSM->pActiveTrans != NULL) { + // not allowed concurrently initialization + if (event == TASK_EVENT_INIT && pSM->pActiveTrans->event == TASK_EVENT_INIT) { + stError("s-task:%s already in handling init procedure, handle this init event failed", pTask->id.idStr); + code = TSDB_CODE_STREAM_INVALID_STATETRANS; + break; + } + // currently in some state transfer procedure, not auto invoke transfer, abort it 
stDebug("s-task:%s event:%s handle procedure quit, status %s -> %s failed, handle event %s now", pTask->id.idStr, GET_EVT_NAME(pSM->pActiveTrans->event), pSM->current.name, From e0a7e64a651e9fb3a4fc4fddced365c29a47bdec Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 2 Aug 2024 17:28:05 +0800 Subject: [PATCH 52/60] fix(stream): fix deadlock. --- source/libs/stream/src/streamTaskSm.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/libs/stream/src/streamTaskSm.c b/source/libs/stream/src/streamTaskSm.c index c63df059af..d3c39da6bd 100644 --- a/source/libs/stream/src/streamTaskSm.c +++ b/source/libs/stream/src/streamTaskSm.c @@ -420,9 +420,9 @@ int32_t streamTaskHandleEvent(SStreamTaskSM* pSM, EStreamTaskEvent event) { if (pSM->pActiveTrans != NULL) { // not allowed concurrently initialization if (event == TASK_EVENT_INIT && pSM->pActiveTrans->event == TASK_EVENT_INIT) { + streamMutexUnlock(&pTask->lock); stError("s-task:%s already in handling init procedure, handle this init event failed", pTask->id.idStr); - code = TSDB_CODE_STREAM_INVALID_STATETRANS; - break; + return TSDB_CODE_STREAM_INVALID_STATETRANS; } // currently in some state transfer procedure, not auto invoke transfer, abort it From 9d219ad62a875c32be7e80dae8dea667369d13ba Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 2 Aug 2024 17:35:47 +0800 Subject: [PATCH 53/60] refactor: update log. 
--- source/dnode/vnode/src/tq/tqStreamTask.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/dnode/vnode/src/tq/tqStreamTask.c b/source/dnode/vnode/src/tq/tqStreamTask.c index c84e016459..3dc4beca57 100644 --- a/source/dnode/vnode/src/tq/tqStreamTask.c +++ b/source/dnode/vnode/src/tq/tqStreamTask.c @@ -82,7 +82,7 @@ static void doStartScanWal(void* param, void* tmrId) { taosMemoryFree(pParam); if (code) { - tqError("vgId:%d failed sched task to scan wal", vgId); + tqError("vgId:%d failed sched task to scan wal, code:%s", vgId, tstrerror(code)); } } From 281f636954787bc07febb8a927b8bf04e95a1ec4 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 2 Aug 2024 18:21:46 +0800 Subject: [PATCH 54/60] fix(stream): fix syntax error. --- source/libs/stream/src/streamMeta.c | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index 670f692c59..87293c59ec 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -1565,11 +1565,12 @@ int32_t streamMetaAddTaskLaunchResult(SStreamMeta* pMeta, int64_t streamId, int3 int64_t endTs, bool ready) { STaskStartInfo* pStartInfo = &pMeta->startInfo; STaskId id = {.streamId = streamId, .taskId = taskId}; + int32_t vgId = pMeta->vgId; streamMetaWLock(pMeta); SStreamTask** p = taosHashGet(pMeta->pTasksMap, &id, sizeof(id)); if (p == NULL) { // task does not exists in current vnode, not record the complete info - stError("vgId:%d s-task:0x%x not exists discard the check downstream info", pMeta->vgId, taskId); + stError("vgId:%d s-task:0x%x not exists discard the check downstream info", vgId, taskId); streamMetaWUnLock(pMeta); return 0; } @@ -1584,7 +1585,7 @@ int32_t streamMetaAddTaskLaunchResult(SStreamMeta* pMeta, int64_t streamId, int3 stDebug( "vgId:%d not in start all task(s) process, not record launch result status, s-task:0x%x launch succ:%d elapsed " "time:%" PRId64 
"ms", - pMeta->vgId, taskId, ready, el); + vgId, taskId, ready, el); streamMetaWUnLock(pMeta); return 0; } @@ -1595,10 +1596,11 @@ int32_t streamMetaAddTaskLaunchResult(SStreamMeta* pMeta, int64_t streamId, int3 int32_t code = taosHashPut(pDst, &id, sizeof(id), &initTs, sizeof(STaskInitTs)); if (code) { if (code == TSDB_CODE_DUP_KEY) { - stError("vgId:%d record start task result failed, s-task:0x%x already exist start results in meta dst hashmap", - pMeta->vgId, id.taskId); + stError("vgId:%d record start task result failed, s-task:0x%" PRIx64 + " already exist start results in meta start task result hashmap", + vgId, id.taskId); } else { - stError("vgId:%d failed to record start task:0x%x results, start all tasks failed", pMeta->vgId, id.taskId); + stError("vgId:%d failed to record start task:0x%" PRIx64 " results, start all tasks failed", vgId, id.taskId); } streamMetaWUnLock(pMeta); return code; @@ -1613,20 +1615,20 @@ int32_t streamMetaAddTaskLaunchResult(SStreamMeta* pMeta, int64_t streamId, int3 stDebug("vgId:%d all %d task(s) check downstream completed, last completed task:0x%x (succ:%d) startTs:%" PRId64 ", readyTs:%" PRId64 " total elapsed time:%.2fs", - pMeta->vgId, numOfTotal, taskId, ready, pStartInfo->startTs, pStartInfo->readyTs, + vgId, numOfTotal, taskId, ready, pStartInfo->startTs, pStartInfo->readyTs, pStartInfo->elapsedTime / 1000.0); // print the initialization elapsed time and info displayStatusInfo(pMeta, pStartInfo->pReadyTaskSet, true); displayStatusInfo(pMeta, pStartInfo->pFailedTaskSet, false); - streamMetaResetStartInfo(pStartInfo, pMeta->vgId); + streamMetaResetStartInfo(pStartInfo, vgId); streamMetaWUnLock(pMeta); code = pStartInfo->completeFn(pMeta); } else { streamMetaWUnLock(pMeta); - stDebug("vgId:%d recv check downstream results, s-task:0x%x succ:%d, received:%d, total:%d", pMeta->vgId, taskId, - ready, numOfRecv, numOfTotal); + stDebug("vgId:%d recv check downstream results, s-task:0x%x succ:%d, received:%d, total:%d", vgId, 
taskId, ready, + numOfRecv, numOfTotal); } return code; From 34ce872eafa1f9d7d08bdd694cc69046efec668a Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 5 Aug 2024 10:16:09 +0800 Subject: [PATCH 55/60] refactor: do some internal refactor. --- source/dnode/vnode/src/tqCommon/tqCommon.c | 11 +++++-- source/libs/stream/src/streamCheckpoint.c | 36 ++++++++++++++-------- source/libs/stream/src/streamData.c | 7 +++-- source/libs/stream/src/streamDispatch.c | 7 ----- 4 files changed, 37 insertions(+), 24 deletions(-) diff --git a/source/dnode/vnode/src/tqCommon/tqCommon.c b/source/dnode/vnode/src/tqCommon/tqCommon.c index b56c474ed5..a4c490e9b5 100644 --- a/source/dnode/vnode/src/tqCommon/tqCommon.c +++ b/source/dnode/vnode/src/tqCommon/tqCommon.c @@ -553,8 +553,15 @@ int32_t tqStreamTaskProcessCheckpointReadyMsg(SStreamMeta* pMeta, SRpcMsg* pMsg) return code; } - tqDebug("vgId:%d s-task:%s received the checkpoint-ready msg from task:0x%x (vgId:%d), handle it", vgId, - pTask->id.idStr, req.downstreamTaskId, req.downstreamNodeId); + if (pTask->info.taskLevel == TASK_LEVEL__SINK) { + tqDebug("vgId:%d s-task:%s recv invalid the checkpoint-ready msg from task:0x%x (vgId:%d), discard", vgId, + pTask->id.idStr, req.downstreamTaskId, req.downstreamNodeId); + streamMetaReleaseTask(pMeta, pTask); + return TSDB_CODE_INVALID_MSG; + } else { + tqDebug("vgId:%d s-task:%s received the checkpoint-ready msg from task:0x%x (vgId:%d), handle it", vgId, + pTask->id.idStr, req.downstreamTaskId, req.downstreamNodeId); + } code = streamProcessCheckpointReadyMsg(pTask, req.checkpointId, req.downstreamTaskId, req.downstreamNodeId); streamMetaReleaseTask(pMeta, pTask); diff --git a/source/libs/stream/src/streamCheckpoint.c b/source/libs/stream/src/streamCheckpoint.c index f7c61b48e3..640e2af94f 100644 --- a/source/libs/stream/src/streamCheckpoint.c +++ b/source/libs/stream/src/streamCheckpoint.c @@ -94,12 +94,17 @@ int32_t appendCheckpointIntoInputQ(SStreamTask* pTask, int32_t checkpointType, 
i } int32_t streamProcessCheckpointSourceReq(SStreamTask* pTask, SStreamCheckpointSourceReq* pReq) { - ASSERT(pTask->info.taskLevel == TASK_LEVEL__SOURCE); + if (pTask->info.taskLevel != TASK_LEVEL__SOURCE) { + return TSDB_CODE_INVALID_MSG; + } // todo this status may not be set here. // 1. set task status to be prepared for check point, no data are allowed to put into inputQ. int32_t code = streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_GEN_CHECKPOINT); - ASSERT(code == TSDB_CODE_SUCCESS); + if (code != TSDB_CODE_SUCCESS) { + stError("s-task:%s failed to handle gen-checkpoint event, failed to start checkpoint procedure", pTask->id.idStr); + return code; + } pTask->chkInfo.pActiveInfo->transId = pReq->transId; pTask->chkInfo.pActiveInfo->activeId = pReq->checkpointId; @@ -112,7 +117,10 @@ int32_t streamProcessCheckpointSourceReq(SStreamTask* pTask, SStreamCheckpointSo } int32_t streamTaskProcessCheckpointTriggerRsp(SStreamTask* pTask, SCheckpointTriggerRsp* pRsp) { - ASSERT(pTask->info.taskLevel != TASK_LEVEL__SOURCE); + if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) { + stError("s-task:%s invalid msg recv, checkpoint-trigger rsp not handled", pTask->id.idStr); + return TSDB_CODE_INVALID_MSG; + } if (pRsp->rspCode != TSDB_CODE_SUCCESS) { stDebug("s-task:%s retrieve checkpoint-trgger rsp from upstream:0x%x invalid, code:%s", pTask->id.idStr, @@ -258,7 +266,6 @@ int32_t streamProcessCheckpointTriggerBlock(SStreamTask* pTask, SStreamDataBlock } if (p->upstreamTaskId == pBlock->srcTaskId) { - ASSERT(p->checkpointId == checkpointId); stWarn("s-task:%s repeatly recv checkpoint-source msg from task:0x%x vgId:%d, checkpointId:%" PRId64 ", prev recvTs:%" PRId64 " discard", pTask->id.idStr, p->upstreamTaskId, p->upstreamNodeId, p->checkpointId, p->recvTs); @@ -320,7 +327,6 @@ int32_t streamProcessCheckpointTriggerBlock(SStreamTask* pTask, SStreamDataBlock streamFreeQitem((SStreamQueueItem*)pBlock); } } else if (taskLevel == TASK_LEVEL__SINK || taskLevel == 
TASK_LEVEL__AGG) { - ASSERT(taosArrayGetSize(pTask->upstreamInfo.pList) > 0); if (pTask->chkInfo.startTs == 0) { pTask->chkInfo.startTs = taosGetTimestampMs(); pTask->execInfo.checkpoint += 1; @@ -410,8 +416,6 @@ int32_t streamProcessCheckpointReadyMsg(SStreamTask* pTask, int64_t checkpointId int32_t notReady = 0; int32_t transId = 0; - ASSERT(total > 0 && (pTask->info.taskLevel == TASK_LEVEL__SOURCE || pTask->info.taskLevel == TASK_LEVEL__AGG)); - // 1. not in checkpoint status now SStreamTaskState pStat = streamTaskGetStatus(pTask); if (pStat.state != TASK_STATUS__CK) { @@ -799,6 +803,13 @@ void checkpointTriggerMonitorFn(void* param, void* tmrId) { SActiveCheckpointInfo* pActiveInfo = pTask->chkInfo.pActiveInfo; + if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) { + int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1); + stError("s-task:%s source task should not start the checkpoint-trigger monitor fn, quit", id, ref); + streamMetaReleaseTask(pTask->pMeta, pTask); + return; + } + // check the status every 100ms if (streamTaskShouldStop(pTask)) { int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1); @@ -843,7 +854,6 @@ void checkpointTriggerMonitorFn(void* param, void* tmrId) { // send msg to retrieve checkpoint trigger msg SArray* pList = pTask->upstreamInfo.pList; - ASSERT(pTask->info.taskLevel > TASK_LEVEL__SOURCE); SArray* pNotSendList = taosArrayInit(4, sizeof(SStreamUpstreamEpInfo)); if (pNotSendList == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; @@ -1085,10 +1095,12 @@ void streamTaskSetTriggerDispatchConfirmed(SStreamTask* pTask, int32_t vgId) { int32_t numOfConfirmed = streamTaskGetNumOfConfirmed(pTask); int32_t total = streamTaskGetNumOfDownstream(pTask); - stDebug("s-task:%s set downstream:0x%x(vgId:%d) checkpoint-trigger dispatch confirmed, total confirmed:%d/%d", - pTask->id.idStr, taskId, vgId, numOfConfirmed, total); - - ASSERT(taskId != 0); + if (taskId == 0) { + stError("s-task:%s recv invalid trigger-dispatch confirm, 
vgId:%d", pTask->id.idStr, vgId); + } else { + stDebug("s-task:%s set downstream:0x%x(vgId:%d) checkpoint-trigger dispatch confirmed, total confirmed:%d/%d", + pTask->id.idStr, taskId, vgId, numOfConfirmed, total); + } } static int32_t uploadCheckpointToS3(const char* id, const char* path) { diff --git a/source/libs/stream/src/streamData.c b/source/libs/stream/src/streamData.c index 57e5322e38..eb846b5a92 100644 --- a/source/libs/stream/src/streamData.c +++ b/source/libs/stream/src/streamData.c @@ -175,9 +175,10 @@ int32_t streamDataSubmitNew(SPackedData* pData, int32_t type, SStreamDataSubmit* } void streamDataSubmitDestroy(SStreamDataSubmit* pDataSubmit) { - ASSERT(pDataSubmit->type == STREAM_INPUT__DATA_SUBMIT); - taosMemoryFree(pDataSubmit->submit.msgStr); - taosFreeQitem(pDataSubmit); + if (pDataSubmit != NULL && pDataSubmit->type == STREAM_INPUT__DATA_SUBMIT) { + taosMemoryFree(pDataSubmit->submit.msgStr); + taosFreeQitem(pDataSubmit); + } } int32_t streamMergedSubmitNew(SStreamMergedSubmit** pSubmit) { diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index 255afb44f9..d245548ce5 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -96,8 +96,6 @@ int32_t streamTaskBroadcastRetrieveReq(SStreamTask* pTask, SStreamRetrieveReq* r int32_t code = 0; void* buf = NULL; int32_t sz = taosArrayGetSize(pTask->upstreamInfo.pList); - ASSERT(sz > 0); - for (int32_t i = 0; i < sz; i++) { req->reqId = tGenIdPI64(); SStreamUpstreamEpInfo* pEpInfo = taosArrayGetP(pTask->upstreamInfo.pList, i); @@ -107,7 +105,6 @@ int32_t streamTaskBroadcastRetrieveReq(SStreamTask* pTask, SStreamRetrieveReq* r tEncodeSize(tEncodeStreamRetrieveReq, req, len, code); if (code != 0) { - ASSERT(0); return code; } @@ -946,8 +943,6 @@ int32_t streamTaskSendCheckpointSourceRsp(SStreamTask* pTask) { SArray* pList = pTask->chkInfo.pActiveInfo->pReadyMsgList; streamMutexLock(&pTask->chkInfo.pActiveInfo->lock); - 
ASSERT(pTask->info.taskLevel == TASK_LEVEL__SOURCE); - if (taosArrayGetSize(pList) == 1) { STaskCheckpointReadyInfo* pInfo = taosArrayGet(pList, 0); tmsgSendRsp(&pInfo->msg); @@ -1122,8 +1117,6 @@ int32_t streamAddCheckpointSourceRspMsg(SStreamCheckpointSourceReq* pReq, SRpcHa void initCheckpointReadyInfo(STaskCheckpointReadyInfo* pReadyInfo, int32_t upstreamNodeId, int32_t upstreamTaskId, int32_t childId, SEpSet* pEpset, int64_t checkpointId) { - ASSERT(upstreamTaskId != 0); - pReadyInfo->upstreamTaskId = upstreamTaskId; pReadyInfo->upstreamNodeEpset = *pEpset; pReadyInfo->upstreamNodeId = upstreamNodeId; From 183f33af876cc8aa8cc3b9757269e700ef7b8ee6 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 5 Aug 2024 10:35:00 +0800 Subject: [PATCH 56/60] fix(stream): fix syntax error. --- source/libs/stream/src/streamCheckpoint.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/stream/src/streamCheckpoint.c b/source/libs/stream/src/streamCheckpoint.c index 640e2af94f..acac5dfc9e 100644 --- a/source/libs/stream/src/streamCheckpoint.c +++ b/source/libs/stream/src/streamCheckpoint.c @@ -805,7 +805,7 @@ void checkpointTriggerMonitorFn(void* param, void* tmrId) { if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) { int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1); - stError("s-task:%s source task should not start the checkpoint-trigger monitor fn, quit", id, ref); + stError("s-task:%s source task should not start the checkpoint-trigger monitor fn, ref:%d quit", id, ref); streamMetaReleaseTask(pTask->pMeta, pTask); return; } From 9a2ee547194f0a97f20c4aa7dc08fad0b2bd6405 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 5 Aug 2024 11:06:20 +0800 Subject: [PATCH 57/60] fix(stream): check return value. 
--- source/common/src/tdatablock.c | 112 +++++++++++++++++++++++++++++++-- 1 file changed, 107 insertions(+), 5 deletions(-) diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 82bd1b24f6..b489314e21 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -644,6 +644,10 @@ int32_t blockDataUpdatePkRange(SSDataBlock* pDataBlock, int32_t pkColumnIndex, b SDataBlockInfo* pInfo = &pDataBlock->info; SColumnInfoData* pColInfoData = taosArrayGet(pDataBlock->pDataBlock, pkColumnIndex); + if (pColInfoData == NULL) { + return terrno; + } + if (!IS_NUMERIC_TYPE(pColInfoData->info.type) && (pColInfoData->info.type != TSDB_DATA_TYPE_VARCHAR)) { return 0; } @@ -685,6 +689,9 @@ int32_t blockDataMerge(SSDataBlock* pDest, const SSDataBlock* pSrc) { for (int32_t i = 0; i < numOfCols; ++i) { SColumnInfoData* pCol2 = taosArrayGet(pDest->pDataBlock, i); SColumnInfoData* pCol1 = taosArrayGet(pSrc->pDataBlock, i); + if (pCol1 == NULL || pCol2 == NULL) { + return terrno; + } capacity = pDest->info.capacity; int32_t ret = colDataMergeCol(pCol2, pDest->info.rows, &capacity, pCol1, pSrc->info.rows); @@ -709,6 +716,9 @@ int32_t blockDataMergeNRows(SSDataBlock* pDest, const SSDataBlock* pSrc, int32_t for (int32_t i = 0; i < numOfCols; ++i) { SColumnInfoData* pCol2 = taosArrayGet(pDest->pDataBlock, i); SColumnInfoData* pCol1 = taosArrayGet(pSrc->pDataBlock, i); + if (pCol2 == NULL || pCol1 == NULL) { + return terrno; + } code = colDataAssignNRows(pCol2, pDest->info.rows, pCol1, srcIdx, numOfRows); if (code) { @@ -729,6 +739,10 @@ void blockDataShrinkNRows(SSDataBlock* pBlock, int32_t numOfRows) { size_t numOfCols = taosArrayGetSize(pBlock->pDataBlock); for (int32_t i = 0; i < numOfCols; ++i) { SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, i); + if (pCol == NULL) { + continue; + } + if (IS_VAR_DATA_TYPE(pCol->info.type)) { pCol->varmeta.length = pCol->varmeta.offset[pBlock->info.rows - numOfRows]; memset(pCol->varmeta.offset + 
pBlock->info.rows - numOfRows, 0, sizeof(*pCol->varmeta.offset) * numOfRows); @@ -760,6 +774,10 @@ size_t blockDataGetSize(const SSDataBlock* pBlock) { size_t numOfCols = taosArrayGetSize(pBlock->pDataBlock); for (int32_t i = 0; i < numOfCols; ++i) { SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, i); + if (pColInfoData == NULL) { + continue; + } + total += colDataGetFullLength(pColInfoData, pBlock->info.rows); } @@ -861,6 +879,10 @@ int32_t blockDataExtractBlock(SSDataBlock* pBlock, int32_t startIndex, int32_t r for (int32_t i = 0; i < numOfCols; ++i) { SColumnInfoData* pColData = taosArrayGet(pBlock->pDataBlock, i); SColumnInfoData* pDstCol = taosArrayGet(pDst->pDataBlock, i); + if (pColData == NULL || pDstCol == NULL) { + continue; + } + for (int32_t j = startIndex; j < (startIndex + rowCount); ++j) { bool isNull = false; if (pBlock->pBlockAgg == NULL) { @@ -908,6 +930,10 @@ int32_t blockDataToBuf(char* buf, const SSDataBlock* pBlock) { for (int32_t i = 0; i < numOfCols; ++i) { SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, i); + if (pCol == NULL) { + continue; + } + if (IS_VAR_DATA_TYPE(pCol->info.type)) { memcpy(pStart, pCol->varmeta.offset, numOfRows * sizeof(int32_t)); pStart += numOfRows * sizeof(int32_t); @@ -958,6 +984,9 @@ int32_t blockDataFromBuf(SSDataBlock* pBlock, const char* buf) { for (int32_t i = 0; i < numOfCols; ++i) { SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, i); + if (pCol == NULL) { + continue; + } if (IS_VAR_DATA_TYPE(pCol->info.type)) { size_t metaSize = pBlock->info.rows * sizeof(int32_t); @@ -965,6 +994,7 @@ int32_t blockDataFromBuf(SSDataBlock* pBlock, const char* buf) { if (tmp == NULL) { return TSDB_CODE_OUT_OF_MEMORY; } + pCol->varmeta.offset = (int32_t*)tmp; memcpy(pCol->varmeta.offset, pStart, metaSize); pStart += metaSize; @@ -1039,6 +1069,10 @@ int32_t blockDataFromBuf1(SSDataBlock* pBlock, const char* buf, size_t capacity) for (int32_t i = 0; i < numOfCols; ++i) { SColumnInfoData* pCol = 
taosArrayGet(pBlock->pDataBlock, i); + if (pCol == NULL) { + continue; + } + pCol->hasNull = true; if (IS_VAR_DATA_TYPE(pCol->info.type)) { @@ -1087,6 +1121,10 @@ size_t blockDataGetRowSize(SSDataBlock* pBlock) { size_t numOfCols = taosArrayGetSize(pBlock->pDataBlock); for (int32_t i = 0; i < numOfCols; ++i) { SColumnInfoData* pColInfo = taosArrayGet(pBlock->pDataBlock, i); + if (pColInfo == NULL) { + continue; + } + rowSize += pColInfo->info.bytes; } @@ -1114,8 +1152,11 @@ double blockDataGetSerialRowSize(const SSDataBlock* pBlock) { size_t numOfCols = taosArrayGetSize(pBlock->pDataBlock); for (int32_t i = 0; i < numOfCols; ++i) { SColumnInfoData* pColInfo = taosArrayGet(pBlock->pDataBlock, i); - rowSize += pColInfo->info.bytes; + if (pColInfo == NULL) { + continue; + } + rowSize += pColInfo->info.bytes; if (IS_VAR_DATA_TYPE(pColInfo->info.type)) { rowSize += sizeof(int32_t); } else { @@ -1193,6 +1234,9 @@ static int32_t blockDataAssign(SColumnInfoData* pCols, const SSDataBlock* pDataB for (int32_t i = 0; i < numOfCols; ++i) { SColumnInfoData* pDst = &pCols[i]; SColumnInfoData* pSrc = taosArrayGet(pDataBlock->pDataBlock, i); + if (pSrc == NULL) { + continue; + } if (IS_VAR_DATA_TYPE(pSrc->info.type)) { if (pSrc->varmeta.length != 0) { @@ -1228,8 +1272,11 @@ static SColumnInfoData* createHelpColInfoData(const SSDataBlock* pDataBlock) { for (int32_t i = 0; i < numOfCols; ++i) { SColumnInfoData* pColInfoData = taosArrayGet(pDataBlock->pDataBlock, i); - pCols[i].info = pColInfoData->info; + if (pColInfoData == NULL) { + continue; + } + pCols[i].info = pColInfoData->info; if (IS_VAR_DATA_TYPE(pCols[i].info.type)) { pCols[i].varmeta.offset = taosMemoryCalloc(rows, sizeof(int32_t)); pCols[i].pData = taosMemoryCalloc(1, pColInfoData->varmeta.length); @@ -1256,8 +1303,11 @@ static void copyBackToBlock(SSDataBlock* pDataBlock, SColumnInfoData* pCols) { for (int32_t i = 0; i < numOfCols; ++i) { SColumnInfoData* pColInfoData = taosArrayGet(pDataBlock->pDataBlock, i); - 
pColInfoData->info = pCols[i].info; + if (pColInfoData == NULL) { + continue; + } + pColInfoData->info = pCols[i].info; if (IS_VAR_DATA_TYPE(pColInfoData->info.type)) { taosMemoryFreeClear(pColInfoData->varmeta.offset); pColInfoData->varmeta = pCols[i].varmeta; @@ -1301,8 +1351,15 @@ int32_t blockDataSort(SSDataBlock* pDataBlock, SArray* pOrderInfo) { for (int32_t i = 0; i < taosArrayGetSize(pOrderInfo); ++i) { SBlockOrderInfo* pInfo = taosArrayGet(pOrderInfo, i); + if (pInfo == NULL) { + continue; + } SColumnInfoData* pColInfoData = taosArrayGet(pDataBlock->pDataBlock, pInfo->slotId); + if (pColInfoData == NULL) { + continue; + } + if (pColInfoData->hasNull) { sortColumnHasNull = true; } @@ -1319,6 +1376,9 @@ int32_t blockDataSort(SSDataBlock* pDataBlock, SArray* pOrderInfo) { if (!varTypeSort) { SColumnInfoData* pColInfoData = taosArrayGet(pDataBlock->pDataBlock, 0); SBlockOrderInfo* pOrder = taosArrayGet(pOrderInfo, 0); + if (pColInfoData == NULL || pOrder == NULL) { + return errno; + } int64_t p0 = taosGetTimestampUs(); @@ -1346,7 +1406,14 @@ int32_t blockDataSort(SSDataBlock* pDataBlock, SArray* pOrderInfo) { SSDataBlockSortHelper helper = {.pDataBlock = pDataBlock, .orderInfo = pOrderInfo}; for (int32_t i = 0; i < taosArrayGetSize(helper.orderInfo); ++i) { struct SBlockOrderInfo* pInfo = taosArrayGet(helper.orderInfo, i); + if (pInfo == NULL) { + continue; + } + pInfo->pColData = taosArrayGet(pDataBlock->pDataBlock, pInfo->slotId); + if (pInfo->pColData == NULL) { + continue; + } pInfo->compFn = getKeyComparFunc(pInfo->pColData->info.type, pInfo->order); } @@ -1399,6 +1466,10 @@ void blockDataEmpty(SSDataBlock* pDataBlock) { size_t numOfCols = taosArrayGetSize(pDataBlock->pDataBlock); for (int32_t i = 0; i < numOfCols; ++i) { SColumnInfoData* p = taosArrayGet(pDataBlock->pDataBlock, i); + if (p == NULL) { + continue; + } + colInfoDataCleanup(p, pInfo->capacity); } @@ -1417,6 +1488,10 @@ void blockDataReset(SSDataBlock* pDataBlock) { size_t numOfCols = 
taosArrayGetSize(pDataBlock->pDataBlock); for (int32_t i = 0; i < numOfCols; ++i) { SColumnInfoData* p = taosArrayGet(pDataBlock->pDataBlock, i); + if (p == NULL) { + continue; + } + p->hasNull = false; p->reassigned = false; if (IS_VAR_DATA_TYPE(p->info.type)) { @@ -1527,6 +1602,10 @@ int32_t blockDataEnsureCapacity(SSDataBlock* pDataBlock, uint32_t numOfRows) { size_t numOfCols = taosArrayGetSize(pDataBlock->pDataBlock); for (int32_t i = 0; i < numOfCols; ++i) { SColumnInfoData* p = taosArrayGet(pDataBlock->pDataBlock, i); + if (p == NULL) { + return terrno; + } + code = doEnsureCapacity(p, &pDataBlock->info, numOfRows, false); if (code) { return code; @@ -1544,6 +1623,10 @@ void blockDataFreeRes(SSDataBlock* pBlock) { int32_t numOfOutput = taosArrayGetSize(pBlock->pDataBlock); for (int32_t i = 0; i < numOfOutput; ++i) { SColumnInfoData* pColInfoData = (SColumnInfoData*)taosArrayGet(pBlock->pDataBlock, i); + if (pColInfoData == NULL) { + continue; + } + colDataDestroy(pColInfoData); } @@ -1579,6 +1662,10 @@ int32_t assignOneDataBlock(SSDataBlock* dst, const SSDataBlock* src) { size_t numOfCols = taosArrayGetSize(src->pDataBlock); for (int32_t i = 0; i < numOfCols; ++i) { SColumnInfoData* p = taosArrayGet(src->pDataBlock, i); + if (p == NULL) { + return terrno; + } + SColumnInfoData colInfo = {.hasNull = true, .info = p->info}; code = blockDataAppendColInfo(dst, &colInfo); if (code) { @@ -1594,7 +1681,7 @@ int32_t assignOneDataBlock(SSDataBlock* dst, const SSDataBlock* src) { for (int32_t i = 0; i < numOfCols; ++i) { SColumnInfoData* pDst = taosArrayGet(dst->pDataBlock, i); SColumnInfoData* pSrc = taosArrayGet(src->pDataBlock, i); - if (pSrc->pData == NULL && (!IS_VAR_DATA_TYPE(pSrc->info.type))) { + if (pSrc == NULL || pDst == NULL || (pSrc->pData == NULL && (!IS_VAR_DATA_TYPE(pSrc->info.type)))) { continue; } @@ -1622,6 +1709,10 @@ int32_t copyDataBlock(SSDataBlock* pDst, const SSDataBlock* pSrc) { for (int32_t i = 0; i < numOfCols; ++i) { SColumnInfoData* 
pDstCol = taosArrayGet(pDst->pDataBlock, i); SColumnInfoData* pSrcCol = taosArrayGet(pSrc->pDataBlock, i); + if (pDstCol == NULL || pSrcCol == NULL) { + continue; + } + int32_t ret = colDataAssign(pDstCol, pSrcCol, pSrc->info.rows, &pSrc->info); if (ret < 0) { code = ret; @@ -3149,15 +3240,26 @@ int32_t blockDataGetSortedRows(SSDataBlock* pDataBlock, SArray* pOrderInfo) { if (!pDataBlock || !pOrderInfo) return 0; for (int32_t i = 0; i < taosArrayGetSize(pOrderInfo); ++i) { SBlockOrderInfo* pOrder = taosArrayGet(pOrderInfo, i); + if (pOrder == NULL) { + continue; + } + pOrder->pColData = taosArrayGet(pDataBlock->pDataBlock, pOrder->slotId); + if (pOrder->pColData == NULL) { + continue; + } + pOrder->compFn = getKeyComparFunc(pOrder->pColData->info.type, pOrder->order); } + SSDataBlockSortHelper sortHelper = {.orderInfo = pOrderInfo, .pDataBlock = pDataBlock}; - int32_t rowIdx = 0, nextRowIdx = 1; + + int32_t rowIdx = 0, nextRowIdx = 1; for (; rowIdx < pDataBlock->info.rows && nextRowIdx < pDataBlock->info.rows; ++rowIdx, ++nextRowIdx) { if (dataBlockCompar(&nextRowIdx, &rowIdx, &sortHelper) < 0) { break; } } + return nextRowIdx; } From 0bcfe11e84d57e4d13a14548237f7c7fb987a73b Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Mon, 5 Aug 2024 11:55:36 +0800 Subject: [PATCH 58/60] refactor remve backend code --- include/libs/stream/tstream.h | 2 +- source/libs/stream/inc/streamBackendRocksdb.h | 3 +++ source/libs/stream/src/streamBackendRocksdb.c | 17 ++++++++++++-- source/libs/stream/src/streamTask.c | 22 ++++++++++--------- source/libs/stream/src/streamTaskSm.c | 2 +- 5 files changed, 32 insertions(+), 14 deletions(-) diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index 90cb06ff42..9c59e3f3ec 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -534,7 +534,7 @@ void tFreeStreamTask(SStreamTask* pTask); int32_t tEncodeStreamTask(SEncoder* pEncoder, const SStreamTask* pTask); int32_t tDecodeStreamTask(SDecoder* 
pDecoder, SStreamTask* pTask); int32_t streamTaskInit(SStreamTask* pTask, SStreamMeta* pMeta, SMsgCb* pMsgCb, int64_t ver); -void streamFreeTaskState(SStreamTask* pTask, ETaskStatus status); +void streamFreeTaskState(SStreamTask* pTask, int8_t remove); int32_t tDecodeStreamTaskChkInfo(SDecoder* pDecoder, SCheckpointInfo* pChkpInfo); int32_t tDecodeStreamTaskId(SDecoder* pDecoder, STaskId* pTaskId); diff --git a/source/libs/stream/inc/streamBackendRocksdb.h b/source/libs/stream/inc/streamBackendRocksdb.h index 0f158591b4..3bb4532db3 100644 --- a/source/libs/stream/inc/streamBackendRocksdb.h +++ b/source/libs/stream/inc/streamBackendRocksdb.h @@ -81,6 +81,7 @@ typedef struct { int64_t dataWritten; void* pMeta; + int8_t removeAllFiles; } STaskDbWrapper; @@ -152,6 +153,8 @@ void taskDbUpdateChkpId(void* pTaskDb, int64_t chkpId); void* taskDbAddRef(void* pTaskDb); void taskDbRemoveRef(void* pTaskDb); +void taskDbRemoveAllFiles(void* pTaskDb); + int streamStateOpenBackend(void* backend, SStreamState* pState); void streamStateCloseBackend(SStreamState* pState, bool remove); void streamStateDestroyCompar(void* arg); diff --git a/source/libs/stream/src/streamBackendRocksdb.c b/source/libs/stream/src/streamBackendRocksdb.c index 7396c6b7c6..8498c9118a 100644 --- a/source/libs/stream/src/streamBackendRocksdb.c +++ b/source/libs/stream/src/streamBackendRocksdb.c @@ -2331,6 +2331,15 @@ void taskDbRemoveRef(void* pTaskDb) { (void)taosReleaseRef(taskDbWrapperId, pBackend->refId); } +void taskDbRemoveAllFiles(void* pTaskDb) { + if (pTaskDb == NULL) { + return; + } + + STaskDbWrapper* pBackend = pTaskDb; + atomic_store_8(&pBackend->removeAllFiles, 1); +} + void taskDbInitOpt(STaskDbWrapper* pTaskDb) { rocksdb_env_t* env = rocksdb_create_default_env(); @@ -2573,8 +2582,7 @@ void taskDbDestroy(void* pDb, bool flush) { stDebug("succ to destroy stream backend:%p", wrapper); int8_t nCf = tListLen(ginitDict); - - if (flush) { + if (flush && wrapper->removeAllFiles == 0) { if (wrapper->db 
&& wrapper->pCf) { rocksdb_flushoptions_t* flushOpt = rocksdb_flushoptions_create(); rocksdb_flushoptions_set_wait(flushOpt, 1); @@ -2636,6 +2644,11 @@ void taskDbDestroy(void* pDb, bool flush) { taskDbDestroyChkpOpt(wrapper); taosMemoryFree(wrapper->idstr); + + if (wrapper->removeAllFiles) { + char* err = NULL; + taosRemoveDir(wrapper->path); + } taosMemoryFree(wrapper->path); taosMemoryFree(wrapper); diff --git a/source/libs/stream/src/streamTask.c b/source/libs/stream/src/streamTask.c index f07fd81953..90167e446e 100644 --- a/source/libs/stream/src/streamTask.c +++ b/source/libs/stream/src/streamTask.c @@ -275,7 +275,7 @@ void tFreeStreamTask(SStreamTask* pTask) { } streamTaskCleanupCheckInfo(&pTask->taskCheckInfo); - streamFreeTaskState(pTask, status1); + streamFreeTaskState(pTask, pTask->status.removeBackendFiles ? 1 : 0); if (pTask->pNameMap) { tSimpleHashCleanup(pTask->pNameMap); @@ -296,14 +296,14 @@ void tFreeStreamTask(SStreamTask* pTask) { taosArrayDestroy(pTask->outputInfo.pNodeEpsetUpdateList); pTask->outputInfo.pNodeEpsetUpdateList = NULL; - if ((pTask->status.removeBackendFiles) && (pTask->pMeta != NULL)) { - char* path = taosMemoryCalloc(1, strlen(pTask->pMeta->path) + 128); - sprintf(path, "%s%s%s", pTask->pMeta->path, TD_DIRSEP, pTask->id.idStr); - taosRemoveDir(path); + // if ((pTask->status.removeBackendFiles) && (pTask->pMeta != NULL)) { + // char* path = taosMemoryCalloc(1, strlen(pTask->pMeta->path) + 128); + // sprintf(path, "%s%s%s", pTask->pMeta->path, TD_DIRSEP, pTask->id.idStr); + // taosRemoveDir(path); - stInfo("s-task:0x%x vgId:%d remove all backend files:%s", taskId, pTask->pMeta->vgId, path); - taosMemoryFree(path); - } + // stInfo("s-task:0x%x vgId:%d remove all backend files:%s", taskId, pTask->pMeta->vgId, path); + // taosMemoryFree(path); + // } if (pTask->id.idStr != NULL) { taosMemoryFree((void*)pTask->id.idStr); @@ -316,10 +316,12 @@ void tFreeStreamTask(SStreamTask* pTask) { stDebug("s-task:0x%x free task completed", 
taskId); } -void streamFreeTaskState(SStreamTask* pTask, ETaskStatus status) { +void streamFreeTaskState(SStreamTask* pTask, int8_t remove) { if (pTask->pState != NULL) { stDebug("s-task:0x%x start to free task state", pTask->id.taskId); - streamStateClose(pTask->pState, status == TASK_STATUS__DROPPING); + streamStateClose(pTask->pState, remove); + + taskDbRemoveAllFiles(pTask->pBackend); taskDbRemoveRef(pTask->pBackend); pTask->pBackend = NULL; pTask->pState = NULL; diff --git a/source/libs/stream/src/streamTaskSm.c b/source/libs/stream/src/streamTaskSm.c index d3c39da6bd..04969c2b48 100644 --- a/source/libs/stream/src/streamTaskSm.c +++ b/source/libs/stream/src/streamTaskSm.c @@ -98,7 +98,7 @@ static int32_t attachWaitedEvent(SStreamTask* pTask, SFutureHandleEventInfo* pEv static int32_t stopTaskSuccFn(SStreamTask* pTask) { SStreamTaskSM* pSM = pTask->status.pSM; - streamFreeTaskState(pTask, pSM->current.state); + streamFreeTaskState(pTask,pSM->current.state == TASK_STATUS__DROPPING ? 1 : 0); return TSDB_CODE_SUCCESS; } From 40537001a2bab42ca1bf529ce1e3b10e647a0df3 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 5 Aug 2024 11:57:18 +0800 Subject: [PATCH 59/60] fix(stream): check return value. 
--- source/libs/executor/inc/executil.h | 9 ++-- source/libs/executor/src/aggregateoperator.c | 30 +++++------ source/libs/executor/src/cachescanoperator.c | 4 +- .../libs/executor/src/countwindowoperator.c | 10 +++- .../libs/executor/src/eventwindowoperator.c | 10 +++- source/libs/executor/src/executil.c | 17 +++++-- source/libs/executor/src/filloperator.c | 19 ++++--- source/libs/executor/src/groupoperator.c | 51 ++++++++++--------- source/libs/executor/src/projectoperator.c | 30 +++++++---- source/libs/executor/src/scanoperator.c | 22 +++++--- source/libs/executor/src/sortoperator.c | 10 +++- .../executor/src/streamcountwindowoperator.c | 10 +++- .../executor/src/streameventwindowoperator.c | 10 +++- source/libs/executor/src/streamfilloperator.c | 17 +++++-- .../executor/src/streamtimewindowoperator.c | 42 ++++++++++++--- source/libs/executor/src/sysscanoperator.c | 5 +- source/libs/executor/src/timesliceoperator.c | 10 +++- source/libs/executor/src/timewindowoperator.c | 38 +++++++++++--- 18 files changed, 234 insertions(+), 110 deletions(-) diff --git a/source/libs/executor/inc/executil.h b/source/libs/executor/inc/executil.h index f3ceb33f64..2adc863baf 100644 --- a/source/libs/executor/inc/executil.h +++ b/source/libs/executor/inc/executil.h @@ -174,9 +174,9 @@ SArray* makeColumnArrayFromList(SNodeList* pNodeList); int32_t extractColMatchInfo(SNodeList* pNodeList, SDataBlockDescNode* pOutputNodeList, int32_t* numOfOutputCols, int32_t type, SColMatchInfo* pMatchInfo); -int32_t createExprFromOneNode(SExprInfo* pExp, SNode* pNode, int16_t slotId); -int32_t createExprFromTargetNode(SExprInfo* pExp, STargetNode* pTargetNode); -SExprInfo* createExprInfo(SNodeList* pNodeList, SNodeList* pGroupKeys, int32_t* numOfExprs); +int32_t createExprFromOneNode(SExprInfo* pExp, SNode* pNode, int16_t slotId); +int32_t createExprFromTargetNode(SExprInfo* pExp, STargetNode* pTargetNode); +int32_t createExprInfo(SNodeList* pNodeList, SNodeList* pGroupKeys, SExprInfo** pExprInfo, 
int32_t* numOfExprs); SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput, int32_t** rowEntryInfoOffset, SFunctionStateStore* pStore); @@ -197,9 +197,6 @@ char* getStreamOpName(uint16_t opType); void printDataBlock(SSDataBlock* pBlock, const char* flag, const char* taskIdStr); void printSpecDataBlock(SSDataBlock* pBlock, const char* flag, const char* opStr, const char* taskIdStr); -void getNextTimeWindow(const SInterval* pInterval, STimeWindow* tw, int32_t order); -void getInitialStartTimeWindow(SInterval* pInterval, TSKEY ts, STimeWindow* w, bool ascQuery); - TSKEY getStartTsKey(STimeWindow* win, const TSKEY* tsCols); void updateTimeWindowInfo(SColumnInfoData* pColData, STimeWindow* pWin, int64_t delta); diff --git a/source/libs/executor/src/aggregateoperator.c b/source/libs/executor/src/aggregateoperator.c index 7e105d2260..7c63120fcf 100644 --- a/source/libs/executor/src/aggregateoperator.c +++ b/source/libs/executor/src/aggregateoperator.c @@ -73,7 +73,13 @@ int32_t createAggregateOperatorInfo(SOperatorInfo* downstream, SAggPhysiNode* pA SOperatorInfo** pOptrInfo) { QRY_OPTR_CHECK(pOptrInfo); - int32_t code = 0; + int32_t lino = 0; + int32_t code = 0; + int32_t num = 0; + SExprInfo* pExprInfo = NULL; + int32_t numOfScalarExpr = 0; + SExprInfo* pScalarExprInfo = NULL; + SAggOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SAggOperatorInfo)); SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pInfo == NULL || pOperator == NULL) { @@ -89,29 +95,23 @@ int32_t createAggregateOperatorInfo(SOperatorInfo* downstream, SAggPhysiNode* pA size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES; initResultSizeInfo(&pOperator->resultInfo, 4096); - int32_t num = 0; - SExprInfo* pExprInfo = createExprInfo(pAggNode->pAggFuncs, pAggNode->pGroupKeys, &num); + code = createExprInfo(pAggNode->pAggFuncs, pAggNode->pGroupKeys, &pExprInfo, &num); + TSDB_CHECK_CODE(code, lino, _error); + code = 
initAggSup(&pOperator->exprSupp, &pInfo->aggSup, pExprInfo, num, keyBufSize, pTaskInfo->id.str, pTaskInfo->streamInfo.pState, &pTaskInfo->storageAPI.functionStore); - if (code != TSDB_CODE_SUCCESS) { - goto _error; - } + TSDB_CHECK_CODE(code, lino, _error); - int32_t numOfScalarExpr = 0; - SExprInfo* pScalarExprInfo = NULL; if (pAggNode->pExprs != NULL) { - pScalarExprInfo = createExprInfo(pAggNode->pExprs, NULL, &numOfScalarExpr); + code = createExprInfo(pAggNode->pExprs, NULL, &pScalarExprInfo, &numOfScalarExpr); + TSDB_CHECK_CODE(code, lino, _error); } code = initExprSupp(&pInfo->scalarExprSup, pScalarExprInfo, numOfScalarExpr, &pTaskInfo->storageAPI.functionStore); - if (code != TSDB_CODE_SUCCESS) { - goto _error; - } + TSDB_CHECK_CODE(code, lino, _error); code = filterInitFromNode((SNode*)pAggNode->node.pConditions, &pOperator->exprSupp.pFilterInfo, 0); - if (code != TSDB_CODE_SUCCESS) { - goto _error; - } + TSDB_CHECK_CODE(code, lino, _error); pInfo->binfo.mergeResultBlock = pAggNode->mergeDataBlock; pInfo->groupKeyOptimized = pAggNode->groupKeyOptimized; diff --git a/source/libs/executor/src/cachescanoperator.c b/source/libs/executor/src/cachescanoperator.c index 9d49c8e9ca..81d55ec092 100644 --- a/source/libs/executor/src/cachescanoperator.c +++ b/source/libs/executor/src/cachescanoperator.c @@ -200,7 +200,9 @@ int32_t createCacherowsScanOperator(SLastRowScanPhysiNode* pScanNode, SReadHandl if (pScanNode->scan.pScanPseudoCols != NULL) { SExprSupp* p = &pInfo->pseudoExprSup; - p->pExprInfo = createExprInfo(pScanNode->scan.pScanPseudoCols, NULL, &p->numOfExprs); + code = createExprInfo(pScanNode->scan.pScanPseudoCols, NULL, &p->pExprInfo, &p->numOfExprs); + TSDB_CHECK_CODE(code, lino, _error); + p->pCtx = createSqlFunctionCtx(p->pExprInfo, p->numOfExprs, &p->rowEntryInfoOffset, &pTaskInfo->storageAPI.functionStore); } diff --git a/source/libs/executor/src/countwindowoperator.c b/source/libs/executor/src/countwindowoperator.c index b7aa57e4b1..8d2ad4cbad 
100644 --- a/source/libs/executor/src/countwindowoperator.c +++ b/source/libs/executor/src/countwindowoperator.c @@ -256,14 +256,19 @@ int32_t createCountwindowOperatorInfo(SOperatorInfo* downstream, SPhysiNode* phy if (pCountWindowNode->window.pExprs != NULL) { int32_t numOfScalarExpr = 0; - SExprInfo* pScalarExprInfo = createExprInfo(pCountWindowNode->window.pExprs, NULL, &numOfScalarExpr); + SExprInfo* pScalarExprInfo = NULL; + code = createExprInfo(pCountWindowNode->window.pExprs, NULL, &pScalarExprInfo, &numOfScalarExpr); + QUERY_CHECK_CODE(code, lino, _error); code = initExprSupp(&pInfo->scalarSup, pScalarExprInfo, numOfScalarExpr, &pTaskInfo->storageAPI.functionStore); QUERY_CHECK_CODE(code, lino, _error); } size_t keyBufSize = 0; int32_t num = 0; - SExprInfo* pExprInfo = createExprInfo(pCountWindowNode->window.pFuncs, NULL, &num); + SExprInfo* pExprInfo = NULL; + code = createExprInfo(pCountWindowNode->window.pFuncs, NULL, &pExprInfo, &num); + QUERY_CHECK_CODE(code, lino, _error); + initResultSizeInfo(&pOperator->resultInfo, 4096); code = initAggSup(&pOperator->exprSupp, &pInfo->aggSup, pExprInfo, num, keyBufSize, pTaskInfo->id.str, @@ -286,6 +291,7 @@ int32_t createCountwindowOperatorInfo(SOperatorInfo* downstream, SPhysiNode* phy if (pInfo->windowCount != pInfo->windowSliding) { numOfItem = pInfo->windowCount / pInfo->windowSliding + 1; } + pInfo->countSup.pWinStates = taosArrayInit_s(itemSize, numOfItem); if (!pInfo->countSup.pWinStates) { goto _error; diff --git a/source/libs/executor/src/eventwindowoperator.c b/source/libs/executor/src/eventwindowoperator.c index 629afbbb8e..6a39cac525 100644 --- a/source/libs/executor/src/eventwindowoperator.c +++ b/source/libs/executor/src/eventwindowoperator.c @@ -84,7 +84,10 @@ int32_t createEventwindowOperatorInfo(SOperatorInfo* downstream, SPhysiNode* phy if (pEventWindowNode->window.pExprs != NULL) { int32_t numOfScalarExpr = 0; - SExprInfo* pScalarExprInfo = createExprInfo(pEventWindowNode->window.pExprs, NULL, 
&numOfScalarExpr); + SExprInfo* pScalarExprInfo = NULL; + + code = createExprInfo(pEventWindowNode->window.pExprs, NULL, &pScalarExprInfo, &numOfScalarExpr); + QUERY_CHECK_CODE(code, lino, _error); code = initExprSupp(&pInfo->scalarSup, pScalarExprInfo, numOfScalarExpr, &pTaskInfo->storageAPI.functionStore); QUERY_CHECK_CODE(code, lino, _error); } @@ -95,7 +98,10 @@ int32_t createEventwindowOperatorInfo(SOperatorInfo* downstream, SPhysiNode* phy size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES; int32_t num = 0; - SExprInfo* pExprInfo = createExprInfo(pEventWindowNode->window.pFuncs, NULL, &num); + SExprInfo* pExprInfo = NULL; + code = createExprInfo(pEventWindowNode->window.pFuncs, NULL, &pExprInfo, &num); + QUERY_CHECK_CODE(code, lino, _error); + initResultSizeInfo(&pOperator->resultInfo, 4096); code = initAggSup(&pOperator->exprSupp, &pInfo->aggSup, pExprInfo, num, keyBufSize, pTaskInfo->id.str, diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index 5957d08a18..cdc8cc6dd5 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -1822,7 +1822,10 @@ SExprInfo* createExpr(SNodeList* pNodeList, int32_t* numOfExprs) { return pExprs; } -SExprInfo* createExprInfo(SNodeList* pNodeList, SNodeList* pGroupKeys, int32_t* numOfExprs) { +int32_t createExprInfo(SNodeList* pNodeList, SNodeList* pGroupKeys, SExprInfo** pExprInfo, int32_t* numOfExprs) { + QRY_OPTR_CHECK(pExprInfo); + + int32_t code = 0; int32_t numOfFuncs = LIST_LENGTH(pNodeList); int32_t numOfGroupKeys = 0; if (pGroupKeys != NULL) { @@ -1831,10 +1834,13 @@ SExprInfo* createExprInfo(SNodeList* pNodeList, SNodeList* pGroupKeys, int32_t* *numOfExprs = numOfFuncs + numOfGroupKeys; if (*numOfExprs == 0) { - return NULL; + return code; } SExprInfo* pExprs = taosMemoryCalloc(*numOfExprs, sizeof(SExprInfo)); + if (pExprs == NULL) { + return terrno; + } for (int32_t i = 0; i < (*numOfExprs); ++i) { STargetNode* pTargetNode = 
NULL; @@ -1845,15 +1851,16 @@ SExprInfo* createExprInfo(SNodeList* pNodeList, SNodeList* pGroupKeys, int32_t* } SExprInfo* pExp = &pExprs[i]; - int32_t code = createExprFromTargetNode(pExp, pTargetNode); + code = createExprFromTargetNode(pExp, pTargetNode); if (code != TSDB_CODE_SUCCESS) { taosMemoryFreeClear(pExprs); qError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code)); - return NULL; + return code; } } - return pExprs; + *pExprInfo = pExprs; + return code; } // set the output buffer for the selectivity + tag query diff --git a/source/libs/executor/src/filloperator.c b/source/libs/executor/src/filloperator.c index c4ef74608a..d88e09273f 100644 --- a/source/libs/executor/src/filloperator.c +++ b/source/libs/executor/src/filloperator.c @@ -455,6 +455,7 @@ int32_t createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode* pPhyFi SExecTaskInfo* pTaskInfo, SOperatorInfo** pOptrInfo) { QRY_OPTR_CHECK(pOptrInfo); int32_t code = 0; + int32_t lino = 0; SFillOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SFillOperatorInfo)); SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); @@ -464,21 +465,23 @@ int32_t createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode* pPhyFi } pInfo->pRes = createDataBlockFromDescNode(pPhyFillNode->node.pOutputDataBlockDesc); - SExprInfo* pExprInfo = createExprInfo(pPhyFillNode->pFillExprs, NULL, &pInfo->numOfExpr); + SExprInfo* pExprInfo = NULL; + + code = createExprInfo(pPhyFillNode->pFillExprs, NULL, &pExprInfo, &pInfo->numOfExpr); + QUERY_CHECK_CODE(code, lino, _error); + pOperator->exprSupp.pExprInfo = pExprInfo; SExprSupp* pNoFillSupp = &pInfo->noFillExprSupp; - pNoFillSupp->pExprInfo = createExprInfo(pPhyFillNode->pNotFillExprs, NULL, &pNoFillSupp->numOfExprs); + code = createExprInfo(pPhyFillNode->pNotFillExprs, NULL, &pNoFillSupp->pExprInfo, &pNoFillSupp->numOfExprs); + QUERY_CHECK_CODE(code, lino, _error); + code = createPrimaryTsExprIfNeeded(pInfo, pPhyFillNode, pNoFillSupp, 
pTaskInfo->id.str); - if (code != TSDB_CODE_SUCCESS) { - goto _error; - } + QUERY_CHECK_CODE(code, lino, _error); code = initExprSupp(pNoFillSupp, pNoFillSupp->pExprInfo, pNoFillSupp->numOfExprs, &pTaskInfo->storageAPI.functionStore); - if (code != TSDB_CODE_SUCCESS) { - goto _error; - } + QUERY_CHECK_CODE(code, lino, _error); SInterval* pInterval = QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL == downstream->operatorType diff --git a/source/libs/executor/src/groupoperator.c b/source/libs/executor/src/groupoperator.c index d88aef8fb7..43b2f5ab6d 100644 --- a/source/libs/executor/src/groupoperator.c +++ b/source/libs/executor/src/groupoperator.c @@ -560,7 +560,8 @@ int32_t createGroupOperatorInfo(SOperatorInfo* downstream, SAggPhysiNode* pAggNo int32_t numOfScalarExpr = 0; SExprInfo* pScalarExprInfo = NULL; if (pAggNode->pExprs != NULL) { - pScalarExprInfo = createExprInfo(pAggNode->pExprs, NULL, &numOfScalarExpr); + code = createExprInfo(pAggNode->pExprs, NULL, &pScalarExprInfo, &numOfScalarExpr); + QUERY_CHECK_CODE(code, lino, _error); } pInfo->pGroupCols = NULL; @@ -578,7 +579,11 @@ int32_t createGroupOperatorInfo(SOperatorInfo* downstream, SAggPhysiNode* pAggNo QUERY_CHECK_CODE(code, lino, _error); int32_t num = 0; - SExprInfo* pExprInfo = createExprInfo(pAggNode->pAggFuncs, pAggNode->pGroupKeys, &num); + SExprInfo* pExprInfo = NULL; + + code = createExprInfo(pAggNode->pAggFuncs, pAggNode->pGroupKeys, &pExprInfo, &num); + QUERY_CHECK_CODE(code, lino, _error); + code = initAggSup(&pOperator->exprSupp, &pInfo->aggSup, pExprInfo, num, pInfo->groupKeyLen, pTaskInfo->id.str, pTaskInfo->streamInfo.pState, &pTaskInfo->storageAPI.functionStore); QUERY_CHECK_CODE(code, lino, _error); @@ -1125,42 +1130,42 @@ int32_t createPartitionOperatorInfo(SOperatorInfo* downstream, SPartitionPhysiNo SPartitionOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SPartitionOperatorInfo)); SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pInfo == NULL || 
pOperator == NULL) { - pTaskInfo->code = code = TSDB_CODE_OUT_OF_MEMORY; + pTaskInfo->code = code = terrno; goto _error; } int32_t numOfCols = 0; - SExprInfo* pExprInfo = createExprInfo(pPartNode->pTargets, NULL, &numOfCols); + SExprInfo* pExprInfo = NULL; + + code = createExprInfo(pPartNode->pTargets, NULL, &pExprInfo, &numOfCols); + pInfo->pGroupCols = makeColumnArrayFromList(pPartNode->pPartitionKeys); if (pPartNode->needBlockOutputTsOrder) { SBlockOrderInfo order = {.order = ORDER_ASC, .pColData = NULL, .nullFirst = false, .slotId = pPartNode->tsSlotId}; pInfo->pOrderInfoArr = taosArrayInit(1, sizeof(SBlockOrderInfo)); if (!pInfo->pOrderInfoArr) { - terrno = TSDB_CODE_OUT_OF_MEMORY; pTaskInfo->code = terrno; goto _error; } + void* tmp = taosArrayPush(pInfo->pOrderInfoArr, &order); QUERY_CHECK_NULL(tmp, code, lino, _error, terrno); } if (pPartNode->pExprs != NULL) { int32_t num = 0; - SExprInfo* pExprInfo1 = createExprInfo(pPartNode->pExprs, NULL, &num); + SExprInfo* pExprInfo1 = NULL; + code = createExprInfo(pPartNode->pExprs, NULL, &pExprInfo1, &num); + QUERY_CHECK_CODE(code, lino, _error); + code = initExprSupp(&pInfo->scalarSup, pExprInfo1, num, &pTaskInfo->storageAPI.functionStore); - if (code != TSDB_CODE_SUCCESS) { - terrno = code; - pTaskInfo->code = terrno; - goto _error; - } + QUERY_CHECK_CODE(code, lino, _error); } _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); pInfo->pGroupSet = taosHashInit(100, hashFn, false, HASH_NO_LOCK); if (pInfo->pGroupSet == NULL) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - pTaskInfo->code = terrno; goto _error; } @@ -1170,22 +1175,17 @@ int32_t createPartitionOperatorInfo(SOperatorInfo* downstream, SPartitionPhysiNo pInfo->binfo.pRes = createDataBlockFromDescNode(pPartNode->node.pOutputDataBlockDesc); code = getBufferPgSize(pInfo->binfo.pRes->info.rowSize, &defaultPgsz, &defaultBufsz); if (code != TSDB_CODE_SUCCESS) { - terrno = code; - pTaskInfo->code = code; goto _error; } if (!osTempSpaceAvailable()) { 
terrno = TSDB_CODE_NO_DISKSPACE; - pTaskInfo->code = terrno; qError("Create partition operator info failed since %s, tempDir:%s", terrstr(), tsTempDir); goto _error; } code = createDiskbasedBuf(&pInfo->pBuf, defaultPgsz, defaultBufsz, pTaskInfo->id.str, tsTempDir); if (code != TSDB_CODE_SUCCESS) { - terrno = code; - pTaskInfo->code = code; goto _error; } @@ -1195,8 +1195,6 @@ int32_t createPartitionOperatorInfo(SOperatorInfo* downstream, SPartitionPhysiNo pInfo->columnOffset = setupColumnOffset(pInfo->binfo.pRes, pInfo->rowCapacity); code = initGroupOptrInfo(&pInfo->pGroupColVals, &pInfo->groupKeyLen, &pInfo->keyBuf, pInfo->pGroupCols); if (code != TSDB_CODE_SUCCESS) { - terrno = code; - pTaskInfo->code = code; goto _error; } @@ -1210,8 +1208,6 @@ int32_t createPartitionOperatorInfo(SOperatorInfo* downstream, SPartitionPhysiNo code = appendDownstream(pOperator, &downstream, 1); if (code != TSDB_CODE_SUCCESS) { - terrno = code; - pTaskInfo->code = code; goto _error; } @@ -1224,7 +1220,7 @@ _error: } pTaskInfo->code = code; taosMemoryFreeClear(pOperator); - return code; + TAOS_RETURN(code); } int32_t setGroupResultOutputBuf(SOperatorInfo* pOperator, SOptrBasicInfo* binfo, int32_t numOfCols, char* pData, @@ -1663,7 +1659,10 @@ int32_t createStreamPartitionOperatorInfo(SOperatorInfo* downstream, SStreamPart if (pPartNode->part.pExprs != NULL) { int32_t num = 0; - SExprInfo* pCalExprInfo = createExprInfo(pPartNode->part.pExprs, NULL, &num); + SExprInfo* pCalExprInfo = NULL; + code = createExprInfo(pPartNode->part.pExprs, NULL, &pCalExprInfo, &num); + QUERY_CHECK_CODE(code, lino, _error); + code = initExprSupp(&pInfo->scalarSup, pCalExprInfo, num, &pTaskInfo->storageAPI.functionStore); QUERY_CHECK_CODE(code, lino, _error); } @@ -1724,7 +1723,9 @@ int32_t createStreamPartitionOperatorInfo(SOperatorInfo* downstream, SStreamPart QUERY_CHECK_CODE(code, lino, _error); int32_t numOfCols = 0; - SExprInfo* pExprInfo = createExprInfo(pPartNode->part.pTargets, NULL, &numOfCols); + 
SExprInfo* pExprInfo = NULL; + code = createExprInfo(pPartNode->part.pTargets, NULL, &pExprInfo, &numOfCols); + QUERY_CHECK_CODE(code, lino, _error); setOperatorInfo(pOperator, "StreamPartitionOperator", QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION, false, OP_NOT_OPENED, pInfo, pTaskInfo); diff --git a/source/libs/executor/src/projectoperator.c b/source/libs/executor/src/projectoperator.c index 295180652d..7185f74254 100644 --- a/source/libs/executor/src/projectoperator.c +++ b/source/libs/executor/src/projectoperator.c @@ -108,9 +108,13 @@ int32_t createProjectOperatorInfo(SOperatorInfo* downstream, SProjectPhysiNode* int32_t lino = 0; int32_t numOfCols = 0; - SExprInfo* pExprInfo = createExprInfo(pProjPhyNode->pProjections, NULL, &numOfCols); + SExprInfo* pExprInfo = NULL; + code = createExprInfo(pProjPhyNode->pProjections, NULL, &pExprInfo, &numOfCols); + TSDB_CHECK_CODE(code, lino, _error); SSDataBlock* pResBlock = createDataBlockFromDescNode(pProjPhyNode->node.pOutputDataBlockDesc); + TSDB_CHECK_NULL(pResBlock, code, lino, _error, terrno); + initLimitInfo(pProjPhyNode->node.pLimit, pProjPhyNode->node.pSlimit, &pInfo->limitInfo); pInfo->binfo.pRes = pResBlock; @@ -258,14 +262,13 @@ int32_t doProjectOperation(SOperatorInfo* pOperator, SSDataBlock** pResBlock) { SProjectOperatorInfo* pProjectInfo = pOperator->info; SOptrBasicInfo* pInfo = &pProjectInfo->binfo; - - SExprSupp* pSup = &pOperator->exprSupp; - SSDataBlock* pRes = pInfo->pRes; - SSDataBlock* pFinalRes = pProjectInfo->pFinalRes; - int32_t code = 0; - int64_t st = 0; - int32_t order = pInfo->inputTsOrder; - int32_t scanFlag = 0; + SExprSupp* pSup = &pOperator->exprSupp; + SSDataBlock* pRes = pInfo->pRes; + SSDataBlock* pFinalRes = pProjectInfo->pFinalRes; + int32_t code = 0; + int64_t st = 0; + int32_t order = pInfo->inputTsOrder; + int32_t scanFlag = 0; blockDataCleanup(pFinalRes); SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; @@ -465,11 +468,16 @@ int32_t 
createIndefinitOutputOperatorInfo(SOperatorInfo* downstream, SPhysiNode* SIndefRowsFuncPhysiNode* pPhyNode = (SIndefRowsFuncPhysiNode*)pNode; int32_t numOfExpr = 0; - SExprInfo* pExprInfo = createExprInfo(pPhyNode->pFuncs, NULL, &numOfExpr); + SExprInfo* pExprInfo = NULL; + code = createExprInfo(pPhyNode->pFuncs, NULL, &pExprInfo, &numOfExpr); + TSDB_CHECK_CODE(code, lino, _error); if (pPhyNode->pExprs != NULL) { int32_t num = 0; - SExprInfo* pSExpr = createExprInfo(pPhyNode->pExprs, NULL, &num); + SExprInfo* pSExpr = NULL; + code = createExprInfo(pPhyNode->pExprs, NULL, &pSExpr, &num); + QUERY_CHECK_CODE(code, lino, _error); + code = initExprSupp(&pInfo->scalarSup, pSExpr, num, &pTaskInfo->storageAPI.functionStore); if (code != TSDB_CODE_SUCCESS) { goto _error; diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index acc3de3447..d491ffb524 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -1338,7 +1338,10 @@ int32_t createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode, SReadHa if (pScanNode->pScanPseudoCols != NULL) { SExprSupp* pSup = &pInfo->base.pseudoSup; - pSup->pExprInfo = createExprInfo(pScanNode->pScanPseudoCols, NULL, &pSup->numOfExprs); + pSup->pExprInfo = NULL; + code = createExprInfo(pScanNode->pScanPseudoCols, NULL, &pSup->pExprInfo, &pSup->numOfExprs); + QUERY_CHECK_CODE(code, lino, _error); + pSup->pCtx = createSqlFunctionCtx(pSup->pExprInfo, pSup->numOfExprs, &pSup->rowEntryInfoOffset, &pTaskInfo->storageAPI.functionStore); } @@ -3981,13 +3984,12 @@ int32_t createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhysiNode* // create the pseduo columns info if (pTableScanNode->scan.pScanPseudoCols != NULL) { - pInfo->pPseudoExpr = createExprInfo(pTableScanNode->scan.pScanPseudoCols, NULL, &pInfo->numOfPseudoExpr); + code = createExprInfo(pTableScanNode->scan.pScanPseudoCols, NULL, &pInfo->pPseudoExpr, &pInfo->numOfPseudoExpr); + 
QUERY_CHECK_CODE(code, lino, _error); } code = filterInitFromNode((SNode*)pScanPhyNode->node.pConditions, &pOperator->exprSupp.pFilterInfo, 0); - if (code != TSDB_CODE_SUCCESS) { - goto _error; - } + QUERY_CHECK_CODE(code, lino, _error); pInfo->pRes = createDataBlockFromDescNode(pDescNode); code = createSpecialDataBlock(STREAM_CLEAR, &pInfo->pUpdateRes); @@ -4539,7 +4541,11 @@ int32_t createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysiNode* p SDataBlockDescNode* pDescNode = pPhyNode->node.pOutputDataBlockDesc; int32_t numOfExprs = 0; - SExprInfo* pExprInfo = createExprInfo(pPhyNode->pScanPseudoCols, NULL, &numOfExprs); + SExprInfo* pExprInfo = NULL; + + code = createExprInfo(pPhyNode->pScanPseudoCols, NULL, &pExprInfo, &numOfExprs); + QUERY_CHECK_CODE(code, lino, _error); + code = initExprSupp(&pOperator->exprSupp, pExprInfo, numOfExprs, &pTaskInfo->storageAPI.functionStore); QUERY_CHECK_CODE(code, lino, _error); @@ -5694,7 +5700,9 @@ int32_t createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanNode, SR if (pTableScanNode->scan.pScanPseudoCols != NULL) { SExprSupp* pSup = &pInfo->base.pseudoSup; - pSup->pExprInfo = createExprInfo(pTableScanNode->scan.pScanPseudoCols, NULL, &pSup->numOfExprs); + code = createExprInfo(pTableScanNode->scan.pScanPseudoCols, NULL, &pSup->pExprInfo, &pSup->numOfExprs); + QUERY_CHECK_CODE(code, lino, _error); + pSup->pCtx = createSqlFunctionCtx(pSup->pExprInfo, pSup->numOfExprs, &pSup->rowEntryInfoOffset, &pTaskInfo->storageAPI.functionStore); } diff --git a/source/libs/executor/src/sortoperator.c b/source/libs/executor/src/sortoperator.c index a0c56df49c..a08787d358 100644 --- a/source/libs/executor/src/sortoperator.c +++ b/source/libs/executor/src/sortoperator.c @@ -60,6 +60,8 @@ int32_t createSortOperatorInfo(SOperatorInfo* downstream, SSortPhysiNode* pSortN QRY_OPTR_CHECK(pOptrInfo); int32_t code = 0; + int32_t lino = 0; + SSortOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SSortOperatorInfo)); 
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pInfo == NULL || pOperator == NULL) { @@ -71,7 +73,9 @@ int32_t createSortOperatorInfo(SOperatorInfo* downstream, SSortPhysiNode* pSortN SDataBlockDescNode* pDescNode = pSortNode->node.pOutputDataBlockDesc; int32_t numOfCols = 0; - pOperator->exprSupp.pExprInfo = createExprInfo(pSortNode->pExprs, NULL, &numOfCols); + code = createExprInfo(pSortNode->pExprs, NULL, &pOperator->exprSupp.pExprInfo, &numOfCols); + QUERY_CHECK_CODE(code, lino, _error); + pOperator->exprSupp.numOfExprs = numOfCols; int32_t numOfOutputCols = 0; code = @@ -770,7 +774,9 @@ int32_t createGroupSortOperatorInfo(SOperatorInfo* downstream, SGroupSortPhysiNo SDataBlockDescNode* pDescNode = pSortPhyNode->node.pOutputDataBlockDesc; int32_t numOfCols = 0; - SExprInfo* pExprInfo = createExprInfo(pSortPhyNode->pExprs, NULL, &numOfCols); + SExprInfo* pExprInfo = NULL; + code = createExprInfo(pSortPhyNode->pExprs, NULL, &pExprInfo, &numOfCols); + QUERY_CHECK_CODE(code, lino, _error); pSup->pExprInfo = pExprInfo; pSup->numOfExprs = numOfCols; diff --git a/source/libs/executor/src/streamcountwindowoperator.c b/source/libs/executor/src/streamcountwindowoperator.c index 6adc60b79e..8ac73b44f6 100644 --- a/source/libs/executor/src/streamcountwindowoperator.c +++ b/source/libs/executor/src/streamcountwindowoperator.c @@ -823,13 +823,19 @@ int32_t createStreamCountAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* initResultSizeInfo(&pOperator->resultInfo, 4096); if (pCountNode->window.pExprs != NULL) { int32_t numOfScalar = 0; - SExprInfo* pScalarExprInfo = createExprInfo(pCountNode->window.pExprs, NULL, &numOfScalar); + SExprInfo* pScalarExprInfo = NULL; + code = createExprInfo(pCountNode->window.pExprs, NULL, &pScalarExprInfo, &numOfScalar); + QUERY_CHECK_CODE(code, lino, _error); + code = initExprSupp(&pInfo->scalarSupp, pScalarExprInfo, numOfScalar, &pTaskInfo->storageAPI.functionStore); QUERY_CHECK_CODE(code, lino, _error); } 
SExprSupp* pExpSup = &pOperator->exprSupp; - SExprInfo* pExprInfo = createExprInfo(pCountNode->window.pFuncs, NULL, &numOfCols); + SExprInfo* pExprInfo = NULL; + code = createExprInfo(pCountNode->window.pFuncs, NULL, &pExprInfo, &numOfCols); + QUERY_CHECK_CODE(code, lino, _error); + SSDataBlock* pResBlock = createDataBlockFromDescNode(pPhyNode->pOutputDataBlockDesc); code = initBasicInfoEx(&pInfo->binfo, pExpSup, pExprInfo, numOfCols, pResBlock, &pTaskInfo->storageAPI.functionStore); QUERY_CHECK_CODE(code, lino, _error); diff --git a/source/libs/executor/src/streameventwindowoperator.c b/source/libs/executor/src/streameventwindowoperator.c index 17ef2fe41f..1311216c06 100644 --- a/source/libs/executor/src/streameventwindowoperator.c +++ b/source/libs/executor/src/streameventwindowoperator.c @@ -864,7 +864,10 @@ int32_t createStreamEventAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* initResultSizeInfo(&pOperator->resultInfo, 4096); if (pEventNode->window.pExprs != NULL) { int32_t numOfScalar = 0; - SExprInfo* pScalarExprInfo = createExprInfo(pEventNode->window.pExprs, NULL, &numOfScalar); + SExprInfo* pScalarExprInfo = NULL; + code = createExprInfo(pEventNode->window.pExprs, NULL, &pScalarExprInfo, &numOfScalar); + QUERY_CHECK_CODE(code, lino, _error); + code = initExprSupp(&pInfo->scalarSupp, pScalarExprInfo, numOfScalar, &pTaskInfo->storageAPI.functionStore); if (code != TSDB_CODE_SUCCESS) { goto _error; @@ -884,7 +887,10 @@ int32_t createStreamEventAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* SExprSupp* pExpSup = &pOperator->exprSupp; int32_t numOfCols = 0; - SExprInfo* pExprInfo = createExprInfo(pEventNode->window.pFuncs, NULL, &numOfCols); + SExprInfo* pExprInfo = NULL; + code = createExprInfo(pEventNode->window.pFuncs, NULL, &pExprInfo, &numOfCols); + QUERY_CHECK_CODE(code, lino, _error); + SSDataBlock* pResBlock = createDataBlockFromDescNode(pPhyNode->pOutputDataBlockDesc); code = initBasicInfoEx(&pInfo->binfo, pExpSup, pExprInfo, numOfCols, 
pResBlock, &pTaskInfo->storageAPI.functionStore); QUERY_CHECK_CODE(code, lino, _error); diff --git a/source/libs/executor/src/streamfilloperator.c b/source/libs/executor/src/streamfilloperator.c index 480814f6a0..39aedd9d59 100644 --- a/source/libs/executor/src/streamfilloperator.c +++ b/source/libs/executor/src/streamfilloperator.c @@ -1190,7 +1190,11 @@ static SStreamFillSupporter* initStreamFillSup(SStreamFillPhysiNode* pPhyFillNod } pFillSup->numOfFillCols = numOfFillCols; int32_t numOfNotFillCols = 0; - SExprInfo* noFillExprInfo = createExprInfo(pPhyFillNode->pNotFillExprs, NULL, &numOfNotFillCols); + SExprInfo* noFillExprInfo = NULL; + + code = createExprInfo(pPhyFillNode->pNotFillExprs, NULL, &noFillExprInfo, &numOfNotFillCols); + QUERY_CHECK_CODE(code, lino, _end); + pFillSup->pAllColInfo = createFillColInfo(pFillExprInfo, pFillSup->numOfFillCols, noFillExprInfo, numOfNotFillCols, (const SNodeListNode*)(pPhyFillNode->pValues)); pFillSup->type = convertFillType(pPhyFillNode->mode); @@ -1201,7 +1205,10 @@ static SStreamFillSupporter* initStreamFillSup(SStreamFillPhysiNode* pPhyFillNod code = initResultBuf(pFillSup); QUERY_CHECK_CODE(code, lino, _end); - SExprInfo* noFillExpr = createExprInfo(pPhyFillNode->pNotFillExprs, NULL, &numOfNotFillCols); + SExprInfo* noFillExpr = NULL; + code = createExprInfo(pPhyFillNode->pNotFillExprs, NULL, &noFillExpr, &numOfNotFillCols); + QUERY_CHECK_CODE(code, lino, _end); + code = initExprSupp(&pFillSup->notFillExprSup, noFillExpr, numOfNotFillCols, &pAPI->functionStore); QUERY_CHECK_CODE(code, lino, _end); @@ -1343,7 +1350,11 @@ int32_t createStreamFillOperatorInfo(SOperatorInfo* downstream, SStreamFillPhysi SInterval* pInterval = &((SStreamIntervalOperatorInfo*)downstream->info)->interval; int32_t numOfFillCols = 0; - SExprInfo* pFillExprInfo = createExprInfo(pPhyFillNode->pFillExprs, NULL, &numOfFillCols); + SExprInfo* pFillExprInfo = NULL; + + code = createExprInfo(pPhyFillNode->pFillExprs, NULL, &pFillExprInfo, 
&numOfFillCols); + QUERY_CHECK_CODE(code, lino, _error); + pInfo->pFillSup = initStreamFillSup(pPhyFillNode, pInterval, pFillExprInfo, numOfFillCols, &pTaskInfo->storageAPI); if (!pInfo->pFillSup) { code = TSDB_CODE_FAILED; diff --git a/source/libs/executor/src/streamtimewindowoperator.c b/source/libs/executor/src/streamtimewindowoperator.c index 7462d71a8a..3c696a1be8 100644 --- a/source/libs/executor/src/streamtimewindowoperator.c +++ b/source/libs/executor/src/streamtimewindowoperator.c @@ -1880,13 +1880,20 @@ int32_t createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiN initResultSizeInfo(&pOperator->resultInfo, 4096); if (pIntervalPhyNode->window.pExprs != NULL) { int32_t numOfScalar = 0; - SExprInfo* pScalarExprInfo = createExprInfo(pIntervalPhyNode->window.pExprs, NULL, &numOfScalar); + SExprInfo* pScalarExprInfo = NULL; + + code = createExprInfo(pIntervalPhyNode->window.pExprs, NULL, &pScalarExprInfo, &numOfScalar); + QUERY_CHECK_CODE(code, lino, _error); + code = initExprSupp(&pInfo->scalarSupp, pScalarExprInfo, numOfScalar, &pTaskInfo->storageAPI.functionStore); QUERY_CHECK_CODE(code, lino, _error); } int32_t numOfCols = 0; - SExprInfo* pExprInfo = createExprInfo(pIntervalPhyNode->window.pFuncs, NULL, &numOfCols); + SExprInfo* pExprInfo = NULL; + code = createExprInfo(pIntervalPhyNode->window.pFuncs, NULL, &pExprInfo, &numOfCols); + QUERY_CHECK_CODE(code, lino, _error); + SSDataBlock* pResBlock = createDataBlockFromDescNode(pPhyNode->pOutputDataBlockDesc); initBasicInfo(&pInfo->binfo, pResBlock); @@ -3690,7 +3697,10 @@ int32_t createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode initResultSizeInfo(&pOperator->resultInfo, 4096); if (pSessionNode->window.pExprs != NULL) { int32_t numOfScalar = 0; - SExprInfo* pScalarExprInfo = createExprInfo(pSessionNode->window.pExprs, NULL, &numOfScalar); + SExprInfo* pScalarExprInfo = NULL; + code = createExprInfo(pSessionNode->window.pExprs, NULL, &pScalarExprInfo, &numOfScalar); + 
QUERY_CHECK_CODE(code, lino, _error); + code = initExprSupp(&pInfo->scalarSupp, pScalarExprInfo, numOfScalar, &pTaskInfo->storageAPI.functionStore); if (code != TSDB_CODE_SUCCESS) { goto _error; @@ -3698,7 +3708,10 @@ int32_t createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode } SExprSupp* pExpSup = &pOperator->exprSupp; - SExprInfo* pExprInfo = createExprInfo(pSessionNode->window.pFuncs, NULL, &numOfCols); + SExprInfo* pExprInfo = NULL; + code = createExprInfo(pSessionNode->window.pFuncs, NULL, &pExprInfo, &numOfCols); + QUERY_CHECK_CODE(code, lino, _error); + SSDataBlock* pResBlock = createDataBlockFromDescNode(pPhyNode->pOutputDataBlockDesc); code = initBasicInfoEx(&pInfo->binfo, pExpSup, pExprInfo, numOfCols, pResBlock, &pTaskInfo->storageAPI.functionStore); if (code != TSDB_CODE_SUCCESS) { @@ -4831,7 +4844,10 @@ int32_t createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* initResultSizeInfo(&pOperator->resultInfo, 4096); if (pStateNode->window.pExprs != NULL) { int32_t numOfScalar = 0; - SExprInfo* pScalarExprInfo = createExprInfo(pStateNode->window.pExprs, NULL, &numOfScalar); + SExprInfo* pScalarExprInfo = NULL; + code = createExprInfo(pStateNode->window.pExprs, NULL, &pScalarExprInfo, &numOfScalar); + QUERY_CHECK_CODE(code, lino, _error); + code = initExprSupp(&pInfo->scalarSupp, pScalarExprInfo, numOfScalar, &pTaskInfo->storageAPI.functionStore); QUERY_CHECK_CODE(code, lino, _error); } @@ -4849,7 +4865,10 @@ int32_t createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* SExprSupp* pExpSup = &pOperator->exprSupp; int32_t numOfCols = 0; - SExprInfo* pExprInfo = createExprInfo(pStateNode->window.pFuncs, NULL, &numOfCols); + SExprInfo* pExprInfo = NULL; + code = createExprInfo(pStateNode->window.pFuncs, NULL, &pExprInfo, &numOfCols); + QUERY_CHECK_CODE(code, lino, _error); + SSDataBlock* pResBlock = createDataBlockFromDescNode(pPhyNode->pOutputDataBlockDesc); code = initBasicInfoEx(&pInfo->binfo, 
pExpSup, pExprInfo, numOfCols, pResBlock, &pTaskInfo->storageAPI.functionStore); if (code != TSDB_CODE_SUCCESS) { @@ -5126,7 +5145,10 @@ int32_t createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiNode* } SStreamIntervalPhysiNode* pIntervalPhyNode = (SStreamIntervalPhysiNode*)pPhyNode; - SExprInfo* pExprInfo = createExprInfo(pIntervalPhyNode->window.pFuncs, NULL, &numOfCols); + + SExprInfo* pExprInfo = NULL; + code = createExprInfo(pIntervalPhyNode->window.pFuncs, NULL, &pExprInfo, &numOfCols); + QUERY_CHECK_CODE(code, lino, _error); SSDataBlock* pResBlock = createDataBlockFromDescNode(pPhyNode->pOutputDataBlockDesc); pInfo->interval = (SInterval){ @@ -5174,7 +5196,11 @@ int32_t createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiNode* if (pIntervalPhyNode->window.pExprs != NULL) { int32_t numOfScalar = 0; - SExprInfo* pScalarExprInfo = createExprInfo(pIntervalPhyNode->window.pExprs, NULL, &numOfScalar); + SExprInfo* pScalarExprInfo = NULL; + + code = createExprInfo(pIntervalPhyNode->window.pExprs, NULL, &pScalarExprInfo, &numOfScalar); + QUERY_CHECK_CODE(code, lino, _error); + code = initExprSupp(&pInfo->scalarSupp, pScalarExprInfo, numOfScalar, &pTaskInfo->storageAPI.functionStore); QUERY_CHECK_CODE(code, lino, _error); } diff --git a/source/libs/executor/src/sysscanoperator.c b/source/libs/executor/src/sysscanoperator.c index 5f4bbd66ce..90c760136e 100644 --- a/source/libs/executor/src/sysscanoperator.c +++ b/source/libs/executor/src/sysscanoperator.c @@ -2716,7 +2716,10 @@ int32_t createDataBlockInfoScanOperator(SReadHandle* readHandle, SBlockDistScanP pInfo->uid = (pBlockScanNode->suid != 0) ? 
pBlockScanNode->suid : pBlockScanNode->uid; int32_t numOfCols = 0; - SExprInfo* pExprInfo = createExprInfo(pBlockScanNode->pScanPseudoCols, NULL, &numOfCols); + SExprInfo* pExprInfo = NULL; + code = createExprInfo(pBlockScanNode->pScanPseudoCols, NULL, &pExprInfo, &numOfCols); + QUERY_CHECK_CODE(code, lino, _error); + code = initExprSupp(&pOperator->exprSupp, pExprInfo, numOfCols, &pTaskInfo->storageAPI.functionStore); QUERY_CHECK_CODE(code, lino, _error); diff --git a/source/libs/executor/src/timesliceoperator.c b/source/libs/executor/src/timesliceoperator.c index 6eaef50491..99a66efecb 100644 --- a/source/libs/executor/src/timesliceoperator.c +++ b/source/libs/executor/src/timesliceoperator.c @@ -1126,13 +1126,19 @@ int32_t createTimeSliceOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyN SExprSupp* pSup = &pOperator->exprSupp; int32_t numOfExprs = 0; - SExprInfo* pExprInfo = createExprInfo(pInterpPhyNode->pFuncs, NULL, &numOfExprs); + SExprInfo* pExprInfo = NULL; + code = createExprInfo(pInterpPhyNode->pFuncs, NULL, &pExprInfo, &numOfExprs); + QUERY_CHECK_CODE(code, lino, _error); + code = initExprSupp(pSup, pExprInfo, numOfExprs, &pTaskInfo->storageAPI.functionStore); QUERY_CHECK_CODE(code, lino, _error); if (pInterpPhyNode->pExprs != NULL) { int32_t num = 0; - SExprInfo* pScalarExprInfo = createExprInfo(pInterpPhyNode->pExprs, NULL, &num); + SExprInfo* pScalarExprInfo = NULL; + code = createExprInfo(pInterpPhyNode->pExprs, NULL, &pScalarExprInfo, &num); + QUERY_CHECK_CODE(code, lino, _error); + code = initExprSupp(&pInfo->scalarSup, pScalarExprInfo, num, &pTaskInfo->storageAPI.functionStore); QUERY_CHECK_CODE(code, lino, _error); } diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index a1ec923352..989eb97327 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -1298,7 +1298,10 @@ int32_t createIntervalOperatorInfo(SOperatorInfo* 
downstream, SIntervalPhysiNode QUERY_CHECK_CODE(code, lino, _error); int32_t num = 0; - SExprInfo* pExprInfo = createExprInfo(pPhyNode->window.pFuncs, NULL, &num); + SExprInfo* pExprInfo = NULL; + code = createExprInfo(pPhyNode->window.pFuncs, NULL, &pExprInfo, &num); + QUERY_CHECK_CODE(code, lino, _error); + code = initAggSup(pSup, &pInfo->aggSup, pExprInfo, num, keyBufSize, pTaskInfo->id.str, pTaskInfo->streamInfo.pState, &pTaskInfo->storageAPI.functionStore); QUERY_CHECK_CODE(code, lino, _error); @@ -1336,7 +1339,10 @@ int32_t createIntervalOperatorInfo(SOperatorInfo* downstream, SIntervalPhysiNode if (pPhyNode->window.pExprs != NULL) { int32_t numOfScalar = 0; - SExprInfo* pScalarExprInfo = createExprInfo(pPhyNode->window.pExprs, NULL, &numOfScalar); + SExprInfo* pScalarExprInfo = NULL; + code = createExprInfo(pPhyNode->window.pExprs, NULL, &pScalarExprInfo, &numOfScalar); + QUERY_CHECK_CODE(code, lino, _error); + code = initExprSupp(&pInfo->scalarSupp, pScalarExprInfo, numOfScalar, &pTaskInfo->storageAPI.functionStore); if (code != TSDB_CODE_SUCCESS) { goto _error; @@ -1578,7 +1584,10 @@ int32_t createStatewindowOperatorInfo(SOperatorInfo* downstream, SStateWinodwPhy if (pStateNode->window.pExprs != NULL) { int32_t numOfScalarExpr = 0; - SExprInfo* pScalarExprInfo = createExprInfo(pStateNode->window.pExprs, NULL, &numOfScalarExpr); + SExprInfo* pScalarExprInfo = NULL; + code = createExprInfo(pStateNode->window.pExprs, NULL, &pScalarExprInfo, &numOfScalarExpr); + QUERY_CHECK_CODE(code, lino, _error); + code = initExprSupp(&pInfo->scalarSup, pScalarExprInfo, numOfScalarExpr, &pTaskInfo->storageAPI.functionStore); if (code != TSDB_CODE_SUCCESS) { goto _error; @@ -1603,7 +1612,10 @@ int32_t createStatewindowOperatorInfo(SOperatorInfo* downstream, SStateWinodwPhy size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES; int32_t num = 0; - SExprInfo* pExprInfo = createExprInfo(pStateNode->window.pFuncs, NULL, &num); + SExprInfo* pExprInfo = NULL; + code 
= createExprInfo(pStateNode->window.pFuncs, NULL, &pExprInfo, &num); + QUERY_CHECK_CODE(code, lino, _error); + initResultSizeInfo(&pOperator->resultInfo, 4096); code = initAggSup(&pOperator->exprSupp, &pInfo->aggSup, pExprInfo, num, keyBufSize, pTaskInfo->id.str, @@ -1682,7 +1694,10 @@ int32_t createSessionAggOperatorInfo(SOperatorInfo* downstream, SSessionWinodwPh initResultSizeInfo(&pOperator->resultInfo, 4096); int32_t numOfCols = 0; - SExprInfo* pExprInfo = createExprInfo(pSessionNode->window.pFuncs, NULL, &numOfCols); + SExprInfo* pExprInfo = NULL; + code = createExprInfo(pSessionNode->window.pFuncs, NULL, &pExprInfo, &numOfCols); + QUERY_CHECK_CODE(code, lino, _error); + SSDataBlock* pResBlock = createDataBlockFromDescNode(pSessionNode->window.node.pOutputDataBlockDesc); initBasicInfo(&pInfo->binfo, pResBlock); @@ -1709,7 +1724,10 @@ int32_t createSessionAggOperatorInfo(SOperatorInfo* downstream, SSessionWinodwPh if (pSessionNode->window.pExprs != NULL) { int32_t numOfScalar = 0; - SExprInfo* pScalarExprInfo = createExprInfo(pSessionNode->window.pExprs, NULL, &numOfScalar); + SExprInfo* pScalarExprInfo = NULL; + code = createExprInfo(pSessionNode->window.pExprs, NULL, &pScalarExprInfo, &numOfScalar); + QUERY_CHECK_CODE(code, lino, _error); + code = initExprSupp(&pInfo->scalarSupp, pScalarExprInfo, numOfScalar, &pTaskInfo->storageAPI.functionStore); if (code != TSDB_CODE_SUCCESS) { goto _error; @@ -2012,7 +2030,9 @@ int32_t createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream, SMerge initResultSizeInfo(&pOperator->resultInfo, 512); int32_t num = 0; - SExprInfo* pExprInfo = createExprInfo(pNode->window.pFuncs, NULL, &num); + SExprInfo* pExprInfo = NULL; + code = createExprInfo(pNode->window.pFuncs, NULL, &pExprInfo, &num); + QUERY_CHECK_CODE(code, lino, _error); code = initAggSup(&pOperator->exprSupp, &iaInfo->aggSup, pExprInfo, num, keyBufSize, pTaskInfo->id.str, pTaskInfo->streamInfo.pState, &pTaskInfo->storageAPI.functionStore); @@ -2312,7 
+2332,9 @@ int32_t createMergeIntervalOperatorInfo(SOperatorInfo* downstream, SMergeInterva } int32_t num = 0; - SExprInfo* pExprInfo = createExprInfo(pIntervalPhyNode->window.pFuncs, NULL, &num); + SExprInfo* pExprInfo = NULL; + code = createExprInfo(pIntervalPhyNode->window.pFuncs, NULL, &pExprInfo, &num); + QUERY_CHECK_CODE(code, lino, _error); SInterval interval = {.interval = pIntervalPhyNode->interval, .sliding = pIntervalPhyNode->sliding, From 093e7ef0bb0127a1307025ea45f8b0c6e41d07d0 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Mon, 5 Aug 2024 12:06:01 +0800 Subject: [PATCH 60/60] refactor remve backend code --- source/libs/stream/inc/streamBackendRocksdb.h | 2 +- source/libs/stream/src/streamBackendRocksdb.c | 2 +- source/libs/stream/src/streamTask.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/source/libs/stream/inc/streamBackendRocksdb.h b/source/libs/stream/inc/streamBackendRocksdb.h index 3bb4532db3..3a5d72576b 100644 --- a/source/libs/stream/inc/streamBackendRocksdb.h +++ b/source/libs/stream/inc/streamBackendRocksdb.h @@ -153,7 +153,7 @@ void taskDbUpdateChkpId(void* pTaskDb, int64_t chkpId); void* taskDbAddRef(void* pTaskDb); void taskDbRemoveRef(void* pTaskDb); -void taskDbRemoveAllFiles(void* pTaskDb); +void taskDbSetClearFileFlag(void* pTaskDb); int streamStateOpenBackend(void* backend, SStreamState* pState); void streamStateCloseBackend(SStreamState* pState, bool remove); diff --git a/source/libs/stream/src/streamBackendRocksdb.c b/source/libs/stream/src/streamBackendRocksdb.c index 8498c9118a..e3f747fb22 100644 --- a/source/libs/stream/src/streamBackendRocksdb.c +++ b/source/libs/stream/src/streamBackendRocksdb.c @@ -2331,7 +2331,7 @@ void taskDbRemoveRef(void* pTaskDb) { (void)taosReleaseRef(taskDbWrapperId, pBackend->refId); } -void taskDbRemoveAllFiles(void* pTaskDb) { +void taskDbSetClearFileFlag(void* pTaskDb) { if (pTaskDb == NULL) { return; } diff --git a/source/libs/stream/src/streamTask.c 
b/source/libs/stream/src/streamTask.c index 90167e446e..c5b1284560 100644 --- a/source/libs/stream/src/streamTask.c +++ b/source/libs/stream/src/streamTask.c @@ -321,7 +321,7 @@ void streamFreeTaskState(SStreamTask* pTask, int8_t remove) { stDebug("s-task:0x%x start to free task state", pTask->id.taskId); streamStateClose(pTask->pState, remove); - taskDbRemoveAllFiles(pTask->pBackend); + taskDbSetClearFileFlag(pTask->pBackend); taskDbRemoveRef(pTask->pBackend); pTask->pBackend = NULL; pTask->pState = NULL;