From 48437191ee08f9c17a5669e9f088ad1e5d405cba Mon Sep 17 00:00:00 2001
From: Haojun Liao
Date: Thu, 19 Dec 2024 16:26:17 +0800
Subject: [PATCH 01/24] fix(analytics): fix check return value error.

---
 source/util/src/tanalytics.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/source/util/src/tanalytics.c b/source/util/src/tanalytics.c
index 68bbbb7e99..3812295381 100644
--- a/source/util/src/tanalytics.c
+++ b/source/util/src/tanalytics.c
@@ -478,11 +478,13 @@ static int32_t taosAnalJsonBufWriteStrUseCol(SAnalyticBuf *pBuf, const char *buf
   }
 
   if (pBuf->bufType == ANALYTICS_BUF_TYPE_JSON) {
-    if (taosWriteFile(pBuf->filePtr, buf, bufLen) != bufLen) {
+    int32_t ret = taosWriteFile(pBuf->filePtr, buf, bufLen);
+    if (ret < 0) {
       return terrno;
     }
   } else {
-    if (taosWriteFile(pBuf->pCols[colIndex].filePtr, buf, bufLen) != bufLen) {
+    int32_t ret = taosWriteFile(pBuf->pCols[colIndex].filePtr, buf, bufLen);
+    if (ret < 0) {
       return terrno;
     }
   }

From 4d008308b8b176a605fd2611124c25215a26590a Mon Sep 17 00:00:00 2001
From: Haojun Liao
Date: Fri, 20 Dec 2024 18:49:57 +0800
Subject: [PATCH 02/24] refactor(analytics): do some refactor.

---
 source/util/src/tanalytics.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/source/util/src/tanalytics.c b/source/util/src/tanalytics.c
index 3812295381..306795f5ed 100644
--- a/source/util/src/tanalytics.c
+++ b/source/util/src/tanalytics.c
@@ -479,12 +479,12 @@ static int32_t taosAnalJsonBufWriteStrUseCol(SAnalyticBuf *pBuf, const char *buf
 
   if (pBuf->bufType == ANALYTICS_BUF_TYPE_JSON) {
     int32_t ret = taosWriteFile(pBuf->filePtr, buf, bufLen);
-    if (ret < 0) {
+    if (ret != bufLen) {
       return terrno;
     }
   } else {
     int32_t ret = taosWriteFile(pBuf->pCols[colIndex].filePtr, buf, bufLen);
-    if (ret < 0) {
+    if (ret != bufLen) {
       return terrno;
     }
   }

From 59003022d8788c09608f3bd39e8b811fa579ae9e Mon Sep 17 00:00:00 2001
From: Haojun Liao
Date: Mon, 23 Dec 2024 14:32:04 +0800
Subject: [PATCH 03/24] fix(analytics):copy the correct post rsp.
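libcurl may hand one HTTP response body to a CURLOPT_WRITEFUNCTION callback in several pieces, so the receive buffer has to be grown and appended to instead of being overwritten on every call; the change below moves taosCurlWriteData toward that accumulation pattern. A minimal sketch of the pattern, using plain realloc rather than the taosMemory* wrappers from the diff; the struct and function names here are illustrative only, not the committed code:

```c
#include <stdlib.h>
#include <string.h>

typedef struct {
  char  *data;    /* accumulated response body, NUL-terminated */
  size_t dataLen; /* bytes accumulated so far */
} SCurlResp;

/* CURLOPT_WRITEFUNCTION-compatible callback: append the newly delivered chunk. */
static size_t appendChunk(char *pCont, size_t contLen, size_t nmemb, void *userdata) {
  SCurlResp *pRsp  = (SCurlResp *)userdata;
  size_t     chunk = contLen * nmemb;

  /* realloc(NULL, n) behaves like malloc, so the first chunk is handled the same way. */
  char *p = (char *)realloc(pRsp->data, pRsp->dataLen + chunk + 1);
  if (p == NULL) {
    return 0;  /* a short return tells libcurl the write failed */
  }

  memcpy(p + pRsp->dataLen, pCont, chunk);
  pRsp->data = p;
  pRsp->dataLen += chunk;
  pRsp->data[pRsp->dataLen] = '\0';
  return chunk;  /* report exactly the number of bytes consumed */
}
```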
---
 source/util/src/tanalytics.c | 22 +++++++++++++++++++---
 1 file changed, 19 insertions(+), 3 deletions(-)

diff --git a/source/util/src/tanalytics.c b/source/util/src/tanalytics.c
index 306795f5ed..5a3ceef422 100644
--- a/source/util/src/tanalytics.c
+++ b/source/util/src/tanalytics.c
@@ -216,12 +216,28 @@ static size_t taosCurlWriteData(char *pCont, size_t contLen, size_t nmemb, void
     return 0;
   }
 
-  pRsp->dataLen = (int64_t)contLen * (int64_t)nmemb;
-  pRsp->data = taosMemoryMalloc(pRsp->dataLen + 1);
+  int64_t size = pRsp->dataLen + (int64_t)contLen * nmemb;
+  if (pRsp->data == NULL) {
+    pRsp->data = taosMemoryMalloc(size + 1);
+    if (pRsp->data == NULL) {
+      uError("failed to prepare recv buffer for post rsp, len:%d, code:%s", (int32_t) size + 1, tstrerror(terrno));
+      return terrno;
+    }
+  } else {
+    char* p = taosMemoryRealloc(pRsp->data, size + 1);
+    if (p == NULL) {
+      uError("failed to prepare recv buffer for post rsp, len:%d, code:%s", (int32_t) size + 1, tstrerror(terrno));
+      return terrno;
+    }
+
+    pRsp->data = p;
+  }
 
   if (pRsp->data != NULL) {
-    (void)memcpy(pRsp->data, pCont, pRsp->dataLen);
+    (void)memcpy(pRsp->data + pRsp->dataLen, pCont, pRsp->dataLen);
     pRsp->data[pRsp->dataLen] = 0;
+    pRsp->dataLen = size;
 
+    uDebugL("curl response is received, len:%" PRId64 ", content:%s", pRsp->dataLen, pRsp->data);
     return pRsp->dataLen;
   } else {

From 5ba7de5844867bd2e978506bfba5a56991d20b84 Mon Sep 17 00:00:00 2001
From: xiao-77
Date: Mon, 23 Dec 2024 15:37:26 +0800
Subject: [PATCH 04/24] Doc(cfg):support dyn alter disable create file.

---
 docs/en/26-tdinternal/01-arch.md | 33 +++++++++++++++++++--
 docs/zh/26-tdinternal/01-arch.md | 50 +++++++++++++++++++++++++++++---
 2 files changed, 76 insertions(+), 7 deletions(-)

diff --git a/docs/en/26-tdinternal/01-arch.md b/docs/en/26-tdinternal/01-arch.md
index 55c56a7681..ef689e0b74 100644
--- a/docs/en/26-tdinternal/01-arch.md
+++ b/docs/en/26-tdinternal/01-arch.md
@@ -328,8 +328,35 @@ In addition to precomputation, TDengine also supports various downsampling stora
 
 ### Multi-Level Storage and Object Storage
 
-By default, TDengine stores all data in the /var/lib/taos directory. To expand storage capacity, reduce potential bottlenecks caused by file reading, and enhance data throughput, TDengine allows the use of the configuration parameter `dataDir` to enable the cluster to utilize multiple mounted hard drives simultaneously.
+By default, TDengine saves all data in the /var/lib/taos directory, and the data files of each vnode are kept in separate subdirectories under it. To expand storage space, reduce file-read bottlenecks, and improve data throughput, the system parameter "dataDir" can be configured so that multiple mounted hard disks are used by the system at the same time. In addition, TDengine provides tiered data storage, i.e. data is placed on different storage media according to the timestamps of its data files. For example, the latest data can be stored on SSD, data older than a week on a local hard disk, and data older than four weeks on a network storage device. This reduces storage costs while keeping data access efficient. The movement of data between storage media is done automatically by the system and is completely transparent to applications. Tiered storage is also configured through the system parameter "dataDir".
+
+dataDir format is as follows:
+
+```
+dataDir data_path [tier_level] [primary] [disable_create_new_file]
+```
+
+Where `data_path` is the folder path of the mount point and `tier_level` is the storage tier of the media; the higher the storage tier, the older the data files stored on it. Multiple hard disks can be mounted at the same storage tier, and data files on one storage tier are distributed across all hard disks within that tier. TDengine supports up to 3 storage tiers, so valid tier_level values are 0, 1, and 2. When configuring dataDir, exactly one mount path must be given without specifying tier_level; it is called the special mount disk (path). This mount path defaults to level 0 storage media and contains special file links, which must not be removed, otherwise the written data will be damaged beyond recovery. `primary` indicates whether the data dir is the primary mount point: enter 0 for false or 1 for true, with a default of 1. A TDengine cluster can have only one `primary` mount point, and it must be on tier 0. `disable_create_new_file` indicates whether creating new file sets on the specified mount point is prohibited: enter 0 for false or 1 for true, with a default of 0. Tier 0 storage must have at least one mount point with disable_create_new_file set to 0; tier 1 and tier 2 storage do not have this restriction.
+
+Suppose a physical node has six mountable hard disks /mnt/disk1, /mnt/disk2, ..., /mnt/disk6, where disk1 and disk2 are to be designated as level 0 storage media, disk3 and disk4 as level 1, and disk5 and disk6 as level 2. Disk1 is the special mount disk; you can configure it in /etc/taos/taos.cfg as follows:
+
+```
+dataDir /mnt/disk1/taos 0 1 0
+dataDir /mnt/disk2/taos 0 0 0
+dataDir /mnt/disk3/taos 1 0 0
+dataDir /mnt/disk4/taos 1 0 1
+dataDir /mnt/disk5/taos 2 0 0
+dataDir /mnt/disk6/taos 2 0 0
+```
+
+A mounted disk can also be a non-local network disk, as long as the system can access it.
+
+You can use the following command to dynamically modify dataDir and control whether disable_create_new_file is enabled for a given mount point.
+
+```
+alter dnode 1 "/mnt/disk2/taos 1";
+```
+
+Note: Tiered storage is only supported in the Enterprise Edition.
 
-Additionally, TDengine offers tiered data storage functionality, allowing users to store data from different time periods in directories on different storage devices. This facilitates the separation of "hot" data (frequently accessed) and "cold" data (less frequently accessed), making full use of various storage resources while saving costs. For example, data that is recently collected and requires frequent access can be stored on high-performance solid-state drives due to their high read performance requirements. Data that exceeds a certain age and has lower query demands can be stored on mechanically driven hard disks, which are relatively cheaper.
 
-To further reduce storage costs, TDengine also supports storing time-series data in object storage systems. Through its innovative design, in most cases, the performance of querying time-series data from object storage systems is close to half that of local disks, and in some scenarios, the performance can even be comparable to local disks. Additionally, TDengine allows users to perform delete and update operations on time-series data stored in object storage.
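To make the optional fields above concrete: when tier_level, primary, or disable_create_new_file is omitted, the defaults are 0, 1, and 0 respectively. The following is a purely illustrative parser for a single dataDir line, written only to demonstrate the format and its defaults; it is not TDengine's actual configuration code, and the struct and function names are hypothetical.

```c
#include <stdio.h>
#include <string.h>

typedef struct {
  char path[256];
  int  level;    /* 0, 1 or 2; defaults to 0 when omitted      */
  int  primary;  /* defaults to 1 when omitted                 */
  int  disable;  /* disable_create_new_file; defaults to 0     */
} SDataDirCfg;

/* Parse "dataDir <path> [level] [primary] [disable_create_new_file]". */
static int parseDataDir(const char *line, SDataDirCfg *cfg) {
  cfg->path[0] = '\0';
  cfg->level   = 0;
  cfg->primary = 1;
  cfg->disable = 0;
  int n = sscanf(line, "dataDir %255s %d %d %d", cfg->path, &cfg->level, &cfg->primary, &cfg->disable);
  return (n >= 1) ? 0 : -1;  /* at least the mount path is required */
}

int main(void) {
  SDataDirCfg cfg;
  if (parseDataDir("dataDir /mnt/disk4/taos 1 0 1", &cfg) == 0) {
    printf("%s level=%d primary=%d disable_create_new_file=%d\n", cfg.path, cfg.level, cfg.primary, cfg.disable);
  }
  return 0;
}
```

Run against the example line `dataDir /mnt/disk4/taos 1 0 1`, the sketch reports a tier 1, non-primary mount point with new file creation disabled, which matches the sample configuration above.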
diff --git a/docs/zh/26-tdinternal/01-arch.md b/docs/zh/26-tdinternal/01-arch.md index 7091ca9661..242adb11b0 100644 --- a/docs/zh/26-tdinternal/01-arch.md +++ b/docs/zh/26-tdinternal/01-arch.md @@ -323,10 +323,52 @@ TDengine 采用了一种数据驱动的策略来实现缓存数据的持久化 除了预计算功能以外,TDengine 还支持对原始数据进行多种降采样存储。一种降采样存储方式是 Rollup SMA,它能够自动对原始数据进行降采样存储,并支持 3 个不同的数据保存层级,用户可以指定每层数据的聚合周期和保存时长。这对于那些关注数据趋势的场景尤为适用,其核心目的是减少存储开销并提高查询速度。另一种降采样存储方式是 Time-Range-Wise SMA,它可以根据聚合结果进行降采样存储,非常适合于高频的 interval 查询场景。该功能采用与普通流计算相同的逻辑,并允许用户通过设置watermark 来处理延时数据,相应地,实际的查询结果也会有一定的时间延迟。 -### 多级存储与对象存储 +### 多级存储 -在默认情况下,TDengine 将所有数据存储在 /var/lib/taos 目录中。为了扩展存储容量,减少文件读取可能导致的瓶颈,并提升数据吞吐量,TDengine 允许通过配置参数dataDir,使得集群能够同时利用挂载的多块硬盘。 +说明:多级存储功能仅企业版支持,从 2.0.16.0 版本开始提供。 -此外,TDengine 还提供了数据分级存储的功能,允许用户将不同时间段的数据存储在不同存储设备的目录中,以此实现将“热”数据和“冷”数据分开存储。这样做可以充分利用各种存储资源,同时节约成本。例如,对于最新采集且需要频繁访问的数据,由于其读取性能要求较高,用户可以配置将这些数据存储在高性能的固态硬盘上。而对于超过一定期限、查询需求较低的数据,则可以将其存储在成本相对较低的机械硬盘上。 +在默认配置下,TDengine 会将所有数据保存在 /var/lib/taos 目录下,而且每个 vnode 的数据文件保存在该目录下的不同目录。为扩大存储空间,尽量减少文件读取的瓶颈,提高数据吞吐率 TDengine 可通过配置系统参数 dataDir 让多个挂载的硬盘被系统同时使用。 -为了进一步降低存储成本,TDengine 还支持将时序数据存储在对象存储系统中。通过其创新性的设计,在大多数情况下,从对象存储系统中查询时序数据的性能接近本地硬盘的一半,而在某些场景下,性能甚至可以与本地硬盘相媲美。同时,TDengine 还允许用户对存储在对象存储中的时序数据执行删除和更新操作。 +除此之外,TDengine 也提供了数据分级存储的功能,将不同时间段的数据存储在挂载的不同介质上的目录里,从而实现不同“热度”的数据存储在不同的存储介质上,充分利用存储,节约成本。比如,最新采集的数据需要经常访问,对硬盘的读取性能要求高,那么用户可以配置将这些数据存储在 SSD 盘上。超过一定期限的数据,查询需求量没有那么高,那么可以存储在相对便宜的 HDD 盘上。 + +多级存储支持 3 级,每级最多可配置 128 个挂载点。 + +TDengine 多级存储配置方式如下(在配置文件/etc/taos/taos.cfg 中): + +``` +dataDir [path] +``` + +- path: 挂载点的文件夹路径 +- level: 介质存储等级,取值为 0,1,2。 + 0 级存储最新的数据,1 级存储次新的数据,2 级存储最老的数据,省略默认为 0。 + 各级存储之间的数据流向:0 级存储 -> 1 级存储 -> 2 级存储。 + 同一存储等级可挂载多个硬盘,同一存储等级上的数据文件分布在该存储等级的所有硬盘上。 + 需要说明的是,数据在不同级别的存储介质上的移动,是由系统自动完成的,用户无需干预。 +- primary: 是否为主挂载点,0(否)或 1(是),省略默认为 1。 +- disable_create_new_file: 是否禁止创建新文件组,0(否)或 1(是),省略默认为 0。 + +在配置中,只允许一个主挂载点的存在(level=0,primary=1),例如采用如下的配置方式: + +``` +dataDir /mnt/data1 0 1 0 +dataDir /mnt/data2 0 0 0 +dataDir /mnt/data3 1 0 0 +dataDir /mnt/data4 1 0 1 +dataDir /mnt/data5 2 0 0 +dataDir /mnt/data6 2 0 0 +``` + +您可以使用以下命令动态修改 dataDir 的 disable 来控制当前目录是否开启 disable_create_new_file 。 +``` +alter dnode 1 "/mnt/disk2/taos 1"; +``` + +:::note + +1. 多级存储不允许跨级配置,合法的配置方案有:仅 0 级,仅 0 级+ 1 级,以及 0 级+ 1 级+ 2 级。而不允许只配置 level=0 和 level=2,而不配置 level=1。 +2. 禁止手动移除使用中的挂载盘,挂载盘目前不支持非本地的网络盘。 +3. 多级存储目前不支持删除已经挂载的硬盘的功能。 +4. 0 级存储至少存在一个 disable_create_new_file 为 0 的挂载点,1 级 和 2 级存储没有该限制。 + +::: \ No newline at end of file From 112754d2c5a20cd540339fb49675a6ff55fabd69 Mon Sep 17 00:00:00 2001 From: xiao-77 Date: Mon, 23 Dec 2024 15:39:16 +0800 Subject: [PATCH 05/24] Fix merge errors. --- docs/zh/26-tdinternal/01-arch.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/26-tdinternal/01-arch.md b/docs/zh/26-tdinternal/01-arch.md index 242adb11b0..9cc1ef6f02 100644 --- a/docs/zh/26-tdinternal/01-arch.md +++ b/docs/zh/26-tdinternal/01-arch.md @@ -323,7 +323,7 @@ TDengine 采用了一种数据驱动的策略来实现缓存数据的持久化 除了预计算功能以外,TDengine 还支持对原始数据进行多种降采样存储。一种降采样存储方式是 Rollup SMA,它能够自动对原始数据进行降采样存储,并支持 3 个不同的数据保存层级,用户可以指定每层数据的聚合周期和保存时长。这对于那些关注数据趋势的场景尤为适用,其核心目的是减少存储开销并提高查询速度。另一种降采样存储方式是 Time-Range-Wise SMA,它可以根据聚合结果进行降采样存储,非常适合于高频的 interval 查询场景。该功能采用与普通流计算相同的逻辑,并允许用户通过设置watermark 来处理延时数据,相应地,实际的查询结果也会有一定的时间延迟。 -### 多级存储 +### 多级存储与对象存储 说明:多级存储功能仅企业版支持,从 2.0.16.0 版本开始提供。 From 0beaecc5d6dd92740f2f45784424f796362d47b1 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 23 Dec 2024 16:41:50 +0800 Subject: [PATCH 06/24] fix(analytics): fix bugs in recv post results. 
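Two details drive this fix. First, libcurl expects a write callback to return exactly the number of bytes it handled; any other return value (including an error code such as terrno) makes libcurl treat the write as failed and abort the transfer with CURLE_WRITE_ERROR, so allocation failures should return 0. Second, the memcpy must copy the size of the newly delivered chunk, not the previously accumulated length. For context, here is a hedged sketch of how such a callback is typically wired up; the curl_easy_* calls are standard libcurl APIs, while the surrounding function is hypothetical and assumes taosCurlWriteData and its response struct are visible in the same translation unit:

```c
#include <curl/curl.h>
#include <stddef.h>

/* Declaration of a CURLOPT_WRITEFUNCTION-style callback such as taosCurlWriteData. */
size_t taosCurlWriteData(char *pCont, size_t contLen, size_t nmemb, void *userdata);

/* Sketch only: pRsp points at whatever struct the callback accumulates into. */
static int postAndCollect(const char *url, const char *body, void *pRsp) {
  CURL *curl = curl_easy_init();
  if (curl == NULL) return -1;

  curl_easy_setopt(curl, CURLOPT_URL, url);
  curl_easy_setopt(curl, CURLOPT_POSTFIELDS, body);
  curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, taosCurlWriteData);
  curl_easy_setopt(curl, CURLOPT_WRITEDATA, pRsp);

  /* Fails with CURLE_WRITE_ERROR if the callback ever returns a short count. */
  CURLcode rc = curl_easy_perform(curl);
  curl_easy_cleanup(curl);
  return (rc == CURLE_OK) ? 0 : -1;
}
```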
---
 source/util/src/tanalytics.c | 19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)

diff --git a/source/util/src/tanalytics.c b/source/util/src/tanalytics.c
index 5a3ceef422..e68edd4b76 100644
--- a/source/util/src/tanalytics.c
+++ b/source/util/src/tanalytics.c
@@ -216,30 +216,33 @@ static size_t taosCurlWriteData(char *pCont, size_t contLen, size_t nmemb, void
     return 0;
   }
 
-  int64_t size = pRsp->dataLen + (int64_t)contLen * nmemb;
+  int64_t newDataSize = (int64_t) contLen * nmemb;
+  int64_t size = pRsp->dataLen + newDataSize;
+
   if (pRsp->data == NULL) {
     pRsp->data = taosMemoryMalloc(size + 1);
     if (pRsp->data == NULL) {
       uError("failed to prepare recv buffer for post rsp, len:%d, code:%s", (int32_t) size + 1, tstrerror(terrno));
-      return terrno;
+      return 0; // return the recv length, if failed, return 0
     }
   } else {
     char* p = taosMemoryRealloc(pRsp->data, size + 1);
     if (p == NULL) {
       uError("failed to prepare recv buffer for post rsp, len:%d, code:%s", (int32_t) size + 1, tstrerror(terrno));
-      return terrno;
+      return 0; // return the recv length, if failed, return 0
     }
 
     pRsp->data = p;
   }
 
   if (pRsp->data != NULL) {
-    (void)memcpy(pRsp->data + pRsp->dataLen, pCont, pRsp->dataLen);
-    pRsp->data[pRsp->dataLen] = 0;
-    pRsp->dataLen = size;
+    (void)memcpy(pRsp->data + pRsp->dataLen, pCont, newDataSize);
 
-    uDebugL("curl response is received, len:%" PRId64 ", content:%s", pRsp->dataLen, pRsp->data);
-    return pRsp->dataLen;
+    pRsp->dataLen = size;
+    pRsp->data[size] = 0;
+
+    uDebugL("curl response is received, len:%" PRId64 ", content:%s", size, pRsp->data);
+    return newDataSize;
   } else {
     pRsp->dataLen = 0;
     uError("failed to malloc curl response");

From 4bc4894d7c6ce7f793e19b9cf1f751dfbda9b8b9 Mon Sep 17 00:00:00 2001
From: Jing Sima
Date: Mon, 23 Dec 2024 14:13:00 +0800
Subject: [PATCH 07/24] fix:[TS-5798] Fix crash when function's param num more than 127.

---
 .../14-reference/03-taos-sql/10-function.md    |  1 +
 .../14-reference/03-taos-sql/10-function.md    |  1 +
 source/libs/function/src/builtins.c            | 10 ++++-
 .../army/query/function/test_func_paramnum.py  | 42 +++++++++++++++++++
 tests/parallel_test/cases.task                 |  1 +
 5 files changed, 53 insertions(+), 2 deletions(-)
 create mode 100644 tests/army/query/function/test_func_paramnum.py

diff --git a/docs/en/14-reference/03-taos-sql/10-function.md b/docs/en/14-reference/03-taos-sql/10-function.md
index e6cfa20bd4..ab5c48bce2 100644
--- a/docs/en/14-reference/03-taos-sql/10-function.md
+++ b/docs/en/14-reference/03-taos-sql/10-function.md
@@ -943,6 +943,7 @@ CHAR(expr1 [, expr2] [, expr3] ...)
 - NULL values in input parameters will be skipped.
 - If the input parameters are of string type, they will be converted to numeric type for processing.
 - If the character corresponding to the input parameter is a non-printable character, the return value will still contain the character corresponding to that parameter, but it may not be displayed.
+- This function can have at most 2^31 - 1 input parameters.
 
 **Examples**:
 
diff --git a/docs/zh/14-reference/03-taos-sql/10-function.md b/docs/zh/14-reference/03-taos-sql/10-function.md
index c075545ff3..eb3a4bb0ed 100644
--- a/docs/zh/14-reference/03-taos-sql/10-function.md
+++ b/docs/zh/14-reference/03-taos-sql/10-function.md
@@ -902,6 +902,7 @@ CHAR(expr1 [, expr2] [, epxr3] ...)
- 输入参数的 NULL 值会被跳过。 - 输入参数若为字符串类型,会将其转换为数值类型处理。 - 若输入的参数对应的字符为不可打印字符,返回值中仍有该参数对应的字符,但是可能无法显示出来。 +- 输入参数的个数上限为 2^31 - 1 个。 **举例**: ```sql diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index 95a332ac05..b42d739b40 100644 --- a/source/libs/function/src/builtins.c +++ b/source/libs/function/src/builtins.c @@ -828,14 +828,20 @@ static int32_t validateParam(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { const SParamInfo* paramPattern = funcMgtBuiltins[pFunc->funcId].parameters.inputParaInfo[i]; while (1) { - for (int8_t j = paramPattern[paramIdx].startParam; - j <= (paramPattern[paramIdx].endParam == -1 ? INT8_MAX : paramPattern[paramIdx].endParam); j++) { + // one table can have at most 4096 columns, int32_t is enough. + for (int32_t j = paramPattern[paramIdx].startParam; + j <= (paramPattern[paramIdx].endParam == -1 ? INT32_MAX - 1 : paramPattern[paramIdx].endParam); j++) { if (j > LIST_LENGTH(paramList)) { code = TSDB_CODE_SUCCESS; isMatch = true; break; } SNode* pNode = nodesListGetNode(paramList, j - 1); + if (NULL == pNode) { + code = TSDB_CODE_FUNC_FUNTION_PARA_NUM; + isMatch = false; + break; + } // check node type if (!paramSupportNodeType(pNode, paramPattern[paramIdx].validNodeType)) { code = TSDB_CODE_FUNC_FUNTION_PARA_TYPE; diff --git a/tests/army/query/function/test_func_paramnum.py b/tests/army/query/function/test_func_paramnum.py new file mode 100644 index 0000000000..37930cd624 --- /dev/null +++ b/tests/army/query/function/test_func_paramnum.py @@ -0,0 +1,42 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +from frame import etool +from frame.etool import * +from frame.log import * +from frame.cases import * +from frame.sql import * +from frame.caseBase import * +from frame.common import * + +class TDTestCase(TBase): + def create_table(self): + etool.benchMark(command = "-l 1000 -n 1 -d ts_5798") + + def run(self): + tdLog.debug(f"start to excute {__file__}") + + self.create_table() + + tdSql.query("select last_row(c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14, c15, c16, c17, c18, c19, c20, c21, c22, c23, c24, c25, c26, c27, c28, c29, c30, c31, c32, c33, c34, c35, c36, c37, c38, c39, c40, c41, c42, c43, c44, c45, c46, c47, c48, c49, c50, c51, c52, c53, c54, c55, c56, c57, c58, c59, c60, c61, c62, c63, c64, c65, c66, c67, c68, c69, c70, c71, c72, c73, c74, c75, c76, c77, c78, c79, c80, c81, c82, c83, c84, c85, c86, c87, c88, c89, c90, c91, c92, c93, c94, c95, c96, c97, c98, c99, c100, c101, c102, c103, c104, c105, c106, c107, c108, c109, c110, c111, c112, c113, c114, c115, c116, c117, c118, c119, c120, c121, c122, c123, c124, c125, c126, c127, c128, c129, c130, c131, c132, c133, c134, c135, c136, c137, c138, c139, c140, c141, c142, c143, c144, c145, c146, c147, c148, c149, c150, c151, c152, c153, c154, c155, c156, c157, c158, c159, c160, c161, c162, c163, c164, c165, c166, c167, c168, c169, c170, c171, c172, c173, c174, c175, c176, c177, c178, c179, c180, c181, c182, c183, c184, c185, c186, c187, c188, c189, c190, c191, c192, c193, c194, c195, c196, c197, c198, c199, 
c200, c201, c202, c203, c204, c205, c206, c207, c208, c209, c210, c211, c212, c213, c214, c215, c216, c217, c218, c219, c220, c221, c222, c223, c224, c225, c226, c227, c228, c229, c230, c231, c232, c233, c234, c235, c236, c237, c238, c239, c240, c241, c242, c243, c244, c245, c246, c247, c248, c249, c250, c251, c252, c253, c254, c255, c256, c257, c258, c259, c260, c261, c262, c263, c264, c265, c266, c267, c268, c269, c270, c271, c272, c273, c274, c275, c276, c277, c278, c279, c280, c281, c282, c283, c284, c285, c286, c287, c288, c289, c290, c291, c292, c293, c294, c295, c296, c297, c298, c299, c300, c301, c302, c303, c304, c305, c306, c307, c308, c309, c310, c311, c312, c313, c314, c315, c316, c317, c318, c319, c320, c321, c322, c323, c324, c325, c326, c327, c328, c329, c330, c331, c332, c333, c334, c335, c336, c337, c338, c339, c340, c341, c342, c343, c344, c345, c346, c347, c348, c349, c350, c351, c352, c353, c354, c355, c356, c357, c358, c359, c360, c361, c362, c363, c364, c365, c366, c367, c368, c369, c370, c371, c372, c373, c374, c375, c376, c377, c378, c379, c380, c381, c382, c383, c384, c385, c386, c387, c388, c389, c390, c391, c392, c393, c394, c395, c396, c397, c398, c399, c400, c401, c402, c403, c404, c405, c406, c407, c408, c409, c410, c411, c412, c413, c414, c415, c416, c417, c418, c419, c420, c421, c422, c423, c424, c425, c426, c427, c428, c429, c430, c431, c432, c433, c434, c435, c436, c437, c438, c439, c440, c441, c442, c443, c444, c445, c446, c447, c448, c449, c450, c451, c452, c453, c454, c455, c456, c457, c458, c459, c460, c461, c462, c463, c464, c465, c466, c467, c468, c469, c470, c471, c472, c473, c474, c475, c476, c477, c478, c479, c480, c481, c482, c483, c484, c485, c486, c487, c488, c489, c490, c491, c492, c493, c494, c495, c496, c497, c498, c499, c500, c501, c502, c503, c504, c505, c506, c507, c508, c509, c510, c511, c512, c513, c514, c515, c516, c517, c518, c519, c520) from ts_5798.meters;") + tdSql.checkRows(1) + tdSql.query("select first(c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14, c15, c16, c17, c18, c19, c20, c21, c22, c23, c24, c25, c26, c27, c28, c29, c30, c31, c32, c33, c34, c35, c36, c37, c38, c39, c40, c41, c42, c43, c44, c45, c46, c47, c48, c49, c50, c51, c52, c53, c54, c55, c56, c57, c58, c59, c60, c61, c62, c63, c64, c65, c66, c67, c68, c69, c70, c71, c72, c73, c74, c75, c76, c77, c78, c79, c80, c81, c82, c83, c84, c85, c86, c87, c88, c89, c90, c91, c92, c93, c94, c95, c96, c97, c98, c99, c100, c101, c102, c103, c104, c105, c106, c107, c108, c109, c110, c111, c112, c113, c114, c115, c116, c117, c118, c119, c120, c121, c122, c123, c124, c125, c126, c127, c128, c129, c130, c131, c132, c133, c134, c135, c136, c137, c138, c139, c140, c141, c142, c143, c144, c145, c146, c147, c148, c149, c150, c151, c152, c153, c154, c155, c156, c157, c158, c159, c160, c161, c162, c163, c164, c165, c166, c167, c168, c169, c170, c171, c172, c173, c174, c175, c176, c177, c178, c179, c180, c181, c182, c183, c184, c185, c186, c187, c188, c189, c190, c191, c192, c193, c194, c195, c196, c197, c198, c199, c200, c201, c202, c203, c204, c205, c206, c207, c208, c209, c210, c211, c212, c213, c214, c215, c216, c217, c218, c219, c220, c221, c222, c223, c224, c225, c226, c227, c228, c229, c230, c231, c232, c233, c234, c235, c236, c237, c238, c239, c240, c241, c242, c243, c244, c245, c246, c247, c248, c249, c250, c251, c252, c253, c254, c255, c256, c257, c258, c259, c260, c261, c262, c263, c264, c265, c266, c267, c268, c269, c270, c271, c272, c273, c274, c275, c276, c277, 
c278, c279, c280, c281, c282, c283, c284, c285, c286, c287, c288, c289, c290, c291, c292, c293, c294, c295, c296, c297, c298, c299, c300, c301, c302, c303, c304, c305, c306, c307, c308, c309, c310, c311, c312, c313, c314, c315, c316, c317, c318, c319, c320, c321, c322, c323, c324, c325, c326, c327, c328, c329, c330, c331, c332, c333, c334, c335, c336, c337, c338, c339, c340, c341, c342, c343, c344, c345, c346, c347, c348, c349, c350, c351, c352, c353, c354, c355, c356, c357, c358, c359, c360, c361, c362, c363, c364, c365, c366, c367, c368, c369, c370, c371, c372, c373, c374, c375, c376, c377, c378, c379, c380, c381, c382, c383, c384, c385, c386, c387, c388, c389, c390, c391, c392, c393, c394, c395, c396, c397, c398, c399, c400, c401, c402, c403, c404, c405, c406, c407, c408, c409, c410, c411, c412, c413, c414, c415, c416, c417, c418, c419, c420, c421, c422, c423, c424, c425, c426, c427, c428, c429, c430, c431, c432, c433, c434, c435, c436, c437, c438, c439, c440, c441, c442, c443, c444, c445, c446, c447, c448, c449, c450, c451, c452, c453, c454, c455, c456, c457, c458, c459, c460, c461, c462, c463, c464, c465, c466, c467, c468, c469, c470, c471, c472, c473, c474, c475, c476, c477, c478, c479, c480, c481, c482, c483, c484, c485, c486, c487, c488, c489, c490, c491, c492, c493, c494, c495, c496, c497, c498, c499, c500, c501, c502, c503, c504, c505, c506, c507, c508, c509, c510, c511, c512, c513, c514, c515, c516, c517, c518, c519, c520) from ts_5798.meters;") + tdSql.checkRows(1) + tdSql.query("select last(c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14, c15, c16, c17, c18, c19, c20, c21, c22, c23, c24, c25, c26, c27, c28, c29, c30, c31, c32, c33, c34, c35, c36, c37, c38, c39, c40, c41, c42, c43, c44, c45, c46, c47, c48, c49, c50, c51, c52, c53, c54, c55, c56, c57, c58, c59, c60, c61, c62, c63, c64, c65, c66, c67, c68, c69, c70, c71, c72, c73, c74, c75, c76, c77, c78, c79, c80, c81, c82, c83, c84, c85, c86, c87, c88, c89, c90, c91, c92, c93, c94, c95, c96, c97, c98, c99, c100, c101, c102, c103, c104, c105, c106, c107, c108, c109, c110, c111, c112, c113, c114, c115, c116, c117, c118, c119, c120, c121, c122, c123, c124, c125, c126, c127, c128, c129, c130, c131, c132, c133, c134, c135, c136, c137, c138, c139, c140, c141, c142, c143, c144, c145, c146, c147, c148, c149, c150, c151, c152, c153, c154, c155, c156, c157, c158, c159, c160, c161, c162, c163, c164, c165, c166, c167, c168, c169, c170, c171, c172, c173, c174, c175, c176, c177, c178, c179, c180, c181, c182, c183, c184, c185, c186, c187, c188, c189, c190, c191, c192, c193, c194, c195, c196, c197, c198, c199, c200, c201, c202, c203, c204, c205, c206, c207, c208, c209, c210, c211, c212, c213, c214, c215, c216, c217, c218, c219, c220, c221, c222, c223, c224, c225, c226, c227, c228, c229, c230, c231, c232, c233, c234, c235, c236, c237, c238, c239, c240, c241, c242, c243, c244, c245, c246, c247, c248, c249, c250, c251, c252, c253, c254, c255, c256, c257, c258, c259, c260, c261, c262, c263, c264, c265, c266, c267, c268, c269, c270, c271, c272, c273, c274, c275, c276, c277, c278, c279, c280, c281, c282, c283, c284, c285, c286, c287, c288, c289, c290, c291, c292, c293, c294, c295, c296, c297, c298, c299, c300, c301, c302, c303, c304, c305, c306, c307, c308, c309, c310, c311, c312, c313, c314, c315, c316, c317, c318, c319, c320, c321, c322, c323, c324, c325, c326, c327, c328, c329, c330, c331, c332, c333, c334, c335, c336, c337, c338, c339, c340, c341, c342, c343, c344, c345, c346, c347, c348, c349, c350, c351, c352, c353, c354, c355, 
c356, c357, c358, c359, c360, c361, c362, c363, c364, c365, c366, c367, c368, c369, c370, c371, c372, c373, c374, c375, c376, c377, c378, c379, c380, c381, c382, c383, c384, c385, c386, c387, c388, c389, c390, c391, c392, c393, c394, c395, c396, c397, c398, c399, c400, c401, c402, c403, c404, c405, c406, c407, c408, c409, c410, c411, c412, c413, c414, c415, c416, c417, c418, c419, c420, c421, c422, c423, c424, c425, c426, c427, c428, c429, c430, c431, c432, c433, c434, c435, c436, c437, c438, c439, c440, c441, c442, c443, c444, c445, c446, c447, c448, c449, c450, c451, c452, c453, c454, c455, c456, c457, c458, c459, c460, c461, c462, c463, c464, c465, c466, c467, c468, c469, c470, c471, c472, c473, c474, c475, c476, c477, c478, c479, c480, c481, c482, c483, c484, c485, c486, c487, c488, c489, c490, c491, c492, c493, c494, c495, c496, c497, c498, c499, c500, c501, c502, c503, c504, c505, c506, c507, c508, c509, c510, c511, c512, c513, c514, c515, c516, c517, c518, c519, c520) from ts_5798.meters;") + tdSql.checkRows(1) + + tdLog.success(f"{__file__} successfully executed") + + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index c9d28e0623..0377ff1641 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -16,6 +16,7 @@ ,,y,army,./pytest.sh python3 ./test.py -f query/function/test_func_elapsed.py ,,y,army,./pytest.sh python3 ./test.py -f query/function/test_function.py ,,y,army,./pytest.sh python3 ./test.py -f query/function/test_selection_function_with_json.py +,,y,army,./pytest.sh python3 ./test.py -f query/function/test_func_paramnum.py ,,y,army,./pytest.sh python3 ./test.py -f query/function/test_percentile.py ,,y,army,./pytest.sh python3 ./test.py -f query/function/test_resinfo.py ,,y,army,./pytest.sh python3 ./test.py -f query/function/test_interp.py From 221d1553836517f3e61a18adc96a433bcdb24797 Mon Sep 17 00:00:00 2001 From: haoranchen Date: Mon, 23 Dec 2024 19:17:56 +0800 Subject: [PATCH 08/24] enh: ignore some tanalytics files in Jenkinsfile2 --- Jenkinsfile2 | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/Jenkinsfile2 b/Jenkinsfile2 index 6fa3483099..7987164ec0 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -7,6 +7,8 @@ file_zh_changed = '' file_en_changed = '' file_no_doc_changed = '1' file_only_tdgpt_change_except = '1' +tdgpt_file = "forecastoperator.c\\|anomalywindowoperator.c\\|tanalytics.h\\|tanalytics.c\\|tdgpt_cases.task" + def abortPreviousBuilds() { def currentJobName = env.JOB_NAME def currentBuildNumber = env.BUILD_NUMBER.toInteger() @@ -78,7 +80,7 @@ def check_docs(){ file_only_tdgpt_change_except = sh ( script: ''' cd ${WKC} - git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${CHANGE_TARGET}`|grep -v "^docs/en/"|grep -v "^docs/zh/"|grep -v ".md$" | grep -v "forecastoperator.c\\|anomalywindowoperator.c\\|tanalytics.h\\|tanalytics.c" |grep -v "tsim/analytics" |grep -v "tdgpt_cases.task" || : + git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${CHANGE_TARGET}`|grep -v "^docs/en/"|grep -v "^docs/zh/"|grep -v ".md$" | grep -v ${tdgpt_file} |grep -v "tsim/analytics" |grep -v "tdgpt_cases.task" || : ''', returnStdout: true ).trim() @@ -570,7 +572,7 @@ pipeline { cd ${WKC}/tests/parallel_test ./run_scan_container.sh -d ${WKDIR} -b ${BRANCH_NAME}_${BUILD_ID} -f ${WKDIR}/tmp/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt ''' + extra_param + ''' ''' - if ( 
file_no_doc_changed =~ /forecastoperator.c|anomalywindowoperator.c|tsim\/analytics|tdgpt_cases.task/ ) { + if ( file_no_doc_changed =~ ${tdgpt_file} ) { sh ''' cd ${WKC}/tests/parallel_test export DEFAULT_RETRY_TIME=2 From b0b0c30d393165e41bf84bf2c23955edbc1fb46a Mon Sep 17 00:00:00 2001 From: haoranchen Date: Mon, 23 Dec 2024 20:20:57 +0800 Subject: [PATCH 09/24] Update Jenkinsfile2 --- Jenkinsfile2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile2 b/Jenkinsfile2 index 7987164ec0..8d6851e19c 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -572,7 +572,7 @@ pipeline { cd ${WKC}/tests/parallel_test ./run_scan_container.sh -d ${WKDIR} -b ${BRANCH_NAME}_${BUILD_ID} -f ${WKDIR}/tmp/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt ''' + extra_param + ''' ''' - if ( file_no_doc_changed =~ ${tdgpt_file} ) { + if ( file_no_doc_changed =~ /forecastoperator.c|anomalywindowoperator.c|tsim\/analytics|tdgpt_cases.task|tanalytics/ ) { sh ''' cd ${WKC}/tests/parallel_test export DEFAULT_RETRY_TIME=2 From 1d4eb3b5b9745aa98f178dd6f641083b75e4169a Mon Sep 17 00:00:00 2001 From: haoranchen Date: Mon, 23 Dec 2024 20:40:44 +0800 Subject: [PATCH 10/24] Update Jenkinsfile2 --- Jenkinsfile2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile2 b/Jenkinsfile2 index 8d6851e19c..2763076c5f 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -572,7 +572,7 @@ pipeline { cd ${WKC}/tests/parallel_test ./run_scan_container.sh -d ${WKDIR} -b ${BRANCH_NAME}_${BUILD_ID} -f ${WKDIR}/tmp/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt ''' + extra_param + ''' ''' - if ( file_no_doc_changed =~ /forecastoperator.c|anomalywindowoperator.c|tsim\/analytics|tdgpt_cases.task|tanalytics/ ) { + if ( file_no_doc_changed =~ /forecastoperator.c|anomalywindowoperator.c|tsim\/analytics|tdgpt_cases.task|tanalytics/ ) { sh ''' cd ${WKC}/tests/parallel_test export DEFAULT_RETRY_TIME=2 From bc10b0683a4e624c678cd579036ed850a5a6acfe Mon Sep 17 00:00:00 2001 From: haoranchen Date: Mon, 23 Dec 2024 21:10:59 +0800 Subject: [PATCH 11/24] enh: set file strings as args in Jenkinsfile2 --- Jenkinsfile2 | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Jenkinsfile2 b/Jenkinsfile2 index 2763076c5f..df0892cfa1 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -7,7 +7,7 @@ file_zh_changed = '' file_en_changed = '' file_no_doc_changed = '1' file_only_tdgpt_change_except = '1' -tdgpt_file = "forecastoperator.c\\|anomalywindowoperator.c\\|tanalytics.h\\|tanalytics.c\\|tdgpt_cases.task" +tdgpt_file = "forecastoperator.c\\|anomalywindowoperator.c\\|tanalytics.h\\|tanalytics.c\\|tdgpt_cases.task\\|analytics" def abortPreviousBuilds() { def currentJobName = env.JOB_NAME @@ -80,7 +80,7 @@ def check_docs(){ file_only_tdgpt_change_except = sh ( script: ''' cd ${WKC} - git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${CHANGE_TARGET}`|grep -v "^docs/en/"|grep -v "^docs/zh/"|grep -v ".md$" | grep -v ${tdgpt_file} |grep -v "tsim/analytics" |grep -v "tdgpt_cases.task" || : + git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${CHANGE_TARGET}`|grep -v "^docs/en/"|grep -v "^docs/zh/"|grep -v ".md$" | grep -v ${tdgpt_file} " || : ''', returnStdout: true ).trim() @@ -572,7 +572,7 @@ pipeline { cd ${WKC}/tests/parallel_test ./run_scan_container.sh -d ${WKDIR} -b ${BRANCH_NAME}_${BUILD_ID} -f ${WKDIR}/tmp/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt ''' + extra_param + ''' ''' - if ( file_no_doc_changed =~ 
/forecastoperator.c|anomalywindowoperator.c|tsim\/analytics|tdgpt_cases.task|tanalytics/ ) { + if ( file_no_doc_changed =~ /${tdgpt_file}/ ) { sh ''' cd ${WKC}/tests/parallel_test export DEFAULT_RETRY_TIME=2 From f05dc19cff83cd28c1a1de3c94c769f7de513d87 Mon Sep 17 00:00:00 2001 From: haoranchen Date: Mon, 23 Dec 2024 21:14:14 +0800 Subject: [PATCH 12/24] enh: set file strings as args in Jenkinsfile2 --- Jenkinsfile2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile2 b/Jenkinsfile2 index df0892cfa1..dd148dc447 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -80,7 +80,7 @@ def check_docs(){ file_only_tdgpt_change_except = sh ( script: ''' cd ${WKC} - git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${CHANGE_TARGET}`|grep -v "^docs/en/"|grep -v "^docs/zh/"|grep -v ".md$" | grep -v ${tdgpt_file} " || : + git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${CHANGE_TARGET}`|grep -v "^docs/en/"|grep -v "^docs/zh/"|grep -v ".md$" | grep -v ${tdgpt_file} || : ''', returnStdout: true ).trim() From 493a23a4803da53ca7ae5ab61b47ac135ca247eb Mon Sep 17 00:00:00 2001 From: Haolin Wang Date: Tue, 24 Dec 2024 08:09:35 +0800 Subject: [PATCH 13/24] fix: free unallocated memory in tsdbRowClose() --- source/dnode/vnode/src/tsdb/tsdbCache.c | 6 ++---- source/dnode/vnode/src/tsdb/tsdbUtil.c | 4 +++- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbCache.c b/source/dnode/vnode/src/tsdb/tsdbCache.c index 2047b68101..5151ea3958 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCache.c +++ b/source/dnode/vnode/src/tsdb/tsdbCache.c @@ -610,6 +610,7 @@ int32_t tsdbLoadFromImem(SMemTable *imem, int64_t suid, int64_t uid) { int32_t nCol; SArray *ctxArray = pTsdb->rCache.ctxArray; STsdbRowKey tsdbRowKey = {0}; + STSDBRowIter iter = {0}; STbData *pIMem = tsdbGetTbDataFromMemTable(imem, suid, uid); @@ -641,7 +642,6 @@ int32_t tsdbLoadFromImem(SMemTable *imem, int64_t suid, int64_t uid) { tsdbRowGetKey(pMemRow, &tsdbRowKey); - STSDBRowIter iter = {0}; TAOS_CHECK_EXIT(tsdbRowIterOpen(&iter, pMemRow, pTSchema)); int32_t iCol = 0; @@ -685,7 +685,6 @@ int32_t tsdbLoadFromImem(SMemTable *imem, int64_t suid, int64_t uid) { STsdbRowKey tsdbRowKey = {0}; tsdbRowGetKey(pMemRow, &tsdbRowKey); - STSDBRowIter iter = {0}; TAOS_CHECK_EXIT(tsdbRowIterOpen(&iter, pMemRow, pTSchema)); int32_t iCol = 0; @@ -2470,6 +2469,7 @@ static int32_t tsdbCacheGetBatchFromMem(STsdb *pTsdb, tb_uid_t uid, SArray *pLas int numKeys = TARRAY_SIZE(pCidList); MemNextRowIter iter = {0}; SSHashObj *iColHash = NULL; + STSDBRowIter rowIter = {0}; // 1, get from mem, imem filtered with delete info TAOS_CHECK_EXIT(memRowIterOpen(&iter, uid, pTsdb, pTSchema, pr->info.suid, pr->pReadSnap, pr)); @@ -2490,7 +2490,6 @@ static int32_t tsdbCacheGetBatchFromMem(STsdb *pTsdb, tb_uid_t uid, SArray *pLas STsdbRowKey rowKey = {0}; tsdbRowGetKey(pRow, &rowKey); - STSDBRowIter rowIter = {0}; TAOS_CHECK_EXIT(tsdbRowIterOpen(&rowIter, pRow, pTSchema)); int32_t iCol = 0, jCol = 0, jnCol = TARRAY_SIZE(pLastArray); @@ -2564,7 +2563,6 @@ static int32_t tsdbCacheGetBatchFromMem(STsdb *pTsdb, tb_uid_t uid, SArray *pLas STsdbRowKey tsdbRowKey = {0}; tsdbRowGetKey(pRow, &tsdbRowKey); - STSDBRowIter rowIter = {0}; TAOS_CHECK_EXIT(tsdbRowIterOpen(&rowIter, pRow, pTSchema)); iCol = 0; diff --git a/source/dnode/vnode/src/tsdb/tsdbUtil.c b/source/dnode/vnode/src/tsdb/tsdbUtil.c index f807ecf2d6..16f6777765 100644 --- a/source/dnode/vnode/src/tsdb/tsdbUtil.c +++ 
b/source/dnode/vnode/src/tsdb/tsdbUtil.c
@@ -699,9 +699,11 @@ int32_t tsdbRowIterOpen(STSDBRowIter *pIter, TSDBROW *pRow, STSchema *pTSchema)
 }
 
 void tsdbRowClose(STSDBRowIter *pIter) {
-  if (pIter->pRow->type == TSDBROW_ROW_FMT) {
+  if (pIter->pRow && pIter->pRow->type == TSDBROW_ROW_FMT) {
     tRowIterClose(&pIter->pIter);
   }
+  pIter->pRow = NULL;
+  pIter->pIter = NULL;
 }
 
 SColVal *tsdbRowIterNext(STSDBRowIter *pIter) {

From d3d48d93aa37e8f5484b586afe4d891435db916b Mon Sep 17 00:00:00 2001
From: haoranchen
Date: Tue, 24 Dec 2024 09:18:28 +0800
Subject: [PATCH 14/24] revert Jenkinsfile2

---
 Jenkinsfile2 | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Jenkinsfile2 b/Jenkinsfile2
index dd148dc447..fc00c5e2dc 100644
--- a/Jenkinsfile2
+++ b/Jenkinsfile2
@@ -572,7 +572,7 @@ pipeline {
                         cd ${WKC}/tests/parallel_test
                         ./run_scan_container.sh -d ${WKDIR} -b ${BRANCH_NAME}_${BUILD_ID} -f ${WKDIR}/tmp/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt ''' + extra_param + '''
                         '''
-                        if ( file_no_doc_changed =~ /${tdgpt_file}/ ) {
+                        if ( file_no_doc_changed =~ /orecastoperator.c|anomalywindowoperator.c|tanalytics.h|tanalytics.c|tdgpt_cases.task|analytics/ ) {
                             sh '''
                             cd ${WKC}/tests/parallel_test
                             export DEFAULT_RETRY_TIME=2

From f9976c75ef4cbf3e5ed8fcb136646d56d6abe1de Mon Sep 17 00:00:00 2001
From: wangmm0220
Date: Tue, 24 Dec 2024 09:54:05 +0800
Subject: [PATCH 15/24] fix:error rewrite if terrno is not 0

---
 source/dnode/vnode/src/vnd/vnodeSync.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/source/dnode/vnode/src/vnd/vnodeSync.c b/source/dnode/vnode/src/vnd/vnodeSync.c
index 50bedba75d..cea82c13ff 100644
--- a/source/dnode/vnode/src/vnd/vnodeSync.c
+++ b/source/dnode/vnode/src/vnd/vnodeSync.c
@@ -313,7 +313,6 @@ void vnodeProposeWriteMsg(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs)
     if (code != 0) {
       if (code != TSDB_CODE_MSG_PREPROCESSED) {
         vGError("vgId:%d, msg:%p failed to pre-process since %s", vgId, pMsg, tstrerror(code));
-        if (terrno != 0) code = terrno;
       }
       vnodeHandleProposeError(pVnode, pMsg, code);
       rpcFreeCont(pMsg->pCont);

From 8d34a9a532e527ebe9845daef416475c3a68cd98 Mon Sep 17 00:00:00 2001
From: xiao-77
Date: Tue, 24 Dec 2024 11:34:09 +0800
Subject: [PATCH 16/24] Fix(cfg):community can not use alter cfg.

---
 source/util/src/tconfig.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/source/util/src/tconfig.c b/source/util/src/tconfig.c
index 52794af4dd..547cdb6cdf 100644
--- a/source/util/src/tconfig.c
+++ b/source/util/src/tconfig.c
@@ -603,11 +603,11 @@ int32_t checkItemDyn(SConfigItem *pItem, bool isServer) {
     return TSDB_CODE_SUCCESS;
   }
   if (isServer) {
-    if (pItem->dynScope == CFG_DYN_ENT_CLIENT || pItem->dynScope == CFG_DYN_ENT_CLIENT_LAZY) {
+    if (pItem->dynScope == CFG_DYN_CLIENT || pItem->dynScope == CFG_DYN_CLIENT_LAZY) {
       return TSDB_CODE_INVALID_CFG;
     }
   } else {
-    if (pItem->dynScope == CFG_DYN_ENT_SERVER || pItem->dynScope == CFG_DYN_ENT_SERVER_LAZY) {
+    if (pItem->dynScope == CFG_DYN_SERVER || pItem->dynScope == CFG_DYN_SERVER_LAZY) {
       return TSDB_CODE_INVALID_CFG;
     }
   }

From 3c3e8fdbff2aa24659f5ba8c4704764c6fa7c058 Mon Sep 17 00:00:00 2001
From: xiao-77
Date: Tue, 24 Dec 2024 17:34:57 +0800
Subject: [PATCH 17/24] Add wal & config UT.
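The WAL fixtures added below (WalKeepEnv, WalRetentionEnv, WalSkipLevel, WalEncrypted) all share the same lifecycle: walInit() once per suite, walOpen() with an SWalCfg in SetUp(), and walClose()/walCleanUp() on the way out. A condensed sketch of that shared pattern follows; the field values mirror the tests in this patch, while the fixture name, the test body, and the assumption that "walInt.h" provides the needed declarations are illustrative only:

```cpp
#include <gtest/gtest.h>
#include "walInt.h"  // assumed to declare SWal, SWalCfg, walInit, walOpen, walSaveMeta, walClose, walCleanUp

class WalLifecycle : public ::testing::Test {
 protected:
  static void SetUpTestCase() { ASSERT_EQ(walInit(NULL), 0); }
  static void TearDownTestCase() { walCleanUp(); }

  void SetUp() override {
    SWalCfg cfg;
    cfg.rollPeriod = 0;
    cfg.segSize = -1;
    cfg.committed = -1;
    cfg.retentionPeriod = -1;
    cfg.retentionSize = 0;
    cfg.vgId = 1;
    cfg.level = TAOS_WAL_FSYNC;
    pWal = walOpen(TD_TMP_DIR_PATH "wal_lifecycle", &cfg);
    ASSERT_NE(pWal, nullptr);
  }

  void TearDown() override { walClose(pWal); }

  SWal* pWal = nullptr;
};

TEST_F(WalLifecycle, openAndPersistMeta) {
  // A freshly opened WAL should persist its metadata without error.
  EXPECT_EQ(walSaveMeta(pWal), 0);
}
```

The full fixtures below additionally vary cfg.level (TAOS_WAL_SKIP) and cfg.encryptAlgorithm to cover the skip-level and encrypted write paths.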
--- source/libs/wal/inc/walInt.h | 2 +- source/libs/wal/src/walMeta.c | 2 +- source/libs/wal/test/walMetaTest.cpp | 305 +++++++++++++++++++++++---- source/util/src/tconfig.c | 3 +- source/util/test/cfgTest.cpp | 240 ++++++++++++++++++++- 5 files changed, 502 insertions(+), 50 deletions(-) diff --git a/source/libs/wal/inc/walInt.h b/source/libs/wal/inc/walInt.h index 1886541d62..14f6503941 100644 --- a/source/libs/wal/inc/walInt.h +++ b/source/libs/wal/inc/walInt.h @@ -157,7 +157,7 @@ int32_t walLoadMeta(SWal* pWal); int32_t walSaveMeta(SWal* pWal); int32_t walRemoveMeta(SWal* pWal); int32_t walRollFileInfo(SWal* pWal); - +int32_t walScanLogGetLastVer(SWal* pWal, int32_t fileIdx, int64_t* lastVer); int32_t walCheckAndRepairMeta(SWal* pWal); int32_t walCheckAndRepairIdx(SWal* pWal); diff --git a/source/libs/wal/src/walMeta.c b/source/libs/wal/src/walMeta.c index 3faeb53499..470a6b3f40 100644 --- a/source/libs/wal/src/walMeta.c +++ b/source/libs/wal/src/walMeta.c @@ -46,7 +46,7 @@ static FORCE_INLINE int walBuildTmpMetaName(SWal* pWal, char* buf) { return snprintf(buf, WAL_FILE_LEN, "%s/meta-ver.tmp", pWal->path); } -static FORCE_INLINE int32_t walScanLogGetLastVer(SWal* pWal, int32_t fileIdx, int64_t* lastVer) { +FORCE_INLINE int32_t walScanLogGetLastVer(SWal* pWal, int32_t fileIdx, int64_t* lastVer) { int32_t code = 0, lino = 0; int32_t sz = taosArrayGetSize(pWal->fileInfoSet); int64_t retVer = -1; diff --git a/source/libs/wal/test/walMetaTest.cpp b/source/libs/wal/test/walMetaTest.cpp index a958ad74e0..b2875bdca1 100644 --- a/source/libs/wal/test/walMetaTest.cpp +++ b/source/libs/wal/test/walMetaTest.cpp @@ -127,7 +127,7 @@ class WalRetentionEnv : public ::testing::Test { SWalCfg cfg; cfg.rollPeriod = -1; cfg.segSize = -1; - cfg.committed =-1; + cfg.committed = -1; cfg.retentionPeriod = -1; cfg.retentionSize = 0; cfg.rollPeriod = 0; @@ -146,6 +146,83 @@ class WalRetentionEnv : public ::testing::Test { const char* pathName = TD_TMP_DIR_PATH "wal_test"; }; +class WalSkipLevel : public ::testing::Test { + protected: + static void SetUpTestCase() { + int code = walInit(NULL); + ASSERT(code == 0); + } + + static void TearDownTestCase() { walCleanUp(); } + + void walResetEnv() { + TearDown(); + taosRemoveDir(pathName); + SetUp(); + } + + void SetUp() override { + SWalCfg cfg; + cfg.rollPeriod = -1; + cfg.segSize = -1; + cfg.committed = -1; + cfg.retentionPeriod = -1; + cfg.retentionSize = 0; + cfg.rollPeriod = 0; + cfg.vgId = 1; + cfg.level = TAOS_WAL_SKIP; + pWal = walOpen(pathName, &cfg); + ASSERT(pWal != NULL); + } + + void TearDown() override { + walClose(pWal); + pWal = NULL; + } + + SWal* pWal = NULL; + const char* pathName = TD_TMP_DIR_PATH "wal_test"; +}; + +class WalEncrypted : public ::testing::Test { + protected: + static void SetUpTestCase() { + int code = walInit(NULL); + ASSERT(code == 0); + } + + static void TearDownTestCase() { walCleanUp(); } + + void walResetEnv() { + TearDown(); + taosRemoveDir(pathName); + SetUp(); + } + + void SetUp() override { + SWalCfg cfg; + cfg.rollPeriod = -1; + cfg.segSize = -1; + cfg.committed = -1; + cfg.retentionPeriod = -1; + cfg.retentionSize = 0; + cfg.rollPeriod = 0; + cfg.vgId = 0; + cfg.level = TAOS_WAL_FSYNC; + cfg.encryptAlgorithm = 1; + pWal = walOpen(pathName, &cfg); + ASSERT(pWal != NULL); + } + + void TearDown() override { + walClose(pWal); + pWal = NULL; + } + + SWal* pWal = NULL; + const char* pathName = TD_TMP_DIR_PATH "wal_test"; +}; + TEST_F(WalCleanEnv, createNew) { walRollFileInfo(pWal); ASSERT(pWal->fileInfoSet != NULL); @@ 
-373,6 +450,183 @@ TEST_F(WalKeepEnv, readHandleRead) { walCloseReader(pRead); } +TEST_F(WalKeepEnv, walLogExist) { + walResetEnv(); + int code; + SWalReader* pRead = walOpenReader(pWal, NULL, 0); + ASSERT(pRead != NULL); + + int i; + for (i = 0; i < 100; i++) { + char newStr[100]; + sprintf(newStr, "%s-%d", ranStr, i); + int len = strlen(newStr); + code = walAppendLog(pWal, i, 0, syncMeta, newStr, len); + ASSERT_EQ(code, 0); + } + walLogExist(pWal, 0); + ASSERT_EQ(code, 0); + walCloseReader(pRead); +} + +TEST_F(WalKeepEnv, walScanLogGetLastVerHeadMissMatch) { + walResetEnv(); + int code; + do { + char newStr[100]; + sprintf(newStr, "%s-%d", ranStr, 0); + sprintf(newStr, "%s-%d", ranStr, 0); + int len = strlen(newStr); + code = walAppendLog(pWal, 0, 0, syncMeta, newStr, len); + } while (0); + + int i = 0; + char newStr[100]; + sprintf(newStr, "%s-%d", ranStr, i); + int len = strlen(newStr); + int64_t offset = walGetCurFileOffset(pWal); + SWalFileInfo* pFileInfo = walGetCurFileInfo(pWal); + + pWal->writeHead.head.version = i; + pWal->writeHead.head.bodyLen = len; + pWal->writeHead.head.msgType = 0; + pWal->writeHead.head.ingestTs = taosGetTimestampUs(); + + pWal->writeHead.head.syncMeta = syncMeta; + + pWal->writeHead.cksumHead = 1; + pWal->writeHead.cksumBody = walCalcBodyCksum(newStr, len); + taosWriteFile(pWal->pLogFile, &pWal->writeHead, sizeof(SWalCkHead)); + taosWriteFile(pWal->pLogFile, newStr, len); + + int64_t lastVer = 0; + code = walScanLogGetLastVer(pWal, 0, &lastVer); + ASSERT_EQ(code, TSDB_CODE_WAL_CHKSUM_MISMATCH); +} + +TEST_F(WalKeepEnv, walScanLogGetLastVerBodyMissMatch) { + walResetEnv(); + int code; + do { + char newStr[100]; + sprintf(newStr, "%s-%d", ranStr, 0); + sprintf(newStr, "%s-%d", ranStr, 0); + int len = strlen(newStr); + code = walAppendLog(pWal, 0, 0, syncMeta, newStr, len); + } while (0); + + int i = 0; + char newStr[100]; + sprintf(newStr, "%s-%d", ranStr, i); + int len = strlen(newStr); + int64_t offset = walGetCurFileOffset(pWal); + SWalFileInfo* pFileInfo = walGetCurFileInfo(pWal); + + pWal->writeHead.head.version = i; + pWal->writeHead.head.bodyLen = len; + pWal->writeHead.head.msgType = 0; + pWal->writeHead.head.ingestTs = taosGetTimestampUs(); + + pWal->writeHead.head.syncMeta = syncMeta; + + pWal->writeHead.cksumHead = walCalcHeadCksum(&pWal->writeHead); + pWal->writeHead.cksumBody = 1; + taosWriteFile(pWal->pLogFile, &pWal->writeHead, sizeof(SWalCkHead)); + taosWriteFile(pWal->pLogFile, newStr, len); + + int64_t lastVer = 0; + code = walScanLogGetLastVer(pWal, 0, &lastVer); + ASSERT_EQ(code, TSDB_CODE_WAL_CHKSUM_MISMATCH); +} + +TEST_F(WalKeepEnv, walCheckAndRepairIdxFile) { + walResetEnv(); + int code; + do { + char newStr[100]; + sprintf(newStr, "%s-%d", ranStr, 0); + sprintf(newStr, "%s-%d", ranStr, 0); + int len = strlen(newStr); + code = walAppendLog(pWal, 0, 0, syncMeta, newStr, len); + } while (0); + SWalFileInfo* pFileInfo = walGetCurFileInfo(pWal); + for (int i = 1; i < 100; i++) { + char newStr[100]; + sprintf(newStr, "%s-%d", ranStr, i); + int len = strlen(newStr); + pWal->writeHead.head.version = i; + pWal->writeHead.head.bodyLen = len; + pWal->writeHead.head.msgType = 0; + pWal->writeHead.head.ingestTs = taosGetTimestampUs(); + pWal->writeHead.head.syncMeta = syncMeta; + pWal->writeHead.cksumHead = walCalcHeadCksum(&pWal->writeHead); + pWal->writeHead.cksumBody = walCalcBodyCksum(newStr, len); + taosWriteFile(pWal->pLogFile, &pWal->writeHead, sizeof(SWalCkHead)); + taosWriteFile(pWal->pLogFile, newStr, len); + } + pWal->vers.lastVer = 
99; + pFileInfo->lastVer = 99; + code = walCheckAndRepairIdx(pWal); + ASSERT_EQ(code, 0); +} + +TEST_F(WalKeepEnv, walRestoreFromSnapshot1) { + walResetEnv(); + int code; + + int i; + for (i = 0; i < 100; i++) { + char newStr[100]; + sprintf(newStr, "%s-%d", ranStr, i); + int len = strlen(newStr); + code = walAppendLog(pWal, i, 0, syncMeta, newStr, len); + ASSERT_EQ(code, 0); + } + code = walRestoreFromSnapshot(pWal, 50); + ASSERT_EQ(code, 0); +} + +TEST_F(WalKeepEnv, walRestoreFromSnapshot2) { + walResetEnv(); + int code; + + int i; + for (i = 0; i < 100; i++) { + char newStr[100]; + sprintf(newStr, "%s-%d", ranStr, i); + int len = strlen(newStr); + code = walAppendLog(pWal, i, 0, syncMeta, newStr, len); + ASSERT_EQ(code, 0); + } + SWalRef* ref = walOpenRef(pWal); + ref->refVer = 10; + code = walRestoreFromSnapshot(pWal, 99); + ASSERT_EQ(code, -1); +} + +TEST_F(WalKeepEnv, walRollback) { + walResetEnv(); + int code; + + int i; + for (i = 0; i < 100; i++) { + char newStr[100]; + sprintf(newStr, "%s-%d", ranStr, i); + int len = strlen(newStr); + code = walAppendLog(pWal, i, 0, syncMeta, newStr, len); + ASSERT_EQ(code, 0); + } + code = walRollback(pWal, -1); + ASSERT_EQ(code, TSDB_CODE_WAL_INVALID_VER); + pWal->vers.lastVer = 50; + pWal->vers.commitVer = 40; + pWal->vers.snapshotVer = 40; + SWalFileInfo* fileInfo = walGetCurFileInfo(pWal); + + code = walRollback(pWal, 48); + ASSERT_EQ(code, 0); +} + TEST_F(WalRetentionEnv, repairMeta1) { walResetEnv(); int code; @@ -456,44 +710,6 @@ TEST_F(WalRetentionEnv, repairMeta1) { walCloseReader(pRead); } -class WalSkipLevel : public ::testing::Test { - protected: - static void SetUpTestCase() { - int code = walInit(NULL); - ASSERT(code == 0); - } - - static void TearDownTestCase() { walCleanUp(); } - - void walResetEnv() { - TearDown(); - taosRemoveDir(pathName); - SetUp(); - } - - void SetUp() override { - SWalCfg cfg; - cfg.rollPeriod = -1; - cfg.segSize = -1; - cfg.committed =-1; - cfg.retentionPeriod = -1; - cfg.retentionSize = 0; - cfg.rollPeriod = 0; - cfg.vgId = 1; - cfg.level = TAOS_WAL_SKIP; - pWal = walOpen(pathName, &cfg); - ASSERT(pWal != NULL); - } - - void TearDown() override { - walClose(pWal); - pWal = NULL; - } - - SWal* pWal = NULL; - const char* pathName = TD_TMP_DIR_PATH "wal_test"; -}; - TEST_F(WalSkipLevel, restart) { walResetEnv(); int code; @@ -533,4 +749,15 @@ TEST_F(WalSkipLevel, roll) { ASSERT_EQ(code, 0); code = walEndSnapshot(pWal); ASSERT_EQ(code, 0); +} + +TEST_F(WalEncrypted, write) { + int code; + for (int i = 0; i < 100; i++) { + code = walAppendLog(pWal, i, i + 1, syncMeta, (void*)ranStr, ranStrLen); + ASSERT_EQ(code, 0); + ASSERT_EQ(pWal->vers.lastVer, i); + } + code = walSaveMeta(pWal); + ASSERT_EQ(code, 0); } \ No newline at end of file diff --git a/source/util/src/tconfig.c b/source/util/src/tconfig.c index ee88996c29..77203c4c14 100644 --- a/source/util/src/tconfig.c +++ b/source/util/src/tconfig.c @@ -517,7 +517,7 @@ int32_t cfgSetItemVal(SConfigItem *pItem, const char *name, const char *value, E int32_t code = TSDB_CODE_SUCCESS; if (pItem == NULL) { - TAOS_RETURN(TSDB_CODE_INVALID_CFG); + TAOS_RETURN(TSDB_CODE_CFG_NOT_FOUND); } switch (pItem->dtype) { case CFG_DTYPE_BOOL: { @@ -627,6 +627,7 @@ int32_t cfgCheckRangeForDynUpdate(SConfig *pCfg, const char *name, const char *p cfgUnLock(pCfg); TAOS_RETURN(code); } + if ((pItem->category == CFG_CATEGORY_GLOBAL) && alterType == CFG_ALTER_DNODE) { uError("failed to config:%s, not support update global config on only one dnode", name); cfgUnLock(pCfg); diff --git 
a/source/util/test/cfgTest.cpp b/source/util/test/cfgTest.cpp index a5812d375b..d2967131f8 100644 --- a/source/util/test/cfgTest.cpp +++ b/source/util/test/cfgTest.cpp @@ -10,6 +10,8 @@ */ #include +#include +#include "osFile.h" #include "tconfig.h" class CfgTest : public ::testing::Test { @@ -35,6 +37,9 @@ TEST_F(CfgTest, 01_Str) { EXPECT_STREQ(cfgStypeStr(CFG_STYPE_ENV_CMD), "env_cmd"); EXPECT_STREQ(cfgStypeStr(CFG_STYPE_APOLLO_URL), "apollo_url"); EXPECT_STREQ(cfgStypeStr(CFG_STYPE_ARG_LIST), "arg_list"); + EXPECT_STREQ(cfgStypeStr(CFG_STYPE_TAOS_OPTIONS), "taos_options"); + EXPECT_STREQ(cfgStypeStr(CFG_STYPE_ALTER_CLIENT_CMD), "alter_client_cmd"); + EXPECT_STREQ(cfgStypeStr(CFG_STYPE_ALTER_SERVER_CMD), "alter_server_cmd"); EXPECT_STREQ(cfgStypeStr(ECfgSrcType(1024)), "invalid"); EXPECT_STREQ(cfgDtypeStr(CFG_DTYPE_NONE), "none"); @@ -47,6 +52,10 @@ TEST_F(CfgTest, 01_Str) { EXPECT_STREQ(cfgDtypeStr(CFG_DTYPE_DIR), "dir"); EXPECT_STREQ(cfgDtypeStr(CFG_DTYPE_DIR), "dir"); EXPECT_STREQ(cfgDtypeStr(CFG_DTYPE_DIR), "dir"); + EXPECT_STREQ(cfgDtypeStr(CFG_DTYPE_DOUBLE), "double"); + EXPECT_STREQ(cfgDtypeStr(CFG_DTYPE_LOCALE), "locale"); + EXPECT_STREQ(cfgDtypeStr(CFG_DTYPE_CHARSET), "charset"); + EXPECT_STREQ(cfgDtypeStr(CFG_DTYPE_TIMEZONE), "timezone"); EXPECT_STREQ(cfgDtypeStr(ECfgDataType(1024)), "invalid"); } @@ -57,24 +66,30 @@ TEST_F(CfgTest, 02_Basic) { ASSERT_EQ(code, TSDB_CODE_SUCCESS); ASSERT_NE(pConfig, nullptr); - EXPECT_EQ(cfgAddBool(pConfig, "test_bool", 0, 0, 0, 0), 0); - EXPECT_EQ(cfgAddInt32(pConfig, "test_int32", 1, 0, 16, 0, 0, 0), 0); - EXPECT_EQ(cfgAddInt64(pConfig, "test_int64", 2, 0, 16, 0, 0, 0), 0); - EXPECT_EQ(cfgAddFloat(pConfig, "test_float", 3, 0, 16, 0, 0, 0), 0); - EXPECT_EQ(cfgAddString(pConfig, "test_string", "4", 0, 0, 0), 0); - EXPECT_EQ(cfgAddDir(pConfig, "test_dir", TD_TMP_DIR_PATH, 0, 0, 0), 0); + EXPECT_EQ(cfgAddBool(pConfig, "test_bool", 0, 0, 6, 0), 0); + + EXPECT_EQ(cfgAddInt32(pConfig, "test_int32", 21, 0, 16, 0, 1, 0), TSDB_CODE_OUT_OF_RANGE); + EXPECT_EQ(cfgAddInt32(pConfig, "test_int32", 1, 0, 16, 0, 1, 0), 0); + + EXPECT_EQ(cfgAddInt64(pConfig, "test_int64", 21, 0, 16, 0, 2, 0), TSDB_CODE_OUT_OF_RANGE); + EXPECT_EQ(cfgAddInt64(pConfig, "test_int64", 2, 0, 16, 0, 2, 0), 0); + + EXPECT_EQ(cfgAddFloat(pConfig, "test_float", 21, 0, 16, 0, 6, 0), TSDB_CODE_OUT_OF_RANGE); + EXPECT_EQ(cfgAddFloat(pConfig, "test_float", 3, 0, 16, 0, 6, 0), 0); + EXPECT_EQ(cfgAddString(pConfig, "test_string", "4", 0, 6, 0), 0); + EXPECT_EQ(cfgAddDir(pConfig, "test_dir", TD_TMP_DIR_PATH, 0, 6, 0), 0); EXPECT_EQ(cfgGetSize(pConfig), 6); int32_t size = cfgGetSize(pConfig); - SConfigItem* pItem = NULL; + SConfigItem *pItem = NULL; SConfigIter *pIter = NULL; code = cfgCreateIter(pConfig, &pIter); ASSERT_EQ(code, TSDB_CODE_SUCCESS); ASSERT_NE(pIter, nullptr); - while((pItem = cfgNextIter(pIter)) != NULL) { + while ((pItem = cfgNextIter(pIter)) != NULL) { switch (pItem->dtype) { case CFG_DTYPE_BOOL: printf("index:%d, cfg:%s value:%d\n", size, pItem->name, pItem->bval); @@ -115,12 +130,16 @@ TEST_F(CfgTest, 02_Basic) { EXPECT_EQ(pItem->dtype, CFG_DTYPE_INT32); EXPECT_STREQ(pItem->name, "test_int32"); EXPECT_EQ(pItem->i32, 1); + code = cfgSetItem(pConfig, "test_int32", "21", CFG_STYPE_DEFAULT, true); + ASSERT_EQ(code, TSDB_CODE_OUT_OF_RANGE); pItem = cfgGetItem(pConfig, "test_int64"); EXPECT_EQ(pItem->stype, CFG_STYPE_DEFAULT); EXPECT_EQ(pItem->dtype, CFG_DTYPE_INT64); EXPECT_STREQ(pItem->name, "test_int64"); EXPECT_EQ(pItem->i64, 2); + code = cfgSetItem(pConfig, "test_int64", 
"21", CFG_STYPE_DEFAULT, true); + ASSERT_EQ(code, TSDB_CODE_OUT_OF_RANGE); pItem = cfgGetItem(pConfig, "test_float"); EXPECT_EQ(pItem->stype, CFG_STYPE_DEFAULT); @@ -140,5 +159,210 @@ TEST_F(CfgTest, 02_Basic) { EXPECT_STREQ(pItem->name, "test_dir"); EXPECT_STREQ(pItem->str, TD_TMP_DIR_PATH); + code = cfgGetAndSetItem(pConfig, &pItem, "err_cfg", "err_val", CFG_STYPE_DEFAULT, true); + ASSERT_EQ(code, TSDB_CODE_CFG_NOT_FOUND); + + code = cfgCheckRangeForDynUpdate(pConfig, "test_int32", "4", false, CFG_ALTER_LOCAL); + ASSERT_EQ(code, TSDB_CODE_INVALID_CFG); + + code = cfgCheckRangeForDynUpdate(pConfig, "test_int64", "4", true, CFG_ALTER_LOCAL); + ASSERT_EQ(code, TSDB_CODE_INVALID_CFG); + + code = cfgCheckRangeForDynUpdate(pConfig, "test_bool", "3", false, CFG_ALTER_LOCAL); + ASSERT_EQ(code, TSDB_CODE_OUT_OF_RANGE); + + code = cfgCheckRangeForDynUpdate(pConfig, "test_int32", "74", true, CFG_ALTER_LOCAL); + ASSERT_EQ(code, TSDB_CODE_OUT_OF_RANGE); + + code = cfgCheckRangeForDynUpdate(pConfig, "test_int64", "74", false, CFG_ALTER_LOCAL); + ASSERT_EQ(code, TSDB_CODE_OUT_OF_RANGE); + + code = cfgCheckRangeForDynUpdate(pConfig, "test_float", "74", false, CFG_ALTER_LOCAL); + ASSERT_EQ(code, TSDB_CODE_OUT_OF_RANGE); + cfgCleanup(pConfig); } + +TEST_F(CfgTest, initWithArray) { + SConfig *pConfig = NULL; + int32_t code = cfgInit(&pConfig); + + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + ASSERT_NE(pConfig, nullptr); + + EXPECT_EQ(cfgAddBool(pConfig, "test_bool", 0, 0, 0, 0), 0); + EXPECT_EQ(cfgAddInt32(pConfig, "test_int32", 1, 0, 16, 0, 0, 0), 0); + EXPECT_EQ(cfgAddInt64(pConfig, "test_int64", 2, 0, 16, 0, 0, 0), 0); + EXPECT_EQ(cfgAddFloat(pConfig, "test_float", 3, 0, 16, 0, 0, 0), 0); + EXPECT_EQ(cfgAddString(pConfig, "test_string", "4", 0, 0, 0), 0); + EXPECT_EQ(cfgAddDir(pConfig, "test_dir", TD_TMP_DIR_PATH, 0, 0, 0), 0); + + SArray *pArgs = taosArrayInit(6, sizeof(SConfigPair)); + SConfigPair *pPair = (SConfigPair *)taosMemoryMalloc(sizeof(SConfigPair)); + pPair->name = "test_bool"; + pPair->value = "1"; + taosArrayPush(pArgs, pPair); + SConfigPair *pPair1 = (SConfigPair *)taosMemoryMalloc(sizeof(SConfigPair)); + pPair1->name = "test_int32"; + pPair1->value = "2"; + taosArrayPush(pArgs, pPair1); + SConfigPair *pPair2 = (SConfigPair *)taosMemoryMalloc(sizeof(SConfigPair)); + pPair2->name = "test_int64"; + pPair2->value = "3"; + taosArrayPush(pArgs, pPair2); + SConfigPair *pPair3 = (SConfigPair *)taosMemoryMalloc(sizeof(SConfigPair)); + pPair3->name = "test_float"; + pPair3->value = "4"; + taosArrayPush(pArgs, pPair3); + SConfigPair *pPair4 = (SConfigPair *)taosMemoryMalloc(sizeof(SConfigPair)); + pPair4->name = "test_string"; + pPair4->value = "5"; + taosArrayPush(pArgs, pPair4); + SConfigPair *pPair5 = (SConfigPair *)taosMemoryMalloc(sizeof(SConfigPair)); + pPair5->name = "test_dir"; + pPair5->value = TD_TMP_DIR_PATH; + taosArrayPush(pArgs, pPair5); + code = cfgLoadFromArray(pConfig, pArgs); + ASSERT_EQ(code, TSDB_CODE_SUCCESS); +} + +TEST_F(CfgTest, cfgDumpItemCategory) { + SConfig *pConfig = NULL; + int32_t code = cfgInit(&pConfig); + + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + ASSERT_NE(pConfig, nullptr); + + EXPECT_EQ(cfgAddBool(pConfig, "test_bool", 0, 0, 6, 100), 0); + + SConfigItem *pItem = NULL; + pItem = cfgGetItem(pConfig, "test_bool"); + EXPECT_EQ(pItem->stype, CFG_STYPE_DEFAULT); + EXPECT_EQ(pItem->dtype, CFG_DTYPE_BOOL); + EXPECT_STREQ(pItem->name, "test_bool"); + EXPECT_EQ(pItem->bval, 0); + + EXPECT_EQ(cfgDumpItemCategory(pItem, NULL, 0, 0), TSDB_CODE_INVALID_CFG); +} + +TEST_F(CfgTest, 
cfgDumpCfgS3) { + SConfig *pConfig = NULL; + int32_t code = cfgInit(&pConfig); + + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + ASSERT_NE(pConfig, nullptr); + + cfgAddInt32(pConfig, "s3MigrateIntervalSec", 60 * 60, 600, 100000, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER, + CFG_CATEGORY_GLOBAL); + cfgAddBool(pConfig, "s3MigrateEnabled", 60 * 60, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER, CFG_CATEGORY_GLOBAL); + cfgAddString(pConfig, "s3Accesskey", "", CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER_LAZY, CFG_CATEGORY_GLOBAL); + cfgAddString(pConfig, "s3Endpoint", "", CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER_LAZY, CFG_CATEGORY_GLOBAL); + cfgAddString(pConfig, "s3BucketName", "", CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER_LAZY, CFG_CATEGORY_GLOBAL); + cfgAddInt32(pConfig, "s3PageCacheSize", 10, 4, 1024 * 1024 * 1024, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER_LAZY, + CFG_CATEGORY_GLOBAL); + cfgAddInt32(pConfig, "s3UploadDelaySec", 10, 1, 60 * 60 * 24 * 30, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER, + CFG_CATEGORY_GLOBAL); + cfgAddDir(pConfig, "scriptDir", configDir, CFG_SCOPE_BOTH, CFG_DYN_NONE, CFG_CATEGORY_LOCAL); + + cfgDumpCfgS3(pConfig, false, false); + + cfgDumpCfgS3(pConfig, true, true); + + cfgDumpCfgS3(pConfig, false, true); + + cfgDumpCfgS3(pConfig, true, false); +} + +TEST_F(CfgTest, cfgLoadFromEnvVar) { + SConfig *pConfig = NULL; + int32_t code = cfgInit(&pConfig); + + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + ASSERT_NE(pConfig, nullptr); + + EXPECT_EQ(cfgAddBool(pConfig, "test_bool", 0, 0, 6, 0), 0); + + EXPECT_EQ(cfgAddInt32(pConfig, "test_int32", 21, 0, 16, 0, 1, 0), TSDB_CODE_OUT_OF_RANGE); + EXPECT_EQ(cfgAddInt32(pConfig, "test_int32", 1, 0, 16, 0, 1, 0), 0); + + EXPECT_EQ(cfgAddInt64(pConfig, "test_int64", 21, 0, 16, 0, 2, 0), TSDB_CODE_OUT_OF_RANGE); + EXPECT_EQ(cfgAddInt64(pConfig, "test_int64", 2, 0, 16, 0, 2, 0), 0); + + EXPECT_EQ(cfgAddFloat(pConfig, "test_float", 21, 0, 16, 0, 6, 0), TSDB_CODE_OUT_OF_RANGE); + EXPECT_EQ(cfgAddFloat(pConfig, "test_float", 3, 0, 16, 0, 6, 0), 0); + EXPECT_EQ(cfgAddString(pConfig, "test_string", "4", 0, 6, 0), 0); + EXPECT_EQ(cfgAddDir(pConfig, "test_dir", TD_TMP_DIR_PATH, 0, 6, 0), 0); + + setenv("test_bool", "1", 1); + setenv("test_int32", "2", 1); + setenv("test_int64", "3", 1); + setenv("test_float", "4", 1); + setenv("test_string", "5", 1); + setenv("test_dir", TD_TMP_DIR_PATH, 1); + + ASSERT_EQ(cfgLoad(pConfig, CFG_STYPE_ENV_VAR, "test_bool"), TSDB_CODE_SUCCESS); +} + +TEST_F(CfgTest, cfgLoadFromEnvCmd) { + SConfig *pConfig = NULL; + int32_t code = cfgInit(&pConfig); + + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + ASSERT_NE(pConfig, nullptr); + + EXPECT_EQ(cfgAddBool(pConfig, "test_bool", 0, 0, 6, 0), 0); + + EXPECT_EQ(cfgAddInt32(pConfig, "test_int32", 21, 0, 16, 0, 1, 0), TSDB_CODE_OUT_OF_RANGE); + EXPECT_EQ(cfgAddInt32(pConfig, "test_int32", 1, 0, 16, 0, 1, 0), 0); + + EXPECT_EQ(cfgAddInt64(pConfig, "test_int64", 21, 0, 16, 0, 2, 0), TSDB_CODE_OUT_OF_RANGE); + EXPECT_EQ(cfgAddInt64(pConfig, "test_int64", 2, 0, 16, 0, 2, 0), 0); + + EXPECT_EQ(cfgAddFloat(pConfig, "test_float", 21, 0, 16, 0, 6, 0), TSDB_CODE_OUT_OF_RANGE); + EXPECT_EQ(cfgAddFloat(pConfig, "test_float", 3, 0, 16, 0, 6, 0), 0); + EXPECT_EQ(cfgAddString(pConfig, "test_string", "4", 0, 6, 0), 0); + + const char *envCmd[] = {"test_bool=1", "test_int32=2", "test_int64=3", "test_float=4", "test_string=5", NULL}; + + ASSERT_EQ(cfgLoad(pConfig, CFG_STYPE_ENV_CMD, envCmd), TSDB_CODE_SUCCESS); +} + +TEST_F(CfgTest, cfgLoadFromEnvFile) { + SConfig *pConfig = NULL; + int32_t code = cfgInit(&pConfig); + + ASSERT_EQ(code, 
TSDB_CODE_SUCCESS); + ASSERT_NE(pConfig, nullptr); + + TdFilePtr envFile = NULL; + const char *envFilePath = TD_TMP_DIR_PATH "envFile"; + envFile = taosOpenFile(envFilePath, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_APPEND); + const char *buf = "test_bool=1\ntest_int32=2\ntest_int64=3\ntest_float=4\ntest_string=5\n"; + taosWriteFile(envFile, buf, strlen(buf)); + taosCloseFile(&envFile); + ASSERT_EQ(cfgLoad(pConfig, CFG_STYPE_ENV_FILE, envFilePath), TSDB_CODE_SUCCESS); + + taosRemoveFile(envFilePath); +} + +TEST_F(CfgTest, cfgLoadFromApollUrl) { + SConfig *pConfig = NULL; + int32_t code = cfgInit(&pConfig); + + ASSERT_EQ(code, TSDB_CODE_SUCCESS); + ASSERT_NE(pConfig, nullptr); + + TdFilePtr jsonFile = NULL; + const char *jsonFilePath = TD_TMP_DIR_PATH "envJson.json"; + jsonFile = taosOpenFile(jsonFilePath, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_APPEND); + const char *buf = + "{\"test_bool\":\"1\",\"test_int32\":\"2\",\"test_int64\":\"3\",\"test_float\":\"4\",\"test_string\":\"5\"}"; + taosWriteFile(jsonFile, buf, strlen(buf)); + taosCloseFile(&jsonFile); + + char str[256]; + snprintf(str, sizeof(str), "jsonFile:%s", jsonFilePath); + ASSERT_EQ(cfgLoad(pConfig, CFG_STYPE_APOLLO_URL, str), TSDB_CODE_INVALID_DATA_FMT); + + taosRemoveFile(jsonFilePath); +} \ No newline at end of file From 8150c75d62b0e612246fa9206ad091c97a67833e Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 24 Dec 2024 17:36:00 +0800 Subject: [PATCH 18/24] Update 03-ad.md --- docs/zh/06-advanced/06-TDgpt/06-dev/03-ad.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/zh/06-advanced/06-TDgpt/06-dev/03-ad.md b/docs/zh/06-advanced/06-TDgpt/06-dev/03-ad.md index 5a9ac20140..7c85d41c50 100644 --- a/docs/zh/06-advanced/06-TDgpt/06-dev/03-ad.md +++ b/docs/zh/06-advanced/06-TDgpt/06-dev/03-ad.md @@ -44,10 +44,10 @@ class _MyAnomalyDetectionService(AbstractAnomalyDetectionService): def set_params(self, params): """该算法无需任何输入参数,直接重载父类该函数,不处理算法参数设置逻辑""" - pass + return super().set_params(params) ``` -将该文件保存在 `./lib/taosanalytics/algo/ad/` 目录下,然后重启 taosanode 服务。在 TDengine 命令行接口 taos 中执行 `SHOW ANODES FULL` 就能够看到新加入的算法,然后应用就可以通过 SQL 语句调用该检测算法。 +将该文件保存在 `./lib/taosanalytics/algo/ad/` 目录下,然后重启 taosanode 服务。在 TDengine 命令行接口 taos 中执行 `SHOW ANODES FULL` 就能够看到新加入的算法,然后就可以通过 SQL 语句调用该算法。 ```SQL --- 对 col 列进行异常检测,通过指定 algo 参数为 myad 来调用新添加的异常检测类 @@ -65,7 +65,7 @@ def test_myad(self): s = loader.get_service("myad") # 设置需要进行检测的输入数据 - s.set_input_list(AnomalyDetectionTest.input_list) + s.set_input_list(AnomalyDetectionTest.input_list, None) r = s.execute() From 808459f887080c08f0a1e88d10cd8045e3f91092 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 24 Dec 2024 18:11:56 +0800 Subject: [PATCH 19/24] Update 02-forecast.md --- docs/zh/06-advanced/06-TDgpt/06-dev/02-forecast.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/06-advanced/06-TDgpt/06-dev/02-forecast.md b/docs/zh/06-advanced/06-TDgpt/06-dev/02-forecast.md index 5395dc374b..841722c6a2 100644 --- a/docs/zh/06-advanced/06-TDgpt/06-dev/02-forecast.md +++ b/docs/zh/06-advanced/06-TDgpt/06-dev/02-forecast.md @@ -99,7 +99,7 @@ def test_myfc(self): s = loader.get_service("myfc") # 设置用于预测分析的数据 - s.set_input_list(self.get_input_list()) + s.set_input_list(self.get_input_list(), None) # 检查预测结果应该全部为 1 r = s.set_params( {"fc_rows": 10, "start_ts": 171000000, "time_step": 86400 * 30, "start_p": 0} From 65186dcb0fb5d2d015ee987435ff6166c5d01d3d Mon Sep 17 00:00:00 2001 From: haoranchen Date: Tue, 24 Dec 2024 18:16:30 +0800 Subject: [PATCH 
20/24] revert Jenkinsfile2 --- Jenkinsfile2 | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/Jenkinsfile2 b/Jenkinsfile2 index fc00c5e2dc..1b2f28908c 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -7,7 +7,8 @@ file_zh_changed = '' file_en_changed = '' file_no_doc_changed = '1' file_only_tdgpt_change_except = '1' -tdgpt_file = "forecastoperator.c\\|anomalywindowoperator.c\\|tanalytics.h\\|tanalytics.c\\|tdgpt_cases.task\\|analytics" +tdgpt_file = "forecastoperator.c|anomalywindowoperator.c|tanalytics.h|tanalytics.c|tdgpt_cases.task|analytics" + def abortPreviousBuilds() { def currentJobName = env.JOB_NAME @@ -69,7 +70,7 @@ def check_docs(){ returnStdout: true ) - file_no_doc_changed = sh ( + def file_no_doc_changed = sh ( script: ''' cd ${WKC} git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${CHANGE_TARGET}`|grep -v "^docs/en/"|grep -v "^docs/zh/"|grep -v ".md$" || : @@ -80,7 +81,7 @@ def check_docs(){ file_only_tdgpt_change_except = sh ( script: ''' cd ${WKC} - git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${CHANGE_TARGET}`|grep -v "^docs/en/"|grep -v "^docs/zh/"|grep -v ".md$" | grep -v ${tdgpt_file} || : + git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${CHANGE_TARGET}`|grep -v "^docs/en/"|grep -v "^docs/zh/"|grep -v ".md$" | grep -Ev "forecastoperator.c|anomalywindowoperator.c|tanalytics.h|tanalytics.c|tdgpt_cases.task|analytics" ||: ''', returnStdout: true ).trim() @@ -572,7 +573,7 @@ pipeline { cd ${WKC}/tests/parallel_test ./run_scan_container.sh -d ${WKDIR} -b ${BRANCH_NAME}_${BUILD_ID} -f ${WKDIR}/tmp/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt ''' + extra_param + ''' ''' - if ( file_no_doc_changed =~ /orecastoperator.c|anomalywindowoperator.c|tanalytics.h|tanalytics.c|tdgpt_cases.task|analytics/ ) { + if ( file_no_doc_changed =~ /forecastoperator.c|anomalywindowoperator.c|tanalytics.h|tanalytics.c|tdgpt_cases.task|analytics/ ) { sh ''' cd ${WKC}/tests/parallel_test export DEFAULT_RETRY_TIME=2 From 1f434d7d4e064996b1576ca63663cc5db9157c7d Mon Sep 17 00:00:00 2001 From: xiao-77 Date: Tue, 24 Dec 2024 19:05:02 +0800 Subject: [PATCH 21/24] Add case for sdbFile.c . 
--- tests/parallel_test/cases.task | 1 + tests/system-test/6-cluster/mnodeEncrypt.py | 69 +++++++++++++++++++++ 2 files changed, 70 insertions(+) create mode 100644 tests/system-test/6-cluster/mnodeEncrypt.py diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 1aef2195db..82bbeaaeb5 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -777,6 +777,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys.py -N 4 -M 1 ,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups.py -N 4 -M 1 ,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/compactDBConflict.py -N 3 +,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/mnodeEncrypt.py 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/between.py -Q 2 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distinct.py -Q 2 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/varchar.py -Q 2 diff --git a/tests/system-test/6-cluster/mnodeEncrypt.py b/tests/system-test/6-cluster/mnodeEncrypt.py new file mode 100644 index 0000000000..e878611d32 --- /dev/null +++ b/tests/system-test/6-cluster/mnodeEncrypt.py @@ -0,0 +1,69 @@ +from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE +from numpy import row_stack +import taos +import sys +import time +import os + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import TDDnodes +from util.dnodes import TDDnode +from util.cluster import * +sys.path.append("./6-cluster") +from clusterCommonCreate import * +from clusterCommonCheck import clusterComCheck + +import time +import socket +import subprocess +from multiprocessing import Process +import threading +import time +import inspect +import ctypes + +class TDTestCase: + + def init(self, conn, logSql, replicaVar=1): + tdLog.debug(f"start to excute {__file__}") + self.TDDnodes = None + tdSql.init(conn.cursor()) + self.host = socket.gethostname() + + + def run(self): + tdSql.execute('create database if not exists db'); + tdSql.execute('use db') + tdSql.execute('create table st (ts timestamp, i int, j float, k double) tags(a int)') + + for i in range(0, 2): + tdSql.execute("create table if not exists db.t%d using db.st tags(%d)" % (i, i)) + + + for i in range(2, 4): + tdSql.execute("create table if not exists db.t%d using db.st tags(%d)" % (i, i)) + + sql = "show db.tables" + tdSql.query(sql) + tdSql.checkRows(4) + + timestamp = 1530374400000 + for i in range (4) : + val = i + sql = "insert into db.t%d values(%d, %d, %d, %d)" % (i, timestamp, val, val, val) + tdSql.execute(sql) + + for i in range ( 4) : + val = i + sql = "select * from db.t%d" % (i) + tdSql.query(sql) + tdSql.checkRows(1) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) From eaa416f7f0846720154e9ebf848d60e8c3837431 Mon Sep 17 00:00:00 2001 From: xiao-77 Date: Tue, 24 Dec 2024 20:03:02 +0800 Subject: [PATCH 22/24] Fix res check. 
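The Apollo-URL test below now expects `cfgLoad()` to return 0 for the flat key/value JSON file instead of `TSDB_CODE_INVALID_DATA_FMT`. As a hedged sketch only (not part of this patch), the write-temp-file / load / remove pattern shared by `cfgLoadFromEnvFile` and `cfgLoadFromApollUrl` could be factored into one helper so both call sites stay consistent with the expected return code; every API used below already appears in `cfgTest.cpp`, while the helper name and its simplified `-1` error path are hypothetical.

```cpp
// Hedged sketch, not part of this patch: factor the temp-file setup shared by
// cfgLoadFromEnvFile and cfgLoadFromApollUrl into one helper. Assumes the
// includes already present in cfgTest.cpp; the helper name is hypothetical.
static int32_t writeTmpAndLoad(SConfig *pCfg, ECfgSrcType stype, const char *filePath,
                               const char *content, const char *loadParam) {
  TdFilePtr pFile = taosOpenFile(filePath, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_APPEND);
  if (pFile == NULL) return -1;  // simplified error path for the sketch
  (void)taosWriteFile(pFile, content, strlen(content));
  (void)taosCloseFile(&pFile);
  int32_t code = cfgLoad(pCfg, stype, loadParam);
  (void)taosRemoveFile(filePath);
  return code;
}
```

With such a helper, the Apollo-URL test body would reduce to something like `ASSERT_EQ(writeTmpAndLoad(pConfig, CFG_STYPE_APOLLO_URL, jsonFilePath, buf, str), 0);`, keeping the expectation in a single place.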
--- source/util/test/cfgTest.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/util/test/cfgTest.cpp b/source/util/test/cfgTest.cpp index d2967131f8..52f6c0844c 100644 --- a/source/util/test/cfgTest.cpp +++ b/source/util/test/cfgTest.cpp @@ -362,7 +362,7 @@ TEST_F(CfgTest, cfgLoadFromApollUrl) { char str[256]; snprintf(str, sizeof(str), "jsonFile:%s", jsonFilePath); - ASSERT_EQ(cfgLoad(pConfig, CFG_STYPE_APOLLO_URL, str), TSDB_CODE_INVALID_DATA_FMT); + ASSERT_EQ(cfgLoad(pConfig, CFG_STYPE_APOLLO_URL, str), 0); taosRemoveFile(jsonFilePath); } \ No newline at end of file From 3a29a7e2c3a78e1e415ce5a9dbcb66f65824e7e1 Mon Sep 17 00:00:00 2001 From: xiao-77 Date: Tue, 24 Dec 2024 22:15:36 +0800 Subject: [PATCH 23/24] fix windows build. --- source/util/test/cfgTest.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/source/util/test/cfgTest.cpp b/source/util/test/cfgTest.cpp index 52f6c0844c..3894ca1061 100644 --- a/source/util/test/cfgTest.cpp +++ b/source/util/test/cfgTest.cpp @@ -273,6 +273,7 @@ TEST_F(CfgTest, cfgDumpCfgS3) { cfgDumpCfgS3(pConfig, true, false); } +#ifndef WINDOWS TEST_F(CfgTest, cfgLoadFromEnvVar) { SConfig *pConfig = NULL; int32_t code = cfgInit(&pConfig); @@ -302,6 +303,7 @@ TEST_F(CfgTest, cfgLoadFromEnvVar) { ASSERT_EQ(cfgLoad(pConfig, CFG_STYPE_ENV_VAR, "test_bool"), TSDB_CODE_SUCCESS); } +#endif TEST_F(CfgTest, cfgLoadFromEnvCmd) { SConfig *pConfig = NULL; From c822e57aef3dd2dc182513e7380de81d0d4f72e2 Mon Sep 17 00:00:00 2001 From: xiao-77 Date: Tue, 24 Dec 2024 22:57:51 +0800 Subject: [PATCH 24/24] fix windows build. --- source/util/test/cfgTest.cpp | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/source/util/test/cfgTest.cpp b/source/util/test/cfgTest.cpp index 3894ca1061..74c34f5c91 100644 --- a/source/util/test/cfgTest.cpp +++ b/source/util/test/cfgTest.cpp @@ -10,10 +10,12 @@ */ #include -#include -#include "osFile.h" #include "tconfig.h" +#ifndef WINDOWS +#include "osFile.h" +#endif + class CfgTest : public ::testing::Test { protected: static void SetUpTestSuite() {} @@ -303,7 +305,6 @@ TEST_F(CfgTest, cfgLoadFromEnvVar) { ASSERT_EQ(cfgLoad(pConfig, CFG_STYPE_ENV_VAR, "test_bool"), TSDB_CODE_SUCCESS); } -#endif TEST_F(CfgTest, cfgLoadFromEnvCmd) { SConfig *pConfig = NULL; @@ -367,4 +368,6 @@ TEST_F(CfgTest, cfgLoadFromApollUrl) { ASSERT_EQ(cfgLoad(pConfig, CFG_STYPE_APOLLO_URL, str), 0); taosRemoveFile(jsonFilePath); -} \ No newline at end of file +} + +#endif \ No newline at end of file
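The two "fix windows build" patches disable the `setenv()`-based env-var test on Windows and, in the follow-up, extend the `#ifndef WINDOWS` guard over the remaining file-based tests and the `osFile.h` include, since `setenv()` is POSIX-only. A possible alternative, sketched below under the assumption that the MSVC CRT is available in Windows builds, would be a small portability wrapper so the env-var test could run on both platforms; this is an illustration only, not what the patches do.

```cpp
#include <stdlib.h>

// Hedged sketch only: a portable replacement for the raw setenv() calls in
// cfgLoadFromEnvVar. On Windows the MSVC CRT provides _putenv_s(); elsewhere
// POSIX setenv() is used. The wrapper is an assumption/illustration and is
// not part of patches 23/24, which instead compile the tests out on Windows.
static int setTestEnvVar(const char *name, const char *value) {
#ifdef WINDOWS
  return _putenv_s(name, value);
#else
  return setenv(name, value, 1);  // 1 = overwrite an existing value
#endif
}

// Usage in the test body would then be, e.g.:
//   ASSERT_EQ(setTestEnvVar("test_bool", "1"), 0);
```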