From f2e9e914879a9347ebd2512920f199b4065d3107 Mon Sep 17 00:00:00 2001 From: xiao-77 Date: Wed, 11 Dec 2024 16:22:29 +0800 Subject: [PATCH 01/24] Add sdbCancelFetch and sdbRelease while exit iter. --- source/dnode/mnode/impl/src/mndMnode.c | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/source/dnode/mnode/impl/src/mndMnode.c b/source/dnode/mnode/impl/src/mndMnode.c index 6b1c97b399..413fd3aec5 100644 --- a/source/dnode/mnode/impl/src/mndMnode.c +++ b/source/dnode/mnode/impl/src/mndMnode.c @@ -917,7 +917,9 @@ static int32_t mndRetrieveMnodes(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); code = colDataSetVal(pColInfo, numOfRows, (const char *)&pObj->id, false); if (code != 0) { - mError("mnode:%d, failed to set col data val since %s", pObj->id, terrstr()); + mError("mnode:%d, failed to set col data val since %s", pObj->id, tstrerror(code)); + sdbCancelFetch(pSdb, pShow->pIter); + sdbRelease(pSdb, pObj); goto _out; } @@ -927,7 +929,9 @@ static int32_t mndRetrieveMnodes(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); code = colDataSetVal(pColInfo, numOfRows, b1, false); if (code != 0) { - mError("mnode:%d, failed to set col data val since %s", pObj->id, terrstr()); + mError("mnode:%d, failed to set col data val since %s", pObj->id, tstrerror(code)); + sdbCancelFetch(pSdb, pShow->pIter); + sdbRelease(pSdb, pObj); goto _out; } @@ -948,7 +952,9 @@ static int32_t mndRetrieveMnodes(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); code = colDataSetVal(pColInfo, numOfRows, (const char *)b2, false); if (code != 0) { - mError("mnode:%d, failed to set col data val since %s", pObj->id, terrstr()); + mError("mnode:%d, failed to set col data val since %s", pObj->id, tstrerror(code)); + sdbCancelFetch(pSdb, pShow->pIter); + sdbRelease(pSdb, pObj); goto _out; } const 
char *status = "ready"; @@ -960,14 +966,18 @@ static int32_t mndRetrieveMnodes(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); code = colDataSetVal(pColInfo, numOfRows, (const char *)b3, false); if (code != 0) { - mError("mnode:%d, failed to set col data val since %s", pObj->id, terrstr()); + mError("mnode:%d, failed to set col data val since %s", pObj->id, tstrerror(code)); + sdbCancelFetch(pSdb, pShow->pIter); + sdbRelease(pSdb, pObj); goto _out; } pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); code = colDataSetVal(pColInfo, numOfRows, (const char *)&pObj->createdTime, false); if (code != 0) { - mError("mnode:%d, failed to set col data val since %s", pObj->id, terrstr()); + mError("mnode:%d, failed to set col data val since %s", pObj->id, tstrerror(code)); + sdbCancelFetch(pSdb, pShow->pIter); + sdbRelease(pSdb, pObj); goto _out; } @@ -975,7 +985,9 @@ static int32_t mndRetrieveMnodes(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); code = colDataSetVal(pColInfo, numOfRows, (const char *)&roleTimeMs, false); if (code != 0) { - mError("mnode:%d, failed to set col data val since %s", pObj->id, terrstr()); + mError("mnode:%d, failed to set col data val since %s", pObj->id, tstrerror(code)); + sdbCancelFetch(pSdb, pShow->pIter); + sdbRelease(pSdb, pObj); goto _out; } From 2d86c5df91449e0058e69840937bf08b324003cf Mon Sep 17 00:00:00 2001 From: kailixu Date: Thu, 12 Dec 2024 11:44:31 +0800 Subject: [PATCH 02/24] docs: add faq of why database disappear and clusterId change --- docs/zh/27-train-faq/01-faq.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/docs/zh/27-train-faq/01-faq.md b/docs/zh/27-train-faq/01-faq.md index b7d20cac5a..487fb29acb 100644 --- a/docs/zh/27-train-faq/01-faq.md +++ b/docs/zh/27-train-faq/01-faq.md @@ -289,3 +289,12 @@ https://docs.taosdata.com/reference/components/taosd/#%E7%9B%91%E6%8E%A7%E7%9B%B 
因此用户首先要检查服务端,集群的所有端口(原生连接默认6030,http连接默认6041)有无打开;其次是客户端的hosts文件中是否配置了集群所有节点的fqdn与IP信息。 如仍无法解决,则需要联系涛思技术人员支持。 + +### 32 同一台服务器,数据库的数据目录 dataDir 不变,为什么原有数据库丢失且集群 ID 发生了变化? +背景知识:TDengine 服务端进程(taosd)在启动时,若数据目录(dataDir,该目录在配置文件 taos.cfg 中指定)下不存在有效的数据文件子目录(如 mnode、dnode 和 vnode 等),则会自动创建这些目录。在创建新的 mnode 目录的同时,会分配一个新的集群 ID,从而创建一个新的集群。 + +原因分析:taosd 的数据目录 dataDir 可以指向多个不同的挂载点。如果这些挂载点未在 fstab 文件中配置自动挂载,服务器重启后,dataDir 将仅作为一个本地磁盘的普通目录存在,而未能按预期指向挂载的磁盘。此时,若 taosd 服务启动,它将在 dataDir 下新建目录,从而产生一个新的集群。 + +问题影响:服务器重启后,原有数据库丢失且集群 ID 发生变化,导致无法访问原有数据库。对于企业版用户,如果已针对集群 ID 进行授权,则会发现集群服务器的机器码未变,但原有的授权已失效。如果未进行监控或者未及时发现并进行处理,则不会注意到数据库已经丢失,从而造成损失,增加运维成本。 + +问题解决:应在 fstab 文件中配置 dataDir 目录的自动挂载,确保 dataDir 始终指向预期的挂载点和目录,此时,再重启服务器,会找回原有的数据库和集群。在后续的版本中,我们将开发一个功能,使 taosd 在检测到启动前后 dataDir 发生变化时,在启动阶段退出,同时提供相应的错误提示。 \ No newline at end of file From 9b178855b80ceb2932d02fd004633dec9ba33e07 Mon Sep 17 00:00:00 2001 From: kailixu Date: Thu, 12 Dec 2024 11:49:38 +0800 Subject: [PATCH 03/24] docs: add faq of why database disappear and clusterId change --- docs/zh/27-train-faq/01-faq.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/27-train-faq/01-faq.md b/docs/zh/27-train-faq/01-faq.md index 487fb29acb..58e3c452c8 100644 --- a/docs/zh/27-train-faq/01-faq.md +++ b/docs/zh/27-train-faq/01-faq.md @@ -295,6 +295,6 @@ https://docs.taosdata.com/reference/components/taosd/#%E7%9B%91%E6%8E%A7%E7%9B%B 原因分析:taosd 的数据目录 dataDir 可以指向多个不同的挂载点。如果这些挂载点未在 fstab 文件中配置自动挂载,服务器重启后,dataDir 将仅作为一个本地磁盘的普通目录存在,而未能按预期指向挂载的磁盘。此时,若 taosd 服务启动,它将在 dataDir 下新建目录,从而产生一个新的集群。 -问题影响:服务器重启后,原有数据库丢失且集群 ID 发生变化,导致无法访问原有数据库。对于企业版用户,如果已针对集群 ID 进行授权,则会发现集群服务器的机器码未变,但原有的授权已失效。如果未进行监控或者未及时发现并进行处理,则不会注意到数据库已经丢失,从而造成损失,增加运维成本。 +问题影响:服务器重启后,原有数据库丢失(注:并非真正丢失,只是原有的数据磁盘未挂载,暂时看不到)且集群 ID 发生变化,导致无法访问原有数据库。对于企业版用户,如果已针对集群 ID 进行授权,还会发现集群服务器的机器码未变,但原有的授权已失效。如果未针对该问题进行监控或者未及时发现并进行处理,则不会注意到原有数据库已经丢失,从而造成损失,增加运维成本。 问题解决:应在 fstab 文件中配置 dataDir 目录的自动挂载,确保 dataDir 始终指向预期的挂载点和目录,此时,再重启服务器,会找回原有的数据库和集群。在后续的版本中,我们将开发一个功能,使 taosd 
在检测到启动前后 dataDir 发生变化时,在启动阶段退出,同时提供相应的错误提示。 \ No newline at end of file From f41e96baca0fb7ae8ab300d2b79b92424832a296 Mon Sep 17 00:00:00 2001 From: kailixu Date: Thu, 12 Dec 2024 11:51:01 +0800 Subject: [PATCH 04/24] docs: add faq of why database disappear and clusterId change --- docs/zh/27-train-faq/01-faq.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/27-train-faq/01-faq.md b/docs/zh/27-train-faq/01-faq.md index 58e3c452c8..0a519770f8 100644 --- a/docs/zh/27-train-faq/01-faq.md +++ b/docs/zh/27-train-faq/01-faq.md @@ -291,7 +291,7 @@ https://docs.taosdata.com/reference/components/taosd/#%E7%9B%91%E6%8E%A7%E7%9B%B 如仍无法解决,则需要联系涛思技术人员支持。 ### 32 同一台服务器,数据库的数据目录 dataDir 不变,为什么原有数据库丢失且集群 ID 发生了变化? -背景知识:TDengine 服务端进程(taosd)在启动时,若数据目录(dataDir,该目录在配置文件 taos.cfg 中指定)下不存在有效的数据文件子目录(如 mnode、dnode 和 vnode 等),则会自动创建这些目录。在创建新的 mnode 目录的同时,会分配一个新的集群 ID,从而创建一个新的集群。 +背景知识:TDengine 服务端进程(taosd)在启动时,若数据目录(dataDir,该目录在配置文件 taos.cfg 中指定)下不存在有效的数据文件子目录(如 mnode、dnode 和 vnode 等),则会自动创建这些目录。在创建新的 mnode 目录的同时,会分配一个新的集群 ID,从而产生一个新的集群。 原因分析:taosd 的数据目录 dataDir 可以指向多个不同的挂载点。如果这些挂载点未在 fstab 文件中配置自动挂载,服务器重启后,dataDir 将仅作为一个本地磁盘的普通目录存在,而未能按预期指向挂载的磁盘。此时,若 taosd 服务启动,它将在 dataDir 下新建目录,从而产生一个新的集群。 From b28930f3352ef7da5cf46b422b57837c98836e7e Mon Sep 17 00:00:00 2001 From: kailixu Date: Thu, 12 Dec 2024 11:52:47 +0800 Subject: [PATCH 05/24] docs: add faq of why database disappear and clusterId change --- docs/zh/27-train-faq/01-faq.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/27-train-faq/01-faq.md b/docs/zh/27-train-faq/01-faq.md index 0a519770f8..ece7c9f309 100644 --- a/docs/zh/27-train-faq/01-faq.md +++ b/docs/zh/27-train-faq/01-faq.md @@ -295,6 +295,6 @@ https://docs.taosdata.com/reference/components/taosd/#%E7%9B%91%E6%8E%A7%E7%9B%B 原因分析:taosd 的数据目录 dataDir 可以指向多个不同的挂载点。如果这些挂载点未在 fstab 文件中配置自动挂载,服务器重启后,dataDir 将仅作为一个本地磁盘的普通目录存在,而未能按预期指向挂载的磁盘。此时,若 taosd 服务启动,它将在 dataDir 下新建目录,从而产生一个新的集群。 
-问题影响:服务器重启后,原有数据库丢失(注:并非真正丢失,只是原有的数据磁盘未挂载,暂时看不到)且集群 ID 发生变化,导致无法访问原有数据库。对于企业版用户,如果已针对集群 ID 进行授权,还会发现集群服务器的机器码未变,但原有的授权已失效。如果未针对该问题进行监控或者未及时发现并进行处理,则不会注意到原有数据库已经丢失,从而造成损失,增加运维成本。 +问题影响:服务器重启后,原有数据库丢失(注:并非真正丢失,只是原有的数据磁盘未挂载,暂时看不到)且集群 ID 发生变化,导致无法访问原有数据库。对于企业版用户,如果已针对集群 ID 进行授权,还会发现集群服务器的机器码未变,但原有的授权已失效。如果未针对该问题进行监控或者未及时发现并进行处理,则用户不会注意到原有数据库已经丢失,从而造成损失,增加运维成本。 问题解决:应在 fstab 文件中配置 dataDir 目录的自动挂载,确保 dataDir 始终指向预期的挂载点和目录,此时,再重启服务器,会找回原有的数据库和集群。在后续的版本中,我们将开发一个功能,使 taosd 在检测到启动前后 dataDir 发生变化时,在启动阶段退出,同时提供相应的错误提示。 \ No newline at end of file From 217c4c55067c761f07028b47223f96e9621be2a7 Mon Sep 17 00:00:00 2001 From: Yibo Liu Date: Thu, 12 Dec 2024 15:29:39 +0800 Subject: [PATCH 06/24] Update 12-tdinsight.md --- .../01-components/12-tdinsight.md | 32 ++++++++++++++++++- 1 file changed, 31 insertions(+), 1 deletion(-) diff --git a/docs/en/14-reference/01-components/12-tdinsight.md b/docs/en/14-reference/01-components/12-tdinsight.md index 12423c512d..c9464760e7 100644 --- a/docs/en/14-reference/01-components/12-tdinsight.md +++ b/docs/en/14-reference/01-components/12-tdinsight.md @@ -171,7 +171,37 @@ Metric details: 5. **Writes**: Total number of writes 6. **Other**: Total number of other requests -There are also line charts for the above categories. +There are also line charts for the above categories. + +### Automatic import of preconfigured alert rules + +After summarizing user experience, 14 commonly used alert rules are sorted out. These alert rules can monitor key indicators of the TDengine cluster and report alerts, such as abnormal and exceeded indicators. +Starting from TDengine-Server 3.3.4.3 (TDengine-datasource 3.6.3), TDengine Datasource supports automatic import of preconfigured alert rules. You can import 14 alert rules to Grafana (version 11 or later) with one click. +In the TDengine-datasource setting interface, turn on the "Load TDengine Alert" switch, click the "Save & test" button, the plugin will automatically load the mentioned 14 alert rules. 
The rules will be placed in the Grafana alerts directory. If not required, turn off the "Load TDengine Alert" switch, and click the button next to "Clear TDengine Alert" to clear all the alert rules imported into this data source. + +After importing, click on "Alert rules" on the left side of the Grafana interface to view all current alert rules. By configuring contact points, users can receive alert notifications. + +The specific configuration of the 14 alert rules is as follows: + +| alert rule| Rule threshold| Behavior when no data | Data scanning interval |Duration | SQL | +| ------ | --------- | ---------------- | ----------- |------- |----------------------| +|CPU load of dnode node|average > 80%|Trigger alert|5 minutes|5 minutes |`select now(), dnode_id, last(cpu_system) as cup_use from log.taosd_dnodes_info where _ts >= (now- 5m) and _ts < now partition by dnode_id having first(_ts) > 0 `| +|Memory of dnode node |average > 60%|Trigger alert|5 minutes|5 minutes|`select now(), dnode_id, last(mem_engine) / last(mem_total) * 100 as taosd from log.taosd_dnodes_info where _ts >= (now- 5m) and _ts < now partition by dnode_id having first(_ts) > 0 `| +|Disk capacity of dnode node |average > 80%|Trigger alert|5 minutes|5 minutes|`select now(), dnode_id, data_dir_level, data_dir_name, last(used) / last(total) * 100 as used from log.taosd_dnodes_data_dirs where _ts >= (now - 5m) and _ts < now partition by dnode_id, data_dir_level, data_dir_name`| +|Authorization expires |< 60 days|Trigger alert|1 day|0 seconds|`select now(), cluster_id, last(grants_expire_time) / 86400 as expire_time from log.taosd_cluster_info where _ts >= (now - 24h) and _ts < now partition by cluster_id having first(_ts) > 0 `| +|The used measurement points have reached the authorized number|>= 90%|Trigger alert|1 day|0 seconds|`select now(), cluster_id, CASE WHEN max(grants_timeseries_total) > 0.0 THEN max(grants_timeseries_used) /max(grants_timeseries_total) * 100.0 ELSE 0.0 END AS result from log.taosd_cluster_info where _ts >= (now - 30s) and _ts < now partition by cluster_id having 
timetruncate(first(_ts), 1m) > 0`| +|Number of concurrent query requests | > 100|Do not trigger alert|1 minute|0 seconds|`select now() as ts, count(*) as slow_count from performance_schema.perf_queries`| +|Maximum time for slow query execution (no time window) |> 300 seconds|Do not trigger alert|1 minute|0 seconds|`select now() as ts, count(*) as slow_count from performance_schema.perf_queries where exec_usec>300000000`| +|dnode offline |total != alive|Trigger alert|30 seconds|0 seconds|`select now(), cluster_id, last(dnodes_total) - last(dnodes_alive) as dnode_offline from log.taosd_cluster_info where _ts >= (now -30s) and _ts < now partition by cluster_id having first(_ts) > 0`| +|vnode offline |total != alive|Trigger alert|30 seconds|0 seconds|`select now(), cluster_id, last(vnodes_total) - last(vnodes_alive) as vnode_offline from log.taosd_cluster_info where _ts >= (now - 30s) and _ts < now partition by cluster_id having first(_ts) > 0 `| +|Number of data deletion requests |> 0|Do not trigger alert|30 seconds|0 seconds|``select now(), count(`count`) as `delete_count` from log.taos_sql_req where sql_type = 'delete' and _ts >= (now -30s) and _ts < now``| +|Adapter RESTful request fail |> 5|Do not trigger alert|30 seconds|0 seconds|``select now(), sum(`fail`) as `Failed` from log.adapter_requests where req_type=0 and ts >= (now -30s) and ts < now``| +|Adapter WebSocket request fail |> 5|Do not trigger alert|30 seconds|0 seconds|``select now(), sum(`fail`) as `Failed` from log.adapter_requests where req_type=1 and ts >= (now -30s) and ts < now``| +|Dnode data reporting is missing |< 3|Trigger alert|180 seconds|0 seconds|`select now(), cluster_id, count(*) as dnode_report from log.taosd_cluster_info where _ts >= (now -180s) and _ts < now partition by cluster_id having timetruncate(first(_ts), 1h) > 0`| +|Restart dnode |max(update_time) > last(update_time)|Trigger alert|90 seconds|0 seconds|`select now(), dnode_id, max(uptime) - last(uptime) as dnode_restart from 
log.taosd_dnodes_info where _ts >= (now - 90s) and _ts < now partition by dnode_id`| + +TDengine users can modify and improve these alert rules according to their own business needs. In Grafana 7.5 and below versions, the Dashboard and Alert rules functions are combined, while in subsequent new versions, the two functions are separated. To be compatible with Grafana7.5 and below versions, an Alert Used Only panel has been added to the TDinsight panel, which is only required for Grafana7.5 and below versions. + ## Upgrade From 69a40355ed7977222702c94d4151db1b1ff6a0fc Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Thu, 12 Dec 2024 15:35:32 +0800 Subject: [PATCH 07/24] fix compile error --- source/libs/stream/src/streamBackendRocksdb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/stream/src/streamBackendRocksdb.c b/source/libs/stream/src/streamBackendRocksdb.c index 11d2385d1c..30a5bb76db 100644 --- a/source/libs/stream/src/streamBackendRocksdb.c +++ b/source/libs/stream/src/streamBackendRocksdb.c @@ -4631,7 +4631,7 @@ int32_t compareHashTableImpl(SHashObj* p1, SHashObj* p2, SArray* diff) { if (fname == NULL) { return terrno; } - tstrncpy(fname, name, strlen(name)); + tstrncpy(fname, name, strlen(name) + 1); if (taosArrayPush(diff, &fname) == NULL) { taosMemoryFree(fname); return terrno; From c7f22d23df08f81de9f41edacb273bb128602aad Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Thu, 12 Dec 2024 15:36:44 +0800 Subject: [PATCH 08/24] fix compile error --- source/libs/stream/src/streamBackendRocksdb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/stream/src/streamBackendRocksdb.c b/source/libs/stream/src/streamBackendRocksdb.c index 30a5bb76db..55b8365dbb 100644 --- a/source/libs/stream/src/streamBackendRocksdb.c +++ b/source/libs/stream/src/streamBackendRocksdb.c @@ -4825,7 +4825,7 @@ int32_t dbChkpGetDelta(SDbChkp* p, int64_t chkpId, SArray* list) { return terrno; } - tstrncpy(fname, name, strlen(name)); 
+ tstrncpy(fname, name, strlen(name) + 1); if (taosArrayPush(p->pAdd, &fname) == NULL) { taosMemoryFree(fname); TAOS_UNUSED(taosThreadRwlockUnlock(&p->rwLock)); From 137430a3ed3a16fe0dc6119545eb057859f793ee Mon Sep 17 00:00:00 2001 From: Yibo Liu Date: Thu, 12 Dec 2024 16:13:02 +0800 Subject: [PATCH 09/24] Update index.md --- docs/en/27-train-faq/index.md | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/docs/en/27-train-faq/index.md b/docs/en/27-train-faq/index.md index a5ec977e29..ca6cd91714 100644 --- a/docs/en/27-train-faq/index.md +++ b/docs/en/27-train-faq/index.md @@ -286,4 +286,14 @@ This connection only reports the most basic information that does not involve an This feature is an optional configuration item, which is enabled by default in the open-source version. The specific parameter is telemetryReporting, as explained in the [official documentation](../tdengine-reference/components/taosd/). You can disable this parameter at any time by modifying telemetryReporting to 0 in taos.cfg, then restarting the database service. Code located at: [https://github.com/taosdata/TDengine/blob/62e609c558deb764a37d1a01ba84bc35115a85a4/source/dnode/mnode/impl/src/mndTelem.c](https://github.com/taosdata/TDengine/blob/62e609c558deb764a37d1a01ba84bc35115a85a4/source/dnode/mnode/impl/src/mndTelem.c). -Additionally, for the highly secure enterprise version, TDengine Enterprise, this parameter will not be operational. +Additionally, for the highly secure enterprise version, TDengine Enterprise, this parameter will not be operational. + +### 31 What should I do if I encounter 'Sync leader is unreachable' when connecting to the cluster for the first time? + +Reporting this error indicates that the first connection to the cluster was successful, but the IP address accessed for the first time was not the leader of mnode. An error occurred when the client attempted to establish a connection with the leader. 
The client searches for the leader node through EP, which specifies the fqdn and port number. There are two common reasons for this error: + +- The ports of other dnodes in the cluster are not open +- The client's hosts file is not configured correctly + +Therefore, first, check whether all ports on the server and cluster (default 6030 for native connections and 6041 for HTTP connections) are open; Next, check if the client's hosts file has configured the fqdn and IP information for all dnodes in the cluster. +If the issue still cannot be resolved, it is necessary to contact Taos technical personnel for support. From 8605f1c32a1618dcd1dc8127b3803c0d7229206f Mon Sep 17 00:00:00 2001 From: Jinqing Kuang Date: Wed, 11 Dec 2024 19:59:12 +0800 Subject: [PATCH 10/24] fix(query)[TD-33181]. reset error code when retry in vnodeGetBufPoolToUse --- source/dnode/vnode/src/vnd/vnodeCommit.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/source/dnode/vnode/src/vnd/vnodeCommit.c b/source/dnode/vnode/src/vnd/vnodeCommit.c index 3ebcf50858..28d27b8893 100644 --- a/source/dnode/vnode/src/vnd/vnodeCommit.c +++ b/source/dnode/vnode/src/vnd/vnodeCommit.c @@ -103,9 +103,11 @@ static int32_t vnodeGetBufPoolToUse(SVnode *pVnode) { } code = taosThreadCondTimedWait(&pVnode->poolNotEmpty, &pVnode->mutex, &ts); - if (code && code != TSDB_CODE_TIMEOUT_ERROR) { - TSDB_CHECK_CODE(code, lino, _exit); + // ignore timeout error and retry + if (code == TSDB_CODE_TIMEOUT_ERROR) { + code = TSDB_CODE_SUCCESS; } + TSDB_CHECK_CODE(code, lino, _exit); } } } From 63705d9b832ff893d3dd1a7ef15f34bea4b45b7f Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Thu, 12 Dec 2024 17:25:46 +0800 Subject: [PATCH 11/24] fix compile error --- source/libs/stream/src/streamBackendRocksdb.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/source/libs/stream/src/streamBackendRocksdb.c b/source/libs/stream/src/streamBackendRocksdb.c index 55b8365dbb..f7cac3b562 100644 --- 
a/source/libs/stream/src/streamBackendRocksdb.c +++ b/source/libs/stream/src/streamBackendRocksdb.c @@ -4627,11 +4627,12 @@ int32_t compareHashTableImpl(SHashObj* p1, SHashObj* p2, SArray* diff) { while (pIter) { char* name = taosHashGetKey(pIter, &len); if (!isBkdDataMeta(name, len) && !taosHashGet(p1, name, len)) { - char* fname = taosMemoryCalloc(1, len + 1); + int32_t cap = len + 1; + char* fname = taosMemoryCalloc(1, cap); if (fname == NULL) { return terrno; } - tstrncpy(fname, name, strlen(name) + 1); + tstrncpy(fname, name, cap); if (taosArrayPush(diff, &fname) == NULL) { taosMemoryFree(fname); return terrno; @@ -4819,13 +4820,14 @@ int32_t dbChkpGetDelta(SDbChkp* p, int64_t chkpId, SArray* list) { size_t len = 0; char* name = taosHashGetKey(pIter, &len); if (name != NULL && !isBkdDataMeta(name, len)) { - char* fname = taosMemoryCalloc(1, len + 1); + int32_t cap = len + 1; + char* fname = taosMemoryCalloc(1, cap); if (fname == NULL) { TAOS_UNUSED(taosThreadRwlockUnlock(&p->rwLock)); return terrno; } - tstrncpy(fname, name, strlen(name) + 1); + tstrncpy(fname, name, cap); if (taosArrayPush(p->pAdd, &fname) == NULL) { taosMemoryFree(fname); TAOS_UNUSED(taosThreadRwlockUnlock(&p->rwLock)); From 8136363fbe33cb5a78bbd9d4511c2755b4200344 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Fri, 13 Dec 2024 11:14:21 +0800 Subject: [PATCH 12/24] refact: merge unnecessary code. 
--- source/dnode/mnode/impl/src/mndMnode.c | 36 +++++++++----------------- 1 file changed, 12 insertions(+), 24 deletions(-) diff --git a/source/dnode/mnode/impl/src/mndMnode.c b/source/dnode/mnode/impl/src/mndMnode.c index 413fd3aec5..5ea0c342d5 100644 --- a/source/dnode/mnode/impl/src/mndMnode.c +++ b/source/dnode/mnode/impl/src/mndMnode.c @@ -951,12 +951,8 @@ static int32_t mndRetrieveMnodes(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB STR_WITH_MAXSIZE_TO_VARSTR(b2, role, pShow->pMeta->pSchemas[cols].bytes); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); code = colDataSetVal(pColInfo, numOfRows, (const char *)b2, false); - if (code != 0) { - mError("mnode:%d, failed to set col data val since %s", pObj->id, tstrerror(code)); - sdbCancelFetch(pSdb, pShow->pIter); - sdbRelease(pSdb, pObj); - goto _out; - } + if (code != 0) goto _err; + const char *status = "ready"; if (objStatus == SDB_STATUS_CREATING) status = "creating"; if (objStatus == SDB_STATUS_DROPPING) status = "dropping"; @@ -965,31 +961,16 @@ static int32_t mndRetrieveMnodes(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB STR_WITH_MAXSIZE_TO_VARSTR(b3, status, pShow->pMeta->pSchemas[cols].bytes); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); code = colDataSetVal(pColInfo, numOfRows, (const char *)b3, false); - if (code != 0) { - mError("mnode:%d, failed to set col data val since %s", pObj->id, tstrerror(code)); - sdbCancelFetch(pSdb, pShow->pIter); - sdbRelease(pSdb, pObj); - goto _out; - } + if (code != 0) goto _err; pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); code = colDataSetVal(pColInfo, numOfRows, (const char *)&pObj->createdTime, false); - if (code != 0) { - mError("mnode:%d, failed to set col data val since %s", pObj->id, tstrerror(code)); - sdbCancelFetch(pSdb, pShow->pIter); - sdbRelease(pSdb, pObj); - goto _out; - } + if (code != 0) goto _err; int64_t roleTimeMs = (isDnodeOnline) ? 
pObj->roleTimeMs : 0; pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); code = colDataSetVal(pColInfo, numOfRows, (const char *)&roleTimeMs, false); - if (code != 0) { - mError("mnode:%d, failed to set col data val since %s", pObj->id, tstrerror(code)); - sdbCancelFetch(pSdb, pShow->pIter); - sdbRelease(pSdb, pObj); - goto _out; - } + if (code != 0) goto _err; numOfRows++; sdbRelease(pSdb, pObj); @@ -1000,6 +981,13 @@ _out: sdbRelease(pSdb, pSelfObj); return numOfRows; + +_err: + mError("mnode:%d, failed to set col data val since %s", pObj->id, tstrerror(code)); + sdbCancelFetch(pSdb, pShow->pIter); + sdbRelease(pSdb, pObj); + sdbRelease(pSdb, pSelfObj); + return numOfRows; } static void mndCancelGetNextMnode(SMnode *pMnode, void *pIter) { From 0170cdb5cbfa829f885987942474007b3db1ceb9 Mon Sep 17 00:00:00 2001 From: Yu Chen <74105241+yu285@users.noreply.github.com> Date: Fri, 13 Dec 2024 11:51:54 +0800 Subject: [PATCH 13/24] docs/add the units of configuration cacheload and cachesize in Update 02-database.md --- docs/zh/14-reference/03-taos-sql/02-database.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/zh/14-reference/03-taos-sql/02-database.md b/docs/zh/14-reference/03-taos-sql/02-database.md index fabebc44da..867e1d7aa1 100644 --- a/docs/zh/14-reference/03-taos-sql/02-database.md +++ b/docs/zh/14-reference/03-taos-sql/02-database.md @@ -134,11 +134,11 @@ alter_database_option: { 1. 如何查看 cachesize? -通过 select * from information_schema.ins_databases; 可以查看这些 cachesize 的具体值。 +通过 select * from information_schema.ins_databases; 可以查看这些 cachesize 的具体值(单位为 MB)。 2. 如何查看 cacheload? -通过 show \.vgroups; 可以查看 cacheload +通过 show \.vgroups; 可以查看 cacheload(单位为字节)。 3. 
判断 cachesize 是否够用 From 7658144359f114ac7a8691ff19008cf64404ad6d Mon Sep 17 00:00:00 2001 From: Jing Sima Date: Fri, 13 Dec 2024 09:27:42 +0800 Subject: [PATCH 14/24] fix:[TS-5763] Fix error when using selection function with JSON param. --- source/libs/function/src/builtinsimpl.c | 63 +++++++++-- .../test_selection_function_with_json.py | 106 ++++++++++++++++++ tests/parallel_test/cases.task | 1 + 3 files changed, 162 insertions(+), 8 deletions(-) create mode 100644 tests/army/query/function/test_selection_function_with_json.py diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index 83227dea9e..aa0711f421 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -2449,13 +2449,21 @@ static int32_t doSaveCurrentVal(SqlFunctionCtx* pCtx, int32_t rowIndex, int64_t SFirstLastRes* pInfo = GET_ROWCELL_INTERBUF(pResInfo); if (IS_VAR_DATA_TYPE(type)) { - pInfo->bytes = varDataTLen(pData); + if (type == TSDB_DATA_TYPE_JSON) { + pInfo->bytes = getJsonValueLen(pData); + } else { + pInfo->bytes = varDataTLen(pData); + } } (void)memcpy(pInfo->buf, pData, pInfo->bytes); if (pkData != NULL) { if (IS_VAR_DATA_TYPE(pInfo->pkType)) { - pInfo->pkBytes = varDataTLen(pkData); + if (pInfo->pkType == TSDB_DATA_TYPE_JSON) { + pInfo->pkBytes = getJsonValueLen(pkData); + } else { + pInfo->pkBytes = varDataTLen(pkData); + } } (void)memcpy(pInfo->buf + pInfo->bytes, pkData, pInfo->pkBytes); pInfo->pkData = pInfo->buf + pInfo->bytes; @@ -2985,7 +2993,11 @@ static int32_t doSaveLastrow(SqlFunctionCtx* pCtx, char* pData, int32_t rowIndex pInfo->isNull = false; if (IS_VAR_DATA_TYPE(pInputCol->info.type)) { - pInfo->bytes = varDataTLen(pData); + if (pInputCol->info.type == TSDB_DATA_TYPE_JSON) { + pInfo->bytes = getJsonValueLen(pData); + } else { + pInfo->bytes = varDataTLen(pData); + } } (void)memcpy(pInfo->buf, pData, pInfo->bytes); @@ -2994,7 +3006,11 @@ static int32_t doSaveLastrow(SqlFunctionCtx* pCtx, 
char* pData, int32_t rowIndex if (pCtx->hasPrimaryKey && !colDataIsNull_s(pkCol, rowIndex)) { char* pkData = colDataGetData(pkCol, rowIndex); if (IS_VAR_DATA_TYPE(pInfo->pkType)) { - pInfo->pkBytes = varDataTLen(pkData); + if (pInfo->pkType == TSDB_DATA_TYPE_JSON) { + pInfo->pkBytes = getJsonValueLen(pkData); + } else { + pInfo->pkBytes = varDataTLen(pkData); + } } (void)memcpy(pInfo->buf + pInfo->bytes, pkData, pInfo->pkBytes); pInfo->pkData = pInfo->buf + pInfo->bytes; @@ -5872,7 +5888,11 @@ void modeFunctionCleanupExt(SqlFunctionCtx* pCtx) { static int32_t saveModeTupleData(SqlFunctionCtx* pCtx, char* data, SModeInfo *pInfo, STuplePos* pPos) { if (IS_VAR_DATA_TYPE(pInfo->colType)) { - (void)memcpy(pInfo->buf, data, varDataTLen(data)); + if (pInfo->colType == TSDB_DATA_TYPE_JSON) { + (void)memcpy(pInfo->buf, data, getJsonValueLen(data)); + } else { + (void)memcpy(pInfo->buf, data, varDataTLen(data)); + } } else { (void)memcpy(pInfo->buf, data, pInfo->colBytes); } @@ -5882,7 +5902,16 @@ static int32_t saveModeTupleData(SqlFunctionCtx* pCtx, char* data, SModeInfo *pI static int32_t doModeAdd(SModeInfo* pInfo, int32_t rowIndex, SqlFunctionCtx* pCtx, char* data) { int32_t code = TSDB_CODE_SUCCESS; - int32_t hashKeyBytes = IS_STR_DATA_TYPE(pInfo->colType) ? varDataTLen(data) : pInfo->colBytes; + int32_t hashKeyBytes; + if (IS_VAR_DATA_TYPE(pInfo->colType)) { + if (pInfo->colType == TSDB_DATA_TYPE_JSON) { + hashKeyBytes = getJsonValueLen(data); + } else { + hashKeyBytes = varDataTLen(data); + } + } else { + hashKeyBytes = pInfo->colBytes; + } SModeItem* pHashItem = (SModeItem *)taosHashGet(pInfo->pHash, data, hashKeyBytes); if (pHashItem == NULL) { @@ -6654,14 +6683,32 @@ static void doSaveRateInfo(SRateInfo* pRateInfo, bool isFirst, int64_t ts, char* pRateInfo->firstValue = v; pRateInfo->firstKey = ts; if (pRateInfo->firstPk) { - int32_t pkBytes = IS_VAR_DATA_TYPE(pRateInfo->pkType) ? 
varDataTLen(pk) : pRateInfo->pkBytes; + int32_t pkBytes; + if (IS_VAR_DATA_TYPE(pRateInfo->pkType)) { + if (pRateInfo->pkType == TSDB_DATA_TYPE_JSON) { + pkBytes = getJsonValueLen(pk); + } else { + pkBytes = varDataTLen(pk); + } + } else { + pkBytes = pRateInfo->pkBytes; + } (void)memcpy(pRateInfo->firstPk, pk, pkBytes); } } else { pRateInfo->lastValue = v; pRateInfo->lastKey = ts; if (pRateInfo->lastPk) { - int32_t pkBytes = IS_VAR_DATA_TYPE(pRateInfo->pkType) ? varDataTLen(pk) : pRateInfo->pkBytes; + int32_t pkBytes; + if (IS_VAR_DATA_TYPE(pRateInfo->pkType)) { + if (pRateInfo->pkType == TSDB_DATA_TYPE_JSON) { + pkBytes = getJsonValueLen(pk); + } else { + pkBytes = varDataTLen(pk); + } + } else { + pkBytes = pRateInfo->pkBytes; + } (void)memcpy(pRateInfo->lastPk, pk, pkBytes); } } diff --git a/tests/army/query/function/test_selection_function_with_json.py b/tests/army/query/function/test_selection_function_with_json.py new file mode 100644 index 0000000000..e1f8090ae3 --- /dev/null +++ b/tests/army/query/function/test_selection_function_with_json.py @@ -0,0 +1,106 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +from frame import etool +from frame.etool import * +from frame.log import * +from frame.cases import * +from frame.sql import * +from frame.caseBase import * +from frame.common import * + +class TDTestCase(TBase): + updatecfgDict = { + "keepColumnName": "1", + "ttlChangeOnWrite": "1", + "querySmaOptimize": "1", + "slowLogScope": "none", + "queryBufferSize": 10240 + } + + def insert_data(self): + tdLog.info(f"insert data.") + tdSql.execute("drop database if exists ts_5763;") + tdSql.execute("create database ts_5763;") + tdSql.execute("use ts_5763;") + tdSql.execute("select database();") + tdSql.execute("CREATE STABLE metrics (ts TIMESTAMP, v DOUBLE) TAGS (labels JSON)") + tdSql.execute("""CREATE TABLE `metrics_0` USING `metrics` (`labels`) TAGS ('{"ident":"192.168.56.167"}');""") + tdSql.execute("""CREATE TABLE `metrics_1` USING `metrics` (`labels`) TAGS ('{"ident":"192.168.56.168"}');""") + tdSql.execute("""CREATE TABLE `metrics_2` USING `metrics` (`labels`) TAGS ('{"ident":"192.168.56.169"}');""") + tdSql.execute("""CREATE TABLE `metrics_3` USING `metrics` (`labels`) TAGS ('{"ident":"192.168.56.170"}');""") + tdSql.execute("""CREATE TABLE `metrics_5` USING `metrics` (`labels`) TAGS ('{"asset_name":"中国政务网"}');""") + tdSql.execute("""CREATE TABLE `metrics_6` USING `metrics` (`labels`) TAGS ('{"asset_name":"地大物博阿拉丁快解放啦上课交电费"}');""") + tdSql.execute("""CREATE TABLE `metrics_7` USING `metrics` (`labels`) TAGS ('{"asset_name":"no1241-上的六块腹肌阿斯利康的肌肤轮廓设计大方"}');""") + tdSql.execute("""CREATE TABLE `metrics_8` USING `metrics` (`labels`) TAGS ('{"asset_name":"no1241-上的六块腹肌阿斯利康的肌肤轮廓设计大方","ident":"192.168.0.1"}');""") + tdSql.execute("""CREATE TABLE `metrics_9` USING `metrics` (`labels`) 
TAGS ('{"asset_name":"no1241-上的六块腹肌阿斯利康的肌肤轮廓设计大方","ident":"192.168.0.1"}');""") + tdSql.execute("""CREATE TABLE `metrics_10` USING `metrics` (`labels`) TAGS ('{"asset_name":"上的咖啡机no1241-上的六块腹肌阿斯利康的肌肤轮廓设计大方","ident":"192.168.0.1"}');""") + + tdSql.execute("insert into metrics_0 values ('2024-12-12 16:34:39.326',1)") + tdSql.execute("insert into metrics_0 values ('2024-12-12 16:34:40.891',2)") + tdSql.execute("insert into metrics_0 values ('2024-12-12 16:34:41.986',3)") + tdSql.execute("insert into metrics_0 values ('2024-12-12 16:34:42.992',4)") + tdSql.execute("insert into metrics_0 values ('2024-12-12 16:34:46.927',5)") + tdSql.execute("insert into metrics_0 values ('2024-12-12 16:34:48.473',6)") + tdSql.execute("insert into metrics_1 select * from metrics_0") + tdSql.execute("insert into metrics_2 select * from metrics_0") + tdSql.execute("insert into metrics_3 select * from metrics_0") + tdSql.execute("insert into metrics_5 select * from metrics_0") + tdSql.execute("insert into metrics_6 select * from metrics_0") + tdSql.execute("insert into metrics_7 select * from metrics_0") + tdSql.execute("insert into metrics_8 values ('2024-12-12 19:05:36.459',1)") + tdSql.execute("insert into metrics_8 values ('2024-12-12 19:05:37.388',1)") + tdSql.execute("insert into metrics_8 values ('2024-12-12 19:05:37.622',1)") + tdSql.execute("insert into metrics_8 values ('2024-12-12 19:05:37.852',1)") + tdSql.execute("insert into metrics_8 values ('2024-12-12 19:05:38.081',1)") + tdSql.execute("insert into metrics_8 values ('2024-12-12 19:05:38.307',1)") + tdSql.execute("insert into metrics_8 values ('2024-12-12 19:05:38.535',1)") + tdSql.execute("insert into metrics_8 values ('2024-12-12 19:05:38.792',1)") + tdSql.execute("insert into metrics_8 values ('2024-12-12 19:05:39.035',1)") + tdSql.execute("insert into metrics_8 values ('2024-12-12 19:05:39.240',1)") + tdSql.execute("insert into metrics_9 values ('2024-12-12 19:05:29.270',1)") + tdSql.execute("insert into metrics_9 
values ('2024-12-12 19:05:30.508',1)") + tdSql.execute("insert into metrics_9 values ('2024-12-12 19:05:31.035',1)") + tdSql.execute("insert into metrics_9 values ('2024-12-12 19:05:31.523',1)") + tdSql.execute("insert into metrics_9 values ('2024-12-12 19:05:31.760',1)") + tdSql.execute("insert into metrics_9 values ('2024-12-12 19:05:32.001',1)") + tdSql.execute("insert into metrics_9 values ('2024-12-12 19:05:32.228',1)") + tdSql.execute("insert into metrics_9 values ('2024-12-12 19:05:32.453',1)") + tdSql.execute("insert into metrics_9 values ('2024-12-12 19:05:32.690',1)") + tdSql.execute("insert into metrics_9 values ('2024-12-12 19:05:32.906',1)") + tdSql.execute("insert into metrics_10 values ('2024-12-12 19:06:14.538',1)") + tdSql.execute("insert into metrics_10 values ('2024-12-12 19:06:15.114',1)") + tdSql.execute("insert into metrics_10 values ('2024-12-12 19:06:15.613',1)") + tdSql.execute("insert into metrics_10 values ('2024-12-12 19:06:15.853',1)") + tdSql.execute("insert into metrics_10 values ('2024-12-12 19:06:16.054',1)") + tdSql.execute("insert into metrics_10 values ('2024-12-12 19:06:16.295',1)") + tdSql.execute("insert into metrics_10 values ('2024-12-12 19:06:16.514',1)") + tdSql.execute("insert into metrics_10 values ('2024-12-12 19:06:16.731',1)") + tdSql.execute("insert into metrics_10 values ('2024-12-12 19:06:16.958',1)") + tdSql.execute("insert into metrics_10 values ('2024-12-12 19:06:17.176',1)") + + for i in range(1, 10): + tdSql.query("select _wstart,first(v)-last(v), first(labels->'asset_name'),first(labels->'ident'),mode(labels->'asset_name'),mode(labels->'ident'),last(labels->'asset_name'),last(labels->'ident') from ts_5763.metrics interval(1s)") + tdSql.checkRows(18) + + def run(self): + tdLog.debug(f"start to excute {__file__}") + + self.insert_data() + + + tdLog.success(f"{__file__} successfully executed") + + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git 
a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 879d93ab3a..e82fd5a85d 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -15,6 +15,7 @@ ,,y,army,./pytest.sh python3 ./test.py -f cluster/snapshot.py -N 3 -L 3 -D 2 ,,y,army,./pytest.sh python3 ./test.py -f query/function/test_func_elapsed.py ,,y,army,./pytest.sh python3 ./test.py -f query/function/test_function.py +,,y,army,./pytest.sh python3 ./test.py -f query/function/test_selection_function_with_json.py ,,y,army,./pytest.sh python3 ./test.py -f query/function/test_percentile.py ,,y,army,./pytest.sh python3 ./test.py -f query/function/test_resinfo.py ,,y,army,./pytest.sh python3 ./test.py -f query/function/test_interp.py From bf878a557ed0318e082ebc5b6325340b073fb32e Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 13 Dec 2024 16:20:26 +0800 Subject: [PATCH 15/24] Update 02-management.md --- docs/zh/06-advanced/06-TDgpt/02-management.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/06-advanced/06-TDgpt/02-management.md b/docs/zh/06-advanced/06-TDgpt/02-management.md index ef1206fc04..07203b977d 100644 --- a/docs/zh/06-advanced/06-TDgpt/02-management.md +++ b/docs/zh/06-advanced/06-TDgpt/02-management.md @@ -37,7 +37,7 @@ systemctl status taosanoded |/usr/local/taos/taosanode/bin|可执行文件目录| |/usr/local/taos/taosanode/resource|资源文件目录,链接到文件夹 /var/lib/taos/taosanode/resource/| |/usr/local/taos/taosanode/lib|库文件目录| -|/var/lib/taos/taosanode/model/|模型文件目录,链接到文件夹 /var/lib/taos/taosanode/model| +|/usr/local/taos/taosanode/model/|模型文件目录,链接到文件夹 /var/lib/taos/taosanode/model| |/var/log/taos/taosanode/|日志文件目录| |/etc/taos/taosanode.ini|配置文件| From 3c90b9ac8af7d610df3ee3276c2d2d7f5adbba19 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 13 Dec 2024 16:20:57 +0800 Subject: [PATCH 16/24] Update 03-preprocess.md --- docs/zh/06-advanced/06-TDgpt/03-preprocess.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/docs/zh/06-advanced/06-TDgpt/03-preprocess.md b/docs/zh/06-advanced/06-TDgpt/03-preprocess.md index 9efd2bdf11..b63cae0740 100644 --- a/docs/zh/06-advanced/06-TDgpt/03-preprocess.md +++ b/docs/zh/06-advanced/06-TDgpt/03-preprocess.md @@ -12,7 +12,7 @@ import wndata from './pic/white-noise-data.png' 预处理流程 TDgpt 首先对输入数据进行白噪声检查(White Noise Data check), 检查通过以后针对预测分析,还要进行输入(历史)数据的重采样和时间戳对齐处理(异常检测跳过数据重采样和时间戳对齐步骤)。 -预处理完成以后,再进行预测或异常检测操作。预处理过程部署于预测或异常检测处理逻辑的一部分。 +预处理完成以后,再进行预测或异常检测操作。预处理过程不属于预测或异常检测处理逻辑的一部分。 ### 白噪声检查 From 1c63cd75b10abef88ae7eccb692f576a805d2411 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 13 Dec 2024 16:21:38 +0800 Subject: [PATCH 17/24] Update index.md --- docs/zh/06-advanced/06-TDgpt/04-forecast/index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/06-advanced/06-TDgpt/04-forecast/index.md b/docs/zh/06-advanced/06-TDgpt/04-forecast/index.md index c7388ab9c0..a1149772d0 100644 --- a/docs/zh/06-advanced/06-TDgpt/04-forecast/index.md +++ b/docs/zh/06-advanced/06-TDgpt/04-forecast/index.md @@ -14,7 +14,7 @@ description: 预测算法 ```bash taos> select * from foo; - ts | k | + ts | i32 | ======================================== 2020-01-01 00:00:12.681 | 13 | 2020-01-01 00:00:13.727 | 14 | From 4f878078bdf4770fd224e9566f2410258a2fd0ba Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 13 Dec 2024 16:35:52 +0800 Subject: [PATCH 18/24] Update index.md --- docs/zh/06-advanced/06-TDgpt/04-forecast/index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/06-advanced/06-TDgpt/04-forecast/index.md b/docs/zh/06-advanced/06-TDgpt/04-forecast/index.md index a1149772d0..3981fff8c6 100644 --- a/docs/zh/06-advanced/06-TDgpt/04-forecast/index.md +++ b/docs/zh/06-advanced/06-TDgpt/04-forecast/index.md @@ -42,7 +42,7 @@ algo=expr1 ``` 1. `column_expr`:预测的时序数据列。与异常检测相同,只支持数值类型列输入。 -2. `options`:异常检测函数的参数,使用规则与 anomaly_window 相同。预测支持 `conf`, `every`, `rows`, `start`, `rows` 几个控制参数,其含义如下: +2. 
`options`:预测函数的参数。字符串类型,其中使用 K=V 方式调用算法及相关参数。采用逗号分隔的 K=V 字符串表示,其中的字符串不需要使用单引号、双引号、或转义号等符号,不能使用中文及其他宽字符。预测支持 `conf`, `every`, `rows`, `start`, `rows` 几个控制参数,其含义如下: ### 参数说明 From 697b8749ddd1215b5f895daea9a9978387eaf0b5 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 13 Dec 2024 16:43:52 +0800 Subject: [PATCH 19/24] Update index.md --- docs/zh/06-advanced/06-TDgpt/06-dev/index.md | 43 ++++++++------------ 1 file changed, 18 insertions(+), 25 deletions(-) diff --git a/docs/zh/06-advanced/06-TDgpt/06-dev/index.md b/docs/zh/06-advanced/06-TDgpt/06-dev/index.md index 072a66c7d3..bcd972df8e 100644 --- a/docs/zh/06-advanced/06-TDgpt/06-dev/index.md +++ b/docs/zh/06-advanced/06-TDgpt/06-dev/index.md @@ -19,24 +19,25 @@ Anode的主要目录结构如下图所示 ```bash . +├── bin ├── cfg -├── model -│   └── ad_autoencoder -├── release -├── script -└── taosanalytics - ├── algo - │   ├── ad - │   └── fc - ├── misc - └── test +├── lib +│   └── taosanalytics +│   ├── algo +│   │   ├── ad +│   │   └── fc +│   ├── misc +│   └── test +├── log -> /var/log/taos/taosanode +├── model -> /var/lib/taos/taosanode/model +└── venv -> /var/lib/taos/taosanode/venv ``` |目录|说明| |---|---| |taosanalytics| 源代码目录,其下包含了算法具体保存目录 algo,放置杂项目录 misc,单元测试和集成测试目录 test。 algo 目录下 ad 保存异常检测算法代码,fc 目录保存预测算法代码| -|script|是安装脚本和发布脚本放置目录| +|venv| Python 虚拟环境| |model|放置针对数据集完成的训练模型| |cfg|配置文件目录| @@ -63,7 +64,8 @@ Anode采用算法自动加载模式,因此只识别符合命名约定的 Pytho ```SQL --- algo 后面的参数 name 即为类属性 `name` -SELECT COUNT(*) FROM foo ANOMALY_WINDOW(col_name, 'algo=name') +SELECT COUNT(*) +FROM foo ANOMALY_WINDOW(col_name, 'algo=name') ``` ## 添加具有模型的分析算法 @@ -76,19 +78,10 @@ SELECT COUNT(*) FROM foo ANOMALY_WINDOW(col_name, 'algo=name') ```bash . 
-├── cfg -├── model -│   └── ad_autoencoder -│   ├── ad_autoencoder_foo.dat -│   └── ad_autoencoder_foo.info -├── release -├── script -└── taosanalytics - ├── algo - │   ├── ad - │   └── fc - ├── misc - └── test +└── model + └── ad_autoencoder + ├── ad_autoencoder_foo.dat + └── ad_autoencoder_foo.info ``` From afd11de8fe36f505beaf5f3a62c2eb5b45201a49 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 13 Dec 2024 16:49:53 +0800 Subject: [PATCH 20/24] Update 02-management.md --- docs/zh/06-advanced/06-TDgpt/02-management.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/docs/zh/06-advanced/06-TDgpt/02-management.md b/docs/zh/06-advanced/06-TDgpt/02-management.md index 07203b977d..c41dade3d3 100644 --- a/docs/zh/06-advanced/06-TDgpt/02-management.md +++ b/docs/zh/06-advanced/06-TDgpt/02-management.md @@ -4,11 +4,10 @@ sidebar_label: "安装部署" --- ### 环境准备 -使用 TDgpt 的高级时序数据分析功能需要在 TDengine 集群中安装部署 AI node(Anode)。Anode 可以运行在 Linux/Windows/MacOS 等平台上,同时需要 3.10 或以上版本的 Python 环境支持。 +使用 TDgpt 的高级时序数据分析功能需要在 TDengine 集群中安装部署 AI node(Anode)。Anode 运行在 Linux 平台上,并需要 3.10 或以上版本的 Python 环境支持。 > 部署 Anode 需要 TDengine Enterprise 3.3.4.3 及以后版本,请首先确认搭配 Anode 使用的 TDengine 能够支持 Anode。 ### 安装及卸载 -不同操作系统上安装及部署 Anode 有一些差异,主要是卸载操作、安装路径、服务启停等方面。本文以 Linux 系统为例,说明安装部署的流程。 使用 Linux 环境下的安装包 TDengine-enterprise-anode-1.x.x.tar.gz 可进行 Anode 的安装部署工作,命令如下: ```bash From b55108105904626e28857769f4273179f79ab4b4 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 13 Dec 2024 17:18:16 +0800 Subject: [PATCH 21/24] Update 03-ad.md --- docs/zh/06-advanced/06-TDgpt/06-dev/03-ad.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/06-advanced/06-TDgpt/06-dev/03-ad.md b/docs/zh/06-advanced/06-TDgpt/06-dev/03-ad.md index 5b49db330e..c48ce42836 100644 --- a/docs/zh/06-advanced/06-TDgpt/06-dev/03-ad.md +++ b/docs/zh/06-advanced/06-TDgpt/06-dev/03-ad.md @@ -16,7 +16,7 @@ sidebar_label: "异常检测" ```python import numpy as np -from service import 
AbstractAnomalyDetectionService +from taosanalytics.service import AbstractAnomalyDetectionService # 算法实现类名称 需要以下划线 "_" 开始,并以 Service 结束 class _MyAnomalyDetectionService(AbstractAnomalyDetectionService): From 5b829b76ecfed09f1fc10d699cd039c87e67fdf9 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 13 Dec 2024 17:24:13 +0800 Subject: [PATCH 22/24] Update 02-forecast.md --- docs/zh/06-advanced/06-TDgpt/06-dev/02-forecast.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/06-advanced/06-TDgpt/06-dev/02-forecast.md b/docs/zh/06-advanced/06-TDgpt/06-dev/02-forecast.md index 954076c8fd..7f4f81034e 100644 --- a/docs/zh/06-advanced/06-TDgpt/06-dev/02-forecast.md +++ b/docs/zh/06-advanced/06-TDgpt/06-dev/02-forecast.md @@ -34,7 +34,7 @@ return { ```python import numpy as np -from service import AbstractForecastService +from taosanalytics.service import AbstractForecastService # 算法实现类名称 需要以下划线 "_" 开始,并以 Service 结束 class _MyForecastService(AbstractForecastService): From 0b2705d68022467c9ca01ab0016ac5876b9c83ea Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 13 Dec 2024 17:36:20 +0800 Subject: [PATCH 23/24] Update 02-management.md --- docs/zh/06-advanced/06-TDgpt/02-management.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/zh/06-advanced/06-TDgpt/02-management.md b/docs/zh/06-advanced/06-TDgpt/02-management.md index c41dade3d3..b37c39944f 100644 --- a/docs/zh/06-advanced/06-TDgpt/02-management.md +++ b/docs/zh/06-advanced/06-TDgpt/02-management.md @@ -63,7 +63,7 @@ pidfile = /usr/local/taos/taosanode/taosanode.pid # conflict with systemctl, so do NOT uncomment this # daemonize = /var/log/taos/taosanode/taosanode.log -# log directory +# uWSGI log files logto = /var/log/taos/taosanode/taosanode.log # wWSGI monitor port @@ -73,7 +73,7 @@ stats = 127.0.0.1:8387 virtualenv = /usr/local/taos/taosanode/venv/ [taosanode] -# default app log file +# default taosanode log file app-log = 
/var/log/taos/taosanode/taosanode.app.log # model storage directory From 5ff10141f558fd25564a28814f485e7af9b076dc Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 13 Dec 2024 18:15:41 +0800 Subject: [PATCH 24/24] Update 02-forecast.md --- .../06-TDgpt/06-dev/02-forecast.md | 21 ++++++++++--------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/docs/zh/06-advanced/06-TDgpt/06-dev/02-forecast.md b/docs/zh/06-advanced/06-TDgpt/06-dev/02-forecast.md index 7f4f81034e..0584c87311 100644 --- a/docs/zh/06-advanced/06-TDgpt/06-dev/02-forecast.md +++ b/docs/zh/06-advanced/06-TDgpt/06-dev/02-forecast.md @@ -36,6 +36,7 @@ return { import numpy as np from taosanalytics.service import AbstractForecastService + # 算法实现类名称 需要以下划线 "_" 开始,并以 Service 结束 class _MyForecastService(AbstractForecastService): """ 定义类,从 AbstractForecastService 继承并实现其定义的抽象方法 execute """ @@ -51,12 +52,12 @@ class _MyForecastService(AbstractForecastService): super().__init__() def execute(self): - """ 算法逻辑的核心实现""" + """ 算法逻辑的核心实现""" res = [] """这个预测算法固定返回 1 作为预测值,预测值的数量是用户通过 self.fc_rows 指定""" ts_list = [self.start_ts + i * self.time_step for i in range(self.fc_rows)] - res.app(ts_list) # 设置预测结果时间戳列 + res.append(ts_list) # 设置预测结果时间戳列 """生成全部为 1 的预测结果 """ res_list = [1] * self.fc_rows @@ -64,18 +65,18 @@ class _MyForecastService(AbstractForecastService): """检查用户输入,是否要求返回预测置信区间上下界""" if self.return_conf: - """对于没有计算预测置信区间上下界的算法,直接返回预测值作为上下界即可""" - bound_list = [1] * self.fc_rows - res.append(bound_list) # 预测结果置信区间下界 - res.append(bound_list) # 预测结果执行区间上界 + """对于没有计算预测置信区间上下界的算法,直接返回预测值作为上下界即可""" + bound_list = [1] * self.fc_rows + res.append(bound_list) # 预测结果置信区间下界 + res.append(bound_list) # 预测结果执行区间上界 """返回结果""" - return { "res": res, "mse": 0} + return {"res": res, "mse": 0} - def set_params(self, params): - """该算法无需任何输入参数,直接重载父类该函数,不处理算法参数设置逻辑""" - pass + """该算法无需任何输入参数,直接调用父类函数,不处理算法参数设置逻辑""" + return super().set_params(params) + ``` 将该文件保存在 `./taosanalytics/algo/fc/` 目录下,然后重启 taosanode 
服务。在 TDengine 命令行接口中执行 `SHOW ANODES FULL` 能够看到新加入的算法。应用就可以通过 SQL 语句调用该预测算法。