diff --git a/.github/workflows/taosd-ci-build.yml b/.github/workflows/taosd-ci-build.yml
index 372008b585..4972aebebe 100644
--- a/.github/workflows/taosd-ci-build.yml
+++ b/.github/workflows/taosd-ci-build.yml
@@ -11,7 +11,7 @@ on:
- 'packaging/**'
- 'tests/**'
- '*.md'
-
+
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
@@ -43,15 +43,33 @@ jobs:
if: runner.os == 'Linux'
run: |
sudo apt update -y
- sudo apt install -y build-essential cmake \
- libgeos-dev libjansson-dev libsnappy-dev liblzma-dev libz-dev \
- zlib1g-dev pkg-config libssl-dev gawk
+ sudo apt install -y \
+ build-essential \
+ cmake \
+ gawk \
+ libgeos-dev \
+ libjansson-dev \
+ liblzma-dev \
+ libsnappy-dev \
+ libssl-dev \
+ libz-dev \
+ pkg-config \
+          zlib1g-dev
- name: Install dependencies on macOS
if: runner.os == 'macOS'
run: |
brew update
- brew install argp-standalone gflags pkg-config snappy zlib geos jansson gawk openssl
+ brew install \
+ argp-standalone \
+ gawk \
+ gflags \
+ geos \
+ jansson \
+ openssl \
+ pkg-config \
+ snappy \
+ zlib
- name: Build and install TDengine
run: |
@@ -80,7 +98,7 @@ jobs:
run: |
taosBenchmark -t 10 -n 10 -y
taos -s "select count(*) from test.meters"
-
+
- name: Clean up
if: always()
run: |
diff --git a/README-CN.md b/README-CN.md
index ca046dbe13..40e97de2ba 100644
--- a/README-CN.md
+++ b/README-CN.md
@@ -69,6 +69,8 @@ TDengine 是一款开源、高性能、云原生的时序数据库 (Time-Series
TDengine 目前可以在 Linux、 Windows、macOS 等平台上安装和运行。任何 OS 的应用也可以选择 taosAdapter 的 RESTful 接口连接服务端 taosd。CPU 支持 X64/ARM64,后续会支持 MIPS64、Alpha64、ARM32、RISC-V 等 CPU 架构。目前不支持使用交叉编译器构建。
+如果你想要编译 taosAdapter 或者 taosKeeper,需要安装 Go 1.18 及以上版本。
+
## 3.1 Linux系统
@@ -153,6 +155,10 @@ cmake .. -DBUILD_TOOLS=true -DBUILD_CONTRIB=true
make
```
+如果你想要编译 taosAdapter,需要添加 `-DBUILD_HTTP=false` 选项。
+
+如果你想要编译 taosKeeper,需要添加 `-DBUILD_KEEPER=true` 选项。
+
可以使用Jemalloc作为内存分配器,而不是使用glibc:
```bash
@@ -180,6 +186,10 @@ mkdir debug && cd debug
cmake .. && cmake --build .
```
+如果你想要编译 taosAdapter,需要添加 `-DBUILD_HTTP=false` 选项。
+
+如果你想要编译 taosKeeper,需要添加 `-DBUILD_KEEPER=true` 选项。
+
## 4.3 Windows系统上构建
diff --git a/README.md b/README.md
index 6cfd1980b1..b90a8da5bf 100644
--- a/README.md
+++ b/README.md
@@ -82,6 +82,8 @@ For contributing/building/testing TDengine Connectors, please check the followin
At the moment, TDengine server supports running on Linux/Windows/MacOS systems. Any application can also choose the RESTful interface provided by taosAdapter to connect the taosd service. TDengine supports X64/ARM64 CPU, and it will support MIPS64, Alpha64, ARM32, RISC-V and other CPU architectures in the future. Right now we don't support build with cross-compiling environment.
+If you want to compile taosAdapter or taosKeeper, you need to install Go 1.18 or above.
+
## 3.1 On Linux
@@ -168,6 +170,10 @@ cmake .. -DBUILD_TOOLS=true -DBUILD_CONTRIB=true
make
```
+If you want to compile taosAdapter, you need to add the `-DBUILD_HTTP=false` option.
+
+If you want to compile taosKeeper, you need to add the `-DBUILD_KEEPER=true` option.
+
You can use Jemalloc as memory allocator instead of glibc:
```bash
@@ -196,6 +202,10 @@ mkdir debug && cd debug
cmake .. && cmake --build .
```
+If you want to compile taosAdapter, you need to add the `-DBUILD_HTTP=false` option.
+
+If you want to compile taosKeeper, you need to add the `-DBUILD_KEEPER=true` option.
+
## 4.3 Build on Windows
diff --git a/cmake/cmake.define b/cmake/cmake.define
index a794d927ad..d8809420b1 100644
--- a/cmake/cmake.define
+++ b/cmake/cmake.define
@@ -258,3 +258,11 @@ ELSE()
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-reserved-user-defined-literal -g3 -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
ENDIF()
ENDIF()
+
+# IF(${JEMALLOC_ENABLED})
+# MESSAGE(STATUS "JEMALLOC_ENABLED Enabled")
+# SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-error=attributes")
+# SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=attributes")
+# ELSE()
+# MESSAGE(STATUS "JEMALLOC_ENABLED Disabled")
+# ENDIF()
\ No newline at end of file
diff --git a/docs/en/10-third-party/01-collection/flink.md b/docs/en/10-third-party/01-collection/flink.md
index b225a2d610..698f5644f8 100644
--- a/docs/en/10-third-party/01-collection/flink.md
+++ b/docs/en/10-third-party/01-collection/flink.md
@@ -26,6 +26,7 @@ Flink Connector supports all platforms that can run Flink 1.19 and above version
| Flink Connector Version | Major Changes | TDengine Version|
|-------------------------| ------------------------------------ | ---------------- |
+| 2.1.0 | Fix the issue of writing varchar types from different data sources.| - |
| 2.0.2 | The Table Sink supports types such as RowKind.UPDATE_BEFORE, RowKind.UPDATE_AFTER, and RowKind.DELETE.| - |
| 2.0.1 | Sink supports writing types from Rowdata implementations.| - |
| 2.0.0 | 1.Support SQL queries on data in TDengine database.
2. Support CDC subscription to data in TDengine database.
3. Supports reading and writing to TDengine database using Table SQL. | 3.3.5.1 and higher|
@@ -86,7 +87,8 @@ TDengine currently supports timestamp, number, character, and boolean types, and
| SMALLINT | Short |
| TINYINT | Byte |
| BOOL | Boolean |
-| BINARY | byte[] |
+| VARCHAR | StringData |
+| BINARY | StringData |
| NCHAR | StringData |
| JSON | StringData |
| VARBINARY | byte[] |
@@ -116,7 +118,7 @@ If using Maven to manage a project, simply add the following dependencies in pom
com.taosdata.flink
flink-connector-tdengine
- 2.0.2
+ 2.1.0
```
diff --git a/docs/en/14-reference/05-connector/40-csharp.md b/docs/en/14-reference/05-connector/40-csharp.md
index 01f4f0e81d..e3db8bc351 100644
--- a/docs/en/14-reference/05-connector/40-csharp.md
+++ b/docs/en/14-reference/05-connector/40-csharp.md
@@ -25,6 +25,7 @@ import RequestId from "../../assets/resources/_request_id.mdx";
| Connector Version | Major Changes | TDengine Version |
|-------------------|------------------------------------------------------------|--------------------|
+| 3.1.6 | Optimize WebSocket connection message handling. | - |
| 3.1.5 | Fix WebSocket encoding error for Chinese character length. | - |
| 3.1.4 | Improved WebSocket query and insert performance. | 3.3.2.0 and higher |
| 3.1.3 | Supported WebSocket auto-reconnect. | - |
@@ -39,25 +40,25 @@ For error reporting in other TDengine modules, please refer to [Error Codes](../
## Data Type Mapping
-| TDengine DataType | C# Type |
-|-------------------|------------------|
-| TIMESTAMP | DateTime |
-| TINYINT | sbyte |
-| SMALLINT | short |
-| INT | int |
-| BIGINT | long |
-| TINYINT UNSIGNED | byte |
-| SMALLINT UNSIGNED | ushort |
-| INT UNSIGNED | uint |
-| BIGINT UNSIGNED | ulong |
-| FLOAT | float |
-| DOUBLE | double |
-| BOOL | bool |
-| BINARY | byte[] |
-| NCHAR | string (utf-8 encoded) |
-| JSON | byte[] |
-| VARBINARY | byte[] |
-| GEOMETRY | byte[] |
+| TDengine DataType | C# Type |
+|-------------------|----------|
+| TIMESTAMP | DateTime |
+| TINYINT | sbyte |
+| SMALLINT | short |
+| INT | int |
+| BIGINT | long |
+| TINYINT UNSIGNED | byte |
+| SMALLINT UNSIGNED | ushort |
+| INT UNSIGNED | uint |
+| BIGINT UNSIGNED | ulong |
+| FLOAT | float |
+| DOUBLE | double |
+| BOOL | bool |
+| BINARY | byte[] |
+| NCHAR | string |
+| JSON | byte[] |
+| VARBINARY | byte[] |
+| GEOMETRY | byte[] |
**Note**: JSON type is only supported in tags.
The GEOMETRY type is binary data in little endian byte order, conforming to the WKB standard. For more details, please refer to [Data Types](../../sql-manual/data-types/)
diff --git a/docs/examples/flink/Main.java b/docs/examples/flink/Main.java
index 50a507d1de..b6a0daa9f4 100644
--- a/docs/examples/flink/Main.java
+++ b/docs/examples/flink/Main.java
@@ -198,7 +198,7 @@ splitSql.setSelect("ts, current, voltage, phase, groupid, location")
", current: " + rowData.getFloat(1) +
", voltage: " + rowData.getInt(2) +
", phase: " + rowData.getFloat(3) +
- ", location: " + new String(rowData.getBinary(4)));
+ ", location: " + rowData.getString(4).toString());
sb.append("\n");
return sb.toString();
});
@@ -273,7 +273,7 @@ splitSql.setSelect("ts, current, voltage, phase, groupid, location")
", current: " + row.getFloat(1) +
", voltage: " + row.getInt(2) +
", phase: " + row.getFloat(3) +
- ", location: " + new String(row.getBinary(4)));
+                        ", location: " + row.getString(4).toString());
sb.append("\n");
totalVoltage.addAndGet(row.getInt(2));
}
@@ -311,7 +311,7 @@ splitSql.setSelect("ts, current, voltage, phase, groupid, location")
", current: " + rowData.getFloat(1) +
", voltage: " + rowData.getInt(2) +
", phase: " + rowData.getFloat(3) +
- ", location: " + new String(rowData.getBinary(4)));
+ ", location: " + rowData.getString(4).toString());
sb.append("\n");
totalVoltage.addAndGet(rowData.getInt(2));
return sb.toString();
@@ -353,7 +353,7 @@ splitSql.setSelect("ts, current, voltage, phase, groupid, location")
", current: " + row.getFloat(1) +
", voltage: " + row.getInt(2) +
", phase: " + row.getFloat(3) +
- ", location: " + new String(row.getBinary(4)));
+                        ", location: " + row.getString(4).toString());
sb.append("\n");
totalVoltage.addAndGet(row.getInt(2));
}
@@ -489,9 +489,9 @@ splitSql.setSelect("ts, current, voltage, phase, groupid, location")
" `current` FLOAT," +
" voltage INT," +
" phase FLOAT," +
- " location VARBINARY," +
+ " location VARCHAR(255)," +
" groupid INT," +
- " tbname VARBINARY" +
+ " tbname VARCHAR(255)" +
") WITH (" +
" 'connector' = 'tdengine-connector'," +
" 'td.jdbc.url' = 'jdbc:TAOS-WS://localhost:6041/power?user=root&password=taosdata'," +
@@ -506,9 +506,9 @@ splitSql.setSelect("ts, current, voltage, phase, groupid, location")
" `current` FLOAT," +
" voltage INT," +
" phase FLOAT," +
- " location VARBINARY," +
+ " location VARCHAR(255)," +
" groupid INT," +
- " tbname VARBINARY" +
+ " tbname VARCHAR(255)" +
") WITH (" +
" 'connector' = 'tdengine-connector'," +
" 'td.jdbc.mode' = 'sink'," +
@@ -535,9 +535,9 @@ splitSql.setSelect("ts, current, voltage, phase, groupid, location")
" `current` FLOAT," +
" voltage INT," +
" phase FLOAT," +
- " location VARBINARY," +
+ " location VARCHAR(255)," +
" groupid INT," +
- " tbname VARBINARY" +
+ " tbname VARCHAR(255)" +
") WITH (" +
" 'connector' = 'tdengine-connector'," +
" 'bootstrap.servers' = 'localhost:6041'," +
@@ -554,9 +554,9 @@ splitSql.setSelect("ts, current, voltage, phase, groupid, location")
" `current` FLOAT," +
" voltage INT," +
" phase FLOAT," +
- " location VARBINARY," +
+ " location VARCHAR(255)," +
" groupid INT," +
- " tbname VARBINARY" +
+ " tbname VARCHAR(255)" +
") WITH (" +
" 'connector' = 'tdengine-connector'," +
" 'td.jdbc.mode' = 'cdc'," +
diff --git a/docs/zh/01-index.md b/docs/zh/01-index.md
index 02c6105d43..9014a24598 100644
--- a/docs/zh/01-index.md
+++ b/docs/zh/01-index.md
@@ -8,7 +8,7 @@ TDengine 是一款[开源](https://www.taosdata.com/tdengine/open_source_time-se
TDengine 充分利用了时序数据的特点,提出了“一个数据采集点一张表”与“超级表”的概念,设计了创新的存储引擎,让数据的写入、查询和存储效率都得到极大的提升。为正确理解并使用 TDengine,无论你在工作中是什么角色,请您仔细阅读[数据模型](./basic/model)一章。
-如果你是开发工程师,请一定仔细阅读[开发指南](./develop)一章,该部分对数据库连接、建模、插入数据、查询、流式计算、缓存、数据订阅、用户自定义函数等功能都做了详细介绍,并配有各种编程语言的示例代码。大部分情况下,你只要复制粘贴示例代码,针对自己的应用稍作改动,就能跑起来。对 REST API、各种编程语言的连接器(Connector)想做更多详细了解的话,请看[连接器](./reference/connector)一章。
+如果你是开发工程师,请一定仔细阅读[开发指南](./develop)一章,该部分对数据库连接、建模、写入、查询、流式计算、缓存、数据订阅、用户自定义函数等功能都做了详细介绍,并配有各种编程语言的示例代码。大部分情况下,只要复制粘贴示例代码,针对自己的应用稍作改动,就能跑起来。对 REST API、各种编程语言的连接器(Connector)想做更多详细了解,请看[连接器](./reference/connector)一章。
我们已经生活在大数据时代,纵向扩展已经无法满足日益增长的业务需求,任何系统都必须具有水平扩展的能力,集群成为大数据以及 Database 系统的不可缺失功能。TDengine 团队不仅实现了集群功能,而且将这一重要核心功能开源。怎么部署、管理和维护 TDengine 集群,请仔细参考[运维管理](./operation)一章。
@@ -16,7 +16,7 @@ TDengine 采用 SQL 作为查询语言,大大降低学习成本、降低迁移
如果你是系统管理员,关心安装、升级、容错灾备、关心数据导入、导出、配置参数,如何监测 TDengine 是否健康运行,如何提升系统运行的性能,请仔细参考[运维指南](./operation)一章。
-如果你对数据库内核设计感兴趣,或是开源爱好者,建议仔细阅读[技术内幕](./tdinternal)一章。该章从分布式架构到存储引擎、查询引擎、数据订阅,再到流计算引擎都做了详细阐述。建议对照文档,查看TDengine在GitHub的源代码,对TDengine的设计和编码做深入了解,更欢迎加入开源社区,贡献代码。
+如果你对数据库内核设计感兴趣,或是开源爱好者,建议仔细阅读[技术内幕](./tdinternal)一章。该章从分布式架构到存储引擎、查询引擎、数据订阅,再到流计算引擎都做了详细阐述。建议对照文档,查看 TDengine 在 GitHub 的源代码,对 TDengine 的设计和编码做深入了解,更欢迎加入开源社区,贡献代码。
最后,作为一个开源软件,欢迎大家的参与。如果发现文档有任何错误、描述不清晰的地方,请在每个页面的最下方,点击“编辑本文档”直接进行修改。
diff --git a/docs/zh/02-concept.md b/docs/zh/02-concept.md
index 17acc50892..96e5a306cf 100644
--- a/docs/zh/02-concept.md
+++ b/docs/zh/02-concept.md
@@ -9,7 +9,7 @@ toc_max_heading_level: 4
时序数据,即时间序列数据(Time-Series Data),它们是一组按照时间发生先后顺序进行排列的序列数据。日常生活中,设备、传感器采集的数据就是时序数据,证券交易的记录也是时序数据。因此时序数据的处理并不陌生,特别在是工业自动化以及证券金融行业,专业的时序数据处理软件早已存在,比如工业领域的 PI System 以及金融行业的 KDB。
-这些时序数据是周期、准周期产生的,或事件触发产生的,有的采集频率高,有的采集频率低。一般被发送至服务器中进行汇总并进行实时分析和处理,对系统的运行做出实时监测或预警,对股市行情进行预测。这些数据也可以被长期保存下来,用以进行离线数据分析。比如统计时间区间内设备的运行节奏与产出,分析如何进一步优化配置来提升生产效率;统计一段时间内生产过程中的成本分布,分析如何降低生产成本;统计一段时间内的设备异常值,结合业务分析潜在的安全隐患,以降低故障时长等等。
+这些时序数据是周期、准周期产生的,或事件触发产生的,有的采集频率高,有的采集频率低。一般被发送至服务器进行汇总并进行实时分析和处理,对系统的运行做出实时监测或预警,对股市行情进行预测。这些数据也可以被长期保存下来,用以进行离线数据分析。比如统计时间区间内设备的运行节奏与产出,分析如何进一步优化配置来提升生产效率;统计一段时间内生产过程中的成本分布,分析如何降低生产成本;统计一段时间内的设备异常值,结合业务分析潜在的安全隐患,以降低故障时长等等。
过去的二十年,随着数据通讯成本的急剧下降,以及各种传感技术和智能设备的出现,特别是物联网与工业 4.0 的推动,工业、物联网企业为了监测设备、环境、生产线及整个系统的运行状态,在各个关键点都配有传感器,采集各种数据。从手环、共享出行、智能电表、环境监测设备到电梯、数控机床、挖掘机、工业生产线等都在源源不断的产生海量的实时数据,时序数据的体量正指数级的增长。以智能电表为例,智能电表每隔 15 分钟采集一次数据,每天会自动生成 96 条记录。现在全中国已经有超过 10 亿台智能电表,一天就产生 960 亿条时序数据。一台联网的汽车往往每隔 10 到 15 秒采集一次数据发到云端,那么一天下来就很容易产生 1000 条记录。假设中国有 2 亿车辆联网,它们每天将产生总计 2000 亿条甚至更多的时序数据。
@@ -33,7 +33,7 @@ toc_max_heading_level: 4
7. 用户关注的是一段时间的趋势:对于一条银行交易记录,或者一条微博、微信,对于它的用户而言,每一条都很重要。但对于物联网、工业时序数据,每个数据点与数据点的变化并不大,大家关心的更多是一段时间,比如过去五分钟、一小时数据变化的趋势,不会只针对一个时间点进行。
-8. 数据是有保留期限的:采集的数据一般都有基于时长的保留策略,比如仅仅保留一天、一周、一个月、一年甚至更长时间,该类数据的价值往往是由时间段决定的,因此对于不在重要时间段内的数据,都是可以被视为过期数据整块删除的。
+8. 数据是有保留期限的:采集的数据一般都有基于时长的保留策略,比如仅仅保留一天、一周、一个月、一年甚至更长时间,该类数据的价值往往是由时间段决定的,因此对于不在重要时间段内的数据,都是可以被视为过期数据整块删除的。
9. 需要实时分析计算操作:对于大部分互联网大数据应用,更多的是离线分析,即使有实时分析,但要求并不高。比如用户画像、可以积累一定的用户行为数据后进行,早一天晚一天画不会特别影响结果。但是对于工业、物联网的平台应用以及交易系统,对数据的实时计算要求就往往很高,因为需要根据计算结果进行实时报警、监控,从而避免事故的发生、决策时机的错过。
@@ -47,7 +47,7 @@ toc_max_heading_level: 4
1. 电力能源领域:电力能源领域范围较大,不论是在发电、输电、配电、用电还是其他环节中,各种电力设备都会产生大量时序数据,以风力发电为例,风电机作为大型设备,拥有可能高达数百的数据采集点,因此每日所产生的时序数据量极其之大,对这些数据的监控分析是确保发电环节准确无误的必要工作。在用电环节,对智能电表实时采集回来的电流、电压等数据进行快速计算,实时了解最新的用电总量、尖、峰、平、谷用电量,判断设备是否正常工作。有些时候,电力系统可能需要拉取历史上某一年的全量数据,通过机器学习等技术分析用户的用电习惯、进行负荷预测、节能方案设计、帮助电力公司合理规划电力的供应。或者拉取上个月的尖峰平谷用电量,根据不同价位进行周期性的电费结算,以上都是时序数据在电力能源领域的典型应用。
-2. 车联网/轨道交通领域:车辆的 GPS 、速度、油耗、故障信息等,都是典型的时序数据,通过对它们科学合理地数据分析,可以为车辆管理和优化提供强有力的支持。但是,不同车型采集的点位信息从数百点到数千点之间不一而同,随着联网的交通设备数量越来越多,这些海量的时序数据如何安全上传、数据存储、查询和分析,成为了一个亟待解决的行业问题。对于交通工具的本身,科学合理地处理时序数据可以实现车辆轨迹追踪、无人驾驶、故障预警等功能。对于交通工具的整体配套服务,也可以提供良好的支持。比如,在新一代的智能地铁管理系统中,通过地铁站中各种传感器的时序数据采集分析,可以在站中实时展示各个车厢的拥挤度、温度、舒适度等数据,让用户可以自行选择体验度最佳的出行方案,对于地铁运营商,也可以更好地实现乘客流量的调度管理。
+2. 车联网/轨道交通领域:车辆的 GPS 、速度、油耗、故障信息等,都是典型的时序数据,通过科学合理地数据分析,可以为车辆管理和优化提供强有力的支持。但是,不同车型采集的点位信息从数百点到数千点之间不一而同,随着联网的交通设备数量越来越多,这些海量的时序数据如何安全上传、数据存储、查询和分析,成为了一个亟待解决的行业问题。对于交通工具的本身,科学合理地处理时序数据可以实现车辆轨迹追踪、无人驾驶、故障预警等功能。对于交通工具的整体配套服务,也可以提供良好的支持。比如,在新一代的智能地铁管理系统中,通过地铁站中各种传感器的时序数据采集分析,可以在站中实时展示各个车厢的拥挤度、温度、舒适度等数据,让用户可以自行选择体验度最佳的出行方案,对于地铁运营商,也可以更好地实现乘客流量的调度管理。
3. 智能制造领域:过去的十几年间,许多传统工业企业的数字化得到了长足的发展,单个工厂从传统的几千个数据采集点,到如今数十万点、上百万点,部分远程运维场景面临上万设备、千万点的数据采集存储的需求,这些数据都属于典型的时序数据。就整个工业大数据系统而言,时序数据的处理是相当复杂的。以烟草行业的数据采集为例,设备的工业数据协议各式各样、数据采集单位随着设备类型的不同而不同。数据的实时处理能力随着数据采集点的持续增加而难以跟上,与此同时还要兼顾数据的高性能、高可用、可拓展性等等诸多特性。但从另一个角度来看,如果大数据平台能够解决以上困难,满足企业对于时序数据存储分析的需求,就可以帮助企业实现更加智能化、自动化的生产模式,从而完成质的飞升。
@@ -55,7 +55,7 @@ toc_max_heading_level: 4
5. IT 运维领域:IT 领域中,基础设施(如服务器、网络设备、存储设备)、应用程序运行的过程中会产生大量的时序数据。通过对这些时序数据的监控,可以很快地发现基础设施/应用的运行状态和服务可用性,包括系统是否在线、服务是否正常响应等;也能看到具体到某一个具体的点位的性能指标:如 CPU 利用率、内存利用率、磁盘空间利用率、网络带宽利用率等; 还可以监控系统产生的错误日志和异常事件,包括入侵检测、安全事件日志、权限控制等,最终通过设置报警规则,及时通知管理员或运维人员具体的情况,从而及时发现问题、预防故障,并优化系统性能,确保系统稳定可靠地运行。
-6. 金融领域:金融领域目前正经历着数据管理的一场革命,它们的行情数据属于典型的时序数据,由于保留行情数据的储存期限往往需长达 5 至 10 年,甚至超过 30 年,而且可能全世界各个国家/地区的主流金融市场的交易数据都需要全量保存,因此行情数据的总量数据体量庞大,会轻松达到 TB 级别,造成存储、查询等等各方面的瓶颈。在金融领域中,量化交易平台是最能凸显时序数据处理重要性的革命性应用之一:通过对大量时序行情数据的读取分析来及时响应市场变化,帮助交易者把握投资机会,同时规避不必要的风险,实现资产的稳健增长。可以实现包括但不限于:资产管理、情绪监控、股票回测、交易信号模拟、报表自动生成等等诸多功能。
+6. 金融领域:金融领域目前正经历着数据管理的一场革命,行情数据属于典型的时序数据,由于保留行情数据的储存期限往往需长达 5 至 10 年,甚至超过 30 年,而且可能全世界各个国家/地区的主流金融市场的交易数据都需要全量保存,因此行情数据的总量数据体量庞大,会轻松达到 TB 级别,造成存储、查询等等各方面的瓶颈。在金融领域中,量化交易平台是最能凸显时序数据处理重要性的革命性应用之一:通过对大量时序行情数据的读取分析来及时响应市场变化,帮助交易者把握投资机会,同时规避不必要的风险,实现资产的稳健增长。可以实现包括但不限于:资产管理、情绪监控、股票回测、交易信号模拟、报表自动生成等等诸多功能。
## 处理时序数据所需要的工具
@@ -71,11 +71,11 @@ toc_max_heading_level: 4
5. 缓存(Cache):物联网、工业、金融应用需要实时展示一些设备或股票的最新状态,因此平台需要缓存技术提供快速的数据访问。原因是:由于时序数据体量极大,如果不使用缓存技术,而是进行常规的读取、筛选,那么对于监控设备最新状态之类的计算是十分困难的,将会导致很大的延迟,从而失去“实时”的意义。因此,缓存技术是时序数据处理平台不可缺少的一环, Redis 就是这样一种常用的缓存工具。
-处理时序数据需要一系列模块的协同作业,从数据采集到存储、计算、分析与可视化,再到专用的时序数据算法库,每个环节都有相应的工具支持。这些工具的选择取决于具体的业务需求和数据特点,合理地选用和搭配它们才能做到高效地处理各种类型的时序数据,挖掘数据背后的价值。
+处理时序数据需要一系列模块的协同作业,从数据采集到存储、计算、分析与可视化,再到专用的时序数据算法库,每个环节都有相应的工具支持。这些工具的选择取决于具体的业务需求和数据特点,合理地选用和搭配才能做到高效地处理各种类型的时序数据,挖掘数据背后的价值。
## 专用时序数据处理工具的必要性
-在时序数据的十大特征一节中提到,对于一个优秀的时序大数据处理平台来说,它必然需要具备处理时序数据十大特征的能力。在处理时序数据所需要的工具一节中介绍了时序大数据平台处理时序数据所需要的主要模块/组件。 结合这两节的内容与实际情况,可以发现:处理海量时序数据,其实是一个很庞大复杂的系统。
+在时序数据的十大特征一节中提到,对于一个优秀的时序大数据处理平台来说,必然需要具备处理时序数据十大特征的能力。在处理时序数据所需要的工具一节中介绍了时序大数据平台处理时序数据所需要的主要模块/组件。结合这两节的内容与实际情况,可以发现:处理海量时序数据,其实是一个很庞大复杂的系统。
早些年,为处理日益增长的互联网数据,众多的工具开始出现,最流行的便是 Hadoop 体系。除使用大家所熟悉的 Hadoop 组件如 HDFS、MapReduce、HBase 和 Hive 外,通用的大数据处理平台往往还使用 Kafka 或其他消息队列工具,Redis 或其他缓存软件,Flink 或其他实时流式数据处理软件。存储上也有人选用 MongoDB、Cassandra 或其他 NoSQL 数据库。这样一个典型的大数据处理平台基本上能很好的处理互联网行业的引用,比如典型的用户画像、舆情分析等。
diff --git a/docs/zh/03-intro.md b/docs/zh/03-intro.md
index 4207ab4eb6..d146fd62b6 100644
--- a/docs/zh/03-intro.md
+++ b/docs/zh/03-intro.md
@@ -14,7 +14,7 @@ TDengine 是一个高性能、分布式的时序数据库。通过集成的缓
TDengine OSS 是一个开源的高性能时序数据库,与其他时序数据库相比,它的核心优势在于其集群开源、高性能和云原生架构。而且除了基础的写入、查询和存储功能外,TDengine OSS 还集成了缓存、流式计算和数据订阅等高级功能,这些功能显著简化了系统设计,降低了企业的研发和运营成本。
-在 TDengine OSS 的基础上,企业版 TDengine Enterprise 提供了增强的辅助功能,包括数据的备份恢复、异地容灾、多级存储、视图、权限控制、安全加密、IP 白名单、支持 MQTT、OPC-UA、OPC-DA、PI、Wonderware、Kafka 等各种数据源。这些功能为企业提供了更为全面、安全、可靠和高效的时序数据管理解决方案。更多的细节请看 [TDengine Enterprise](https://www.taosdata.com/tdengine-pro)
+在 TDengine OSS 的基础上,TDengine Enterprise 提供了增强的辅助功能,包括数据的备份恢复、异地容灾、多级存储、视图、权限控制、安全加密、IP 白名单、支持 MQTT、OPC-UA、OPC-DA、PI、Wonderware、Kafka 等各种数据源。这些功能为企业提供了更为全面、安全、可靠和高效的时序数据管理解决方案。更多的细节请看 [TDengine Enterprise](https://www.taosdata.com/tdengine-pro)。
此外,TDengine Cloud 作为一种全托管的云服务,存储与计算分离,分开计费,为企业提供了企业级的工具和服务,彻底解决了运维难题,尤其适合中小规模的用户使用。更多的细节请看[TDengine 云服务](https://cloud.taosdata.com/?utm_source=menu&utm_medium=webcn)
@@ -30,19 +30,19 @@ TDengine 经过特别优化,以适应时间序列数据的独特需求,引
4. 流式计算:TDengine 流式计算引擎提供了实时处理写入的数据流的能力,不仅支持连续查询,还支持事件驱动的流式计算。它提供了替代复杂流处理系统的轻量级解决方案,并能够在高吞吐的数据写入的情况下,提供毫秒级的计算结果延迟。
-5. 数据订阅:TDengine 提供了类似 Kafka 的数据订阅功能。但用户可以通过 SQL 来灵活控制订阅的数据内容,并使用 Kafka 相同的 API 来订阅一张表、一组表、全部列或部分列、甚至整个数据库的数据。TDengine 可以替代需要集成消息队列产品的场景, 从而简化系统设计的复杂度,降低运营维护成本。
+5. 数据订阅:TDengine 提供了类似 Kafka 的数据订阅功能。但用户可以通过 SQL 来灵活控制订阅的数据内容,并使用和 Kafka 相同的 API 来订阅一张表、一组表、全部列或部分列、甚至整个数据库的数据。TDengine 可以替代需要集成消息队列产品的场景, 从而简化系统设计的复杂度,降低运营维护成本。
-6. 可视化/BI:TDengine 本身不提供可视化或 BI 的功能。但通过其 RESTful API, 标准的 JDBC、ODBC 接口,TDengine 能够 Grafana、Google Data Studio、Power BI、Tableau 以及国产 BI 工具无缝集成。
+6. 可视化/BI:TDengine 本身不提供可视化或 BI 的功能。但通过其 RESTful API, 标准的 JDBC、ODBC 接口,TDengine 能够和 Grafana、Google Data Studio、Power BI、Tableau 以及国产 BI 工具无缝集成。
-7. 集群功能:TDengine 支持集群部署,能够随着业务数据量的增长,通过增加节点线性提升系统处理能力,实现水平扩展。同时,通过多副本技术提供高可用性,并支持 Kubernetes 部署。同时还提供了多种运维工具,方便系统管理员更好地管理和维护集群的健壮运行。
+7. 集群功能:TDengine 支持集群部署,能够随着业务数据量的增长,通过增加节点线性提升系统处理能力,实现水平扩展。同时,通过多副本技术提供高可用性,支持 Kubernetes 部署,提供了多种运维工具,方便系统管理员更好地管理和维护集群的健壮运行。
8. 数据迁移:TDengine 提供了多种便捷的数据导入导出功能,包括脚本文件导入导出、数据文件导入导出、taosdump 工具导入导出等。
-9. 编程连接器:TDengine 提供不同语言的连接器,包括 C/C++、Java、Go、Node.js、Rust、Python、C#、R、PHP 等。这些连接器大多都支持原生连接和 WebSocket 两种连接方式。TDengine 也提供 RESTful 接口,任何语言的应用程序可以直接通过 HTTP 请求访问数据库。
+9. 编程连接器:TDengine 提供多种语言的连接器,包括 C/C++、Java、Go、Node.js、Rust、Python、C#、R、PHP 等。这些连接器大多都支持原生连接和 WebSocket 两种连接方式。TDengine 也提供 RESTful 接口,任何语言的应用程序可以直接通过 HTTP 请求访问数据库。
10. 数据安全:TDengine 提供了丰富的用户管理和权限管理功能以控制不同用户对数据库和表的访问权限,提供了 IP 白名单功能以控制不同帐号只能从特定的服务器接入集群。TDengine 支持系统管理员对不同数据库按需加密,数据加密后对读写完全透明且对性能的影响很小。还提供了审计日志功能以记录系统中的敏感操作。
-11. 常用工具:TDengine 还提供了交互式命令行程序(CLI),便于管理集群、检查系统状态、做即时查询。压力测试工具 taosBenchmark,用于测试 TDengine 的性能。TDengine 还提供了图形化管理界面,简化了操作和管理过程。
+11. 常用工具:TDengine 提供了交互式命令行程序(CLI),便于管理集群、检查系统状态、做即时查询。压力测试工具 taosBenchmark,用于测试 TDengine 的性能。TDengine 还提供了图形化管理界面,简化了操作和管理过程。
12. 零代码数据接入:TDengine 企业版提供了丰富的数据接入功能,依托强大的数据接入平台,无需一行代码,只需要做简单的配置即可实现多种数据源的数据接入,目前已经支持的数据源包括:OPC-UA、OPC-DA、PI、MQTT、Kafka、InfluxDB、OpenTSDB、MySQL、SQL Server、Oracle、Wonderware Historian、MongoDB。
@@ -63,8 +63,11 @@ TDengine 经过特别优化,以适应时间序列数据的独特需求,引
6. 核心开源:TDengine 的核心代码,包括集群功能,均在开源协议下公开发布。它在 GitHub 网站全球趋势排行榜上多次位居榜首,显示出其受欢迎程度。同时,TDengine 拥有一个活跃的开发者社区,为技术的持续发展和创新提供了有力支持。
采用 TDengine,企业可以在物联网、车联网、工业互联网等典型场景中显著降低大数据平台的总拥有成本,主要体现在以下几个方面:
+
1. 高性能带来的成本节约:TDengine 卓越的写入、查询和存储性能意味着系统所需的计算资源和存储资源可以大幅度减少。这不仅降低了硬件成本,还减少了能源消耗和维护费用。
+
2. 标准化与兼容性带来的成本效益:由于 TDengine 支持标准 SQL,并与众多第三方软件实现了无缝集成,用户可以轻松地将现有系统迁移到 TDengine 上,无须重写大量代码。这种标准化和兼容性大大降低了学习和迁移成本,缩短了项目周期。
+
3. 简化系统架构带来的成本降低:作为一个极简的时序数据平台,TDengine 集成了消息队列、缓存、流计算等必要功能,避免了额外集成众多其他组件的需要。这种简化的系统架构显著降低了系统的复杂度,从而减少了研发和运营成本,提高了整体运营效率。
## 技术生态
@@ -78,7 +81,7 @@ TDengine 经过特别优化,以适应时间序列数据的独特需求,引
图 1. TDengine 技术生态图
-上图中,左侧是各种数据采集或消息队列,包括 OPC-UA、MQTT、Telegraf、也包括 Kafka,他们的数据将被源源不断的写入到 TDengine。右侧则是可视化、BI 工具、组态软件、应用程序。下侧则是 TDengine 自身提供的命令行程序(CLI)以及可视化管理工具。
+上图中,左侧是各种数据采集或消息队列,包括 OPC-UA、MQTT、Telegraf、也包括 Kafka,它们的数据将被源源不断的写入到 TDengine。右侧是可视化、BI 工具、组态软件、应用程序。下侧是 TDengine 自身提供的命令行程序(CLI)以及可视化管理工具。
## 典型适用场景
diff --git a/docs/zh/10-third-party/01-collection/12-flink.md b/docs/zh/10-third-party/01-collection/12-flink.md
index d74f6cad6d..054d7288a5 100644
--- a/docs/zh/10-third-party/01-collection/12-flink.md
+++ b/docs/zh/10-third-party/01-collection/12-flink.md
@@ -24,6 +24,7 @@ Flink Connector 支持所有能运行 Flink 1.19 及以上版本的平台。
## 版本历史
| Flink Connector 版本 | 主要变化 | TDengine 版本 |
| ------------------| ------------------------------------ | ---------------- |
+| 2.1.0 | 修复不同数据源 varchar 类型写入问题 | - |
| 2.0.2 | Table Sink 支持 RowKind.UPDATE_BEFORE、RowKind.UPDATE_AFTER 和 RowKind.DELETE 类型| - |
| 2.0.1 | Sink 支持对所有继承自 RowData 并已实现的类型进行数据写入| - |
| 2.0.0 | 1. 支持 SQL 查询 TDengine 数据库中的数据
2. 支持 CDC 订阅 TDengine 数据库中的数据
3. 支持 Table SQL 方式读取和写入 TDengine 数据库| 3.3.5.1 及以上版本 |
@@ -84,7 +85,8 @@ TDengine 目前支持时间戳、数字、字符、布尔类型,与 Flink RowD
| SMALLINT | Short |
| TINYINT | Byte |
| BOOL | Boolean |
-| BINARY | byte[] |
+| VARCHAR | StringData |
+| BINARY | StringData |
| NCHAR | StringData |
| JSON | StringData |
| VARBINARY | byte[] |
@@ -113,7 +115,7 @@ env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.AT_LEAST_ONCE);
com.taosdata.flink
flink-connector-tdengine
- 2.0.2
+ 2.1.0
```
diff --git a/docs/zh/14-reference/01-components/02-taosc.md b/docs/zh/14-reference/01-components/02-taosc.md
index 09235557c8..262027bff6 100755
--- a/docs/zh/14-reference/01-components/02-taosc.md
+++ b/docs/zh/14-reference/01-components/02-taosc.md
@@ -490,7 +490,7 @@ TDengine 客户端驱动提供了应用编程所需要的全部 API,并且在
- 支持版本:从 v3.3.4.3 版本开始引入
#### bypassFlag
-- 说明:配置文件所在目录
+- 说明:用于短路测试 `内部参数`
- 类型:整数;
- 取值范围:0:正常写入,1:写入消息在 taos 客户端发送 RPC 消息前返回,2:写入消息在 taosd 服务端收到 RPC 消息后返回,4:写入消息在 taosd 服务端写入内存缓存前返回,8:写入消息在 taosd 服务端数据落盘前返回
- 默认值:0
diff --git a/docs/zh/14-reference/03-taos-sql/02-database.md b/docs/zh/14-reference/03-taos-sql/02-database.md
index 9e42f240d0..35cb99a3dd 100644
--- a/docs/zh/14-reference/03-taos-sql/02-database.md
+++ b/docs/zh/14-reference/03-taos-sql/02-database.md
@@ -37,6 +37,9 @@ database_option: {
| WAL_FSYNC_PERIOD value
| WAL_RETENTION_PERIOD value
| WAL_RETENTION_SIZE value
+ | COMPACT_INTERVAL value
+ | COMPACT_TIME_RANGE value
+ | COMPACT_TIME_OFFSET value
}
```
@@ -81,6 +84,10 @@ database_option: {
- WAL_FSYNC_PERIOD:当 WAL_LEVEL 参数设置为 2 时,用于设置落盘的周期。默认为 3000,单位毫秒。最小为 0,表示每次写入立即落盘;最大为 180000,即三分钟。
- WAL_RETENTION_PERIOD: 为了数据订阅消费,需要 WAL 日志文件额外保留的最大时长策略。WAL 日志清理,不受订阅客户端消费状态影响。单位为 s。默认为 3600,表示在 WAL 保留最近 3600 秒的数据,请根据数据订阅的需要修改这个参数为适当值。
- WAL_RETENTION_SIZE:为了数据订阅消费,需要 WAL 日志文件额外保留的最大累计大小策略。单位为 KB。默认为 0,表示累计大小无上限。
+- COMPACT_INTERVAL:自动 compact 触发周期(从 1970-01-01T00:00:00Z 开始切分的时间周期)。取值范围:0 或 [10m, keep2],单位:m(分钟),h(小时),d(天)。不加时间单位默认单位为天,默认值为 0,即不触发自动 compact 功能。如果 db 中有未完成的 compact 任务,不重复下发 compact 任务。仅企业版 3.3.5.0 版本开始支持。
+- COMPACT_TIME_RANGE:自动 compact 任务触发的 compact 时间范围,取值范围:[-keep2, -duration],单位:m(分钟),h(小时),d(天)。不加时间单位时默认单位为天,默认值为 [0, 0]。取默认值 [0, 0] 时,如果 COMPACT_INTERVAL 大于 0,会按照 [-keep2, -duration] 下发自动 compact。因此,要关闭自动 compact 功能,需要将 COMPACT_INTERVAL 设置为 0。仅企业版 3.3.5.0 版本开始支持。
+- COMPACT_TIME_OFFSET:自动 compact 任务触发的 compact 时间相对本地时间的偏移量。取值范围:[0,23],单位: h(小时),默认值为 0。以 UTC 0 时区为例,如果 COMPACT_INTERVAL 为 1d,当 COMPACT_TIME_OFFSET 为 0 时,在每天 0 点下发自动 compact,如果 COMPACT_TIME_OFFSET 为 2,在每天 2 点下发自动 compact。仅企业版 3.3.5.0 版本开始支持。
+
### 创建数据库示例
@@ -127,6 +134,9 @@ alter_database_option: {
| WAL_RETENTION_PERIOD value
| WAL_RETENTION_SIZE value
| MINROWS value
+ | COMPACT_INTERVAL value
+ | COMPACT_TIME_RANGE value
+ | COMPACT_TIME_OFFSET value
}
```
diff --git a/docs/zh/14-reference/05-connector/40-csharp.mdx b/docs/zh/14-reference/05-connector/40-csharp.mdx
index f58b243689..89bba2e443 100644
--- a/docs/zh/14-reference/05-connector/40-csharp.mdx
+++ b/docs/zh/14-reference/05-connector/40-csharp.mdx
@@ -22,14 +22,15 @@ import RequestId from "./_request_id.mdx";
## 版本历史
-| Connector 版本 | 主要变化 | TDengine 版本 |
-|:-------------|:---------------------------|:--------------|
-| 3.1.5 | 修复 websocket 协议编码中文时长度错误 | - |
-| 3.1.4 | 提升 websocket 查询和写入性能 | 3.3.2.0 及更高版本 |
-| 3.1.3 | 支持 WebSocket 自动重连 | - |
-| 3.1.2 | 修复 schemaless 资源释放 | - |
-| 3.1.1 | 支持 varbinary 和 geometry 类型 | - |
-| 3.1.0 | WebSocket 使用原生实现 | 3.2.1.0 及更高版本 |
+| Connector 版本 | 主要变化 | TDengine 版本 |
+|:-------------|:----------------------------|:--------------|
+| 3.1.6 | 优化 WebSocket 连接接收消息处理。 | - |
+| 3.1.5 | 修复 WebSocket 协议编码中文时长度错误。 | - |
+| 3.1.4 | 提升 WebSocket 查询和写入性能。 | 3.3.2.0 及更高版本 |
+| 3.1.3 | 支持 WebSocket 自动重连。 | - |
+| 3.1.2 | 修复 schemaless 资源释放。 | - |
+| 3.1.1 | 支持 varbinary 和 geometry 类型。 | - |
+| 3.1.0 | WebSocket 使用原生实现。 | 3.2.1.0 及更高版本 |
## 处理异常
@@ -53,14 +54,14 @@ TDengine 其他功能模块的报错,请参考 [错误码](../../../reference/
| DOUBLE | double |
| BOOL | bool |
| BINARY | byte[] |
-| NCHAR | string (utf-8编码) |
+| NCHAR | string |
| JSON | byte[] |
| VARBINARY | byte[] |
| GEOMETRY | byte[] |
**注意**:JSON 类型仅在 tag 中支持。
-GEOMETRY类型是 little endian 字节序的二进制数据,符合 WKB 规范。详细信息请参考 [数据类型](../../taos-sql/data-type/#数据类型)
-WKB规范请参考[Well-Known Binary (WKB)](https://libgeos.org/specifications/wkb/)
+GEOMETRY类型是 little endian 字节序的二进制数据,符合 WKB 规范。详细信息请参考 [数据类型](../../taos-sql/data-type/#数据类型)。
+WKB规范请参考[Well-Known Binary (WKB)](https://libgeos.org/specifications/wkb/)。
## 示例程序汇总
diff --git a/source/client/src/clientEnv.c b/source/client/src/clientEnv.c
index 0d4af75d04..11bdb16eca 100644
--- a/source/client/src/clientEnv.c
+++ b/source/client/src/clientEnv.c
@@ -38,7 +38,6 @@
#include "tversion.h"
#include "tconv.h"
-
#include "cus_name.h"
#define TSC_VAR_NOT_RELEASE 1
diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c
index 05898ea26e..2fd16b4f67 100644
--- a/source/common/src/tglobal.c
+++ b/source/common/src/tglobal.c
@@ -959,7 +959,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
TAOS_CHECK_RETURN(cfgAddString(pCfg, "s3Accesskey", tsS3AccessKey[0], CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER_LAZY,CFG_CATEGORY_GLOBAL));
TAOS_CHECK_RETURN(cfgAddString(pCfg, "s3Endpoint", tsS3Endpoint[0], CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER_LAZY,CFG_CATEGORY_GLOBAL));
- TAOS_CHECK_RETURN(cfgAddString(pCfg, "s3BucketName", tsS3BucketName, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER_LAZY,CFG_CATEGORY_GLOBAL));
+ TAOS_CHECK_RETURN(cfgAddString(pCfg, "s3BucketName", tsS3BucketName, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER_LAZY,CFG_CATEGORY_LOCAL));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "s3PageCacheSize", tsS3PageCacheSize, 4, 1024 * 1024 * 1024, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER_LAZY,CFG_CATEGORY_GLOBAL));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "s3UploadDelaySec", tsS3UploadDelaySec, 1, 60 * 60 * 24 * 30, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER,CFG_CATEGORY_GLOBAL));
diff --git a/source/dnode/mgmt/exe/dmMain.c b/source/dnode/mgmt/exe/dmMain.c
index 51e4d86acb..bd7da3f4d6 100644
--- a/source/dnode/mgmt/exe/dmMain.c
+++ b/source/dnode/mgmt/exe/dmMain.c
@@ -21,12 +21,15 @@
#include "tglobal.h"
#include "version.h"
#include "tconv.h"
-#ifdef TD_JEMALLOC_ENABLED
-#include "jemalloc/jemalloc.h"
-#endif
#include "dmUtil.h"
#include "tcs.h"
#include "qworker.h"
+
+#ifdef TD_JEMALLOC_ENABLED
+#define ALLOW_FORBID_FUNC
+#include "jemalloc/jemalloc.h"
+#endif
+
#include "cus_name.h"
// clang-format off
diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c
index 3677fc5616..234d4f41e1 100644
--- a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c
+++ b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c
@@ -1016,15 +1016,15 @@ SArray *vmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_RESUME, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_STOP, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_CHECK_POINT_SOURCE, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
- if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_CHECKPOINT_READY, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;
- if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_CHECKPOINT_READY_RSP, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;
- if (dmSetMgmtHandle(pArray, TDMT_STREAM_RETRIEVE_TRIGGER, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;
- if (dmSetMgmtHandle(pArray, TDMT_STREAM_RETRIEVE_TRIGGER_RSP, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;
+ if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_CHECKPOINT_READY, vmPutMsgToStreamCtrlQueue, 0) == NULL) goto _OVER;
+ if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_CHECKPOINT_READY_RSP, vmPutMsgToStreamCtrlQueue, 0) == NULL) goto _OVER;
+ if (dmSetMgmtHandle(pArray, TDMT_STREAM_RETRIEVE_TRIGGER, vmPutMsgToStreamCtrlQueue, 0) == NULL) goto _OVER;
+ if (dmSetMgmtHandle(pArray, TDMT_STREAM_RETRIEVE_TRIGGER_RSP, vmPutMsgToStreamCtrlQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_TASK_UPDATE, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_TASK_RESET, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_STREAM_HEARTBEAT_RSP, vmPutMsgToStreamCtrlQueue, 0) == NULL) goto _OVER;
- if (dmSetMgmtHandle(pArray, TDMT_MND_STREAM_REQ_CHKPT_RSP, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;
- if (dmSetMgmtHandle(pArray, TDMT_MND_STREAM_CHKPT_REPORT_RSP, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;
+ if (dmSetMgmtHandle(pArray, TDMT_MND_STREAM_REQ_CHKPT_RSP, vmPutMsgToStreamCtrlQueue, 0) == NULL) goto _OVER;
+ if (dmSetMgmtHandle(pArray, TDMT_MND_STREAM_CHKPT_REPORT_RSP, vmPutMsgToStreamCtrlQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_GET_STREAM_PROGRESS, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_UPDATE_CHKPT, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c
index 00fa2a8c95..d71e0b02c4 100644
--- a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c
+++ b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c
@@ -396,7 +396,8 @@ void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode, bool commitAndRemoveWal,
tqNotifyClose(pVnode->pImpl->pTq);
- dInfo("vgId:%d, wait for vnode stream queue:%p is empty", pVnode->vgId, pVnode->pStreamQ);
+ dInfo("vgId:%d, wait for vnode stream queue:%p is empty, %d remains", pVnode->vgId,
+ pVnode->pStreamQ, taosQueueItemSize(pVnode->pStreamQ));
while (!taosQueueEmpty(pVnode->pStreamQ)) taosMsleep(10);
dInfo("vgId:%d, wait for vnode stream ctrl queue:%p is empty", pVnode->vgId, pVnode->pStreamCtrlQ);
diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c
index b752d35b0a..6ff6e2726f 100644
--- a/source/dnode/mnode/impl/src/mndStream.c
+++ b/source/dnode/mnode/impl/src/mndStream.c
@@ -1239,51 +1239,22 @@ int32_t extractStreamNodeList(SMnode *pMnode) {
}
static int32_t mndCheckTaskAndNodeStatus(SMnode *pMnode) {
- bool ready = true;
+ int32_t code = 0;
if (mndStreamNodeIsUpdated(pMnode)) {
- TAOS_RETURN(TSDB_CODE_STREAM_TASK_IVLD_STATUS);
+ return TSDB_CODE_STREAM_TASK_IVLD_STATUS;
}
streamMutexLock(&execInfo.lock);
if (taosArrayGetSize(execInfo.pNodeList) == 0) {
mDebug("stream task node change checking done, no vgroups exist, do nothing");
if (taosArrayGetSize(execInfo.pTaskList) != 0) {
- streamMutexUnlock(&execInfo.lock);
mError("stream task node change checking done, no vgroups exist, but task list is not empty");
- return TSDB_CODE_FAILED;
- }
- }
-
- for (int32_t i = 0; i < taosArrayGetSize(execInfo.pTaskList); ++i) {
- STaskId *p = taosArrayGet(execInfo.pTaskList, i);
- if (p == NULL) {
- continue;
- }
-
- STaskStatusEntry *pEntry = taosHashGet(execInfo.pTaskMap, p, sizeof(*p));
- if (pEntry == NULL) {
- continue;
- }
-
- if (pEntry->status != TASK_STATUS__READY) {
- mDebug("s-task:0x%" PRIx64 "-0x%x (nodeId:%d) status:%s, checkpoint not issued", pEntry->id.streamId,
- (int32_t)pEntry->id.taskId, pEntry->nodeId, streamTaskGetStatusStr(pEntry->status));
- ready = false;
- break;
- }
-
- if (pEntry->hTaskId != 0) {
- mDebug("s-task:0x%" PRIx64 "-0x%x (nodeId:%d) status:%s related fill-history task:0x%" PRIx64
- " exists, checkpoint not issued",
- pEntry->id.streamId, (int32_t)pEntry->id.taskId, pEntry->nodeId, streamTaskGetStatusStr(pEntry->status),
- pEntry->hTaskId);
- ready = false;
- break;
+ code = TSDB_CODE_STREAM_TASK_IVLD_STATUS;
}
}
streamMutexUnlock(&execInfo.lock);
- return ready ? 0 : -1;
+ return code;
}
int64_t getStreamTaskLastReadyState(SArray *pTaskList, int64_t streamId) {
@@ -1297,7 +1268,22 @@ int64_t getStreamTaskLastReadyState(SArray *pTaskList, int64_t streamId) {
continue;
}
- if (pEntry->status == TASK_STATUS__READY && ts < pEntry->startTime) {
+ // -1 denote not ready now or never ready till now
+ if (pEntry->hTaskId != 0) {
+ mInfo("s-task:0x%" PRIx64 "-0x%x (nodeId:%d) status:%s related fill-history task:0x%" PRIx64
+ " exists, checkpoint not issued",
+ pEntry->id.streamId, (int32_t)pEntry->id.taskId, pEntry->nodeId, streamTaskGetStatusStr(pEntry->status),
+ pEntry->hTaskId);
+ return -1;
+ }
+
+ if (pEntry->status != TASK_STATUS__READY) {
+ mInfo("s-task:0x%" PRIx64 "-0x%x (nodeId:%d) status:%s, not ready for checkpoint", pEntry->id.streamId,
+ (int32_t)pEntry->id.taskId, pEntry->nodeId, streamTaskGetStatusStr(pEntry->status));
+ return -1;
+ }
+
+ if (ts < pEntry->startTime) {
ts = pEntry->startTime;
taskId = pEntry->id.taskId;
}
@@ -1330,11 +1316,11 @@ static bool isStreamReadyHelp(int64_t now, SStreamObj* pStream) {
int64_t lastReadyTs = getStreamTaskLastReadyState(execInfo.pTaskList, pStream->uid);
if ((lastReadyTs == -1) || ((lastReadyTs != -1) && ((now - lastReadyTs) < tsStreamCheckpointInterval * 1000))) {
+
if (lastReadyTs != -1) {
- mInfo("not start checkpoint, stream:0x%"PRIx64" last ready ts:%"PRId64" ready duration:%"PRId64" less than threshold",
- pStream->uid, lastReadyTs, now - lastReadyTs);
- } else {
- mInfo("not start checkpoint, stream:0x%"PRIx64" not ready now", pStream->uid);
+ mInfo("not start checkpoint, stream:0x%" PRIx64 " last ready ts:%" PRId64 " ready duration:%" PRId64
+ "ms less than threshold",
+ pStream->uid, lastReadyTs, (now - lastReadyTs));
}
ready = false;
@@ -1355,7 +1341,7 @@ static int32_t mndProcessStreamCheckpoint(SRpcMsg *pReq) {
int32_t numOfCheckpointTrans = 0;
if ((code = mndCheckTaskAndNodeStatus(pMnode)) != 0) {
- TAOS_RETURN(TSDB_CODE_STREAM_TASK_IVLD_STATUS);
+ return TSDB_CODE_STREAM_TASK_IVLD_STATUS;
}
SArray *pList = taosArrayInit(4, sizeof(SCheckpointInterval));
@@ -1407,7 +1393,7 @@ static int32_t mndProcessStreamCheckpoint(SRpcMsg *pReq) {
}
int32_t numOfQual = taosArrayGetSize(pList);
- if (numOfCheckpointTrans > tsMaxConcurrentCheckpoint) {
+ if (numOfCheckpointTrans >= tsMaxConcurrentCheckpoint) {
mDebug(
"%d stream(s) checkpoint interval longer than %ds, ongoing checkpoint trans:%d reach maximum allowed:%d, new "
"checkpoint trans are not allowed, wait for 30s",
@@ -2713,20 +2699,51 @@ static void doSendQuickRsp(SRpcHandleInfo *pInfo, int32_t msgSize, int32_t vgId,
}
}
+static int32_t doCleanReqList(SArray* pList, SCheckpointConsensusInfo* pInfo) {
+ int32_t alreadySend = taosArrayGetSize(pList);
+
+ for (int32_t i = 0; i < alreadySend; ++i) {
+ int32_t *taskId = taosArrayGet(pList, i);
+ if (taskId == NULL) {
+ continue;
+ }
+
+ for (int32_t k = 0; k < taosArrayGetSize(pInfo->pTaskList); ++k) {
+ SCheckpointConsensusEntry *pe = taosArrayGet(pInfo->pTaskList, k);
+ if ((pe != NULL) && (pe->req.taskId == *taskId)) {
+ taosArrayRemove(pInfo->pTaskList, k);
+ break;
+ }
+ }
+ }
+
+ return alreadySend;
+}
+
int32_t mndProcessConsensusInTmr(SRpcMsg *pMsg) {
SMnode *pMnode = pMsg->info.node;
int64_t now = taosGetTimestampMs();
+ bool allReady = true;
+ SArray *pNodeSnapshot = NULL;
+ int32_t maxAllowedTrans = 50;
+ int32_t numOfTrans = 0;
+ int32_t code = 0;
+ void *pIter = NULL;
+
+ SArray *pList = taosArrayInit(4, sizeof(int32_t));
+ if (pList == NULL) {
+ return terrno;
+ }
+
SArray *pStreamList = taosArrayInit(4, sizeof(int64_t));
if (pStreamList == NULL) {
+ taosArrayDestroy(pList);
return terrno;
}
mDebug("start to process consensus-checkpointId in tmr");
- bool allReady = true;
- SArray *pNodeSnapshot = NULL;
-
- int32_t code = mndTakeVgroupSnapshot(pMnode, &allReady, &pNodeSnapshot);
+ code = mndTakeVgroupSnapshot(pMnode, &allReady, &pNodeSnapshot);
taosArrayDestroy(pNodeSnapshot);
if (code) {
mError("failed to get the vgroup snapshot, ignore it and continue");
@@ -2735,28 +2752,30 @@ int32_t mndProcessConsensusInTmr(SRpcMsg *pMsg) {
if (!allReady) {
mWarn("not all vnodes are ready, end to process the consensus-checkpointId in tmr process");
taosArrayDestroy(pStreamList);
+ taosArrayDestroy(pList);
return 0;
}
streamMutexLock(&execInfo.lock);
- void *pIter = NULL;
while ((pIter = taosHashIterate(execInfo.pStreamConsensus, pIter)) != NULL) {
SCheckpointConsensusInfo *pInfo = (SCheckpointConsensusInfo *)pIter;
- int64_t streamId = -1;
- int32_t num = taosArrayGetSize(pInfo->pTaskList);
- SArray *pList = taosArrayInit(4, sizeof(int32_t));
- if (pList == NULL) {
- continue;
- }
+ taosArrayClear(pList);
+ int64_t streamId = -1;
+ int32_t num = taosArrayGetSize(pInfo->pTaskList);
SStreamObj *pStream = NULL;
+
code = mndGetStreamObj(pMnode, pInfo->streamId, &pStream);
if (pStream == NULL || code != 0) { // stream has been dropped already
mDebug("stream:0x%" PRIx64 " dropped already, continue", pInfo->streamId);
void *p = taosArrayPush(pStreamList, &pInfo->streamId);
- taosArrayDestroy(pList);
+ if (p == NULL) {
+ mError("failed to record the missing stream id in concensus-stream list, streamId:%" PRId64
+ " code:%s, continue",
+ pInfo->streamId, tstrerror(terrno));
+ }
continue;
}
@@ -2766,7 +2785,9 @@ int32_t mndProcessConsensusInTmr(SRpcMsg *pMsg) {
continue;
}
- streamId = pe->req.streamId;
+ if (streamId == -1) {
+ streamId = pe->req.streamId;
+ }
int32_t existed = 0;
bool allSame = true;
@@ -2777,7 +2798,7 @@ int32_t mndProcessConsensusInTmr(SRpcMsg *pMsg) {
break;
}
- if (((now - pe->ts) >= 10 * 1000) || allSame) {
+ if (((now - pe->ts) >= 10 * 1000) && allSame) {
mDebug("s-task:0x%x sendTs:%" PRId64 " wait %.2fs and all tasks have same checkpointId", pe->req.taskId,
pe->req.startTs, (now - pe->ts) / 1000.0);
if (chkId > pe->req.checkpointId) {
@@ -2785,8 +2806,12 @@ int32_t mndProcessConsensusInTmr(SRpcMsg *pMsg) {
taosArrayDestroy(pStreamList);
mError("s-task:0x%x checkpointId:%" PRId64 " is updated to %" PRId64 ", update it", pe->req.taskId,
pe->req.checkpointId, chkId);
+
+ mndReleaseStream(pMnode, pStream);
+ taosHashCancelIterate(execInfo.pStreamConsensus, pIter);
return TSDB_CODE_FAILED;
}
+
code = mndCreateSetConsensusChkptIdTrans(pMnode, pStream, pe->req.taskId, chkId, pe->req.startTs);
if (code != TSDB_CODE_SUCCESS && code != TSDB_CODE_ACTION_IN_PROGRESS) {
mError("failed to create consensus-checkpoint trans, stream:0x%" PRIx64, pStream->uid);
@@ -2796,7 +2821,6 @@ int32_t mndProcessConsensusInTmr(SRpcMsg *pMsg) {
if (p == NULL) {
mError("failed to put into task list, taskId:0x%x", pe->req.taskId);
}
- streamId = pe->req.streamId;
} else {
mDebug("s-task:0x%x sendTs:%" PRId64 " wait %.2fs already, wait for next round to check", pe->req.taskId,
pe->req.startTs, (now - pe->ts) / 1000.0);
@@ -2805,38 +2829,27 @@ int32_t mndProcessConsensusInTmr(SRpcMsg *pMsg) {
mndReleaseStream(pMnode, pStream);
- if (taosArrayGetSize(pList) > 0) {
- for (int32_t i = 0; i < taosArrayGetSize(pList); ++i) {
- int32_t *taskId = taosArrayGet(pList, i);
- if (taskId == NULL) {
- continue;
- }
-
- for (int32_t k = 0; k < taosArrayGetSize(pInfo->pTaskList); ++k) {
- SCheckpointConsensusEntry *pe = taosArrayGet(pInfo->pTaskList, k);
- if ((pe != NULL) && (pe->req.taskId == *taskId)) {
- taosArrayRemove(pInfo->pTaskList, k);
- break;
- }
- }
- }
- }
-
- taosArrayDestroy(pList);
+ int32_t alreadySend = doCleanReqList(pList, pInfo);
+ // clear request stream item with empty task list
if (taosArrayGetSize(pInfo->pTaskList) == 0) {
mndClearConsensusRspEntry(pInfo);
if (streamId == -1) {
- streamMutexUnlock(&execInfo.lock);
- taosArrayDestroy(pStreamList);
- mError("streamId is -1, streamId:%" PRIx64, pInfo->streamId);
- return TSDB_CODE_FAILED;
+ mError("streamId is -1, streamId:%" PRIx64" in consensus-checkpointId hashMap, cont", pInfo->streamId);
}
+
void *p = taosArrayPush(pStreamList, &streamId);
if (p == NULL) {
- mError("failed to put into stream list, stream:0x%" PRIx64, streamId);
+ mError("failed to put into stream list, stream:0x%" PRIx64 " not remove it in consensus-chkpt list", streamId);
}
}
+
+ numOfTrans += alreadySend;
+ if (numOfTrans > maxAllowedTrans) {
+ mInfo("already send consensus-checkpointId trans:%d, try next time", alreadySend);
+ taosHashCancelIterate(execInfo.pStreamConsensus, pIter);
+ break;
+ }
}
for (int32_t i = 0; i < taosArrayGetSize(pStreamList); ++i) {
@@ -2851,7 +2864,9 @@ int32_t mndProcessConsensusInTmr(SRpcMsg *pMsg) {
streamMutexUnlock(&execInfo.lock);
taosArrayDestroy(pStreamList);
- mDebug("end to process consensus-checkpointId in tmr");
+ taosArrayDestroy(pList);
+
+ mDebug("end to process consensus-checkpointId in tmr, send consensus-checkpoint trans:%d", numOfTrans);
return code;
}
diff --git a/source/dnode/mnode/impl/src/mndStreamUtil.c b/source/dnode/mnode/impl/src/mndStreamUtil.c
index 8703ff96aa..0caaccc28d 100644
--- a/source/dnode/mnode/impl/src/mndStreamUtil.c
+++ b/source/dnode/mnode/impl/src/mndStreamUtil.c
@@ -814,17 +814,18 @@ int32_t mndScanCheckpointReportInfo(SRpcMsg *pReq) {
int32_t mndCreateSetConsensusChkptIdTrans(SMnode *pMnode, SStreamObj *pStream, int32_t taskId, int64_t checkpointId,
int64_t ts) {
- char msg[128] = {0};
+ char msg[128] = {0};
+ STrans *pTrans = NULL;
+ SStreamTask *pTask = NULL;
+
snprintf(msg, tListLen(msg), "set consen-chkpt-id for task:0x%x", taskId);
- STrans *pTrans = NULL;
int32_t code = doCreateTrans(pMnode, pStream, NULL, TRN_CONFLICT_NOTHING, MND_STREAM_CHKPT_CONSEN_NAME, msg, &pTrans);
if (pTrans == NULL || code != 0) {
return terrno;
}
- STaskId id = {.streamId = pStream->uid, .taskId = taskId};
- SStreamTask *pTask = NULL;
+ STaskId id = {.streamId = pStream->uid, .taskId = taskId};
code = mndGetStreamTask(&id, pStream, &pTask);
if (code) {
mError("failed to get task:0x%x in stream:%s, failed to create consensus-checkpointId", taskId, pStream->name);
diff --git a/source/dnode/vnode/src/meta/metaCache.c b/source/dnode/vnode/src/meta/metaCache.c
index 93347c810f..9b85ca0b45 100644
--- a/source/dnode/vnode/src/meta/metaCache.c
+++ b/source/dnode/vnode/src/meta/metaCache.c
@@ -402,6 +402,7 @@ int32_t metaStatsCacheUpsert(SMeta* pMeta, SMetaStbStats* pInfo) {
if (*ppEntry) { // update
(*ppEntry)->info.ctbNum = pInfo->ctbNum;
+ (*ppEntry)->info.colNum = pInfo->colNum;
} else { // insert
if (pCache->sStbStatsCache.nEntry >= pCache->sStbStatsCache.nBucket) {
TAOS_UNUSED(metaRehashStatsCache(pCache, 1));
diff --git a/source/dnode/vnode/src/meta/metaEntry2.c b/source/dnode/vnode/src/meta/metaEntry2.c
index 4e2c93ec2f..b23059079a 100644
--- a/source/dnode/vnode/src/meta/metaEntry2.c
+++ b/source/dnode/vnode/src/meta/metaEntry2.c
@@ -10,14 +10,16 @@
#include "meta.h"
+extern SDmNotifyHandle dmNotifyHdl;
+
int32_t metaCloneEntry(const SMetaEntry *pEntry, SMetaEntry **ppEntry);
void metaCloneEntryFree(SMetaEntry **ppEntry);
void metaDestroyTagIdxKey(STagIdxKey *pTagIdxKey);
int metaSaveJsonVarToIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry, const SSchema *pSchema);
int metaDelJsonVarFromIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry, const SSchema *pSchema);
-void metaTimeSeriesNotifyCheck(SMeta *pMeta);
int tagIdxKeyCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2);
+static void metaTimeSeriesNotifyCheck(SMeta *pMeta);
static int32_t metaGetChildUidsOfSuperTable(SMeta *pMeta, tb_uid_t suid, SArray **childList);
static int32_t metaFetchTagIdxKey(SMeta *pMeta, const SMetaEntry *pEntry, const SSchema *pTagColumn,
STagIdxKey **ppTagIdxKey, int32_t *pTagIdxKeySize);
@@ -990,6 +992,20 @@ static int32_t metaTtlIdxDelete(SMeta *pMeta, const SMetaHandleParam *pParam) {
return code;
}
+static void metaTimeSeriesNotifyCheck(SMeta *pMeta) {
+#if defined(TD_ENTERPRISE)
+ int64_t nTimeSeries = metaGetTimeSeriesNum(pMeta, 0);
+ int64_t deltaTS = nTimeSeries - pMeta->pVnode->config.vndStats.numOfReportedTimeSeries;
+ if (deltaTS > tsTimeSeriesThreshold) {
+ if (0 == atomic_val_compare_exchange_8(&dmNotifyHdl.state, 1, 2)) {
+ if (tsem_post(&dmNotifyHdl.sem) != 0) {
+ metaError("vgId:%d, failed to post semaphore, errno:%d", TD_VID(pMeta->pVnode), errno);
+ }
+ }
+ }
+#endif
+}
+
static int32_t (*metaTableOpFn[META_TABLE_MAX][META_TABLE_OP_MAX])(SMeta *pMeta, const SMetaHandleParam *pParam) =
{
[META_ENTRY_TABLE] =
@@ -1139,6 +1155,7 @@ static int32_t metaHandleNormalTableCreate(SMeta *pMeta, const SMetaEntry *pEntr
metaError("vgId:%d, failed to create table:%s since %s", TD_VID(pMeta->pVnode), pEntry->name, tstrerror(rc));
}
}
+ metaTimeSeriesNotifyCheck(pMeta);
} else {
metaErr(TD_VID(pMeta->pVnode), code);
}
@@ -1214,7 +1231,7 @@ static int32_t metaHandleChildTableCreate(SMeta *pMeta, const SMetaEntry *pEntry
if (ret < 0) {
metaErr(TD_VID(pMeta->pVnode), ret);
}
- pMeta->pVnode->config.vndStats.numOfNTimeSeries += (nCols - 1);
+ pMeta->pVnode->config.vndStats.numOfTimeSeries += (nCols > 0 ? nCols - 1 : 0);
}
if (!TSDB_CACHE_NO(pMeta->pVnode->config)) {
@@ -1228,7 +1245,7 @@ static int32_t metaHandleChildTableCreate(SMeta *pMeta, const SMetaEntry *pEntry
} else {
metaErr(TD_VID(pMeta->pVnode), code);
}
-
+ metaTimeSeriesNotifyCheck(pMeta);
metaFetchEntryFree(&pSuperEntry);
return code;
}
@@ -1595,6 +1612,10 @@ static int32_t metaHandleSuperTableUpdateImpl(SMeta *pMeta, SMetaHandleParam *pP
}
}
+ if (TSDB_CODE_SUCCESS == code) {
+ metaUpdateStbStats(pMeta, pEntry->uid, 0, pEntry->stbEntry.schemaRow.nCols - pOldEntry->stbEntry.schemaRow.nCols);
+ }
+
return code;
}
@@ -1673,7 +1694,16 @@ static int32_t metaHandleSuperTableUpdate(SMeta *pMeta, const SMetaEntry *pEntry
tsdbCacheInvalidateSchema(pTsdb, pEntry->uid, -1, pEntry->stbEntry.schemaRow.version);
}
-
+ if (updStat) {
+ int64_t ctbNum = 0;
+ int32_t ret = metaGetStbStats(pMeta->pVnode, pEntry->uid, &ctbNum, NULL);
+ if (ret < 0) {
+ metaError("vgId:%d, failed to get stb stats:%s uid:%" PRId64 " since %s", TD_VID(pMeta->pVnode), pEntry->name,
+ pEntry->uid, tstrerror(ret));
+ }
+ pMeta->pVnode->config.vndStats.numOfTimeSeries += (ctbNum * deltaCol);
+ if (deltaCol > 0) metaTimeSeriesNotifyCheck(pMeta);
+ }
metaFetchEntryFree(&pOldEntry);
return code;
}
@@ -1772,7 +1802,9 @@ static int32_t metaHandleNormalTableUpdate(SMeta *pMeta, const SMetaEntry *pEntr
#endif
tsdbCacheInvalidateSchema(pMeta->pVnode->pTsdb, 0, pEntry->uid, pEntry->ntbEntry.schemaRow.version);
}
- metaTimeSeriesNotifyCheck(pMeta);
+ int32_t deltaCol = pEntry->ntbEntry.schemaRow.nCols - pOldEntry->ntbEntry.schemaRow.nCols;
+ pMeta->pVnode->config.vndStats.numOfNTimeSeries += deltaCol;
+ if (deltaCol > 0) metaTimeSeriesNotifyCheck(pMeta);
metaFetchEntryFree(&pOldEntry);
return code;
}
diff --git a/source/dnode/vnode/src/meta/metaTable.c b/source/dnode/vnode/src/meta/metaTable.c
index 2dbc89f58f..25c98d0e56 100644
--- a/source/dnode/vnode/src/meta/metaTable.c
+++ b/source/dnode/vnode/src/meta/metaTable.c
@@ -261,20 +261,6 @@ _exception:
return code;
}
-void metaTimeSeriesNotifyCheck(SMeta *pMeta) {
-#if defined(TD_ENTERPRISE)
- int64_t nTimeSeries = metaGetTimeSeriesNum(pMeta, 0);
- int64_t deltaTS = nTimeSeries - pMeta->pVnode->config.vndStats.numOfReportedTimeSeries;
- if (deltaTS > tsTimeSeriesThreshold) {
- if (0 == atomic_val_compare_exchange_8(&dmNotifyHdl.state, 1, 2)) {
- if (tsem_post(&dmNotifyHdl.sem) != 0) {
- metaError("vgId:%d, failed to post semaphore, errno:%d", TD_VID(pMeta->pVnode), errno);
- }
- }
- }
-#endif
-}
-
static int32_t metaDropTables(SMeta *pMeta, SArray *tbUids) {
int32_t code = 0;
if (taosArrayGetSize(tbUids) == 0) return TSDB_CODE_SUCCESS;
diff --git a/source/dnode/vnode/src/meta/metaTable2.c b/source/dnode/vnode/src/meta/metaTable2.c
index 6ff4cd6fdc..abab15ff58 100644
--- a/source/dnode/vnode/src/meta/metaTable2.c
+++ b/source/dnode/vnode/src/meta/metaTable2.c
@@ -378,10 +378,6 @@ static int32_t metaCreateChildTable(SMeta *pMeta, int64_t version, SVCreateTbReq
pReq->ctb.suid, version);
}
return code;
-
-#if 0
- metaTimeSeriesNotifyCheck(pMeta);
-#endif
}
// Drop Child Table
@@ -489,9 +485,6 @@ static int32_t metaCreateNormalTable(SMeta *pMeta, int64_t version, SVCreateTbRe
__func__, __FILE__, __LINE__, tstrerror(code), pReq->uid, pReq->name, version);
}
TAOS_RETURN(code);
-#if 0
- metaTimeSeriesNotifyCheck(pMeta);
-#endif
}
// Drop Normal Table
diff --git a/source/dnode/vnode/src/tq/tqSink.c b/source/dnode/vnode/src/tq/tqSink.c
index 98ea92125c..7e0b118474 100644
--- a/source/dnode/vnode/src/tq/tqSink.c
+++ b/source/dnode/vnode/src/tq/tqSink.c
@@ -420,7 +420,7 @@ static int32_t doBuildAndSendCreateTableMsg(SVnode* pVnode, char* stbFullName, S
reqs.nReqs = taosArrayGetSize(reqs.pArray);
code = tqPutReqToQueue(pVnode, &reqs, encodeCreateChildTableForRPC, TDMT_VND_CREATE_TABLE);
if (code != TSDB_CODE_SUCCESS) {
- tqError("s-task:%s failed to send create table msg", id);
+ tqError("s-task:%s failed to send create table msg, code:%s", id, tstrerror(code));
}
_end:
@@ -859,6 +859,8 @@ int32_t doWaitForDstTableCreated(SVnode* pVnode, SStreamTask* pTask, STableSinkI
int32_t vgId = TD_VID(pVnode);
int64_t suid = pTask->outputInfo.tbSink.stbUid;
const char* id = pTask->id.idStr;
+ int32_t timeout = 300; // 5min
+ int64_t start = taosGetTimestampSec();
while (pTableSinkInfo->uid == 0) {
if (streamTaskShouldStop(pTask)) {
@@ -866,6 +868,12 @@ int32_t doWaitForDstTableCreated(SVnode* pVnode, SStreamTask* pTask, STableSinkI
return TSDB_CODE_STREAM_EXEC_CANCELLED;
}
+ int64_t waitingDuration = taosGetTimestampSec() - start;
+ if (waitingDuration > timeout) {
+ tqError("s-task:%s wait for table-creating:%s more than %dsec, failed", id, dstTableName, timeout);
+ return TSDB_CODE_PAR_TABLE_NOT_EXIST;
+ }
+
// wait for the table to be created
SMetaReader mr = {0};
metaReaderDoInit(&mr, pVnode->pMeta, META_READER_LOCK);
diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c
index b28431013c..abaa61744d 100644
--- a/source/dnode/vnode/src/vnd/vnodeSvr.c
+++ b/source/dnode/vnode/src/vnd/vnodeSvr.c
@@ -950,20 +950,8 @@ int32_t vnodeProcessStreamMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo)
return tqProcessTaskRetrieveRsp(pVnode->pTq, pMsg);
case TDMT_VND_STREAM_SCAN_HISTORY:
return tqProcessTaskScanHistory(pVnode->pTq, pMsg);
- case TDMT_STREAM_TASK_CHECKPOINT_READY:
- return tqProcessTaskCheckpointReadyMsg(pVnode->pTq, pMsg);
- case TDMT_STREAM_TASK_CHECKPOINT_READY_RSP:
- return tqProcessTaskCheckpointReadyRsp(pVnode->pTq, pMsg);
- case TDMT_STREAM_RETRIEVE_TRIGGER:
- return tqProcessTaskRetrieveTriggerReq(pVnode->pTq, pMsg);
- case TDMT_STREAM_RETRIEVE_TRIGGER_RSP:
- return tqProcessTaskRetrieveTriggerRsp(pVnode->pTq, pMsg);
- case TDMT_MND_STREAM_REQ_CHKPT_RSP:
- return tqProcessStreamReqCheckpointRsp(pVnode->pTq, pMsg);
case TDMT_VND_GET_STREAM_PROGRESS:
return tqStreamProgressRetrieveReq(pVnode->pTq, pMsg);
- case TDMT_MND_STREAM_CHKPT_REPORT_RSP:
- return tqProcessTaskChkptReportRsp(pVnode->pTq, pMsg);
default:
vError("unknown msg type:%d in stream queue", pMsg->msgType);
return TSDB_CODE_APP_ERROR;
@@ -990,6 +978,18 @@ int32_t vnodeProcessStreamCtrlMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pIn
return tqProcessTaskCheckReq(pVnode->pTq, pMsg);
case TDMT_VND_STREAM_TASK_CHECK_RSP:
return tqProcessTaskCheckRsp(pVnode->pTq, pMsg);
+ case TDMT_STREAM_TASK_CHECKPOINT_READY:
+ return tqProcessTaskCheckpointReadyMsg(pVnode->pTq, pMsg);
+ case TDMT_STREAM_TASK_CHECKPOINT_READY_RSP:
+ return tqProcessTaskCheckpointReadyRsp(pVnode->pTq, pMsg);
+ case TDMT_STREAM_RETRIEVE_TRIGGER:
+ return tqProcessTaskRetrieveTriggerReq(pVnode->pTq, pMsg);
+ case TDMT_STREAM_RETRIEVE_TRIGGER_RSP:
+ return tqProcessTaskRetrieveTriggerRsp(pVnode->pTq, pMsg);
+ case TDMT_MND_STREAM_REQ_CHKPT_RSP:
+ return tqProcessStreamReqCheckpointRsp(pVnode->pTq, pMsg);
+ case TDMT_MND_STREAM_CHKPT_REPORT_RSP:
+ return tqProcessTaskChkptReportRsp(pVnode->pTq, pMsg);
default:
vError("unknown msg type:%d in stream ctrl queue", pMsg->msgType);
return TSDB_CODE_APP_ERROR;
diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c
index 142529830a..dac6446402 100755
--- a/source/libs/parser/src/parTranslater.c
+++ b/source/libs/parser/src/parTranslater.c
@@ -3911,7 +3911,7 @@ static EDealRes rewriteColsToSelectValFuncImpl(SNode** pNode, void* pContext) {
static int32_t rewriteColsToSelectValFunc(STranslateContext* pCxt, SSelectStmt* pSelect) {
nodesRewriteExprs(pSelect->pProjectionList, rewriteColsToSelectValFuncImpl, pCxt);
- if (TSDB_CODE_SUCCESS == pCxt->errCode && !pSelect->isDistinct) {
+ if (TSDB_CODE_SUCCESS == pCxt->errCode) {
nodesRewriteExprs(pSelect->pOrderByList, rewriteColsToSelectValFuncImpl, pCxt);
}
return pCxt->errCode;
diff --git a/source/libs/stream/inc/streamInt.h b/source/libs/stream/inc/streamInt.h
index f922a5e03e..d9778a6a05 100644
--- a/source/libs/stream/inc/streamInt.h
+++ b/source/libs/stream/inc/streamInt.h
@@ -38,7 +38,7 @@ extern "C" {
#define META_HB_SEND_IDLE_COUNTER 25 // send hb every 5 sec
#define STREAM_TASK_KEY_LEN ((sizeof(int64_t)) << 1)
#define STREAM_TASK_QUEUE_CAPACITY 5120
-#define STREAM_TASK_QUEUE_CAPACITY_IN_SIZE (30)
+#define STREAM_TASK_QUEUE_CAPACITY_IN_SIZE (10)
// clang-format off
#define stFatal(...) do { if (stDebugFlag & DEBUG_FATAL) { taosPrintLog("STM FATAL ", DEBUG_FATAL, 255, __VA_ARGS__); }} while(0)
diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c
index baf36d0453..41773ee42d 100644
--- a/source/libs/stream/src/streamDispatch.c
+++ b/source/libs/stream/src/streamDispatch.c
@@ -131,12 +131,12 @@ int32_t streamTaskBroadcastRetrieveReq(SStreamTask* pTask, SStreamRetrieveReq* r
code = tmsgSendReq(&pEpInfo->epSet, &rpcMsg);
if (code != 0) {
- rpcFreeCont(buf);
- return code;
+ stError("s-task:%s (child %d) failed to send retrieve req to task:0x%x (vgId:%d) QID:0x%" PRIx64 " code:%s",
+ pTask->id.idStr, pTask->info.selfChildId, pEpInfo->taskId, pEpInfo->nodeId, req->reqId, tstrerror(code));
+ } else {
+ stDebug("s-task:%s (child %d) send retrieve req to task:0x%x (vgId:%d),QID:0x%" PRIx64, pTask->id.idStr,
+ pTask->info.selfChildId, pEpInfo->taskId, pEpInfo->nodeId, req->reqId);
}
-
- stDebug("s-task:%s (child %d) send retrieve req to task:0x%x (vgId:%d),QID:0x%" PRIx64, pTask->id.idStr,
- pTask->info.selfChildId, pEpInfo->taskId, pEpInfo->nodeId, req->reqId);
}
return code;
diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c
index 5e099712ca..ee34648a47 100644
--- a/source/libs/stream/src/streamExec.c
+++ b/source/libs/stream/src/streamExec.c
@@ -807,6 +807,8 @@ static int32_t doStreamExecTask(SStreamTask* pTask) {
return 0;
}
+ int64_t st = taosGetTimestampMs();
+
EExtractDataCode ret = streamTaskGetDataFromInputQ(pTask, &pInput, &numOfBlocks, &blockSize);
if (ret == EXEC_AFTER_IDLE) {
streamTaskSetIdleInfo(pTask, MIN_INVOKE_INTERVAL);
@@ -841,8 +843,6 @@ static int32_t doStreamExecTask(SStreamTask* pTask) {
continue;
}
- int64_t st = taosGetTimestampMs();
-
// here only handle the data block sink operation
if (type == STREAM_INPUT__DATA_BLOCK) {
pTask->execInfo.sink.dataSize += blockSize;
@@ -873,6 +873,13 @@ static int32_t doStreamExecTask(SStreamTask* pTask) {
if (code) {
return code;
}
+
+ double el = (taosGetTimestampMs() - st) / 1000.0;
+ if (el > 5.0) { // elapsed more than 5 sec, not occupy the CPU anymore
+ stDebug("s-task:%s occupy more than 5.0s, release the exec threads and idle for 500ms", id);
+ streamTaskSetIdleInfo(pTask, 500);
+ return code;
+ }
}
}
}
diff --git a/tests/script/tsim/show/basic.sim b/tests/script/tsim/show/basic.sim
index 0acb97f1b6..595c386548 100644
--- a/tests/script/tsim/show/basic.sim
+++ b/tests/script/tsim/show/basic.sim
@@ -230,7 +230,7 @@ endi
sql_error show create stable t0;
sql show variables;
-if $rows != 88 then
+if $rows != 87 then
return -1
endi
diff --git a/tests/script/tsim/stream/checkpointInterval0.sim b/tests/script/tsim/stream/checkpointInterval0.sim
index a5e5c87704..d560edfab5 100644
--- a/tests/script/tsim/stream/checkpointInterval0.sim
+++ b/tests/script/tsim/stream/checkpointInterval0.sim
@@ -190,7 +190,7 @@ system sh/exec.sh -n dnode1 -s start
sql insert into t1 values(1648791223004,5,2,3,1.1);
loop4:
-sleep 1000
+run tsim/stream/checkTaskStatus.sim
sql select * from streamt;
diff --git a/tests/script/tsim/valgrind/checkError1.sim b/tests/script/tsim/valgrind/checkError1.sim
index 8ac43ebaf3..54427986a5 100644
--- a/tests/script/tsim/valgrind/checkError1.sim
+++ b/tests/script/tsim/valgrind/checkError1.sim
@@ -120,7 +120,7 @@ if $rows != 3 then
endi
sql show variables;
-if $rows != 88 then
+if $rows != 87 then
return -1
endi
diff --git a/tests/system-test/0-others/grant.py b/tests/system-test/0-others/grant.py
index 490541539f..25af6eb842 100644
--- a/tests/system-test/0-others/grant.py
+++ b/tests/system-test/0-others/grant.py
@@ -135,8 +135,38 @@ class TDTestCase:
port = dnode.cfgDict["serverPort"]
config_dir = dnode.cfgDir
return taos.connect(host=host, port=int(port), config=config_dir)
+
+ def getShowGrantsTimeSeries(self, maxRetry=10):
+ for nRetry in range(maxRetry):
+ tdSql.query("show grants")
+ timeseries = tdSql.queryResult[0][5]
+ tdSql.query("show grants full")
+ full_timeseries = tdSql.queryResult[1][3]
+ if timeseries == full_timeseries:
+ return int(timeseries.split('/')[0])
+ else:
+ tdLog.info(f"timeseries: {timeseries}, != full_timeseries: {full_timeseries}, retry: {nRetry}")
+ time.sleep(1)
+ raise Exception("Timeseries not equal within {maxRetry} seconds")
- def s1_check_alive(self):
+ def getTablesTimeSeries(self):
+ tdSql.query(f"select cast(sum(columns-1) as int) as tss from information_schema.ins_tables where db_name not in ('information_schema', 'performance_schema', 'audit')")
+ return int(tdSql.queryResult[0][0])
+
+ def checkGrantsTimeSeries(self, prompt="", nExpectedTimeSeries=0, maxRetry=10):
+ for nRetry in range(maxRetry):
+ tss_grant = self.getShowGrantsTimeSeries()
+ if tss_grant == nExpectedTimeSeries:
+ tss_table = self.getTablesTimeSeries()
+ if tss_grant == tss_table:
+ tdLog.info(f"{prompt}: tss_grant: {tss_grant} == tss_table: {tss_table}")
+ return
+ else:
+ raise Exception(f"{prompt}: tss_grant: {tss_grant} != tss_table: {tss_table}")
+ time.sleep(1)
+ raise Exception(f"{prompt}: tss_grant: {tss_grant} != nExpectedTimeSeries: {nExpectedTimeSeries}")
+
+ def s1_check_timeseries(self):
# check cluster alive
tdLog.printNoPrefix("======== test cluster alive: ")
tdSql.checkDataLoop(0, 0, 1, "show cluster alive;", 20, 0.5)
@@ -144,6 +174,46 @@ class TDTestCase:
tdSql.query("show db.alive;")
tdSql.checkData(0, 0, 1)
+ # check timeseries
+ tss_grant = 5
+ for i in range(0, 3):
+ tdLog.printNoPrefix(f"======== test timeseries: loop{i}")
+ self.checkGrantsTimeSeries("initial check", tss_grant)
+ tdSql.execute("create database if not exists db100")
+ tdSql.execute("create table db100.stb100(ts timestamp, c0 int,c1 bigint,c2 int,c3 float,c4 double) tags(t0 bigint unsigned)")
+ tdSql.execute("create table db100.ctb100 using db100.stb100 tags(100)")
+ tdSql.execute("create table db100.ctb101 using db100.stb100 tags(101)")
+ tdSql.execute("create table db100.ntb100 (ts timestamp, c0 int,c1 bigint,c2 int,c3 float,c4 double)")
+ tdSql.execute("create table db100.ntb101 (ts timestamp, c0 int,c1 bigint,c2 int,c3 float,c4 double)")
+ tss_grant += 20
+ self.checkGrantsTimeSeries("create tables and check", tss_grant)
+ tdSql.execute("alter table db100.stb100 add column c5 int")
+ tdSql.execute("alter stable db100.stb100 add column c6 int")
+ tdSql.execute("alter table db100.stb100 add tag t1 int")
+ tss_grant += 4
+ self.checkGrantsTimeSeries("add stable column and check", tss_grant)
+ tdSql.execute("create table db100.ctb102 using db100.stb100 tags(102, 102)")
+ tdSql.execute("alter table db100.ctb100 set tag t0=1000")
+ tdSql.execute("alter table db100.ntb100 add column c5 int")
+ tss_grant += 8
+ self.checkGrantsTimeSeries("add ntable column and check", tss_grant)
+ tdSql.execute("alter table db100.stb100 drop column c5")
+ tdSql.execute("alter table db100.stb100 drop tag t1")
+ tdSql.execute("alter table db100.ntb100 drop column c0")
+ tdSql.execute("alter table db100.stb100 drop column c0")
+ tss_grant -= 7
+ self.checkGrantsTimeSeries("drop stb/ntb column and check", tss_grant)
+ tdSql.execute("drop table db100.ctb100")
+ tdSql.execute("drop table db100.ntb100")
+ tss_grant -= 10
+ self.checkGrantsTimeSeries("drop ctb/ntb and check", tss_grant)
+ tdSql.execute("drop table db100.stb100")
+ tss_grant -= 10
+ self.checkGrantsTimeSeries("drop stb and check", tss_grant)
+ tdSql.execute("drop database db100")
+ tss_grant -= 5
+ self.checkGrantsTimeSeries("drop database and check", tss_grant)
+
def s2_check_show_grants_ungranted(self):
tdLog.printNoPrefix("======== test show grants ungranted: ")
self.infoPath = os.path.join(self.workPath, ".clusterInfo")
@@ -221,7 +291,7 @@ class TDTestCase:
# print(self.master_dnode.cfgDict)
# keep the order of following steps
self.s0_five_dnode_one_mnode()
- self.s1_check_alive()
+ self.s1_check_timeseries()
self.s2_check_show_grants_ungranted()
self.s3_check_show_grants_granted()
diff --git a/tests/system-test/2-query/db.py b/tests/system-test/2-query/db.py
index 895df852c7..f380fdf00b 100644
--- a/tests/system-test/2-query/db.py
+++ b/tests/system-test/2-query/db.py
@@ -47,7 +47,7 @@ class TDTestCase:
def case2(self):
tdSql.query("show variables")
- tdSql.checkRows(88)
+ tdSql.checkRows(87)
for i in range(self.replicaVar):
tdSql.query("show dnode %d variables like 'debugFlag'" % (i + 1))
diff --git a/tests/system-test/2-query/distinct.py b/tests/system-test/2-query/distinct.py
index 5025b39753..bef1c18ad8 100644
--- a/tests/system-test/2-query/distinct.py
+++ b/tests/system-test/2-query/distinct.py
@@ -255,7 +255,31 @@ class TDTestCase:
tdSql.error(f"select distinct t1, t0 from (select t1,t0 from {dbname}.stb1 where t0 > 2 group by ts) where t1 < 3")
tdSql.query(f"select distinct stb1.t1, stb1.t2 from {dbname}.stb1, {dbname}.stb2 where stb1.ts=stb2.ts and stb1.t2=stb2.t4")
tdSql.query(f"select distinct t1.t1, t1.t2 from {dbname}.t1, {dbname}.t2 where t1.ts=t2.ts ")
+
+ self.ts5971()
+ def ts5971(self):
+ dbname = "db"
+
+ tdSql.execute(f"DROP TABLE IF EXISTS {dbname}.t5971")
+ tdSql.execute(f"create table {dbname}.t5971 (time TIMESTAMP, c1 INT)")
+ tdSql.execute(f"INSERT INTO {dbname}.t5971(time, c1) VALUES (1641024000000, 1), (1641024005000, 2)")
+ tdSql.query(f"SELECT DISTINCT CSUM(c1), time FROM {dbname}.t5971 ORDER BY time")
+ tdSql.checkRows(2)
+ tdSql.checkData(0, 0, 1)
+ tdSql.checkData(0, 1, 1641024000000)
+ tdSql.checkData(1, 0, 3)
+ tdSql.checkData(1, 1, 1641024005000)
+
+ tdSql.query(f"SELECT DISTINCT CSUM(c1), time AS ref FROM {dbname}.t5971 ORDER BY ref")
+ tdSql.checkRows(2)
+ tdSql.checkData(0, 0, 1)
+ tdSql.checkData(0, 1, 1641024000000)
+ tdSql.checkData(1, 0, 3)
+ tdSql.checkData(1, 1, 1641024005000)
+
+ tdSql.query(f"SELECT DISTINCT CSUM(c1), time FROM {dbname}.t5971")
+ tdSql.checkRows(2)
def stop(self):
diff --git a/tools/keeper/README-CN.md b/tools/keeper/README-CN.md
index 770e9513c1..61ed631d8a 100644
--- a/tools/keeper/README-CN.md
+++ b/tools/keeper/README-CN.md
@@ -1,267 +1,123 @@
-# TaosKeeper
+
+# taosKeeper
-taosKeeper 是 TDengine 各项监控指标的导出工具,通过简单的几项配置即可获取 TDengine 的运行状态。并且 taosKeeper 企业版支持多种收集器,可以方便进行监控数据的展示。
+[](https://github.com/taosdata/TDengine/actions/workflows/taoskeeper-ci-build.yml)
+
+
+
+
+[](https://twitter.com/tdenginedb)
+[](https://www.youtube.com/@tdengine)
+[](https://discord.com/invite/VZdSuUg4pS)
+[](https://www.linkedin.com/company/tdengine)
+[](https://stackoverflow.com/questions/tagged/tdengine)
-taosKeeper 使用 TDengine RESTful 接口,所以不需要安装 TDengine 客户端即可使用。
+简体中文 | [English](./README.md)
-## 构建
+
+## 目录
-### 获取源码
+- [1. 简介](#1-简介)
+- [2. 文档](#2-文档)
+- [3. 前置条件](#3-前置条件)
+- [4. 构建](#4-构建)
+- [5. 测试](#5-测试)
+ - [5.1 运行测试](#51-运行测试)
+ - [5.2 添加用例](#52-添加用例)
+ - [5.3 性能测试](#53-性能测试)
+- [6. CI/CD](#6-cicd)
+- [7. 提交 Issues](#7-提交-issues)
+- [8. 提交 PR](#8-提交-pr)
+- [9. 引用](#9-引用)
+- [10. 许可证](#10-许可证)
-从 GitHub 克隆源码:
+## 1. 简介
-```sh
-git clone https://github.com/taosdata/TDengine
-cd TDengine/tools/keeper
-```
+taosKeeper 是 TDengine 3.0 版本全新引入的监控指标导出工具,旨在方便用户对 TDengine 的运行状态和性能指标进行实时监控。只需进行简单配置,TDengine 就能将自身的运行状态和各项指标等信息上报给 taosKeeper。taosKeeper 在接收到监控数据后,会利用 taosAdapter 提供的 RESTful 接口,将这些数据存储到 TDengine 中。
-### 编译
+taosKeeper 的一个重要价值在于,它能够将多个甚至一批 TDengine 集群的监控数据集中存储到一个统一的平台。如此一来,监控软件便能轻松获取这些数据,进而实现对 TDengine 集群的全面监控与实时分析。通过 taosKeeper,用户可以更加便捷地了解 TDengine 的运行状况,及时发现并解决潜在问题,确保系统的稳定性和高效性。
-taosKeeper 使用 `GO` 语言编写,在构建前需要配置好 `GO` 语言开发环境。
+## 2. 文档
-```sh
-go mod tidy
+- 使用 taosKeeper,请参考 [taosKeeper 参考手册](https://docs.taosdata.com/reference/components/taoskeeper/),其中包括安装、配置、启动、数据收集与监控,以及集成 Prometheus 等方面的内容。
+- 本 README 主要面向希望自行贡献代码、编译和测试 taosKeeper 的开发者。如果想要学习 TDengine,可以浏览 [官方文档](https://docs.taosdata.com/)。
+
+## 3. 前置条件
+
+1. 已安装 Go 1.18 及以上版本。
+2. 本地已部署 TDengine,具体步骤请参考 [部署服务端](https://docs.taosdata.com/get-started/package/),且已启动 taosd 与 taosAdapter。
+
+## 4. 构建
+
+在 `TDengine/tools/keeper` 目录下运行以下命令以构建项目:
+
+```bash
go build
```
-## 安装
+## 5. 测试
-如果是自行构建的项目,仅需要拷贝 `taoskeeper` 文件到你的 `PATH` 中。
+### 5.1 运行测试
-```sh
-sudo install taoskeeper /usr/bin/
+在 `TDengine/tools/keeper` 目录下执行以下命令运行测试:
+
+```bash
+sudo go test ./...
```
-## 启动
+测试用例将连接到本地的 TDengine 服务器和 taosAdapter 进行测试。测试完成后,你将看到类似如下的结果摘要。如果所有测试用例均通过,输出中将不会出现 `FAIL` 字样。
-在启动前,应该做好如下配置:
-在 `/etc/taos/taoskeeper.toml` 配置 TDengine 连接参数以及监控指标前缀等其他信息。
-
-```toml
-# gin 框架是否启用 debug
-debug = false
-
-# 服务监听端口, 默认为 6043
-port = 6043
-
-# 日志级别,包含 panic、error、info、debug、trace等
-loglevel = "info"
-
-# 程序中使用协程池的大小
-gopoolsize = 50000
-
-# 查询 TDengine 监控数据轮询间隔
-RotationInterval = "15s"
-
-[tdengine]
-host = "127.0.0.1"
-port = 6041
-username = "root"
-password = "taosdata"
-
-# 需要被监控的 taosAdapter
-[taosAdapter]
-address = ["127.0.0.1:6041"]
-
-[metrics]
-# 监控指标前缀
-prefix = "taos"
-
-# 存放监控数据的数据库
-database = "log"
-
-# 指定需要监控的普通表
-tables = []
-
-[environment]
-# 是否在容器中运行,影响 taosKeeper 自身的监控数据
-incgroup = false
+```text
+ok github.com/taosdata/taoskeeper/api 17.405s
+ok github.com/taosdata/taoskeeper/cmd 1.819s
+ok github.com/taosdata/taoskeeper/db 0.484s
+ok github.com/taosdata/taoskeeper/infrastructure/config 0.417s
+ok github.com/taosdata/taoskeeper/infrastructure/log 0.785s
+ok github.com/taosdata/taoskeeper/monitor 4.623s
+ok github.com/taosdata/taoskeeper/process 0.606s
+ok github.com/taosdata/taoskeeper/system 3.420s
+ok github.com/taosdata/taoskeeper/util 0.097s
+ok github.com/taosdata/taoskeeper/util/pool 0.146s
```
-现在可以启动服务,输入:
+### 5.2 添加用例
-```sh
-taoskeeper
-```
+在以 `_test.go` 结尾的文件中添加测试用例,并且确保新增代码都有对应的测试用例覆盖。
-如果你使用 `systemd`,复制 `taoskeeper.service` 到 `/lib/systemd/system/`,并启动服务。
+### 5.3 性能测试
-```sh
-sudo cp taoskeeper.service /lib/systemd/system/
-sudo systemctl daemon-reload
-sudo systemctl start taoskeeper
-```
+性能测试正在开发中。
-让 taosKeeper 随系统开机自启动。
+## 6. CI/CD
-```sh
-sudo systemctl enable taoskeeper
-```
+- [Build Workflow](https://github.com/taosdata/TDengine/actions/workflows/taoskeeper-ci-build.yml)
+- Code Coverage - TODO
-如果使用 `systemd`,你可以使用如下命令完成安装。
+## 7. 提交 Issues
-```sh
-go mod tidy
-go build
-sudo install taoskeeper /usr/bin/
-sudo cp taoskeeper.service /lib/systemd/system/
-sudo systemctl daemon-reload
-sudo systemctl start taoskeeper
-sudo systemctl enable taoskeeper
-```
+我们欢迎提交 [GitHub Issue](https://github.com/taosdata/TDengine/issues)。提交时请尽量提供以下信息,以便快速定位问题:
-## Docker
+- 问题描述:具体问题表现及是否必现,建议附上详细调用堆栈或日志信息。
+- taosKeeper 版本:可通过 `taoskeeper -V` 获取版本信息。
+- TDengine 服务端版本:可通过 `taos -V` 获取版本信息。
-如下介绍了如何在 docker 中构建 taosKeeper:
+如有其它相关信息(如环境配置、操作系统版本等),请一并补充,以便我们更全面地了解问题。
-在构建前请配置好 `./config/taoskeeper.toml` 中合适的参数,并编辑 Dockerfile ,示例如下。
+## 8. 提交 PR
-```dockerfile
-FROM golang:1.18.6-alpine as builder
+我们欢迎开发者共同参与本项目开发,提交 PR 时请按照以下步骤操作:
-WORKDIR /usr/src/taoskeeper
-COPY ./ /usr/src/taoskeeper/
-ENV GO111MODULE=on \
- GOPROXY=https://goproxy.cn,direct
-RUN go mod tidy && go build
+1. Fork 仓库:请先 Fork 本仓库,具体步骤请参考 [如何 Fork 仓库](https://docs.github.com/en/get-started/quickstart/fork-a-repo)。
+2. 创建新分支:基于 `main` 分支创建一个新分支,并使用有意义的分支名称(例如:`git checkout -b feature/my_feature`)。请勿直接在 main 分支上进行修改。
+3. 开发与测试:完成代码修改后,确保所有单元测试都能通过,并为新增功能或修复的 Bug 添加相应的测试用例。
+4. 提交代码:将修改提交到远程分支(例如:`git push origin feature/my_feature`)。
+5. 创建 Pull Request:在 GitHub 上发起 [Pull Request](https://github.com/taosdata/TDengine/pulls),具体步骤请参考 [如何创建 Pull Request](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request)。
+6. 检查 CI:提交 PR 后,可在 Pull Request 中找到自己提交的 PR,点击对应的链接,即可查看该 PR 的 CI 是否通过。若通过,会显示 `All checks have passed`。无论 CI 是否通过,均可点击 `Show all checks -> Details` 查看详细的测试用例日志。
-FROM alpine:3
-RUN mkdir -p /etc/taos
-COPY --from=builder /usr/src/taoskeeper/taoskeeper /usr/bin/
-COPY ./config/taoskeeper.toml /etc/taos/taoskeeper.toml
-EXPOSE 6043
-CMD ["taoskeeper"]
-```
+## 9. 引用
-如果已经有 taosKeeper 可执行文件,在配置好 `taoskeeper.toml` 后你可以使用如下方式构建:
+[TDengine 官网](https://www.taosdata.com/)
-```dockerfile
-FROM ubuntu:18.04
-RUN mkdir -p /etc/taos
-COPY ./taoskeeper /usr/bin/
-COPY ./taoskeeper.toml /etc/taos/taoskeeper.toml
-EXPOSE 6043
-CMD ["taoskeeper"]
-```
+## 10. 许可证
-## 使用(**企业版**)
-
-### Prometheus (by scrape)
-
-taosKeeper 可以像 `node-exporter` 一样向 Prometheus 提供监控指标。\
-在 `/etc/prometheus/prometheus.yml` 添加配置:
-
-```yml
-global:
- scrape_interval: 5s
-
-scrape_configs:
- - job_name: "taoskeeper"
- static_configs:
- - targets: ["taoskeeper:6043"]
-```
-
-现在使用 PromQL 查询即可以显示结果,比如要查看指定主机(通过 FQDN 正则匹配表达式筛选)硬盘使用百分比:
-
-```promql
-taos_dn_disk_used / taos_dn_disk_total {fqdn=~ "tdengine.*"}
-```
-
-你可以使用 `docker-compose` 测试完整的链路。
-`docker-compose.yml`示例:
-
-```yml
-version: "3.7"
-
-services:
- tdengine:
- image: tdengine/tdengine
- environment:
- TAOS_FQDN: tdengine
- volumes:
- - taosdata:/var/lib/taos
- taoskeeper:
- build: ./
- depends_on:
- - tdengine
- environment:
- TDENGINE_HOST: tdengine
- TDENGINE_PORT: 6041
- volumes:
- - ./config/taoskeeper.toml:/etc/taos/taoskeeper.toml
- ports:
- - 6043:6043
- prometheus:
- image: prom/prometheus
- volumes:
- - ./prometheus/:/etc/prometheus/
- ports:
- - 9090:9090
-volumes:
- taosdata:
-```
-
-启动:
-
-```sh
-docker-compose up -d
-```
-
-现在通过访问 来查询结果。访问[simple dashboard](https://grafana.com/grafana/dashboards/15164) 来查看TaosKeeper + Prometheus + Grafana 监控 TDengine 的快速启动实例。
-
-### Telegraf
-
-如果使用 telegraf 来收集各个指标,仅需要在配置中增加:
-
-```toml
-[[inputs.prometheus]]
-## An array of urls to scrape metrics from.
-urls = ["http://taoskeeper:6043/metrics"]
-```
-
-可以通过 `docker-compose` 来测试
-
-```sh
-docker-compose -f docker-compose.yml -f telegraf.yml up -d telegraf taoskeeper
-```
-
-由于可以在 `telegraf.conf` 设置日志为标准输出:
-
-```toml
-[[outputs.file]]
-files = ["stdout"]
-```
-
-所以你可以通过 `docker-compose logs` 在标准输出中追踪 TDengine 各项指标。
-
-```sh
-docker-compose -f docker-compose.yml -f telegraf.yml logs -f telegraf
-```
-
-### Zabbix
-
-1. 导入 zabbix 临时文件 `zbx_taos_keeper_templates.xml`。
-2. 使用 `TDengine` 模板来创建主机,修改宏 `{$TAOSKEEPER_HOST}` 和 `{$COLLECTION_INTERVAL}`。
-3. 等待并查看到自动创建的条目。
-
-### 常见问题
-
-* 启动报错,显示connection refused
-
- **解析**:taosKeeper 依赖 restful 接口查询数据,请检查 taosAdapter 是否正常运行或 taoskeeper.toml 中 taosAdapter 地址是否正确。
-
-* taosKeeper 监控不同 TDengine 显示的检测指标数目不一致?
-
- **解析**:如果 TDengine 中未创建某项指标,taoskeeper 不能获取对应的检测结果。
-
-* 不能接收到 TDengine 的监控日志。
-
- **解析**: 修改 `/etc/taos/taos.cfg` 文件并增加如下参数:
-
- ```cfg
- monitor 1 // 启用monitor
- monitorInterval 30 // 发送间隔 (s)
- monitorFqdn localhost // 接收消息的FQDN,默认为空
- monitorPort 6043 // 接收消息的端口号
- monitorMaxLogs 100 // 每个监控间隔缓存的最大日志数量
- ```
+[AGPL-3.0 License](../../LICENSE)
diff --git a/tools/keeper/README.md b/tools/keeper/README.md
index 18e351f160..c282611564 100644
--- a/tools/keeper/README.md
+++ b/tools/keeper/README.md
@@ -1,273 +1,123 @@
-# TaosKeeper
+
+# taosKeeper
-TDengine Metrics Exporter for Kinds of Collectors, you can obtain the running status of TDengine by performing several simple configurations.
+[](https://github.com/taosdata/TDengine/actions/workflows/taoskeeper-ci-build.yml)
+
+
+
+
+[](https://twitter.com/tdenginedb)
+[](https://www.youtube.com/@tdengine)
+[](https://discord.com/invite/VZdSuUg4pS)
+[](https://www.linkedin.com/company/tdengine)
+[](https://stackoverflow.com/questions/tagged/tdengine)
-This tool uses TDengine RESTful API, so you could just build it without TDengine client.
+English | [简体中文](./README-CN.md)
-## Build
+
+## Table of Contents
-### Get the source codes
+- [1. Introduction](#1-introduction)
+- [2. Documentation](#2-documentation)
+- [3. Prerequisites](#3-prerequisites)
+- [4. Build](#4-build)
+- [5. Testing](#5-testing)
+ - [5.1 Test Execution](#51-test-execution)
+ - [5.2 Test Case Addition](#52-test-case-addition)
+ - [5.3 Performance Testing](#53-performance-testing)
+- [6. CI/CD](#6-cicd)
+- [7. Submitting Issues](#7-submitting-issues)
+- [8. Submitting PR](#8-submitting-pr)
+- [9. References](#9-references)
+- [10. License](#10-license)
-```sh
-git clone https://github.com/taosdata/TDengine
-cd TDengine/tools/keeper
-```
+## 1. Introduction
-### compile
+taosKeeper is a new monitoring indicator export tool introduced in TDengine 3.0, which is designed to facilitate users to monitor the operating status and performance indicators of TDengine in real time. With simple configuration, TDengine can report its own operating status and various indicators to taosKeeper. After receiving the monitoring data, taosKeeper will use the RESTful interface provided by taosAdapter to store the data in TDengine.
-```sh
-go mod tidy
+An important value of taosKeeper is that it can store the monitoring data of multiple or even a batch of TDengine clusters in a unified platform. In this way, the monitoring software can easily obtain this data, and then realize comprehensive monitoring and real-time analysis of the TDengine cluster. Through taosKeeper, users can more easily understand the operation status of TDengine, discover and solve potential problems in a timely manner, and ensure the stability and efficiency of the system.
+
+## 2. Documentation
+
+- To use taosKeeper, please refer to the [taosKeeper Reference](https://docs.tdengine.com/tdengine-reference/components/taoskeeper/), which includes installation, configuration, startup, data collection and monitoring, and Prometheus integration.
+- This README is mainly for developers who want to contribute code, compile and test taosKeeper. If you want to learn TDengine, you can browse the [official documentation](https://docs.tdengine.com/).
+
+## 3. Prerequisites
+
+1. Go 1.18 or above has been installed.
+2. TDengine has been deployed locally. For specific steps, please refer to [Deploy TDengine](https://docs.tdengine.com/get-started/deploy-from-package/), and taosd and taosAdapter have been started.
+
+## 4. Build
+
+Run the following command in the `TDengine/tools/keeper` directory to build the project:
+
+```bash
go build
```
-## Install
+## 5. Testing
-If you build the tool by your self, just copy the `taoskeeper` binary to your `PATH`.
+### 5.1 Test Execution
-```sh
-sudo install taoskeeper /usr/bin/
+Run the test by executing the following command in the `TDengine/tools/keeper` directory:
+
+```bash
+sudo go test ./...
```
-## Start
+The test case will connect to the local TDengine server and taosAdapter for testing. After the test is completed, you will see a result summary similar to the following. If all test cases pass, there will be no `FAIL` in the output.
-Before start, you should configure some options like database ip, port or the prefix and others for exported metrics.
-
-in `/etc/taos/taoskeeper.toml`.
-
-```toml
-# Start with debug middleware for gin
-debug = false
-
-# Listen port, default is 6043
-port = 6043
-
-# log level
-loglevel = "info"
-
-# go pool size
-gopoolsize = 50000
-
-# interval for TDengine metrics
-RotationInterval = "15s"
-
-[tdengine]
-host = "127.0.0.1"
-port = 6041
-username = "root"
-password = "taosdata"
-
-# list of taosAdapter that need to be monitored
-[taosAdapter]
-address = ["127.0.0.1:6041"]
-
-[metrics]
-# metrics prefix in metrics names.
-prefix = "taos"
-
-# database for storing metrics data
-database = "log"
-
-# export some tables that are not super table
-tables = []
-
-[environment]
-# Whether running in cgroup.
-incgroup = false
+```text
+ok github.com/taosdata/taoskeeper/api 17.405s
+ok github.com/taosdata/taoskeeper/cmd 1.819s
+ok github.com/taosdata/taoskeeper/db 0.484s
+ok github.com/taosdata/taoskeeper/infrastructure/config 0.417s
+ok github.com/taosdata/taoskeeper/infrastructure/log 0.785s
+ok github.com/taosdata/taoskeeper/monitor 4.623s
+ok github.com/taosdata/taoskeeper/process 0.606s
+ok github.com/taosdata/taoskeeper/system 3.420s
+ok github.com/taosdata/taoskeeper/util 0.097s
+ok github.com/taosdata/taoskeeper/util/pool 0.146s
```
-Now you could run the tool:
+### 5.2 Test Case Addition
-```sh
-taoskeeper
-```
+Add test cases in files ending with `_test.go` and make sure the new code is covered by the corresponding test cases.
-If you use `systemd`, copy the `taoskeeper.service` to `/lib/systemd/system/` and start the service.
+### 5.3 Performance Testing
-```sh
-sudo cp taoskeeper.service /lib/systemd/system/
-sudo systemctl daemon-reload
-sudo systemctl start taoskeeper
-```
+Performance testing is under development.
-To start taoskeeper whenever os rebooted, you should enable the systemd service:
+## 6. CI/CD
-```sh
-sudo systemctl enable taoskeeper
-```
+- [Build Workflow](https://github.com/taosdata/TDengine/actions/workflows/taoskeeper-ci-build.yml)
+- Code Coverage - TODO
-So if use `systemd`, you'd better install it with these lines all-in-one:
+## 7. Submitting Issues
-```sh
-go mod tidy
-go build
-sudo install taoskeeper /usr/bin/
-sudo cp taoskeeper.service /lib/systemd/system/
-sudo systemctl daemon-reload
-sudo systemctl start taoskeeper
-sudo systemctl enable taoskeeper
-```
+We welcome submissions of [GitHub Issues](https://github.com/taosdata/TDengine/issues). Please provide the following information when submitting so that the problem can be quickly located:
-## Docker
+- Problem description: The specific problem manifestation and whether it must occur. It is recommended to attach detailed call stack or log information.
+- taosKeeper version: You can get the version information through `taoskeeper -V`.
+- TDengine server version: You can get the version information through `taos -V`.
-Here is an example to show how to build this tool in docker:
+If you have other relevant information (such as environment configuration, operating system version, etc.), please add it so that we can understand the problem more comprehensively.
-Before building, you should configure `./config/taoskeeper.toml` with proper parameters and edit Dockerfile. Take following as example.
+## 8. Submitting PR
-```dockerfile
-FROM golang:1.18.2 as builder
+We welcome developers to participate in the development of this project. Please follow the steps below when submitting a PR:
-WORKDIR /usr/src/taoskeeper
-COPY ./ /usr/src/taoskeeper/
-ENV GO111MODULE=on \
- GOPROXY=https://goproxy.cn,direct
-RUN go mod tidy && go build
+1. Fork the repository: Please fork this repository first. For specific steps, please refer to [How to Fork a Repository](https://docs.github.com/en/get-started/quickstart/fork-a-repo).
+2. Create a new branch: Create a new branch based on the `main` branch and use a meaningful branch name (for example: `git checkout -b feature/my_feature`). Do not modify it directly on the main branch.
+3. Development and testing: After completing the code modification, make sure that all unit tests pass, and add corresponding test cases for new features or fixed bugs.
+4. Submit code: Submit the changes to the remote branch (for example: `git push origin feature/my_feature`).
+5. Create a Pull Request: Initiate a [Pull Request](https://github.com/taosdata/TDengine/pulls) on GitHub. For specific steps, please refer to [How to Create a Pull Request](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request).
+6. Check CI: After submitting the PR, you can find the PR you submitted in the Pull Request and click the corresponding link to check whether the CI of the PR has passed. If it has passed, it will show `All checks have passed`. Regardless of whether CI has passed or not, you can click `Show all checks -> Details` to view detailed test case logs.
-FROM alpine:3
-RUN mkdir -p /etc/taos
-COPY --from=builder /usr/src/taoskeeper/taoskeeper /usr/bin/
-COPY ./config/taoskeeper.toml /etc/taos/taoskeeper.toml
-EXPOSE 6043
-CMD ["taoskeeper"]
-```
+## 9. References
-If you already have taosKeeper binary file, you can build this tool like:
+[TDengine Official Website](https://www.tdengine.com/)
-```dockerfile
-FROM ubuntu:18.04
-RUN mkdir -p /etc/taos
-COPY ./taoskeeper /usr/bin/
-COPY ./taoskeeper.toml /etc/taos/taoskeeper.toml
-EXPOSE 6043
-CMD ["taoskeeper"]
-```
+## 10. License
-## Usage (**Enterprise Edition**)
-
-### Prometheus (by scrape)
-
-It's now act as a prometheus exporter like `node-exporter`.
-
-Here's how to add this in scrape configs of `/etc/prometheus/prometheus.yml`:
-
-```yml
-global:
- scrape_interval: 5s
-
-scrape_configs:
- - job_name: "taoskeeper"
- static_configs:
- - targets: [ "taoskeeper:6043" ]
-```
-
-Now PromQL query will show the right result, for example, to show disk used percent in an specific host with FQDN regex
-match expression:
-
-```promql
-taos_dn_disk_used / taos_dn_disk_total {fqdn=~ "tdengine.*"}
-```
-
-You can use `docker-compose` with the current `docker-compose.yml` to test the whole stack.
-
-Here is the `docker-compose.yml`:
-
-```yml
-version: "3.7"
-
-services:
- tdengine:
- image: tdengine/tdengine
- environment:
- TAOS_FQDN: tdengine
- volumes:
- - taosdata:/var/lib/taos
- taoskeeper:
- build: ./
- depends_on:
- - tdengine
- environment:
- TDENGINE_HOST: tdengine
- TDENGINE_PORT: 6041
- volumes:
- - ./config/taoskeeper.toml:/etc/taos/taoskeeper.toml
- ports:
- - 6043:6043
- prometheus:
- image: prom/prometheus
- volumes:
- - ./prometheus/:/etc/prometheus/
- ports:
- - 9090:9090
-volumes:
- taosdata:
-
-```
-
-Start the stack:
-
-```sh
-docker-compose up -d
-```
-
-Now you point to (if you have not started a prometheus server by yourself) and query.
-
-For a quick demo with TaosKeeper + Prometheus + Grafana, we provide
-a [simple dashboard](https://grafana.com/grafana/dashboards/15164) to monitor TDengine.
-
-### Telegraf
-
-If you are using telegraf to collect metrics, just add inputs like this:
-
-```toml
-[[inputs.prometheus]]
- ## An array of urls to scrape metrics from.
- urls = ["http://taoskeeper:6043/metrics"]
-```
-
-You can test it with `docker-compose`:
-
-```sh
-docker-compose -f docker-compose.yml -f telegraf.yml up -d telegraf taoskeeper
-```
-
-Since we have set an stdout file output in `telegraf.conf`:
-
-```toml
-[[outputs.file]]
- files = ["stdout"]
-```
-
-So you can track with TDengine metrics in standard output with `docker-compose logs`:
-
-```sh
-docker-compose -f docker-compose.yml -f telegraf.yml logs -f telegraf
-```
-
-### Zabbix
-
-1. Import the zabbix template file `zbx_taos_keeper_templates.xml`.
-2. Use the template `TDengine` to create the host and modify the macros `{$TAOSKEEPER_HOST}`
- and `{$COLLECTION_INTERVAL}`.
-3. Waiting for monitoring items to be created automatically.
-
-### FAQ
-
-* Error occurred: Connection refused, while taosKeeper was starting
-
- **Answer**: taoskeeper relies on restful interfaces to query data. Check whether the taosAdapter is running or whether
- the taosAdapter address in taoskeeper.toml is correct.
-
-* Why detection metrics displayed by different TDengine's inconsistent with taoskeeper monitoring?
-
- **Answer**: If a metric is not created in TDengine, taoskeeper cannot get the corresponding test results.
-
-* Cannot receive log from TDengine server.
-
- **Answer**: Modify `/etc/taos/taos.cfg` file and add parameters like:
-
- ```cfg
- monitor 1 // start monitor
- monitorInterval 30 // send log interval (s)
- monitorFqdn localhost
- monitorPort 6043 // taosKeeper port
- monitorMaxLogs 100
- ```
+[AGPL-3.0 License](../../LICENSE)