Merge branch '3.0' of https://github.com/taosdata/TDengine into docs/wade-3.0

gccgdb1234 2024-08-12 14:09:35 +08:00
commit ef447bab66
106 changed files with 1949 additions and 961 deletions

View File

@ -88,7 +88,7 @@ For more details about `INSERT` please refer to [INSERT](../../../reference/taos
<TabItem label="Rust" value="rust">
<RustSQL />
</TabItem>
<TabItem label="Node.js" value="nodejs">
<TabItem label="Node.js" value="node">
<NodeSQL />
</TabItem>
<TabItem label="C#" value="csharp">
@ -128,7 +128,7 @@ Parameter binding is available only with native connection.
<TabItem label="Rust" value="rust">
<RustStmt />
</TabItem>
<TabItem label="Node.js" value="nodejs">
<TabItem label="Node.js" value="node">
<NodeStmt />
</TabItem>
<TabItem label="C#" value="csharp">

View File

@ -60,7 +60,7 @@ For more details please refer to [InfluxDB Line Protocol](https://docs.influxdat
<TabItem label="Go" value="go">
<GoLine />
</TabItem>
<TabItem label="Node.js" value="nodejs">
<TabItem label="Node.js" value="node">
<NodeLine />
</TabItem>
<TabItem label="C#" value="csharp">

View File

@ -52,7 +52,7 @@ meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3
<TabItem label="Go" value="go">
<GoTelnet />
</TabItem>
<TabItem label="Node.js" value="nodejs">
<TabItem label="Node.js" value="node">
<NodeTelnet />
</TabItem>
<TabItem label="C#" value="csharp">

View File

@ -67,7 +67,7 @@ Please refer to [OpenTSDB HTTP API](http://opentsdb.net/docs/build/html/api_http
<TabItem label="Go" value="go">
<GoJson />
</TabItem>
<TabItem label="Node.js" value="nodejs">
<TabItem label="Node.js" value="node">
<NodeJson />
</TabItem>
<TabItem label="C#" value="csharp">

View File

@ -142,7 +142,7 @@ In the section describing [Insert](../insert-data/sql-writing), a database named
<TabItem label="Rust" value="rust">
<RustQuery />
</TabItem>
<TabItem label="Node.js" value="nodejs">
<TabItem label="Node.js" value="node">
<NodeQuery />
</TabItem>
<TabItem label="C#" value="csharp">

View File

@ -11,11 +11,11 @@ public class WSConnectExample {
// ANCHOR: main
public static void main(String[] args) throws SQLException {
// use
// String jdbcUrl = "jdbc:TAOS-RS://localhost:6041/dbName?user=root&password=taosdata";
// String jdbcUrl =
// "jdbc:TAOS-RS://localhost:6041/dbName?user=root&password=taosdata&batchfetch=true";
// if you want to connect a specified database named "dbName".
String jdbcUrl = "jdbc:TAOS-RS://localhost:6041?user=root&password=taosdata";
String jdbcUrl = "jdbc:TAOS-RS://localhost:6041?user=root&password=taosdata&batchfetch=true";
Properties connProps = new Properties();
connProps.setProperty(TSDBDriver.PROPERTY_KEY_BATCH_LOAD, "true");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_ENABLE_AUTO_RECONNECT, "true");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
@ -26,8 +26,10 @@ public static void main(String[] args) throws SQLException {
// you can use the connection for execute SQL here
} catch (SQLException ex) {
// handle any errors, please refer to the JDBC specifications for detailed exceptions info
System.out.println("Failed to connect to " + jdbcUrl + "; ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage());
// handle any errors, please refer to the JDBC specifications for detailed
// exceptions info
System.out.println("Failed to connect to " + jdbcUrl + "; ErrCode:" + ex.getErrorCode() + "; ErrMessage: "
+ ex.getMessage());
throw ex;
} catch (Exception ex) {
System.out.println("Failed to connect to " + jdbcUrl + "; ErrMessage: " + ex.getMessage());

View File

@ -40,21 +40,21 @@ taosKeeper's configuration file is located at `/etc/taos/taoskeeper.toml` by default. For detailed conf
#### Importing the dashboard
The TDengine data source plugin has been published on the Grafana website. After installing the plugin and creating the data source, you can import the TDinsight dashboard.
The TDengine data source plugin has been published on the Grafana website. For how to install the plugin and configure the data source, see [Install the Grafana Plugin and Configure the Data Source](../../third-party/visual/grafana/#安装-grafana-plugin-并配置数据源). After installing the plugin and creating the data source, you can import the TDinsight dashboard.
On Grafana's ”Home“ -> ”Dashboards“ page, click the ”New“ -> ”import“ button in the top-right corner to open the dashboard import page, which supports the following two import methods.
On Grafana's “Home” -> “Dashboards” page, click the “New” -> “import” button in the top-right corner to open the dashboard import page, which supports the following two import methods.
- Dashboard ID: 18180.
- Dashboard URL: https://grafana.com/grafana/dashboards/18180-tdinsight-for-3-x/
After filling in the Dashboard ID or Dashboard URL above, click the ”Load“ button and follow the wizard to complete the import. Once imported, a ”TDinsight for 3.x“ dashboard appears on the Dashboards list page; open it to see the panels for each metric created in TDinsight, as shown below:
After filling in the Dashboard ID or Dashboard URL above, click the “Load” button and follow the wizard to complete the import. Once imported, a “TDinsight for 3.x” dashboard appears on the Dashboards list page; open it to see the panels for each metric created in TDinsight, as shown below:
![TDinsight UI example](./TDinsight-1-cluster-status.webp)
**Note** In the ”Log from“ dropdown at the top left of the TDinsight page, you can select the `log` database.
**Note** In the “Log from” dropdown at the top left of the TDinsight page, you can select the `log` database.
### TDengine V3 monitoring data
The TDinsight dashboard reads from the `log` database (the default database for monitoring data, configurable in the taoskeeper configuration file). The ”TDinsight for 3.x“ dashboard queries the monitoring metrics of taosd and taosAdapter.
The TDinsight dashboard reads from the `log` database (the default database for monitoring data, configurable in the taoskeeper configuration file). The “TDinsight for 3.x” dashboard queries the monitoring metrics of taosd and taosAdapter.
- For taosd monitoring metrics, see [taosd monitoring metrics](../../reference/components/taosd/#taosd-监控指标)
- For taosAdapter monitoring metrics, see [taosAdapter monitoring metrics](../../reference/components/taosadapter/#taosadapter-监控指标)
@ -66,18 +66,15 @@ taosX is the core component that provides zero-code data ingestion in TDengine; monitoring it
3. The connector subprocesses running on the taosX or taosx-agent side
4. The running data-writing tasks of various kinds
### Supported versions
### Prerequisites
1. This feature is included only in the taosX shipped with TDengine Enterprise 3.2.3.0 or later. If taosX is installed separately, taosX 1.5.0 or later is required.
2. The Grafana plugin [TDengine Datasource v3.5.0](https://grafana.com/grafana/plugins/tdengine-datasource/) or later must be installed.
1. taosd, taosAdapter, and taosKeeper have all been deployed and started successfully.
2. taosX service monitoring is configured correctly (see "Configure taosX monitoring" below) and the service has started successfully.
**Note**: This feature is included only in the taosX shipped with TDengine Enterprise 3.2.3.0 or later. If taosX is installed separately, taosX 1.5.0 or later is required.
3. Grafana has been deployed, the TDengine Datasource plugin installed, and the data source configured. See [Install the Grafana Plugin and Configure the Data Source](../../third-party/visual/grafana/#安装-grafana-plugin-并配置数据源).
**Note**: The Grafana plugin [TDengine Datasource v3.5.0](https://grafana.com/grafana/plugins/tdengine-datasource/) or later is required.
### Preparation
Assuming you have already deployed taosd, taosAdapter, and taosKeeper, you also need to:
1. Start the taosX service.
2. Deploy Grafana, install the TDengine Datasource plugin, and configure the data source.
### Configure taosX
### Configure taosX monitoring
The monitor-related settings in the taosX configuration file (default /etc/taos/taosx.toml) are as follows:
@ -101,12 +98,12 @@ The monitor-related settings in the taosX configuration file (default /etc/taos/taosx.toml) are as foll
### Monitoring taosX with TDinsight
"TDinsight for taosX" is a Grafana dashboard created specifically for taosX monitoring. Import it before use.
“TDinsight for taosX” is a Grafana dashboard created specifically for taosX monitoring. Import it before use.
#### Opening the dashboard
1. In the Grafana menu, click ”Data sources“ and select the configured TDengine data source.
2. On the data source configuration page, select the “Dashboard” tab and import the ”TDinsight for taosX“ dashboard (required on first use). An example screenshot:
1. In the Grafana menu, click “Data sources” and select the configured TDengine data source.
2. On the data source configuration page, select the “Dashboard” tab and import the “TDinsight for taosX” dashboard (required on first use). An example screenshot:
![monitor rows](./pic/monitor-04.jpg)

View File

@ -20,7 +20,7 @@ import VerifyLinux from "../../14-reference/05-connector/_verify_linux.mdx";
import VerifyMacOS from "../../14-reference/05-connector/_verify_macos.mdx";
import VerifyWindows from "../../14-reference/05-connector/_verify_windows.mdx";
TDengine provides a rich set of application development interfaces. To help users develop applications quickly, TDengine supports connectors for multiple programming languages. Official connectors include C/C++, Java, Python, Go, Node.js, C#, Rust, Lua (community-contributed), and PHP (community-contributed). These connectors can connect to a TDengine cluster via the native interface (taosc) or the REST interface (not yet supported by some languages). Community developers have also contributed several unofficial connectors, such as an ADO.NET connector, a Lua connector, and a PHP connector.
TDengine provides a rich set of application development interfaces. To help users develop applications quickly, TDengine supports connectors for multiple programming languages. Official connectors include C/C++, Java, Python, Go, Node.js, C#, Rust, Lua (community-contributed), and PHP (community-contributed). These connectors can connect to a TDengine cluster via the native interface (taosc) or the REST interface (not yet supported by some languages). Community developers have also contributed several unofficial connectors, such as an ADO.NET connector, a Lua connector, and a PHP connector. In addition, applications can directly call the REST API provided by taosadapter to write and query data.
## Connection methods
@ -33,6 +33,7 @@ TDengine provides a rich set of application development interfaces. To help users develop applicati
![TDengine connection type](connection-type-zh.webp)
Whichever method is used to establish the connection, the connectors provide the same or similar APIs for operating the database; SQL statements can always be executed, and only connection initialization differs slightly, so users will not notice much difference in usage.
For the connection methods and the support of each language connector, see [Connector Features](../../reference/connector/#功能特性).
The key differences are:
@ -251,7 +252,10 @@ dotnet add package TDengine.Connector
<TabItem label="C" value="c">
If the TDengine server software or the TDengine client driver taosc is already installed, the C connector is already installed as well and no extra steps are needed.
<br/>
</TabItem>
<TabItem label="REST API" value="rest">
Accessing TDengine via the REST API requires no driver or connector installation.
</TabItem>
</Tabs>
@ -259,18 +263,20 @@ dotnet add package TDengine.Connector
## Establishing a connection
Before this step, make sure there is a running and reachable TDengine instance, and that the server-side FQDN is configured correctly. The sample code below assumes TDengine is installed locally with the FQDN (default localhost) and serverPort (default 6030) left at their defaults.
### Connection parameters
There are quite a few connection options, so before establishing a connection let's first go over the parameters each language connector uses.
<Tabs defaultValue="java" groupId="lang">
<TabItem label="Java" value="java">
The Java connector establishes a connection with a URL and Properties.
The JDBC URL specification format for TDengine is:
`jdbc:[TAOS|TAOS-RS]://[host_name]:[port]/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]`
`jdbc:[TAOS|TAOS-RS]://[host_name]:[port]/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}|&batchfetch={batchfetch}]`
For detailed descriptions of the URL and Properties parameters and how to use them, see the [URL specification](../../reference/connector/java/#url-规范).
**Note**: Adding the `batchfetch` parameter to a REST connection and setting it to true enables the WebSocket connection.
</TabItem>
<TabItem label="Python" value="python">
The Python connector uses the `connect()` method to establish a connection; a minimal sketch follows, and the specific connection parameters are described in detail below.
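As a hedged illustration only (assuming a default local deployment, the default root/taosdata credentials, and the taospy package), a native connection might be opened like this:

```python
import taos  # taospy, the TDengine Python connector

# Assumptions: TDengine runs locally with default FQDN (localhost) and port (6030).
conn = taos.connect(host="localhost", user="root", password="taosdata", port=6030)
try:
    result = conn.query("SELECT server_version()")
    print(result.fetch_all())  # e.g. [('3.3.0.0',)]
finally:
    conn.close()  # always release the connection
```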
@ -343,7 +349,7 @@ For a detailed description of the DSN and how to use it, see [Connection features](../../reference/connecto
- **database**: database name.
- **params**: other parameters, e.g. token.
- Complete D example:
- Complete DSN example:
```js
ws://root:taosdata@localhost:6041
@ -392,7 +398,10 @@ The C/C++ connector uses the `taos_connect()` function to establish a connection with the TDengine da
It also provides the `taos_connect_auth()` function for establishing a connection to the TDengine database using an MD5-hashed password. This function works the same as `taos_connect`; the difference lies in how the password is handled: `taos_connect_auth` expects the MD5 hash string of the password.
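As a hedged aside, the MD5 hash string that `taos_connect_auth()` expects can be produced with Python's standard library; the password value here is only an example:

```python
import hashlib

# Hypothetical illustration: taos_connect_auth() takes the MD5 hex digest
# of the plain-text password rather than the password itself.
password = "taosdata"
print(hashlib.md5(password.encode("utf-8")).hexdigest())
```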
</TabItem>
<TabItem label="REST API" value="rest">
When accessing TDengine via the REST API, the application establishes the HTTP connection itself and controls the HTTP connection parameters on its own.
</TabItem>
</Tabs>
### WebSocket connection
@ -429,6 +438,13 @@ The C/C++ connector uses the `taos_connect()` function to establish a connection with the TDengine da
{{#include docs/examples/csharp/wsConnect/Program.cs:main}}
```
</TabItem>
<TabItem label="C" value="c">
Not supported
</TabItem>
<TabItem label="REST API" value="rest">
Not supported
</TabItem>
</Tabs>
### Native connection
@ -453,6 +469,9 @@ The C/C++ connector uses the `taos_connect()` function to establish a connection with the TDengine da
{{#include docs/examples/rust/nativeexample/examples/connect.rs}}
```
</TabItem>
<TabItem label="Node.js" value="node">
Not supported
</TabItem>
<TabItem label="C#" value="csharp">
```csharp
{{#include docs/examples/csharp/connect/Program.cs:main}}
@ -462,6 +481,10 @@ The C/C++ connector uses the `taos_connect()` function to establish a connection with the TDengine da
<ConnC />
</TabItem>
<TabItem label="REST API" value="rest">
Not supported
</TabItem>
</Tabs>
### REST connection
@ -483,6 +506,23 @@ The C/C++ connector uses the `taos_connect()` function to establish a connection with the TDengine da
{{#include docs/examples/go/connect/restexample/main.go}}
```
</TabItem>
<TabItem label="Rust" value="rust">
Not supported
</TabItem>
<TabItem label="Node.js" value="node">
Not supported
</TabItem>
<TabItem label="C#" value="csharp">
Not supported
</TabItem>
<TabItem label="C" value="c">
Not supported
</TabItem>
<TabItem label="REST API" value="rest">
When accessing TDengine via the REST API, the application establishes the HTTP connection on its own.
</TabItem>
</Tabs>

View File

@ -57,7 +57,7 @@ REST API: directly call the REST API provided by `taosadapter` for data
```
</TabItem>
<TabItem label="Node.js" value="node.js">
<TabItem label="Node.js" value="node">
```js
{{#include docs/examples/node/websocketexample/sql_example.js:create_db_and_table}}
```
@ -90,7 +90,7 @@ curl --location -uroot:taosdata 'http://127.0.0.1:6041/rest/sql/power' \
</TabItem>
</Tabs>
> **注意**如果不使用 `USE power` 指定数据库,则后续对表的操作都需要增加数据库名称作为前缀,如 `power.meters`
> **注意**建议采用 `<dbName>.<tableName>` 的格式构造SQL语句不推荐在应用中采用 `USE DBName` 方式访问
## Inserting data
The following uses smart meters as an example to show how to use a connector to execute SQL that inserts data into the `meters` supertable of the `power` database. The sample uses TDengine's auto-create-table SQL syntax to write 3 rows into subtable d1001 and 1 row into subtable d1002, then prints the actual number of inserted rows.
@ -133,7 +133,7 @@ NOW is a built-in function that defaults to the current time of the client machine. NOW
```
</TabItem>
<TabItem label="Node.js" value="node.js">
<TabItem label="Node.js" value="node">
```js
{{#include docs/examples/node/websocketexample/sql_example.js:insertData}}
```
@ -207,7 +207,7 @@ The Rust connector also supports using **serde** to deserialize rows into struct
```
</TabItem>
<TabItem label="Node.js" value="node.js">
<TabItem label="Node.js" value="node">
```js
{{#include docs/examples/node/websocketexample/sql_example.js:queryData}}
```
@ -282,7 +282,7 @@ reqId can be used for request tracing; reqId is like the traceId in distributed systems
```
</TabItem>
<TabItem label="Node.js" value="node.js">
<TabItem label="Node.js" value="node">
```js
{{#include docs/examples/node/websocketexample/sql_example.js:sqlWithReqid}}
```

View File

@ -169,7 +169,7 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c6="passit" 1626006833640000000
```
Perform a schemaless write with reqId; reqId can be used for request tracing.
Perform a schemaless write with reqId; the last parameter, reqId, can be used for request tracing.
```java
writer.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS, 1L);
@ -194,7 +194,7 @@ writer.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO
```
</TabItem>
<TabItem label="Node.js" value="node.js">
<TabItem label="Node.js" value="node">
```js
{{#include docs/examples/node/websocketexample/line_example.js}}
```
@ -204,6 +204,12 @@ writer.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO
{{#include docs/examples/csharp/wssml/Program.cs:main}}
```
</TabItem>
<TabItem label="C" value="c">
不支持
</TabItem>
<TabItem label="REST API" value="rest">
不支持
</TabItem>
</Tabs>
### 原生连接
@ -213,7 +219,7 @@ writer.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO
{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/SchemalessJniTest.java:schemaless}}
```
Perform a schemaless write with reqId; reqId can be used for request tracing.
Perform a schemaless write with reqId; the last parameter, reqId, can be used for request tracing.
```java
writer.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS, 1L);
@ -237,7 +243,9 @@ writer.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO
{{#include docs/examples/rust/nativeexample/examples/schemaless.rs}}
```
</TabItem>
<TabItem label="Node.js" value="node">
不支持
</TabItem>
<TabItem label="C#" value="csharp">
```csharp
@ -249,7 +257,9 @@ writer.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO
{{#include docs/examples/c/sml_insert_demo.c:schemaless}}
```
</TabItem>
<TabItem label="REST API" value="rest">
不支持
</TabItem>
</Tabs>
## Querying the written data

View File

@ -63,6 +63,12 @@ import TabItem from "@theme/TabItem";
{{#include docs/examples/csharp/wsStmt/Program.cs:main}}
```
</TabItem>
<TabItem label="C" value="c">
不支持
</TabItem>
<TabItem label="REST API" value="rest">
不支持
</TabItem>
</Tabs>
## Native connection
@ -93,6 +99,9 @@ import TabItem from "@theme/TabItem";
{{#include docs/examples/rust/nativeexample/examples/stmt.rs}}
```
</TabItem>
<TabItem label="Node.js" value="node">
不支持
</TabItem>
<TabItem label="C#" value="csharp">
```csharp
@ -104,5 +113,7 @@ import TabItem from "@theme/TabItem";
{{#include docs/examples/c/stmt_insert_demo.c}}
```
</TabItem>
<TabItem label="REST API" value="rest">
不支持
</TabItem>
</Tabs>

View File

@ -95,6 +95,9 @@ The Rust connector creates a consumer from a DSN; for the list of configurable parameters see
Same as the common basic configuration options.
</TabItem>
<TabItem label="REST API" value="rest">
Not supported
</TabItem>
</Tabs>
@ -148,6 +151,12 @@ The Rust connector creates a consumer from a DSN; for the list of configurable parameters see
{{#include docs/examples/csharp/wssubscribe/Program.cs:create_consumer}}
```
</TabItem>
<TabItem label="C" value="c">
Not supported
</TabItem>
<TabItem label="REST API" value="rest">
Not supported
</TabItem>
</Tabs>
@ -187,7 +196,9 @@ The Rust connector creates a consumer from a DSN; for the list of configurable parameters see
```
</TabItem>
<TabItem label="Node.js" value="node">
Not supported
</TabItem>
<TabItem label="C#" value="csharp">
```csharp
{{#include docs/examples/csharp/subscribe/Program.cs:create_consumer}}
@ -206,7 +217,9 @@ The Rust connector creates a consumer from a DSN; for the list of configurable parameters see
Call the `build_consumer` function to try to obtain the consumer instance `tmq`. On success a success log is printed; on failure a failure log is printed.
</TabItem>
<TabItem label="REST API" value="rest">
Not supported
</TabItem>
</Tabs>
## Subscribing to and consuming data
@ -267,7 +280,12 @@ The Rust connector creates a consumer from a DSN; for the list of configurable parameters see
{{#include docs/examples/csharp/wssubscribe/Program.cs:subscribe}}
```
</TabItem>
<TabItem label="C" value="c">
Not supported
</TabItem>
<TabItem label="REST API" value="rest">
Not supported
</TabItem>
</Tabs>
### Native connection
@ -311,7 +329,9 @@ The Rust connector creates a consumer from a DSN; for the list of configurable parameters see
- `Record` is a struct we define, whose field names and data types correspond one-to-one with the column names and data types, so that `Record` objects can be deserialized via `serde`.
</TabItem>
<TabItem label="Node.js" value="node">
Not supported
</TabItem>
<TabItem label="C#" value="csharp">
```csharp
{{#include docs/examples/csharp/subscribe/Program.cs:subscribe}}
@ -343,7 +363,9 @@ The Rust connector creates a consumer from a DSN; for the list of configurable parameters see
5. Call the `basic_consume_loop` function to start the basic consume loop and process the subscribed messages.
</TabItem>
<TabItem label="REST API" value="rest">
Not supported
</TabItem>
</Tabs>
## Specifying the subscription offset
@ -402,6 +424,12 @@ The Rust connector creates a consumer from a DSN; for the list of configurable parameters see
{{#include docs/examples/csharp/wssubscribe/Program.cs:seek}}
```
</TabItem>
<TabItem label="C" value="c">
Not supported
</TabItem>
<TabItem label="REST API" value="rest">
Not supported
</TabItem>
</Tabs>
### Native connection
@ -445,7 +473,9 @@ The Rust connector creates a consumer from a DSN; for the list of configurable parameters see
2. After the offsets of all partitions have been adjusted, fetch and record the consumer's partition assignment again to confirm the state after the adjustment.
</TabItem>
<TabItem label="Node.js" value="node">
Not supported
</TabItem>
<TabItem label="C#" value="csharp">
```csharp
{{#include docs/examples/csharp/subscribe/Program.cs:seek}}
@ -464,6 +494,9 @@ The Rust connector creates a consumer from a DSN; for the list of configurable parameters see
5. Free the assignment array to release resources.
6. Call the `basic_consume_loop` function to start a new consume loop and process messages.
</TabItem>
<TabItem label="REST API" value="rest">
Not supported
</TabItem>
</Tabs>
@ -518,6 +551,12 @@ The Rust connector creates a consumer from a DSN; for the list of configurable parameters see
{{#include docs/examples/csharp/wssubscribe/Program.cs:commit_offset}}
```
</TabItem>
<TabItem label="C" value="c">
Not supported
</TabItem>
<TabItem label="REST API" value="rest">
Not supported
</TabItem>
</Tabs>
### Native connection
@ -551,7 +590,9 @@ The Rust connector creates a consumer from a DSN; for the list of configurable parameters see
Consumption progress can be committed manually via the `consumer.commit` method.
</TabItem>
<TabItem label="Node.js" value="node">
Not supported
</TabItem>
<TabItem label="C#" value="csharp">
```csharp
{{#include docs/examples/csharp/subscribe/Program.cs:commit_offset}}
@ -565,6 +606,9 @@ The Rust connector creates a consumer from a DSN; for the list of configurable parameters see
Consumption progress can be committed manually via the `tmq_commit_sync` function.
</TabItem>
<TabItem label="REST API" value="rest">
Not supported
</TabItem>
</Tabs>
@ -615,6 +659,12 @@ The Rust connector creates a consumer from a DSN; for the list of configurable parameters see
{{#include docs/examples/csharp/wssubscribe/Program.cs:close}}
```
</TabItem>
<TabItem label="C" value="c">
Not supported
</TabItem>
<TabItem label="REST API" value="rest">
Not supported
</TabItem>
</Tabs>
### Native connection
@ -646,7 +696,9 @@ The Rust connector creates a consumer from a DSN; for the list of configurable parameters see
**Note**: A consumer cannot be reused after unsubscribing; to subscribe to a new `topic`, create the consumer again.
</TabItem>
<TabItem label="Node.js" value="node">
Not supported
</TabItem>
<TabItem label="C#" value="csharp">
```csharp
{{#include docs/examples/csharp/subscribe/Program.cs:close}}
@ -658,7 +710,9 @@ The Rust connector creates a consumer from a DSN; for the list of configurable parameters see
```
{{#include docs/examples/c/tmq_demo.c:unsubscribe_and_close}}
```
</TabItem>
<TabItem label="REST API" value="rest">
Not supported
</TabItem>
</Tabs>
@ -720,7 +774,12 @@ The Rust connector creates a consumer from a DSN; for the list of configurable parameters see
```
</details>
</TabItem>
<TabItem label="C" value="c">
Not supported
</TabItem>
<TabItem label="REST API" value="rest">
Not supported
</TabItem>
</Tabs>
### Native connection
@ -765,7 +824,9 @@ The Rust connector creates a consumer from a DSN; for the list of configurable parameters see
```
</details>
</TabItem>
<TabItem label="Node.js" value="node">
Not supported
</TabItem>
<TabItem label="C#" value="csharp">
<details>
<summary>Complete code example</summary>
@ -783,4 +844,7 @@ The Rust connector creates a consumer from a DSN; for the list of configurable parameters see
```
</details>
</TabItem>
<TabItem label="REST API" value="rest">
Not supported
</TabItem>
</Tabs>

View File

@ -410,7 +410,7 @@ def finish(buf: bytes) -> output_type:
#### Example 1
Write a UDF that accepts a single integer: input n, output ln(n^2 + 1).
First write a Python file and save it somewhere on the system, e.g. /root/udf/myfun.py, with the following content:
First write a Python file and save it somewhere on the system, e.g. /root/udf/myfun.py, with the following content.
```python
from math import log
@ -426,23 +426,25 @@ def process(block):
return [log(block.data(i, 0) ** 2 + 1) for i in range(rows)]
```
This file contains three functions. init and destroy are both empty functions; they are the UDF lifecycle functions and must be defined even if they do nothing. The key one is the process function, which accepts a data block; the data block object has two methods:
This file contains three functions. init and destroy are both empty functions; they are the UDF lifecycle functions and must be defined even if they do nothing. The key one is the process function, which accepts a data block; the data block object has two methods.
1. shape() returns the number of rows and columns of the data block
2. data(i, j) returns the data at row i, column j
The process method of a scalar function must return as many values as there are rows in the data block passed in. The code above ignores the column count, because we only want to compute on the first value of each row.
Next we create the corresponding UDF function by executing the following statement in the TDengine CLI:
The process method of a scalar function must return as many rows of data as there are rows in the data block passed in. The code above ignores the column count, because only the first column of each row needs to be computed.
Next, create the corresponding UDF function by executing the following statement in the TDengine CLI.
```sql
create function myfun as '/root/udf/myfun.py' outputtype double language 'Python'
```
Its output is as follows:
Its output is as follows.
```shell
taos> create function myfun as '/root/udf/myfun.py' outputtype double language 'Python';
Create OK, 0 row(s) affected (0.005202s)
```
That looks good. Next, show all the user-defined functions in the system to confirm the creation succeeded:
That looks good. Next, check all the user-defined functions in the system to confirm the creation succeeded.
```text
taos> show functions;
@ -452,7 +454,7 @@ taos> show functions;
Query OK, 1 row(s) in set (0.005767s)
```
Now let's test the function. Before testing, generate some test data by executing the following SQL commands in the TDengine CLI:
To generate test data, execute the following commands in the TDengine CLI.
```sql
create database test;
@ -462,7 +464,7 @@ insert into t values('2023-05-03 08:09:10', 2, 3, 4);
insert into t values('2023-05-10 07:06:05', 3, 4, 5);
```
Test the myfun function:
Test the myfun function.
```sql
taos> select myfun(v1, v2) from t;
@ -470,14 +472,13 @@ taos> select myfun(v1, v2) from t;
DB error: udf function execution failure (0.011088s)
```
Unfortunately, the execution failed. Why?
Check the logs of the udfd process
Unfortunately, the execution failed. Why? Check the logs of the udfd process.
```shell
tail -10 /var/log/taos/udfd.log
```
The following error message is found:
The following error message is found.
```text
05/24 22:46:28.733545 01665799 UDF ERROR can not load library libtaospyudf.so. error: operation not permitted
@ -486,7 +487,7 @@ tail -10 /var/log/taos/udfd.log
The error is clear: the Python plugin libtaospyudf.so was not loaded. If you encounter this error, refer to the environment preparation section earlier.
After fixing the environment error, run it again, as follows:
After fixing the environment error, run it again, as follows.
```sql
taos> select myfun(v1) from t;
@ -501,7 +502,7 @@ taos> select myfun(v1) from t;
#### Example 2
Although myfun passed the test above, it has two shortcomings:
Although myfun passed the test above, it has two shortcomings.
1. This scalar function accepts only one column of data as input, yet it does not throw an exception if the user passes in multiple columns.
@ -514,8 +515,7 @@ taos> select myfun(v1, v2) from t;
2.302585093 |
```
2. It does not handle null values. We expect it to throw an exception and abort execution if the input contains null.
So the process function is improved as follows:
2. It does not handle null values. We expect it to throw an exception and abort execution if the input contains null. So the process function is improved as follows.
```python
def process(block):
@ -525,13 +525,13 @@ def process(block):
return [ None if block.data(i, 0) is None else log(block.data(i, 0) ** 2 + 1) for i in range(rows)]
```
Then execute the following statement to update the existing UDF:
Execute the following statement to update the existing UDF.
```sql
create or replace function myfun as '/root/udf/myfun.py' outputtype double language 'Python';
```
Now passing two arguments to myfun makes the execution fail:
Now passing two arguments to myfun makes the execution fail.
```sql
taos> select myfun(v1, v2) from t;
@ -539,7 +539,7 @@ taos> select myfun(v1, v2) from t;
DB error: udf function execution failure (0.014643s)
```
Unfortunately, however, our custom exception message is not shown to the user; instead it goes to the plugin log file /var/log/taos/taospyudf.log:
The custom exception message is printed in the plugin log file /var/log/taos/taospyudf.log.
```text
2023-05-24 23:21:06.790 ERROR [1666188] [doPyUdfScalarProc@507] call pyUdfScalar proc function. context 0x7faade26d180. error: Exception: require 1 parameter but given 2
@ -554,18 +554,17 @@ At:
#### Example 3
Write a UDF: input (x1, x2, ..., xn), output the sum of each value multiplied by its ordinal: 1 * x1 + 2 * x2 + ... + n * xn. If any of x1 through xn is null, the result is null.
This example differs from Example 1 in that it accepts any number of columns as input and must process every column's value. Write the UDF file /root/udf/nsum.py:
Input: x1, x2, ..., xn; output the sum of each value multiplied by its ordinal: 1 * x1 + 2 * x2 + ... + n * xn. If any of x1 through xn is null, the result is null.
This example differs from Example 1 in that it accepts any number of columns as input and must process every column's value. Write the UDF file /root/udf/nsum.py.
```python
def init():
pass
def destroy():
pass
def process(block):
rows, cols = block.shape()
result = []
@ -581,13 +580,13 @@ def process(block):
return result
```
Create the UDF:
Create the UDF.
```sql
create function nsum as '/root/udf/nsum.py' outputtype double language 'Python';
```
Test the UDF:
Test the UDF.
```sql
taos> insert into t values('2023-05-25 09:09:15', 6, null, 8);
@ -606,22 +605,20 @@ Query OK, 4 row(s) in set (0.010653s)
#### Example 4
Write a UDF that takes a timestamp and outputs the next Sunday closest after that time. For example, if today is 2023-05-25, then the next Sunday is 2023-05-28.
Implementing this function requires the third-party library moment. Install the library first:
Implementing this function requires the third-party library moment. Install the library first.
```shell
pip3 install moment
```
Then write the UDF file /root/udf/nextsunday.py:
Then write the UDF file /root/udf/nextsunday.py.
```python
import moment
def init():
pass
def destroy():
pass
@ -636,13 +633,13 @@ def process(block):
for i in range(rows)]
```
The UDF framework maps TDengine's timestamp type to Python's int type, so this function accepts only an integer representing milliseconds. The process method first validates the arguments, then uses the moment package to replace the weekday of the time with Sunday, and finally formats the output. The output string has a fixed length of 10 characters, so the UDF function can be created like this:
The UDF framework maps TDengine's timestamp type to Python's int type, so this function accepts only an integer representing milliseconds. The process method first validates the arguments, then uses the moment package to replace the weekday of the time with Sunday, and finally formats the output. The output string has a fixed length of 10 characters, so the UDF function can be created like this.
```sql
create function nextsunday as '/root/udf/nextsunday.py' outputtype binary(10) language 'Python';
```
If you test the function now and you started taosd with systemctl, you are bound to hit an error:
If you test the function now and you started taosd with systemctl, you are bound to hit an error.
```sql
taos> select ts, nextsunday(ts) from t;
@ -655,7 +652,7 @@ DB error: udf function execution failure (1.123615s)
2023-05-25 11:42:34.541 ERROR [1679419] [PyUdf::PyUdf@217] py udf load module failure. error ModuleNotFoundError: No module named 'moment'
```
This is because the location of “moment” is not in the default library search path of the Python UDF plugin. How do you confirm that? Search taospyudf.log with the following command:
This is because the location of “moment” is not in the default library search path of the Python UDF plugin. How do you confirm that? Search taospyudf.log with the following command.
```shell
grep 'sys path' taospyudf.log | tail -1
@ -668,7 +665,7 @@ grep 'sys path' taospyudf.log | tail -1
```
It turns out that the default third-party library installation path searched by the Python UDF plugin is /lib/python3/dist-packages, whereas moment is installed to /usr/local/lib/python3.8/dist-packages by default. Next we change the default library search path of the Python UDF plugin.
First open a python3 shell and check the current sys.path:
First open a python3 shell and check the current sys.path.
```python
>>> import sys
@ -676,13 +673,13 @@ grep 'sys path' taospyudf.log | tail -1
'/usr/lib/python3.8:/usr/lib/python3.8/lib-dynload:/usr/local/lib/python3.8/dist-packages:/usr/lib/python3/dist-packages'
```
Copy the string output by the script above, then edit /var/taos/taos.cfg and add the following setting:
Copy the string output by the script above, then edit /var/taos/taos.cfg and add the following setting.
```shell
UdfdLdLibPath /usr/lib/python3.8:/usr/lib/python3.8/lib-dynload:/usr/local/lib/python3.8/dist-packages:/usr/lib/python3/dist-packages
```
After saving, run systemctl restart taosd; testing again no longer reports an error:
After saving, run systemctl restart taosd; testing again no longer reports an error.
```sql
taos> select ts, nextsunday(ts) from t;
@ -698,7 +695,7 @@ Query OK, 4 row(s) in set (1.011474s)
#### Example 5
Write an aggregate function that computes the difference between the maximum and the minimum of a column.
The difference between an aggregate function and a scalar function: a scalar function produces multiple outputs for multiple input rows, while an aggregate function produces one output for multiple input rows. The execution of an aggregate function is somewhat like the classic map-reduce framework: the framework splits the data into chunks, each mapper processes a chunk, and the reducer aggregates the mappers' results. The difference is that in TDengine Python UDFs, the reduce function has both the map role and the reduce role. The reduce function takes two arguments: the data it is to process, and the result of another task's reduce invocation. See the example /root/udf/myspread.py:
The difference between an aggregate function and a scalar function: a scalar function produces multiple outputs for multiple input rows, while an aggregate function produces one output for multiple input rows. The execution of an aggregate function is somewhat like the classic map-reduce framework: the framework splits the data into chunks, each mapper processes a chunk, and the reducer aggregates the mappers' results. The difference is that in TDengine Python UDFs, the reduce function has both the map role and the reduce role. The reduce function takes two arguments: the data it is to process, and the result of another task's reduce invocation. See the example /root/udf/myspread.py.
```python
import io
@ -707,26 +704,21 @@ import pickle
LOG_FILE: io.TextIOBase = None
def init():
global LOG_FILE
LOG_FILE = open("/var/log/taos/spread.log", "wt")
log("init function myspead success")
def log(o):
LOG_FILE.write(str(o) + '\n')
def destroy():
log("close log file: spread.log")
LOG_FILE.close()
def start():
return pickle.dumps((-math.inf, math.inf))
def reduce(block, buf):
max_number, min_number = pickle.loads(buf)
log(f"initial max_number={max_number}, min_number={min_number}")
@ -741,26 +733,26 @@ def reduce(block, buf):
min_number = v
return pickle.dumps((max_number, min_number))
def finish(buf):
max_number, min_number = pickle.loads(buf)
return max_number - min_number
```
In this example we not only define an aggregate function but also add a feature for logging execution, explained as follows:
1. The init function is no longer empty; it opens a file for writing the execution log
2. The log function is a logging helper that automatically converts the passed object to a string and appends a newline
3. The destroy function closes the log file when execution ends
4. start returns the initial buffer for storing the aggregate's intermediate result; we initialize the maximum to negative infinity and the minimum to positive infinity
5. reduce processes each data block and aggregates the result
6. The finish function converts the final buffer into the final output
Execute the following SQL statement to create the corresponding UDF:
In this example we not only define an aggregate function but also add a feature for logging execution.
1. The init function opens a file for logging
2. The log function writes the log, automatically converting the passed object to a string and appending a newline
3. The destroy function closes the log file after execution ends
4. The start function returns the initial buffer for storing the aggregate's intermediate result; it initializes the maximum to negative infinity and the minimum to positive infinity
5. The reduce function processes each data block and aggregates the result
6. The finish function converts the buffer into the final output
Execute the following SQL statement to create the corresponding UDF.
```sql
create or replace aggregate function myspread as '/root/udf/myspread.py' outputtype double bufsize 128 language 'Python';
```
This SQL statement has two important differences from the one for creating a scalar function:
This SQL statement has two important differences from the one for creating a scalar function.
1. The aggregate keyword is added
2. The bufsize keyword is added to specify the memory size for storing intermediate results; this value may be larger than what is actually used. In this example the intermediate result is a tuple of two floats, which takes only 32 bytes after serialization, while the declared bufsize is 128. You can print the number of bytes actually used from a Python shell, as sketched below.
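For instance, a quick check in a Python shell (a hedged sketch; the 32-byte figure is the one reported above) shows the serialized size of the intermediate tuple, which must not exceed the declared bufsize:

```python
import math
import pickle

# The intermediate result of myspread: a (max, min) tuple of two floats.
buf = pickle.dumps((-math.inf, math.inf))
print(len(buf))  # about 32 bytes, comfortably below the declared bufsize of 128
```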
@ -785,7 +777,7 @@ taos> select spread(v1) from t;
Query OK, 1 row(s) in set (0.005501s)
```
Finally, check the execution log we wrote ourselves. The log shows that the reduce function was executed 3 times; during execution the max value was updated 4 times, while the min value was updated only once.
Finally, checking the execution log shows that the reduce function was executed 3 times; during execution the max value was updated 4 times, while the min value was updated only once.
```shell
root@slave11 /var/log/taos $ cat spread.log
@ -854,7 +846,7 @@ pycumsum uses numpy to compute the cumulative sum of all data in the input column.
The SQL syntax for creating a scalar function is as follows.
```sql
CREATE OR REPLACE FUNCTION function_name AS library_path OUTPUTTYPE output_type LANGUAGE 'Python';
CREATE [OR REPLACE] FUNCTION function_name AS library_path OUTPUTTYPE output_type LANGUAGE 'Python';
```
The parameters are described as follows.
- or replace: if the function already exists, its attributes are modified.
@ -867,8 +859,9 @@ CREATE OR REPLACE FUNCTION function_name AS library_path OUTPUTTYPE output_type
### Creating an aggregate function
The SQL syntax for creating an aggregate function is as follows.
```sql
CREATE OR REPLACE AGGREGATE FUNCTION function_name library_path OUTPUTTYPE output_type LANGUAGE 'Python';
CREATE [OR REPLACE] AGGREGATE FUNCTION function_name library_path OUTPUTTYPE output_type BUFSIZE buffer_size LANGUAGE 'Python';
```
其中buffer_size 表示中间计算结果的缓冲区大小,单位是字节。其他参数的含义与标量函数相同。
@ -880,7 +873,7 @@ CREATE AGGREGATE FUNCTION l2norm AS "/home/taos/udf_example/libl2norm.so" OUTPUT
### Dropping a UDF
The SQL syntax for dropping a UDF with a specified name is as follows:
The SQL syntax for dropping a UDF with a specified name is as follows.
```sql
DROP FUNCTION function_name;
```

View File

@ -17,6 +17,12 @@ TDengine uses taosKeeper to report the server's CPU, memory, disk space, and bandwid
- TDengine is installed and running normally. This dashboard requires TDengine 3.0.0.0 or later with monitoring reporting enabled; for the specific configuration see [TDengine monitoring configuration](../taosd/#监控相关).
- taosAdapter is installed and running normally. For details see the [taosAdapter reference manual](../taosadapter)
- taosKeeper is installed and running normally. For details see the [taosKeeper reference manual](../taoskeeper)
- The Grafana service is installed and running normally. We recommend using the latest Grafana version; TDinsight supports Grafana 7.5 and later.
:::info
The descriptions below all use Grafana v11.0.0 as the example; other versions may differ in functionality, see the [Grafana website](https://grafana.com/docs/grafana/latest/).
:::
Then record the following information:
@ -24,97 +30,18 @@ TDengine uses taosKeeper to report the server's CPU, memory, disk space, and bandwid
- taosAdapter cluster authentication information; a username and password can be used.
- the name of the database in which taosKeeper records the monitoring metrics.
## Installing and starting Grafana
We recommend using the latest Grafana version; TDinsight supports Grafana 7.5 and later. You can install Grafana on any [supported operating system](https://grafana.com/docs/grafana/latest/installation/requirements/#supported-operating-systems) by following the [official Grafana installation instructions](https://grafana.com/docs/grafana/latest/installation/).
After installation, see [Start Grafana](https://grafana.com/docs/grafana/latest/setup-grafana/start-restart-grafana/) to start the Grafana service.
Once installed, you can open the Grafana URL in a web browser, by default `http://localhost:3000`. The default username/password are both `admin`. Grafana will ask you to change the password after the first login.
:::info
The descriptions below all use Grafana v11.0.0 as the example; other versions may differ in functionality, see the [Grafana website](https://grafana.com/docs/grafana/latest/).
:::
## Installing the TDengine data source plugin
TDinsight supports three installation methods: GUI installation, manual installation, and script installation; GUI installation is generally recommended. For Grafana versions below 8.5, manual and script installation can be used.
<Tabs defaultValue="manual" groupId="deploy">
<TabItem value="gui" label="GUI installation">
With the latest Grafana versions (8.5+), you can [browse and manage plugins](https://grafana.com/docs/grafana/next/administration/plugin-management/#plugin-catalog) inside Grafana. On the **Configurations > Plugins** page of the Grafana admin UI, search for `TDengine` and install it following the prompts.
</TabItem>
<TabItem value="manual" label="Manual installation">
Install the latest TDengine data source plugin from GitHub.
```bash
get_latest_release() {
curl --silent "https://api.github.com/repos/taosdata/grafanaplugin/releases/latest" |
grep '"tag_name":' |
sed -E 's/.*"v([^"]+)".*/\1/'
}
TDENGINE_PLUGIN_VERSION=$(get_latest_release)
sudo grafana-cli \
--pluginUrl https://github.com/taosdata/grafanaplugin/releases/download/v$TDENGINE_PLUGIN_VERSION/tdengine-datasource-$TDENGINE_PLUGIN_VERSION.zip \
plugins install tdengine-datasource
```
:::note
Plugin versions 3.1.6 and earlier require the following setting in the configuration file `/etc/grafana/grafana.ini` to enable unsigned plugins.
```ini
[plugins]
allow_loading_unsigned_plugins = tdengine-datasource
```
:::
</TabItem>
<TabItem value="auto" label="Script installation">
We provide an automated installation script, [TDinsight.sh](https://github.com/taosdata/grafanaplugin/releases/latest/download/TDinsight.sh), so that users can install and configure quickly.
You can download the script with `wget` or another tool:
```bash
wget https://github.com/taosdata/grafanaplugin/releases/latest/download/TDinsight.sh
chmod +x TDinsight.sh
./TDinsight.sh
```
This script automatically downloads the latest [Grafana TDengine data source plugin](https://github.com/taosdata/grafanaplugin/releases/latest) and the [TDinsight dashboard](https://github.com/taosdata/grafanaplugin/blob/master/dashboards/TDinsightV3.json), and converts the configurable command-line options into a [Grafana Provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) configuration file for automated deployment and updates.
1. Assuming you run TDengine and the Grafana service on the same host, run `./TDinsight.sh` and open the Grafana page to see the TDinsight dashboard.
2. Assuming you start the TDengine database on host `tdengine`, with taosAdapter's HTTP listening port at `6041`, user `root1`, and password `pass5ord`, run the script: `./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord`
For detailed usage, see the [TDinsight.sh details](./#附录)
</TabItem>
</Tabs>
## Adding the TDengine data source
## Installing the TDengine data source plugin and configuring the data source
For the steps to install the Grafana TDengine data source plugin and configure the data source, see [Integration with Grafana](../../../third-party/visual/grafana/#安装-grafana-plugin-并配置数据源).
After installation, click “Connections” -> “Data sources“, then select ”tdengine-datasource“ and enter the TDengine settings:
- Host: IP address and port of the REST service in the TDengine cluster, default `http://localhost:6041`
- User: TDengine username.
- Password: TDengine user password.
Click `Save & Test` to test; on success it reports: `TDengine Data source is working`.
## Importing the TDengine V3 dashboard
## Importing the TDinsightV3 dashboard
On the TDengine data source configuration page, click the “Dashboards” tab, then click ”import” to import the ”TDengine for 3.x” dashboard.
After a successful import, open the dashboard and, in the ”Log from“ option at the top left, select the database set in taosKeeper for recording monitoring metrics to see the monitoring results.
## TDengine V3 dashboard details
## TDinsightV3 dashboard details
The TDinsight dashboard aims to present the usage and status of TDengine-related resources, such as dnodes, mnodes, vnodes, and databases.
It is divided into cluster status, DNodes overview, MNode overview, requests, databases, DNode resource usage, and taosAdapter monitoring information. Each part is detailed below.

View File

@ -199,7 +199,7 @@ WebSocket and REST connections use the driver class `com.taosdata.jdbc.rs.RestfulDriver`.
#### URL specification
The JDBC URL specification format for TDengine is:
`jdbc:[TAOS|TAOS-RS]://[host_name]:[port]/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]`
`jdbc:[TAOS|TAOS-RS]://[host_name]:[port]/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}|&batchfetch={batchfetch}]`
Establishing a connection differs slightly between native and REST connections. WebSocket and REST connections use the driver class `com.taosdata.jdbc.rs.RestfulDriver`; native connections use the driver class `com.taosdata.jdbc.TSDBDriver`.

View File

@ -29,8 +29,8 @@ TDengine provides a rich set of application development interfaces. To help users develop applicati
TDengine version updates often add new features; the connector versions listed are the best-matched connector versions.
| **TDengine version** | **Java** | **Python** | **Go** | **C#** | **Node.js** | **Rust** |
| ---------------------- | ------------- | ------------------------------------------- | ------------ | ------------- | --------------- | -------- |
| **3.3.0.0 and later** | 3.3.2.0 and later | taospy 2.7.15 and later, taos-ws-py 0.3.2 and later | 3.5.5 and later | 3.1.3 and later | 3.1.0 and later | current version |
| ---------------------- | ----------- | ------------------------------------------- | ------------ | ------------- | --------------- | -------- |
| **3.3.0.0 and later** | 3.3.0 and later | taospy 2.7.15 and later, taos-ws-py 0.3.2 and later | 3.5.5 and later | 3.1.3 and later | 3.1.0 and later | current version |
| **3.0.0.0 and later** | 3.0.2 and later | current version | 3.0 branch | 3.0.0 | 3.1.0 | current version |
| **2.4.0.14 and later** | 2.0.38 | current version | develop branch | 1.0.2 - 1.0.6 | 2.0.10 - 2.0.12 | current version |
| **2.4.0.4 - 2.4.0.13** | 2.0.37 | current version | develop branch | 1.0.2 - 1.0.6 | 2.0.10 - 2.0.12 | current version |
@ -43,31 +43,36 @@ TDengine version updates often add new features; the connector versions listed
### Using the native interface (taosc)
| **Feature** | **Java** | **Python** | **Go** | **C#** | **Node.js** | **Rust** |
| ------------------- | -------- | ---------- | ------ | ------ | ----------- | -------- |
| **Connection management** | Supported | Supported | Supported | Supported | Not supported | Supported |
| **Regular query** | Supported | Supported | Supported | Supported | Not supported | Supported |
| **Parameter binding** | Supported | Supported | Supported | Supported | Not supported | Supported |
| **Data subscription (TMQ)** | Supported | Supported | Supported | Supported | Not supported | Supported |
| **Schemaless** | Supported | Supported | Supported | Supported | Not supported | Supported |
| **Feature** | **Java** | **Python** | **Go** | **C#** | **Rust** |
| ------------------- | -------- | ---------- | ------ | ------ | -------- |
| **Connection management** | Supported | Supported | Supported | Supported | Supported |
| **Execute SQL** | Supported | Supported | Supported | Supported | Supported |
| **Parameter binding** | Supported | Supported | Supported | Supported | Supported |
| **Data subscription (TMQ)** | Supported | Supported | Supported | Supported | Supported |
| **Schemaless write** | Supported | Supported | Supported | Supported | Supported |
:::info
Because database framework conventions differ across programming languages, this does not mean every C/C++ interface needs a corresponding wrapper.
:::
### Using the HTTP (REST or WebSocket) interface
### Using the HTTP REST interface
| **Feature** | **Java** | **Python** | **Go** |
| ------------ | -------- | ---------- | ------ |
| **Connection management** | Supported | Supported | Supported |
| **Execute SQL** | Supported | Supported | Supported |
### Using the WebSocket interface
| **Feature** | **Java** | **Python** | **Go** | **C#** | **Node.js** | **Rust** |
| ------------------------------ | -------- | ---------- | ------ | ------ | ----------- | -------- |
| ------------------- | -------- | ---------- | ------ | ------ | ----------- | -------- |
| **Connection management** | Supported | Supported | Supported | Supported | Supported | Supported |
| **Regular query** | Supported | Supported | Supported | Supported | Supported | Supported |
| **Execute SQL** | Supported | Supported | Supported | Supported | Supported | Supported |
| **Parameter binding** | Supported | Supported | Supported | Supported | Supported | Supported |
| **Data subscription (TMQ)** | Supported | Supported | Supported | Supported | Supported | Supported |
| **Schemaless** | Supported | Supported | Supported | Supported | Supported | Supported |
| **Bulk fetch (WebSocket-based)** | Supported | Supported | Supported | Supported | Supported | Supported |
| **Schemaless write** | Supported | Supported | Supported | Supported | Supported | Supported |
:::warning
- Whichever language connector is chosen, for TDengine 2.0 and later it is recommended that each thread of a database application establish its own connection, or build a connection pool on a per-thread basis, to prevent the "USE statement" state within a connection from interfering across threads (query and write operations on a connection are nevertheless thread-safe).
:::
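As a hedged Python sketch of the one-connection-per-thread recommendation above (assuming the taospy connector and a default local deployment):

```python
import threading

import taos  # taospy; host and credentials below are assumptions for a default install

_local = threading.local()

def get_conn():
    # One connection per thread: per-connection state such as "USE db" then
    # cannot interfere across threads, while queries and writes on each
    # connection remain thread-safe.
    if not hasattr(_local, "conn"):
        _local.conn = taos.connect(host="localhost", user="root", password="taosdata")
    return _local.conn
```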

View File

@ -7,23 +7,22 @@ toc_max_heading_level: 4
import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";
TDengine integrates quickly with the open-source data visualization system [Grafana](https://www.grafana.com/) to build a data monitoring and alerting system; the whole process requires no code development, and the contents of TDengine data tables can be visualized on dashboards. You can learn more about using the TDengine plugin on [GitHub](https://github.com/taosdata/grafanaplugin/blob/master/README.md).
## Overview
This document describes how to integrate the TDengine data source with the open-source data visualization system [Grafana](https://www.grafana.com/) to visualize data and build a monitoring and alerting system. With the TDengine plugin you can easily show data from TDengine tables on Grafana dashboards, without complex development work.
## Grafana version requirements
TDengine currently supports Grafana 7.5 and later; the latest version is recommended. Download and install the Grafana version matching your system environment.
## Prerequisites
For Grafana to add the TDengine data source properly, the following preparation is needed.
- The Grafana service is deployed and running normally. TDengine currently supports Grafana versions above 7.5. You can download the installation package for your operating system from the Grafana website and install it: [https://grafana.com/grafana/download](https://grafana.com/grafana/download).
:::info
- The Grafana service is deployed and running normally.
**Note**: Make sure the account that starts Grafana has write permission to its installation directory; otherwise plugin installation may fail later.
:::
- The TDengine cluster is deployed and running normally.
- taosAdapter is installed and running normally. For details see the [taosAdapter user manual](../../../reference/components/taosadapter)
Record the following information:
- The TDengine cluster REST API address, e.g. `http://tdengine.local:6041`.
@ -173,14 +172,22 @@ docker run -d \
</TabItem>
</Tabs>
:::info
The descriptions below all use Grafana v11.0.0 as the example; other versions may differ in functionality, see the [Grafana website](https://grafana.com/docs/grafana/latest/).
:::
## Built-in and custom variables
## Dashboard usage guide
This section is organized as follows:
1. The basics, including Grafana's built-in and custom variables and TDengine's specialized syntax support for time-series queries.
2. How to create a dashboard in Grafana with the TDengine data source, then the specialized time-series query syntax and how to display data grouped.
3. Because a configured dashboard periodically queries TDengine to refresh its display, badly written SQL can cause serious performance problems; we give performance optimization suggestions.
4. Finally, taking the TDengine monitoring dashboard TDinsight as an example, we show how to import the dashboards we provide.
### Grafana built-in and custom variables
Grafana's Variable feature is very powerful. Variables can be used in dashboard queries, panel titles, labels, and elsewhere to build more dynamic and interactive dashboards, improving user experience and efficiency.
The main uses and characteristics of variables include:
@ -191,26 +198,30 @@ Grafana's Variable feature is very powerful. Variables can be used in dashboard
- Flexible configuration options: variables offer several options, such as predefined static value lists, dynamically querying values from a data source, and regular-expression filtering, making them flexible and powerful.
Grafana provides built-in variables and custom variables; both can be referenced when writing SQL as `$variableName`, where `variableName` is the variable's name. For other reference syntaxes see [variable syntax](https://grafana.com/docs/grafana/latest/dashboards/variables/variable-syntax/).
### Built-in variables
#### Built-in variables
Grafana has built-in variables such as `from`, `to`, and `interval`, all taken from the Grafana panel. Their meanings are:
- `from`: start time of the query range
- `to`: end time of the query range
- `interval`: window split interval
It is recommended that every query set the start and end time of the query range, which effectively reduces the amount of data the TDengine server must scan. `interval` is the window split size; in Grafana 11 it is computed from the time span and the number of returned points.
Besides the three common variables above, Grafana also provides variables such as `__timezone`, `__org`, and `__user`; for details see [built-in variables](https://grafana.com/docs/grafana/latest/dashboards/variables/add-template-variables/#global-variables).
### Custom variables
#### Custom variables
We can add custom variables to a dashboard. Custom variables are used exactly like built-in ones: referenced in SQL as `$variableName`.
Custom variables support several types; common ones include `Query`, `Constant`, `Interval`, and `Data source`.
A custom variable can reference other custom variables; for example, one variable may represent a region, and another can reference the region's value to query the devices in that region.
#### Adding a query-type variable
##### Adding a query-type variable
In the dashboard settings, select 【Variables】, then click 【New variable】:
1. In the ”Name“ field, enter your variable name; here we set the variable name to `selected_groups`.
2. In the 【Select variable type】 dropdown, select ”Query“.
1. In the “Name” field, enter your variable name; here we set the variable name to `selected_groups`.
2. In the 【Select variable type】 dropdown, select “Query”.
Configure the corresponding options for the chosen variable type. For example, with the “Query” type you need to specify the data source and the query statement used to fetch the variable values. Continuing the smart-meter example, we set the query type, choose the data source, and configure the SQL as `select distinct(groupid) from power.meters where groupid < 3 and ts > $from and ts < $to;`
3. After clicking 【Run Query】 at the bottom, you can inspect the variable values generated from your configuration in the “Preview of values” section.
4. Other settings are not covered here; after finishing the configuration, click the 【Apply】 button at the bottom of the page, then click 【Save dashboard】 at the top right to save.
@ -219,9 +230,10 @@ Grafana has built-in variables such as `from`, `to`, and `interval`, all taken
We can also add another custom variable that references this `selected_groups` variable, for example a query variable named `tbname_max_current` whose SQL is `select tbname from power.meters where groupid = $selected_groups and ts > $from and ts < $to;`
#### Adding an interval-type variable
##### Adding an interval-type variable
We can customize the time-window interval to better match business needs.
1. In the ”Name“ field, enter the variable name `interval`.
1. In the “Name” field, enter the variable name `interval`.
2. In the 【Select variable type】 dropdown, select “Interval”.
3. In 【Interval options】, enter `1s,2s,5s,10s,15s,30s,1m`.
4. Other settings are not covered here; after finishing the configuration, click the 【Apply】 button at the bottom of the page, then click 【Save dashboard】 at the top right to save.
@ -234,7 +246,8 @@ Grafana has built-in variables such as `from`, `to`, and `interval`, all taken
:::
## TDengine time-series query support
### TDengine time-series query support
On top of standard SQL, TDengine provides a series of specialized query syntaxes that meet the needs of time-series business scenarios, bringing great convenience to the development of applications in time-series scenarios.
- The `partition by` clause slices data along a given dimension and then performs a series of computations within each slice; in the vast majority of cases it can replace `group by`.
- The `interval` clause produces windows of equal time periods
@ -243,19 +256,21 @@ On top of standard SQL, TDengine provides a series of specialized query syntaxe
For a detailed introduction to these features, see [Distinguished query](../../../reference/taos-sql/distinguished/).
## Creating a dashboard
### Creating a dashboard
Return to the main screen to create a dashboard and click 【Add Query】 to open the panel query page:
With the basics above, we can configure a dashboard that displays time-series data based on the TDengine data source.
On the Grafana main screen, create a dashboard and click 【Add Query】 to open the panel query page:
![TDengine Database Grafana plugin create dashboard](./create_dashboard1.webp)
As shown above, select the `TDengine` data source in ”Query“, and enter the SQL in the query box below. Continuing the smart-meter example, **simulated data** is used here so the curves look nice.
As shown above, select the `TDengine` data source in “Query”, and enter the SQL in the query box below. Continuing the smart-meter example, **simulated data** is used here so the curves look nice.
#### Displaying time-series data
## Displaying time-series data
Suppose we want to query the average current over a period, with the time window split by `$interval` and missing data in a window interval filled with null.
- ”INPUT SQL“: enter the statement to query (the result set of the SQL should be two columns and multiple rows), here: `select _wstart as ts, avg(current) as current from power.meters where groupid in ($selected_groups) and ts > $from and ts < $to interval($interval) fill(null)`, where from, to, and interval are Grafana built-in variables and selected_groups is a custom variable.
- ”ALIAS BY“: an alias for the current query.
- ”GENERATE SQL“: clicking this button automatically substitutes the variables and generates the final statement.
- “INPUT SQL”: enter the statement to query (the result set of the SQL should be two columns and multiple rows), here: `select _wstart as ts, avg(current) as current from power.meters where groupid in ($selected_groups) and ts > $from and ts < $to interval($interval) fill(null)`, where from, to, and interval are Grafana built-in variables and selected_groups is a custom variable.
- “ALIAS BY”: an alias for the current query.
- “GENERATE SQL”: clicking this button automatically substitutes the variables and generates the final statement.
In the custom variables at the top, if the value 1 is selected for `selected_groups`, the change in the average current of all devices with `groupid` 1 in the `meters` supertable is queried, as shown below:
@ -267,11 +282,12 @@ On top of standard SQL, TDengine provides a series of specialized query syntaxe
:::
## Displaying time-series data grouped
#### Displaying time-series data grouped
Suppose we want to query the average current over a period, displayed grouped by `groupid`. We can change the previous SQL to `select _wstart as ts, groupid, avg(current) as current from power.meters where ts > $from and ts < $to partition by groupid interval($interval) fill(null)`
- ”Group by column(s)“: **half-width** comma-separated `group by` or `partition by` column names. For a `group by` or `partition by` query statement, setting the ”Group by“ column displays multidimensional data. Here, set the ”Group by“ column name to `groupid` to display data grouped by `groupid`.
- ”Group By Format“: the legend formatting format for multidimensional data in `Group by` or `Partition by` scenarios. For the INPUT SQL above, set ”Group By Format“ to `groupid-{{groupid}}`, and the displayed legend name is the formatted group name.
- “Group by column(s)”: **half-width** comma-separated `group by` or `partition by` column names. For a `group by` or `partition by` query statement, setting the “Group by” column displays multidimensional data. Here, set the “Group by” column name to `groupid` to display data grouped by `groupid`.
- “Group By Format”: the legend formatting format for multidimensional data in `Group by` or `Partition by` scenarios. For the INPUT SQL above, set “Group By Format” to `groupid-{{groupid}}`, and the displayed legend name is the formatted group name.
After the settings are complete, the data displayed grouped by `groupid` looks like:
@ -279,12 +295,12 @@ On top of standard SQL, TDengine provides a series of specialized query syntaxe
> For how to build the corresponding monitoring interfaces with Grafana, and for more information on using Grafana, see the official Grafana [documentation](https://grafana.com/docs/).
## Performance suggestions
### Performance optimization suggestions
- **Add a time range to every query**: in a time-series database, queries without a time range scan the whole table and perform poorly. A common SQL form is `select column_name from db.table where ts > $from and ts < $to;`
- For latest-state queries, we generally recommend **enabling caching when creating the database** (set `CACHEMODEL` to last_row or both). A common SQL form is `select last(column_name) from db.table where ts > $from and ts < $to;`
## Importing a dashboard
### Importing a dashboard
On the data source configuration page, you can import the TDinsight dashboard for this data source as a monitoring visualization tool for the TDengine cluster. If the TDengine server is version 3.0, choose `TDinsight for 3.x` to import. Note that TDinsight for 3.x requires taoskeeper to be running and configured.
@ -299,30 +315,36 @@ On the data source configuration page, you can import the TDinsight dashboard f
- [15167](https://grafana.com/grafana/dashboards/15167) TDinsight
- [16388](https://grafana.com/grafana/dashboards/16388) Display of node information collected by Telegraf
## Alert configuration overview
### Alert configuration workflow
## Alert configuration
The TDengine Grafana plugin supports alerts. Configuring alerts takes the following steps:
1. Configure contact points (”Contact points“): set up notification channels, including DingDing, Email, Slack, WebHook, Prometheus Alertmanager, etc.
2. Configure notification policies (”Notification policies“): configure the routing of alerts to channels, as well as the timing and repeat frequency of notifications
3. Configure ”Alert rules“: configure the detailed alert rules
1. Configure contact points (“Contact points”): set up notification channels, including DingDing, Email, Slack, WebHook, Prometheus Alertmanager, etc.
2. Configure notification policies (“Notification policies”): configure the routing of alerts to channels, as well as the timing and repeat frequency of notifications
3. Configure alert rules (“Alert rules”): configure the detailed alert rules
3.1 Configure the alert name
3.2 Configure the query alert trigger condition
3.2 Configure the query and alert trigger condition
3.3 Configure the rule evaluation strategy
3.4 Configure labels and the alert channel
3.5 Configure the notification copy
### Alert configuration UI
The Grafana 11 alert UI has six tabs: ”Alert rules“, ”Contact points“, ”Notification policies“, ”Silences“, ”Groups“, and ”Settings“.
- ”Alert rules“: the list of alert rules, for displaying and configuring alert rules
- ”Contact points“: notification channels, including DingDing, Email, Slack, WebHook, Prometheus Alertmanager, etc.
- ”Notification policies“: routing of alerts to channels, plus the timing and repeat frequency of notifications
- ”Silences“: configure alert silence periods
- ”Groups“: alert groups; triggered alerts are displayed here grouped
- ”Settings“: modify the alert configuration via JSON
### Alert configuration UI overview
The Grafana 11 alert UI has six tabs: “Alert rules”, “Contact points”, “Notification policies”, “Silences”, “Groups”, and “Settings”.
- “Alert rules”: the list of alert rules, for displaying and configuring alert rules
- “Contact points”: notification channels, including DingDing, Email, Slack, WebHook, Prometheus Alertmanager, etc.
- “Notification policies”: routing of alerts to channels, plus the timing and repeat frequency of notifications
- “Silences”: configure alert silence periods
- “Groups”: alert groups; triggered alerts are displayed here grouped
- “Settings”: modify the alert configuration via JSON
### Configuring contact points
This section configures contact points using email and Feishu as examples.
#### Configuring an email contact point
Add the SMTP/Emailing and Alerting modules to the Grafana service configuration file. (Using Linux as an example, the configuration file is usually located at `/etc/grafana/grafana.ini`.)
## Configuring an email contact point
### Modifying the Grafana server configuration file
Add the SMTP/Emailing and Alerting modules to the Grafana service configuration file; using Linux as an example, the configuration file is usually located at `/etc/grafana/grafana.ini`.
Add the following to the configuration file:
```ini
@ -336,106 +358,110 @@ skip_verify = true
from_address = sender@foxmail.com
```
Then just restart the Grafana service; using Linux as an example, run `systemctl restart grafana-server.service`
Then restart the Grafana service (using Linux as an example, run `systemctl restart grafana-server.service`) to complete the addition
### Creating a new contact point in the Grafana UI
In the Grafana UI, go to ”Home“ -> ”Alerting“ -> ”Contact points“ and create a new contact point
”Name“: Email Contact Point
”Integration“: choose the contact type, here Email; fill in the receiving email address, and save the contact point when done
In the Grafana UI, go to “Home” -> “Alerting” -> “Contact points” and create a new contact point
“Name”: Email Contact Point
“Integration”: choose the contact type, here Email; fill in the receiving email address, and save the contact point when done
![TDengine Database Grafana plugin alert email](./alert-email.webp)
## Configuring a Feishu contact point
#### Configuring a Feishu contact point
### Configuring the Feishu robot
1. ”Feishu Workplace“ -> ”Get Apps“ -> ”search for Feishu Robot Assistant“ -> ”New command“
Configure the Feishu robot following these steps:
1. “Feishu Workplace” -> “Get Apps” -> “search for Feishu Robot Assistant” -> “New command”
2. Choose the trigger: Grafana
3. Choose the action: send a message via the official robot; fill in the recipient and the message content
![TDengine Database Grafana plugin feishu robot](./alert-feishu1.webp)
### Configuring the Feishu contact point in Grafana
In the Grafana UI, go to ”Home“ -> ”Alerting“ -> ”Contact points“ and create a new contact point
”Name“: Feishu Contact Point
”Integration“: choose the contact type, here Webhook, and fill in the URL (the Grafana trigger Webhook address in the Feishu Robot Assistant); save the contact point when done
In the Grafana UI, go to “Home” -> “Alerting” -> “Contact points” and create a new contact point
“Name”: Feishu Contact Point
“Integration”: choose the contact type, here Webhook, and fill in the URL (the Grafana trigger Webhook address in the Feishu Robot Assistant); save the contact point when done
![TDengine Database Grafana plugin feishu contact point](./alert-feishu2.webp)
## Notification policies
### Configuring alert notification policies
After the contact points are configured, you can see there is already a Default Policy
![TDengine Database Grafana plugin Notification default policy](./alert-notification1.webp)
Click ”...“ -> ”Edit“ on the right, then edit the default notification policy; a configuration window pops up:
Click “...” -> ”Edit” on the right, then edit the default notification policy; a configuration window pops up:
![TDengine Database Grafana plugin Notification](./alert-notification2.webp)
Then configure the following parameters:
- ”Group wait“: the waiting time before the first alert is sent.
- ”Group interval“: after the first alert is sent, the waiting time before the next batch of new alerts is sent for the group.
- ”Repeat interval“: the waiting time before an alert is re-sent after it has been sent successfully.
- “Group wait”: the waiting time before the first alert is sent.
- “Group interval”: after the first alert is sent, the waiting time before the next batch of new alerts is sent for the group.
- “Repeat interval”: the waiting time before an alert is re-sent after it has been sent successfully.
## Configuring alert rules
### Configuring alert rules
### Configuring the query and alert trigger condition
Taking a smart-meter alert as an example, configuring an alert rule mainly covers the alert name, the query and alert trigger condition, the rule evaluation strategy, labels and the alert channel, and the notification copy.
In the panel where the alert is to be configured, select ”Edit“ -> ”Alert“ -> ”New alert rule“.
#### Configuring the alert name
In the panel where the alert is to be configured, select “Edit” -> “Alert” -> “New alert rule”.
“Enter alert rule name“: using the smart-meter example, enter `power meters alert`
#### Configuring the query and alert trigger condition
Configure the alert rule in “Define query and alert condition”.
1. Choose the data source: `TDengine Datasource`
2. Query statement:
1. ”Enter alert rule name“: using the smart-meter example, enter `power meters alert`
2. ”Define query and alert condition“
2.1 Choose the data source: `TDengine Datasource`
2.2 Query statement:
```sql
select _wstart as ts, groupid, avg(current) as current from power.meters where ts > $from and ts < $to partition by groupid interval($interval) fill(null)
```
2.3 Set ”Expression“: `Threshold is above 100`
2.4 Click 【Set as alert condition】
2.5 ”Preview“: view the result of the configured rule
3. Set “Expression”: `Threshold is above 100`
4. Click 【Set as alert condition】
5. “Preview”: view the result of the configured rule
After the settings are complete, you can see the result shown below:
![TDengine Database Grafana plugin Alert Rules](./alert-rules1.webp)
### Configuring expressions and computation rules
Grafana's ”Expression“ supports various operations and computations on data; the types are:
1. ”Reduce“: aggregates the values of a time series within the selected time range into a single value
1.1 ”Function“ sets the aggregation method; Min, Max, Last, Mean, Sum, and Count are supported.
1.2 ”Mode“ supports the following three:
- ”Strict“: if no data can be queried, the data is set to NaN.
- ”Drop Non-numeric Value“: drop illegal data from the result.
- ”Replace Non-numeric Value“: replace illegal data with a fixed value.
2. ”Threshold“: checks whether the time-series data meets the threshold condition; returns 0 when the condition is false and 1 when it is true. The following methods are supported:
Grafana's “Expression” supports various operations and computations on data; the types are:
1. “Reduce”: aggregates the values of a time series within the selected time range into a single value
1.1 “Function” sets the aggregation method; Min, Max, Last, Mean, Sum, and Count are supported.
1.2 “Mode” supports the following three:
- “Strict”: if no data can be queried, the data is set to NaN.
- “Drop Non-numeric Value”: drop illegal data from the result.
- “Replace Non-numeric Value”: replace illegal data with a fixed value.
2. “Threshold”: checks whether the time-series data meets the threshold condition; returns 0 when the condition is false and 1 when it is true. The following methods are supported:
- Is above (x > y)
- Is below (x < y)
- Is within range (x > y1 AND x < y2)
- Is outside range (x < y1 AND x > y2)
3. ”Math“: performs mathematical operations on the time-series data.
4. ”Resample“: changes the timestamps in each time series to have a consistent interval, so that mathematical operations can be performed between them.
5. ”Classic condition (legacy)“: multiple logical conditions can be configured to determine whether the alert fires.
3. “Math”: performs mathematical operations on the time-series data.
4. “Resample”: changes the timestamps in each time series to have a consistent interval, so that mathematical operations can be performed between them.
5. “Classic condition (legacy)”: multiple logical conditions can be configured to determine whether the alert fires.
As the screenshot in the previous section shows, here we set the alert to fire when the maximum exceeds 100.
### Configuring the evaluation strategy
#### Configuring the rule evaluation strategy
![TDengine Database Grafana plugin Alert Evaluation Behavior](./alert-evaluation.webp)
Complete the following settings:
- ”Folder“: set the folder the alert rule belongs to.
- ”Evaluation group“: set the evaluation group of the alert rule. ”Evaluation group“ can select an existing group or create a new one; a new group lets you set the group name and the evaluation interval.
- ”Pending period“: after the alert rule's threshold is breached, how long the abnormal value must persist before the alert fires; setting this sensibly avoids false positives.
- “Folder”: set the folder the alert rule belongs to.
- “Evaluation group”: set the evaluation group of the alert rule. “Evaluation group” can select an existing group or create a new one; a new group lets you set the group name and the evaluation interval.
- “Pending period”: after the alert rule's threshold is breached, how long the abnormal value must persist before the alert fires; setting this sensibly avoids false positives.
#### Configuring labels and the alert channel
### Configuring labels and the alert channel
![TDengine Database Grafana plugin Alert Labels and Notifications](./alert-labels.webp)
Complete the following settings:
- ”Labels“: add labels to the rule for searching, silencing, or routing to a notification policy.
- ”Contact point“: choose the contact point through which the notification is sent when the alert fires.
- “Labels”: add labels to the rule for searching, silencing, or routing to a notification policy.
- “Contact point”: choose the contact point through which the notification is sent when the alert fires.
### Configuring the notification copy
#### Configuring the notification copy
![TDengine Database Grafana plugin Alert Labels and Notifications](./alert-annotations.webp)
After ”Summary“ and ”Description“ are set, you will receive an alert notification when the alert fires.
After “Summary” and “Description” are set, you will receive an alert notification when the alert fires.

View File

@ -18,7 +18,7 @@
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>3.0.0</version>
<version>3.3.0</version>
</dependency>
<!-- druid -->
<dependency>

View File

@ -22,7 +22,7 @@ public static void main(String[] args) throws Exception {
dataSource.setMinIdle(10);
dataSource.setMaxActive(10);
dataSource.setMaxWait(30000);
dataSource.setValidationQuery("SELECT SERVER_STATUS()");
dataSource.setValidationQuery("SELECT SERVER_VERSION()");
Connection connection = dataSource.getConnection(); // get connection
Statement statement = connection.createStatement(); // get statement

View File

@ -21,7 +21,7 @@ public static void main(String[] args) throws SQLException {
config.setConnectionTimeout(30000); //maximum wait milliseconds for get connection from pool
config.setMaxLifetime(0); // maximum life time for each connection
config.setIdleTimeout(0); // max idle time for recycle idle connection
config.setConnectionTestQuery("SELECT SERVER_STATUS()"); //validation query
config.setConnectionTestQuery("SELECT SERVER_VERSION()"); //validation query
HikariDataSource ds = new HikariDataSource(config); //create datasource

View File

@ -20,7 +20,7 @@ public class DruidPoolBuilder {
dataSource.setMinIdle(poolSize);
dataSource.setMaxActive(poolSize);
dataSource.setMaxWait(30000);
dataSource.setValidationQuery("select server_status()");
dataSource.setValidationQuery("select SERVER_VERSION()");
return dataSource;
}

View File

@ -20,7 +20,7 @@ public class HikariCpBuilder {
config.setConnectionTimeout(30000); //maximum wait milliseconds for get connection from pool
config.setMaxLifetime(0); // maximum life time for each connection
config.setIdleTimeout(0); // max idle time for recycle idle connection
config.setConnectionTestQuery("select server_status()"); //validation query
config.setConnectionTestQuery("select SERVER_VERSION()"); //validation query
HikariDataSource ds = new HikariDataSource(config);
return ds;

View File

@ -22,6 +22,6 @@
<!-- Interval in milliseconds at which Proxool checks the state of each connection; idle connections are reclaimed immediately and timed-out ones are destroyed. Default: 30 seconds -->
<house-keeping-sleep-time>30000</house-keeping-sleep-time>
<!-- Test statement used to keep connections alive -->
<house-keeping-test-sql>select server_status()</house-keeping-test-sql>
<house-keeping-test-sql>select server_version()</house-keeping-test-sql>
</proxool>
</something-else-entirely>

View File

@ -22,7 +22,7 @@ spring.datasource.druid.max-active=5
# max wait time for get connection, ms
spring.datasource.druid.max-wait=60000
spring.datasource.druid.validation-query=select server_status();
spring.datasource.druid.validation-query=select SERVER_VERSION();
spring.datasource.druid.validation-query-timeout=5000
spring.datasource.druid.test-on-borrow=false
spring.datasource.druid.test-on-return=false

View File

@ -12,7 +12,7 @@ spring.datasource.druid.initial-size=5
spring.datasource.druid.min-idle=5
spring.datasource.druid.max-active=5
spring.datasource.druid.max-wait=30000
spring.datasource.druid.validation-query=select server_status();
spring.datasource.druid.validation-query=select SERVER_VERSION();
spring.aop.auto=true
spring.aop.proxy-target-class=true
#mybatis

View File

@ -13,8 +13,8 @@ extern "C" {
void stopRsync();
int32_t startRsync();
int32_t uploadByRsync(const char* id, const char* path);
int32_t downloadRsync(const char* id, const char* path);
int32_t uploadByRsync(const char* id, const char* path, int64_t checkpointId);
int32_t downloadByRsync(const char* id, const char* path, int64_t checkpointId);
int32_t deleteRsync(const char* id);
#ifdef __cplusplus

View File

@ -131,6 +131,14 @@ static FORCE_INLINE char *udfColDataGetData(const SUdfColumn *pColumn, int32_t r
}
}
static FORCE_INLINE int32_t udfColDataGetDataLen(const SUdfColumn *pColumn, int32_t row) {
if (IS_VAR_DATA_TYPE(pColumn->colMeta.type)) {
return *(uint16_t*)(pColumn->colData.varLenCol.payload + pColumn->colData.varLenCol.varOffsets[row]);
} else {
return pColumn->colMeta.bytes;
}
}
static FORCE_INLINE bool udfColDataIsNull(const SUdfColumn *pColumn, int32_t row) {
if (IS_VAR_DATA_TYPE(pColumn->colMeta.type)) {
if (pColumn->colMeta.type == TSDB_DATA_TYPE_JSON) {

View File

@ -636,7 +636,7 @@ bool nodesExprsHasColumn(SNodeList* pList);
void* nodesGetValueFromNode(SValueNode* pNode);
int32_t nodesSetValueNodeValue(SValueNode* pNode, void* value);
char* nodesGetStrValueFromNode(SValueNode* pNode);
void nodesValueNodeToVariant(const SValueNode* pNode, SVariant* pVal);
int32_t nodesValueNodeToVariant(const SValueNode* pNode, SVariant* pVal);
int32_t nodesMakeValueNodeFromString(char* literal, SValueNode** ppValNode);
int32_t nodesMakeValueNodeFromBool(bool b, SValueNode** ppValNode);
int32_t nodesMakeValueNodeFromInt32(int32_t value, SNode** ppNode);

View File

@ -164,6 +164,7 @@ int32_t tDecodeStreamTaskCheckpointReq(SDecoder* pDecoder, SStreamTaskCheckpoint
typedef struct SStreamHbMsg {
int32_t vgId;
int32_t msgId;
int64_t ts;
int32_t numOfTasks;
SArray* pTaskStatus; // SArray<STaskStatusEntry>
SArray* pUpdateNodes; // SArray<int32_t>, needs update the epsets in stream tasks for those nodes.

View File

@ -529,6 +529,7 @@ int32_t taosGetErrSize();
#define TSDB_CODE_VND_META_DATA_UNSAFE_DELETE TAOS_DEF_ERROR_CODE(0, 0x0535)
#define TSDB_CODE_VND_COLUMN_COMPRESS_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0536)
#define TSDB_CODE_VND_ARB_NOT_SYNCED TAOS_DEF_ERROR_CODE(0, 0x0537) // internal
#define TSDB_CODE_VND_WRITE_DISABLED TAOS_DEF_ERROR_CODE(0, 0x0538) // internal
// tsdb
#define TSDB_CODE_TDB_INVALID_TABLE_ID TAOS_DEF_ERROR_CODE(0, 0x0600)

View File

@ -80,6 +80,11 @@ static FORCE_INLINE void taosEncryptPass_c(uint8_t *inBuf, size_t len, char *tar
(void)memcpy(target, buf, TSDB_PASSWORD_LEN);
}
static FORCE_INLINE int32_t taosHashBinary(char* pBuf, int32_t len) {
uint64_t hashVal = MurmurHash3_64(pBuf, len);
return sprintf(pBuf, "%" PRIu64, hashVal);
}
static FORCE_INLINE int32_t taosCreateMD5Hash(char *pBuf, int32_t len) {
T_MD5_CTX ctx;
tMD5Init(&ctx);
@ -87,11 +92,10 @@ static FORCE_INLINE int32_t taosCreateMD5Hash(char *pBuf, int32_t len) {
tMD5Final(&ctx);
char *p = pBuf;
int32_t resLen = 0;
for (uint8_t i = 0; i < tListLen(ctx.digest); ++i) {
resLen += snprintf(p, 3, "%02x", ctx.digest[i]);
p += 2;
}
return resLen;
return sprintf(pBuf, "%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x", ctx.digest[0], ctx.digest[1],
ctx.digest[2], ctx.digest[3], ctx.digest[4], ctx.digest[5], ctx.digest[6], ctx.digest[7],
ctx.digest[8], ctx.digest[9], ctx.digest[10], ctx.digest[11], ctx.digest[12], ctx.digest[13],
ctx.digest[14], ctx.digest[15]);
}
static FORCE_INLINE int32_t taosGetTbHashVal(const char *tbname, int32_t tblen, int32_t method, int32_t prefix,
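For reference, a hedged Python one-liner reproduces the output format of the rewritten `taosCreateMD5Hash` above (the 16 digest bytes printed as 32 lowercase hex characters); the input is hypothetical:

```python
import hashlib

digest = hashlib.md5(b"example").digest()
# Same string the single sprintf in the C code builds: two hex chars per byte.
print("".join(f"{b:02x}" for b in digest))  # equals hashlib.md5(b"example").hexdigest()
```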

View File

@ -2141,6 +2141,7 @@ static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout) {
taosWUnLockLatch(&tmq->lock);
}
setVgIdle(tmq, pollRspWrapper->topicName, pollRspWrapper->vgId);
tmqFreeRspWrapper(pRspWrapper);
taosFreeQitem(pRspWrapper);
} else if (pRspWrapper->tmqRspType == TMQ_MSG_TYPE__POLL_DATA_RSP) {
SMqPollRspWrapper* pollRspWrapper = (SMqPollRspWrapper*)pRspWrapper;
@ -2844,6 +2845,7 @@ int32_t askEpCb(void* param, SDataBuf* pMsg, int32_t code) {
pWrapper->epoch = head->epoch;
(void)memcpy(&pWrapper->msg, pMsg->pData, sizeof(SMqRspHead));
if (tDecodeSMqAskEpRsp(POINTER_SHIFT(pMsg->pData, sizeof(SMqRspHead)), &pWrapper->msg) == NULL){
tmqFreeRspWrapper((SMqRspWrapper*)pWrapper);
taosFreeQitem(pWrapper);
}else{
(void)taosWriteQitem(tmq->mqueue, pWrapper);
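Both tmq hunks plug the same leak class: a response wrapper that will not be consumed must release its owned payloads before the queue item is freed. The shape of the fix, restated with comments (same calls as the hunk above):
if (tDecodeSMqAskEpRsp(POINTER_SHIFT(pMsg->pData, sizeof(SMqRspHead)), &pWrapper->msg) == NULL) {
  tmqFreeRspWrapper((SMqRspWrapper *)pWrapper);  // release inner payloads first
  taosFreeQitem(pWrapper);                       // then the queue item itself
} else {
  (void)taosWriteQitem(tmq->mqueue, pWrapper);   // on success, the queue takes ownership
}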

View File

@ -163,7 +163,7 @@ int32_t startRsync() {
return code;
}
int32_t uploadByRsync(const char* id, const char* path) {
int32_t uploadByRsync(const char* id, const char* path, int64_t checkpointId) {
int64_t st = taosGetTimestampMs();
char command[PATH_MAX] = {0};
@ -203,12 +203,12 @@ int32_t uploadByRsync(const char* id, const char* path) {
// prepare the data directory
int32_t code = execCommand(command);
if (code != 0) {
uError("[rsync] s-task:%s prepare checkpoint data in %s to %s failed, code:%d," ERRNO_ERR_FORMAT, id, path,
uError("[rsync] s-task:%s prepare checkpoint dir in %s to %s failed, code:%d," ERRNO_ERR_FORMAT, id, path,
tsSnodeAddress, code, ERRNO_ERR_DATA);
code = TAOS_SYSTEM_ERROR(errno);
} else {
int64_t el = (taosGetTimestampMs() - st);
uDebug("[rsync] s-task:%s prepare checkpoint data in:%s to %s successfully, elapsed time:%" PRId64 "ms", id, path,
uDebug("[rsync] s-task:%s prepare checkpoint dir in:%s to %s successfully, elapsed time:%" PRId64 "ms", id, path,
tsSnodeAddress, el);
}
@ -222,7 +222,7 @@ int32_t uploadByRsync(const char* id, const char* path) {
#endif
snprintf(command, PATH_MAX,
"rsync -av --debug=all --log-file=%s/rsynclog --delete --timeout=10 --bwlimit=100000 %s/ "
"rsync://%s/checkpoint/%s/data/",
"rsync://%s/checkpoint/%s/%" PRId64 "/",
tsLogDir,
#ifdef WINDOWS
pathTransform
@ -230,11 +230,11 @@ int32_t uploadByRsync(const char* id, const char* path) {
path
#endif
,
tsSnodeAddress, id);
tsSnodeAddress, id, checkpointId);
} else {
snprintf(command, PATH_MAX,
"rsync -av --debug=all --log-file=%s/rsynclog --delete --timeout=10 --bwlimit=100000 %s "
"rsync://%s/checkpoint/%s/data/",
"rsync://%s/checkpoint/%s/%" PRId64 "/",
tsLogDir,
#ifdef WINDOWS
pathTransform
@ -242,7 +242,7 @@ int32_t uploadByRsync(const char* id, const char* path) {
path
#endif
,
tsSnodeAddress, id);
tsSnodeAddress, id, checkpointId);
}
code = execCommand(command);
@ -260,7 +260,7 @@ int32_t uploadByRsync(const char* id, const char* path) {
}
// abort the retry loop if the process is quitting
int32_t downloadRsync(const char* id, const char* path) {
int32_t downloadByRsync(const char* id, const char* path, int64_t checkpointId) {
int64_t st = taosGetTimestampMs();
int32_t MAX_RETRY = 10;
int32_t times = 0;
@ -272,6 +272,38 @@ int32_t downloadRsync(const char* id, const char* path) {
#endif
char command[PATH_MAX] = {0};
snprintf(
command, PATH_MAX,
"rsync -av --debug=all --log-file=%s/rsynclog --timeout=10 --bwlimit=100000 rsync://%s/checkpoint/%s/%" PRId64
"/ %s",
tsLogDir, tsSnodeAddress, id, checkpointId,
#ifdef WINDOWS
pathTransform
#else
path
#endif
);
uDebug("[rsync] %s start to sync data from remote to:%s, cmd:%s", id, path, command);
code = execCommand(command);
if (code != TSDB_CODE_SUCCESS) {
uError("[rsync] %s download checkpointId:%" PRId64
" data:%s failed, retry after 1sec, times:%d, code:%d," ERRNO_ERR_FORMAT,
id, checkpointId, path, times, code, ERRNO_ERR_DATA);
} else {
int32_t el = taosGetTimestampMs() - st;
uDebug("[rsync] %s download checkpointId:%" PRId64 " data:%s successfully, elapsed time:%dms", id, checkpointId,
path, el);
}
if (code != TSDB_CODE_SUCCESS) { // if failed, try to load it from data directory
#ifdef WINDOWS
memset(pathTransform, 0, PATH_MAX);
changeDirFromWindowsToLinux(path, pathTransform);
#endif
memset(command, 0, PATH_MAX);
snprintf(
command, PATH_MAX,
"rsync -av --debug=all --log-file=%s/rsynclog --timeout=10 --bwlimit=100000 rsync://%s/checkpoint/%s/data/ %s",
@ -283,19 +315,18 @@ int32_t downloadRsync(const char* id, const char* path) {
#endif
);
uDebug("[rsync] %s start to sync data from remote to:%s, %s", id, path, command);
uDebug("[rsync] %s start to sync data from remote data dir to:%s, cmd:%s", id, path, command);
while (times++ < MAX_RETRY) {
code = execCommand(command);
if (code != TSDB_CODE_SUCCESS) {
uError("[rsync] %s download checkpoint data:%s failed, retry after 1sec, times:%d, code:%d," ERRNO_ERR_FORMAT, id,
path, times, code, ERRNO_ERR_DATA);
taosSsleep(1);
code = TAOS_SYSTEM_ERROR(errno);
uError("[rsync] %s download checkpointId:%" PRId64
" data:%s failed, retry after 1sec, times:%d, code:%d," ERRNO_ERR_FORMAT,
id, checkpointId, path, times, code, ERRNO_ERR_DATA);
code = TAOS_SYSTEM_ERROR(code);
} else {
int32_t el = taosGetTimestampMs() - st;
uDebug("[rsync] %s download checkpoint data:%s successfully, elapsed time:%dms", id, path, el);
break;
uDebug("[rsync] %s download checkpointId:%" PRId64 " data:%s successfully, elapsed time:%dms", id, checkpointId,
path, el);
}
}
return code;
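The download path now tries rsync://<snode>/checkpoint/<id>/<checkpointId>/ first and falls back to the legacy .../data/ directory, retrying with a one-second pause. A simplified sketch of the retry skeleton, with command preparation elided (the hunk itself interleaves logging and errno conversion):
int32_t times = 0, code = TSDB_CODE_SUCCESS;
while (times++ < MAX_RETRY) {
  code = execCommand(command);            // run the prepared rsync command line
  if (code == TSDB_CODE_SUCCESS) break;   // download finished
  taosSsleep(1);                          // back off one second, then retry
}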

View File

@ -10140,6 +10140,7 @@ void *tDecodeMqSubTopicEp(void *buf, SMqSubTopicEp *pTopicEp) {
buf = tDecodeSMqSubVgEp(buf, &vgEp);
if (taosArrayPush(pTopicEp->vgs, &vgEp) == NULL) {
taosArrayDestroy(pTopicEp->vgs);
pTopicEp->vgs = NULL;
return NULL;
}
}

View File

@ -815,6 +815,12 @@ int64_t taosTimeTruncate(int64_t ts, const SInterval* pInterval) {
if (IS_CALENDAR_TIME_DURATION(pInterval->intervalUnit)) {
int64_t news = (ts / pInterval->sliding) * pInterval->sliding;
ASSERT(news <= ts);
if (pInterval->slidingUnit == 'd' || pInterval->slidingUnit == 'w') {
#if defined(WINDOWS) && _MSC_VER >= 1900
int64_t timezone = _timezone;
#endif
news += (int64_t)(timezone * TSDB_TICK_PER_SECOND(precision));
}
if (news <= ts) {
int64_t prev = news;
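Why the adjustment is needed: ts / sliding * sliding aligns to 00:00 UTC, while day and week windows should start at local midnight. A worked example in millisecond precision for UTC+8, where the C library's timezone global is -28800 seconds (a sketch, not the exact TDengine macros):
int64_t sliding = 86400000LL;                  // one day, ms precision
int64_t news    = (ts / sliding) * sliding;    // aligned to 00:00 UTC
news += (int64_t)timezone * 1000;              // -28800 * 1000 ms: shifts to 00:00 local time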

View File

@ -293,7 +293,7 @@ int32_t dmStartNotifyThread(SDnodeMgmt *pMgmt) {
(void)taosThreadAttrSetDetachState(&thAttr, PTHREAD_CREATE_JOINABLE);
if (taosThreadCreate(&pMgmt->notifyThread, &thAttr, dmNotifyThreadFp, pMgmt) != 0) {
code = TAOS_SYSTEM_ERROR(errno);
dError("failed to create notify thread since %s", strerror(code));
dError("failed to create notify thread since %s", tstrerror(code));
return code;
}

View File

@ -126,7 +126,7 @@ static void vmProcessStreamQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
dGTrace("vgId:%d, msg:%p get from vnode-stream queue", pVnode->vgId, pMsg);
int32_t code = vnodeProcessStreamMsg(pVnode->pImpl, pMsg, pInfo);
if (code != 0) {
if (terrno != 0) code = terrno;
terrno = code;
dGError("vgId:%d, msg:%p failed to process stream msg %s since %s", pVnode->vgId, pMsg, TMSG_INFO(pMsg->msgType),
tstrerror(code));
vmSendRsp(pMsg, code);

View File

@ -109,7 +109,7 @@ static void dmProcessRpcMsg(SDnode *pDnode, SRpcMsg *pRpc, SEpSet *pEpSet) {
int32_t svrVer = 0;
(void)taosVersionStrToInt(version, &svrVer);
if ((code = taosCheckVersionCompatible(pRpc->info.cliVer, svrVer, 3)) != 0) {
dError("Version not compatible, cli ver: %d, svr ver: %d", pRpc->info.cliVer, svrVer);
dError("Version not compatible, cli ver: %d, svr ver: %d, ip:0x%x", pRpc->info.cliVer, svrVer, pRpc->info.conn.clientIp);
goto _OVER;
}
@ -387,7 +387,7 @@ int32_t dmInitClient(SDnode *pDnode) {
rpcInit.supportBatch = 1;
rpcInit.batchSize = 8 * 1024;
rpcInit.timeToGetConn = tsTimeToGetAvailableConn;
rpcInit.notWaitAvaliableConn = 1;
rpcInit.notWaitAvaliableConn = 0;
(void)taosVersionStrToInt(version, &(rpcInit.compatibilityVer));

View File

@ -57,6 +57,12 @@ typedef struct SStreamTaskResetMsg {
int32_t transId;
} SStreamTaskResetMsg;
typedef struct SChkptReportInfo {
SArray* pTaskList;
int64_t reportChkpt;
int64_t streamId;
} SChkptReportInfo;
typedef struct SStreamExecInfo {
bool initTaskList;
SArray *pNodeList;
@ -66,7 +72,7 @@ typedef struct SStreamExecInfo {
SArray *pTaskList;
TdThreadMutex lock;
SHashObj *pTransferStateStreams;
SHashObj *pChkptStreams;
SHashObj *pChkptStreams; // used to update the checkpoint info once all tasks have sent their checkpoint-report msgs
SHashObj *pStreamConsensus;
SArray *pKilledChkptTrans; // SArray<SStreamTaskResetMsg>
} SStreamExecInfo;
@ -79,6 +85,8 @@ typedef struct SNodeEntry {
bool stageUpdated; // the stage has been updated due to the leader/follower change or node reboot.
SEpSet epset; // compare the epset to identify the vgroup tranferring between different dnodes.
int64_t hbTimestamp; // second
int32_t lastHbMsgId; // latest hb msgId
int64_t lastHbMsgTs;
} SNodeEntry;
typedef struct {
@ -151,6 +159,8 @@ int32_t mndGetConsensusInfo(SHashObj *pHash, int64_t streamId, int32_t numOfTask
void mndAddConsensusTasks(SCheckpointConsensusInfo *pInfo, const SRestoreCheckpointInfo *pRestoreInfo);
void mndClearConsensusRspEntry(SCheckpointConsensusInfo *pInfo);
int64_t mndClearConsensusCheckpointId(SHashObj* pHash, int64_t streamId);
int64_t mndClearChkptReportInfo(SHashObj* pHash, int64_t streamId);
int32_t mndResetChkptReportInfo(SHashObj* pHash, int64_t streamId);
int32_t setStreamAttrInResBlock(SStreamObj *pStream, SSDataBlock *pBlock, int32_t numOfRows);
int32_t setTaskAttrInResBlock(SStreamObj *pStream, SStreamTask *pTask, SSDataBlock *pBlock, int32_t numOfRows);

View File

@ -142,7 +142,7 @@ static int32_t mndProcessConsumerClearMsg(SRpcMsg *pMsg) {
mndConsumerStatusName(pConsumer->status));
MND_TMQ_RETURN_CHECK(tNewSMqConsumerObj(pConsumer->consumerId, pConsumer->cgroup, -1, NULL, NULL, &pConsumerNew));
pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pMsg, "clear-csm");
pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, pMsg, "clear-csm");
MND_TMQ_NULL_CHECK(pTrans);
MND_TMQ_RETURN_CHECK(mndSetConsumerDropLogs(pTrans, pConsumerNew));
code = mndTransPrepare(pMnode, pTrans);

View File

@ -443,7 +443,7 @@ static int32_t mndInitTimer(SMnode *pMnode) {
(void)taosThreadAttrInit(&thAttr);
(void)taosThreadAttrSetDetachState(&thAttr, PTHREAD_CREATE_JOINABLE);
if ((code = taosThreadCreate(&pMnode->thread, &thAttr, mndThreadFp, pMnode)) != 0) {
mError("failed to create timer thread since %s", strerror(errno));
mError("failed to create timer thread since %s", tstrerror(code));
TAOS_RETURN(code);
}

View File

@ -419,13 +419,26 @@ static int32_t mndBuildStreamObjFromCreateReq(SMnode *pMnode, SStreamObj *pObj,
int32_t nullIndex = 0;
int32_t dataIndex = 0;
for (int16_t i = 0; i < pObj->outputSchema.nCols; i++) {
SColLocation *pos = taosArrayGet(pCreate->fillNullCols, nullIndex);
for (int32_t i = 0; i < pObj->outputSchema.nCols; i++) {
if (nullIndex >= numOfNULL) {
pFullSchema[i].bytes = pObj->outputSchema.pSchema[dataIndex].bytes;
pFullSchema[i].colId = i + 1; // pObj->outputSchema.pSchema[dataIndex].colId;
pFullSchema[i].flags = pObj->outputSchema.pSchema[dataIndex].flags;
strcpy(pFullSchema[i].name, pObj->outputSchema.pSchema[dataIndex].name);
pFullSchema[i].type = pObj->outputSchema.pSchema[dataIndex].type;
dataIndex++;
} else {
SColLocation *pos = NULL;
if (nullIndex < taosArrayGetSize(pCreate->fillNullCols)) {
pos = taosArrayGet(pCreate->fillNullCols, nullIndex);
}
if (pos == NULL) {
mError("invalid null column index, %d", nullIndex);
continue;
}
if (nullIndex >= numOfNULL || i < pos->slotId) {
if (i < pos->slotId) {
pFullSchema[i].bytes = pObj->outputSchema.pSchema[dataIndex].bytes;
pFullSchema[i].colId = i + 1; // pObj->outputSchema.pSchema[dataIndex].colId;
pFullSchema[i].flags = pObj->outputSchema.pSchema[dataIndex].flags;
@ -441,6 +454,8 @@ static int32_t mndBuildStreamObjFromCreateReq(SMnode *pMnode, SStreamObj *pObj,
nullIndex++;
}
}
}
taosMemoryFree(pObj->outputSchema.pSchema);
pObj->outputSchema.pSchema = pFullSchema;
}
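The guard above replaces an unchecked taosArrayGet with a size test before the access. The same idea as a standalone helper (hypothetical, for illustration only):
static void *arrayGetChecked(const SArray *pArray, size_t index) {
  if (index >= taosArrayGetSize(pArray)) return NULL;  // out of range: let the caller recover
  return taosArrayGet(pArray, index);
}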
@ -2139,7 +2154,7 @@ static int32_t refreshNodeListFromExistedStreams(SMnode *pMnode, SArray *pNodeLi
break;
}
SNodeEntry entry = {.hbTimestamp = -1, .nodeId = pTask->info.nodeId};
SNodeEntry entry = {.hbTimestamp = -1, .nodeId = pTask->info.nodeId, .lastHbMsgId = -1};
epsetAssign(&entry.epset, &pTask->info.epSet);
(void)taosHashPut(pHash, &entry.nodeId, sizeof(entry.nodeId), &entry, sizeof(entry));
}
@ -2319,7 +2334,7 @@ void saveTaskAndNodeInfoIntoBuf(SStreamObj *pStream, SStreamExecInfo *pExecNode)
}
if (!exist) {
SNodeEntry nodeEntry = {.hbTimestamp = -1, .nodeId = pTask->info.nodeId};
SNodeEntry nodeEntry = {.hbTimestamp = -1, .nodeId = pTask->info.nodeId, .lastHbMsgId = -1};
epsetAssign(&nodeEntry.epset, &pTask->info.epSet);
void* px = taosArrayPush(pExecNode->pNodeList, &nodeEntry);
@ -2420,7 +2435,7 @@ int32_t mndProcessStreamReqCheckpoint(SRpcMsg *pReq) {
if (pStream != NULL) { // TODO:handle error
code = mndProcessStreamCheckpointTrans(pMnode, pStream, checkpointId, 0, false);
if (code) {
mError("failed to create checkpoint trans, code:%s", strerror(code));
mError("failed to create checkpoint trans, code:%s", tstrerror(code));
}
} else {
// todo: wait for the create stream trans completed, and launch the checkpoint trans
@ -2454,8 +2469,45 @@ int32_t mndProcessStreamReqCheckpoint(SRpcMsg *pReq) {
return 0;
}
static void doAddReportStreamTask(SArray* pList, const SCheckpointReport* pReport) {
bool existed = false;
// validate the info according to the HbMsg
static bool validateChkptReport(const SCheckpointReport *pReport, int64_t reportChkptId) {
STaskId id = {.streamId = pReport->streamId, .taskId = pReport->taskId};
STaskStatusEntry *pTaskEntry = taosHashGet(execInfo.pTaskMap, &id, sizeof(id));
if (pTaskEntry == NULL) {
mError("invalid checkpoint-report msg from task:0x%x, discard", pReport->taskId);
return false;
}
if (pTaskEntry->checkpointInfo.latestId >= pReport->checkpointId) {
mError("s-task:0x%x invalid checkpoint-report msg, checkpointId:%" PRId64 " saved checkpointId:%" PRId64 " discard",
pReport->taskId, pReport->checkpointId, pTaskEntry->checkpointInfo.activeId);
return false;
}
// now the task is in the checkpoint procedure
if ((pTaskEntry->checkpointInfo.activeId != 0) && (pTaskEntry->checkpointInfo.activeId > pReport->checkpointId)) {
mError("s-task:0x%x invalid checkpoint-report msg, checkpointId:%" PRId64 " active checkpointId:%" PRId64
" discard",
pReport->taskId, pReport->checkpointId, pTaskEntry->checkpointInfo.activeId);
return false;
}
if (reportChkptId >= pReport->checkpointId) {
mError("s-task:0x%x expired checkpoint-report msg, checkpointId:%" PRId64 " already update checkpointId:%" PRId64
" discard",
pReport->taskId, pReport->checkpointId, reportChkptId);
return false;
}
return true;
}
static void doAddReportStreamTask(SArray *pList, int64_t reportChkptId, const SCheckpointReport *pReport) {
bool valid = validateChkptReport(pReport, reportChkptId);
if (!valid) {
return;
}
for (int32_t i = 0; i < taosArrayGetSize(pList); ++i) {
STaskChkptInfo *p = taosArrayGet(pList, i);
if (p == NULL) {
@ -2463,12 +2515,21 @@ static void doAddReportStreamTask(SArray* pList, const SCheckpointReport* pRepor
}
if (p->taskId == pReport->taskId) {
existed = true;
break;
if (p->checkpointId > pReport->checkpointId) {
mError("s-task:0x%x invalid checkpoint-report msg, existed:%" PRId64 " req checkpointId:%" PRId64 ", discard",
pReport->taskId, p->checkpointId, pReport->checkpointId);
} else if (p->checkpointId < pReport->checkpointId) { // expired checkpoint-report msg, update it
mDebug("s-task:0x%x expired checkpoint-report msg in checkpoint-report list update from %" PRId64 "->%" PRId64,
pReport->taskId, p->checkpointId, pReport->checkpointId);
memcpy(p, pReport, sizeof(STaskChkptInfo));
} else {
mWarn("taskId:0x%x already in checkpoint-report list", pReport->taskId);
}
return;
}
}
if (!existed) {
STaskChkptInfo info = {
.streamId = pReport->streamId,
.taskId = pReport->taskId,
@ -2483,7 +2544,9 @@ static void doAddReportStreamTask(SArray* pList, const SCheckpointReport* pRepor
void *p = taosArrayPush(pList, &info);
if (p == NULL) {
mError("failed to put into task list, taskId:0x%x", pReport->taskId);
}
} else {
int32_t size = taosArrayGetSize(pList);
mDebug("stream:0x%"PRIx64" %d tasks has send checkpoint-report", pReport->streamId, size);
}
}
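The loop is now an upsert keyed by taskId. Condensed, the three-way decision for an existing entry p against an incoming report (illustrative; the hunk also logs each branch):
if (p->checkpointId > pReport->checkpointId) {
  // stale report: reject it
} else if (p->checkpointId < pReport->checkpointId) {
  memcpy(p, pReport, sizeof(STaskChkptInfo));  // stored entry is older: overwrite it
} else {
  // duplicate report for the same checkpoint: ignore it
}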
@ -2530,23 +2593,23 @@ int32_t mndProcessCheckpointReport(SRpcMsg *pReq) {
int32_t numOfTasks = (pStream == NULL) ? 0 : mndGetNumOfStreamTasks(pStream);
SArray **pReqTaskList = (SArray **)taosHashGet(execInfo.pChkptStreams, &req.streamId, sizeof(req.streamId));
if (pReqTaskList == NULL) {
SArray *pList = taosArrayInit(4, sizeof(STaskChkptInfo));
if (pList != NULL) {
doAddReportStreamTask(pList, &req);
code = taosHashPut(execInfo.pChkptStreams, &req.streamId, sizeof(req.streamId), &pList, POINTER_BYTES);
SChkptReportInfo *pInfo = (SChkptReportInfo*)taosHashGet(execInfo.pChkptStreams, &req.streamId, sizeof(req.streamId));
if (pInfo == NULL) {
SChkptReportInfo info = {.pTaskList = taosArrayInit(4, sizeof(STaskChkptInfo)), .streamId = req.streamId};
if (info.pTaskList != NULL) {
doAddReportStreamTask(info.pTaskList, info.reportChkpt, &req);
code = taosHashPut(execInfo.pChkptStreams, &req.streamId, sizeof(req.streamId), &info, sizeof(info));
if (code) {
mError("stream:0x%" PRIx64 " failed to put into checkpoint stream", req.streamId);
}
pReqTaskList = (SArray **)taosHashGet(execInfo.pChkptStreams, &req.streamId, sizeof(req.streamId));
pInfo = (SChkptReportInfo *)taosHashGet(execInfo.pChkptStreams, &req.streamId, sizeof(req.streamId));
}
} else {
doAddReportStreamTask(*pReqTaskList, &req);
doAddReportStreamTask(pInfo->pTaskList, pInfo->reportChkpt, &req);
}
int32_t total = taosArrayGetSize(*pReqTaskList);
int32_t total = taosArrayGetSize(pInfo->pTaskList);
if (total == numOfTasks) { // all tasks have sent the reqs
mInfo("stream:0x%" PRIx64 " %s all %d tasks send checkpoint-report, checkpoint meta-info for checkpointId:%" PRId64
" will be issued soon",

View File

@ -211,6 +211,10 @@ int32_t mndProcessResetStatusReq(SRpcMsg *pReq) {
SStreamTaskResetMsg* pMsg = pReq->pCont;
mndKillTransImpl(pMnode, pMsg->transId, "");
streamMutexLock(&execInfo.lock);
(void) mndResetChkptReportInfo(execInfo.pChkptStreams, pMsg->streamId);
streamMutexUnlock(&execInfo.lock);
code = mndGetStreamObj(pMnode, pMsg->streamId, &pStream);
if (pStream == NULL || code != 0) {
code = TSDB_CODE_STREAM_TASK_NOT_EXIST;
@ -333,7 +337,8 @@ int32_t mndProcessStreamHb(SRpcMsg *pReq) {
}
tDecoderClear(&decoder);
mDebug("receive stream-meta hb from vgId:%d, active numOfTasks:%d, msgId:%d", req.vgId, req.numOfTasks, req.msgId);
mDebug("receive stream-meta hb from vgId:%d, active numOfTasks:%d, HbMsgId:%d, HbMsgTs:%" PRId64, req.vgId,
req.numOfTasks, req.msgId, req.ts);
pFailedChkpt = taosArrayInit(4, sizeof(SFailedCheckpointInfo));
pOrphanTasks = taosArrayInit(4, sizeof(SOrphanTask));
@ -356,6 +361,31 @@ int32_t mndProcessStreamHb(SRpcMsg *pReq) {
TAOS_RETURN(TSDB_CODE_INVALID_MSG);
}
for(int32_t i = 0; i < taosArrayGetSize(execInfo.pNodeList); ++i) {
SNodeEntry* pEntry = taosArrayGet(execInfo.pNodeList, i);
if (pEntry == NULL) {
continue;
}
if (pEntry->nodeId != req.vgId) {
continue;
}
if ((pEntry->lastHbMsgId == req.msgId) && (pEntry->lastHbMsgTs == req.ts)) {
mError("vgId:%d HbMsgId:%d already handled, bh msg discard", pEntry->nodeId, req.msgId);
terrno = TSDB_CODE_INVALID_MSG;
doSendHbMsgRsp(terrno, &pReq->info, req.vgId, req.msgId);
streamMutexUnlock(&execInfo.lock);
cleanupAfterProcessHbMsg(&req, pFailedChkpt, pOrphanTasks);
return terrno;
} else {
pEntry->lastHbMsgId = req.msgId;
pEntry->lastHbMsgTs = req.ts;
}
}
int32_t numOfUpdated = taosArrayGetSize(req.pUpdateNodes);
if (numOfUpdated > 0) {
mDebug("%d stream node(s) need updated from hbMsg(vgId:%d)", numOfUpdated, req.vgId);
@ -393,6 +423,7 @@ int32_t mndProcessStreamHb(SRpcMsg *pReq) {
SStreamObj *pStream = NULL;
code = mndGetStreamObj(pMnode, p->id.streamId, &pStream);
if (code) {
mError("stream obj not exist, failed to handle consensus checkpoint-info req, code:%s", tstrerror(code));
continue;
}
@ -426,7 +457,7 @@ int32_t mndProcessStreamHb(SRpcMsg *pReq) {
addIntoCheckpointList(pFailedChkpt, &info);
// remove failed trans from pChkptStreams
code = taosHashRemove(execInfo.pChkptStreams, &p->id.streamId, sizeof(p->id.streamId));
code = mndResetChkptReportInfo(execInfo.pChkptStreams, p->id.streamId);
if (code) {
mError("failed to remove stream:0x%"PRIx64" in checkpoint stream list", p->id.streamId);
}
@ -484,14 +515,14 @@ int32_t mndProcessStreamHb(SRpcMsg *pReq) {
}
if (pMnode != NULL) { // make sure that the unit test case can work
mndStreamSendUpdateChkptInfoMsg(pMnode);
code = mndStreamSendUpdateChkptInfoMsg(pMnode);
}
streamMutexUnlock(&execInfo.lock);
doSendHbMsgRsp(TSDB_CODE_SUCCESS, &pReq->info, req.vgId, req.msgId);
cleanupAfterProcessHbMsg(&req, pFailedChkpt, pOrphanTasks);
return code;
}
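Vgroups can re-deliver a heartbeat, for example on transport retry, so the mnode now remembers the last (msgId, ts) pair per node entry and drops exact re-deliveries. The test, isolated as a sketch:
static bool isDuplicateHb(const SNodeEntry *pEntry, int32_t msgId, int64_t ts) {
  return (pEntry->lastHbMsgId == msgId) && (pEntry->lastHbMsgTs == ts);  // same msg seen before
}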

View File

@ -129,6 +129,8 @@ int32_t mndTakeVgroupSnapshot(SMnode *pMnode, bool *allReady, SArray **pList) {
goto _err;
}
*allReady = true;
while (1) {
pIter = sdbFetch(pSdb, SDB_VGROUP, pIter, (void **)&pVgroup);
if (pIter == NULL) {
@ -540,8 +542,7 @@ static int32_t doSetDropActionFromId(SMnode *pMnode, STrans *pTrans, SOrphanTask
}
// The epset of this task's nodeId may have expired by now; use the newest epset from mnode.
code = setTransAction(pTrans, pReq, sizeof(SVDropStreamTaskReq), TDMT_STREAM_TASK_DROP, &epset, 0,
TSDB_CODE_VND_INVALID_VGROUP_ID);
code = setTransAction(pTrans, pReq, sizeof(SVDropStreamTaskReq), TDMT_STREAM_TASK_DROP, &epset, 0, TSDB_CODE_VND_INVALID_VGROUP_ID);
if (code != 0) {
taosMemoryFree(pReq);
return code;
@ -812,9 +813,13 @@ void removeExpiredNodeInfo(const SArray *pNodeSnapshot) {
}
if (pEntry->nodeId == p->nodeId) {
p->hbTimestamp = pEntry->hbTimestamp;
void* px = taosArrayPush(pValidList, p);
if (px == NULL) {
mError("failed to put node into list, nodeId:%d", p->nodeId);
} else {
mDebug("vgId:%d ts:%" PRId64 " HbMsgId:%d is valid", p->nodeId, p->hbTimestamp, p->lastHbMsgId);
}
break;
}
@ -899,8 +904,9 @@ void removeStreamTasksInBuf(SStreamObj *pStream, SStreamExecInfo *pExecNode) {
ASSERT(taosHashGetSize(pExecNode->pTaskMap) == taosArrayGetSize(pExecNode->pTaskList));
// 2. remove stream entry in consensus hash table
// 2. remove stream entry in consensus hash table and checkpoint-report hash table
(void) mndClearConsensusCheckpointId(execInfo.pStreamConsensus, pStream->uid);
(void) mndClearChkptReportInfo(execInfo.pChkptStreams, pStream->uid);
streamMutexUnlock(&pExecNode->lock);
destroyStreamTaskIter(pIter);
@ -968,9 +974,8 @@ int32_t removeExpiredNodeEntryAndTaskInBuf(SArray *pNodeSnapshot) {
static int32_t doSetUpdateChkptAction(SMnode *pMnode, STrans *pTrans, SStreamTask *pTask) {
SVUpdateCheckpointInfoReq *pReq = taosMemoryCalloc(1, sizeof(SVUpdateCheckpointInfoReq));
if (pReq == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
mError("failed to malloc in reset stream, size:%" PRIzu ", code:%s", sizeof(SVUpdateCheckpointInfoReq),
tstrerror(TSDB_CODE_OUT_OF_MEMORY));
tstrerror(terrno));
return terrno;
}
@ -978,12 +983,14 @@ static int32_t doSetUpdateChkptAction(SMnode *pMnode, STrans *pTrans, SStreamTas
pReq->taskId = pTask->id.taskId;
pReq->streamId = pTask->id.streamId;
SArray **pReqTaskList = (SArray **)taosHashGet(execInfo.pChkptStreams, &pTask->id.streamId, sizeof(pTask->id.streamId));
ASSERT(pReqTaskList);
SChkptReportInfo *pStreamItem = (SChkptReportInfo*)taosHashGet(execInfo.pChkptStreams, &pTask->id.streamId, sizeof(pTask->id.streamId));
if (pStreamItem == NULL) {
return TSDB_CODE_INVALID_PARA;
}
int32_t size = taosArrayGetSize(*pReqTaskList);
int32_t size = taosArrayGetSize(pStreamItem->pTaskList);
for(int32_t i = 0; i < size; ++i) {
STaskChkptInfo* pInfo = taosArrayGet(*pReqTaskList, i);
STaskChkptInfo* pInfo = taosArrayGet(pStreamItem->pTaskList, i);
if (pInfo == NULL) {
continue;
}
@ -1058,11 +1065,15 @@ int32_t mndScanCheckpointReportInfo(SRpcMsg *pReq) {
}
mDebug("start to scan checkpoint report info");
streamMutexLock(&execInfo.lock);
while ((pIter = taosHashIterate(execInfo.pChkptStreams, pIter)) != NULL) {
SArray *pList = *(SArray **)pIter;
SChkptReportInfo* px = (SChkptReportInfo *)pIter;
if (taosArrayGetSize(px->pTaskList) == 0) {
continue;
}
STaskChkptInfo *pInfo = taosArrayGet(pList, 0);
STaskChkptInfo *pInfo = taosArrayGet(px->pTaskList, 0);
if (pInfo == NULL) {
continue;
}
@ -1075,12 +1086,11 @@ int32_t mndScanCheckpointReportInfo(SRpcMsg *pReq) {
if (p == NULL) {
mError("failed to put stream into drop list:0x%" PRIx64, pInfo->streamId);
}
continue;
}
int32_t total = mndGetNumOfStreamTasks(pStream);
int32_t existed = (int32_t)taosArrayGetSize(pList);
int32_t existed = (int32_t)taosArrayGetSize(px->pTaskList);
if (total == existed) {
mDebug("stream:0x%" PRIx64 " %s all %d tasks send checkpoint-report, start to update checkpoint-info",
@ -1088,14 +1098,11 @@ int32_t mndScanCheckpointReportInfo(SRpcMsg *pReq) {
bool conflict = mndStreamTransConflictCheck(pMnode, pStream->uid, MND_STREAM_CHKPT_UPDATE_NAME, false);
if (!conflict) {
code = mndCreateStreamChkptInfoUpdateTrans(pMnode, pStream, pList);
code = mndCreateStreamChkptInfoUpdateTrans(pMnode, pStream, px->pTaskList);
if (code == TSDB_CODE_SUCCESS || code == TSDB_CODE_ACTION_IN_PROGRESS) { // remove this entry
void* p = taosArrayPush(pDropped, &pInfo->streamId);
if (p == NULL) {
mError("failed to remove stream:0x%" PRIx64, pInfo->streamId);
} else {
mDebug("stream:0x%" PRIx64 " removed", pInfo->streamId);
}
taosArrayClear(px->pTaskList);
px->reportChkpt = pInfo->checkpointId;
mDebug("stream:0x%" PRIx64 " clear checkpoint-report list", pInfo->streamId);
} else {
mDebug("stream:0x%" PRIx64 " not launch chkpt-meta update trans, due to checkpoint not finished yet",
pInfo->streamId);
@ -1130,6 +1137,8 @@ int32_t mndScanCheckpointReportInfo(SRpcMsg *pReq) {
mDebug("drop %d stream(s) in checkpoint-report list, remain:%d", size, numOfStreams);
}
streamMutexUnlock(&execInfo.lock);
taosArrayDestroy(pDropped);
return TSDB_CODE_SUCCESS;
}
@ -1314,7 +1323,7 @@ int64_t mndClearConsensusCheckpointId(SHashObj* pHash, int64_t streamId) {
int32_t code = 0;
int32_t numOfStreams = taosHashGetSize(pHash);
if (numOfStreams == 0) {
return TSDB_CODE_SUCCESS;
return code;
}
code = taosHashRemove(pHash, &streamId, sizeof(streamId));
@ -1327,6 +1336,35 @@ int64_t mndClearConsensusCheckpointId(SHashObj* pHash, int64_t streamId) {
return code;
}
int64_t mndClearChkptReportInfo(SHashObj* pHash, int64_t streamId) {
int32_t code = 0;
int32_t numOfStreams = taosHashGetSize(pHash);
if (numOfStreams == 0) {
return code;
}
code = taosHashRemove(pHash, &streamId, sizeof(streamId));
if (code == 0) {
mDebug("drop stream:0x%" PRIx64 " in chkpt-report list, remain:%d", streamId, numOfStreams);
} else {
mError("failed to remove stream:0x%"PRIx64" in chkpt-report list, remain:%d", streamId, numOfStreams);
}
return code;
}
int32_t mndResetChkptReportInfo(SHashObj* pHash, int64_t streamId) {
SChkptReportInfo* pInfo = taosHashGet(pHash, &streamId, sizeof(streamId));
if (pInfo != NULL) {
taosArrayClear(pInfo->pTaskList);
mDebug("stream:0x%" PRIx64 " checkpoint-report list cleared, prev report checkpointId:%" PRId64, streamId,
pInfo->reportChkpt);
return 0;
}
return TSDB_CODE_MND_STREAM_NOT_EXIST;
}
static void mndShowStreamStatus(char *dst, SStreamObj *pStream) {
int8_t status = atomic_load_8(&pStream->status);
if (status == STREAM_STATUS__NORMAL) {
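The two new helpers have deliberately different lifetimes: reset empties a stream's task list but keeps the hash entry (a killed checkpoint trans may report again later), while clear removes the entry outright (the stream itself is gone). Illustrative call sites mirroring the hunks in this commit:
(void)mndResetChkptReportInfo(execInfo.pChkptStreams, streamId);  // trans killed: keep entry, drop reports
(void)mndClearChkptReportInfo(execInfo.pChkptStreams, streamId);  // stream dropped: remove entry entirely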

View File

@ -643,7 +643,7 @@ static int32_t mndPersistRebResult(SMnode *pMnode, SRpcMsg *pMsg, const SMqRebOu
char cgroup[TSDB_CGROUP_LEN] = {0};
mndSplitSubscribeKey(pOutput->pSub->key, topic, cgroup, true);
pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_DB_INSIDE, pMsg, "tmq-reb");
pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB_INSIDE, pMsg, "tmq-reb");
if (pTrans == NULL) {
code = TSDB_CODE_MND_RETURN_VALUE_NULL;
if (terrno != 0) code = terrno;
@ -1079,7 +1079,7 @@ static int32_t mndProcessDropCgroupReq(SRpcMsg *pMsg) {
goto END;
}
pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_DB, pMsg, "drop-cgroup");
pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB, pMsg, "drop-cgroup");
MND_TMQ_NULL_CHECK(pTrans);
mInfo("trans:%d, used to drop cgroup:%s on topic %s", pTrans->id, dropReq.cgroup, dropReq.topic);
mndTransSetDbName(pTrans, pSub->dbName, NULL);

View File

@ -431,7 +431,7 @@ static int32_t mndCreateTopic(SMnode *pMnode, SRpcMsg *pReq, SCMCreateTopicReq *
SQueryPlan *pPlan = NULL;
SMqTopicObj topicObj = {0};
pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_DB, pReq, "create-topic");
pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB, pReq, "create-topic");
MND_TMQ_NULL_CHECK(pTrans);
mndTransSetDbName(pTrans, pDb->name, NULL);
MND_TMQ_RETURN_CHECK(mndTransCheckConflict(pMnode, pTrans));

View File

@ -817,6 +817,17 @@ static bool mndCheckStbConflict(const char *conflict, STrans *pTrans) {
return false;
}
static void mndTransLogConflict(STrans *pNew, STrans *pTrans, bool conflict, bool *globalConflict) {
if (conflict) {
mError("trans:%d, db:%s stb:%s type:%d, can't execute since conflict with trans:%d db:%s stb:%s type:%d", pNew->id,
pNew->dbname, pNew->stbname, pNew->conflict, pTrans->id, pTrans->dbname, pTrans->stbname, pTrans->conflict);
*globalConflict = true;
} else {
mInfo("trans:%d, db:%s stb:%s type:%d, not conflict with trans:%d db:%s stb:%s type:%d", pNew->id, pNew->dbname,
pNew->stbname, pNew->conflict, pTrans->id, pTrans->dbname, pTrans->stbname, pTrans->conflict);
}
}
static bool mndCheckTransConflict(SMnode *pMnode, STrans *pNew) {
STrans *pTrans = NULL;
void *pIter = NULL;
@ -832,18 +843,18 @@ static bool mndCheckTransConflict(SMnode *pMnode, STrans *pNew) {
if (pNew->conflict == TRN_CONFLICT_DB) {
if (pTrans->conflict == TRN_CONFLICT_GLOBAL) conflict = true;
if (pTrans->conflict == TRN_CONFLICT_DB || pTrans->conflict == TRN_CONFLICT_DB_INSIDE) {
if (mndCheckDbConflict(pNew->dbname, pTrans)) conflict = true;
if (mndCheckStbConflict(pNew->stbname, pTrans)) conflict = true;
mndTransLogConflict(pNew, pTrans, mndCheckDbConflict(pNew->dbname, pTrans), &conflict);
mndTransLogConflict(pNew, pTrans, mndCheckStbConflict(pNew->stbname, pTrans), &conflict);
}
}
if (pNew->conflict == TRN_CONFLICT_DB_INSIDE) {
if (pTrans->conflict == TRN_CONFLICT_GLOBAL) conflict = true;
if (pTrans->conflict == TRN_CONFLICT_DB) {
if (mndCheckDbConflict(pNew->dbname, pTrans)) conflict = true;
if (mndCheckStbConflict(pNew->stbname, pTrans)) conflict = true;
mndTransLogConflict(pNew, pTrans, mndCheckDbConflict(pNew->dbname, pTrans), &conflict);
mndTransLogConflict(pNew, pTrans, mndCheckStbConflict(pNew->stbname, pTrans), &conflict);
}
if (pTrans->conflict == TRN_CONFLICT_DB_INSIDE) {
if (mndCheckStbConflict(pNew->stbname, pTrans)) conflict = true; // for stb
mndTransLogConflict(pNew, pTrans, mndCheckStbConflict(pNew->stbname, pTrans), &conflict); // for stb
}
}
@ -871,22 +882,16 @@ static bool mndCheckTransConflict(SMnode *pMnode, STrans *pNew) {
int32_t groupId = *(int32_t *)pGidIter;
if (taosHashGet(pTrans->arbGroupIds, &groupId, sizeof(int32_t)) != NULL) {
taosHashCancelIterate(pNew->arbGroupIds, pGidIter);
conflict = true;
mndTransLogConflict(pNew, pTrans, true, &conflict);
break;
} else {
mndTransLogConflict(pNew, pTrans, false, &conflict);
}
pGidIter = taosHashIterate(pNew->arbGroupIds, pGidIter);
}
}
}
if (conflict) {
mError("trans:%d, db:%s stb:%s type:%d, can't execute since conflict with trans:%d db:%s stb:%s type:%d",
pNew->id, pNew->dbname, pNew->stbname, pNew->conflict, pTrans->id, pTrans->dbname, pTrans->stbname,
pTrans->conflict);
} else {
mInfo("trans:%d, db:%s stb:%s type:%d, not conflict with trans:%d db:%s stb:%s type:%d", pNew->id, pNew->dbname,
pNew->stbname, pNew->conflict, pTrans->id, pTrans->dbname, pTrans->stbname, pTrans->conflict);
}
sdbRelease(pMnode->pSdb, pTrans);
}
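A note on the refactor: mndTransLogConflict only ever sets *globalConflict, never clears it, so a later non-conflicting probe cannot erase an earlier hit, and every probe now logs its own verdict instead of one summary at the end. Condensed (same calls as the hunk above):
bool conflict = false;
mndTransLogConflict(pNew, pTrans, mndCheckDbConflict(pNew->dbname, pTrans), &conflict);    // logs and accumulates
mndTransLogConflict(pNew, pTrans, mndCheckStbConflict(pNew->stbname, pTrans), &conflict);  // stays true once set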

View File

@ -362,7 +362,7 @@ int32_t streamTaskSnapReaderClose(SStreamTaskReader* pReader);
int32_t streamTaskSnapRead(SStreamTaskReader* pReader, uint8_t** ppData);
int32_t streamTaskSnapWriterOpen(STQ* pTq, int64_t sver, int64_t ever, SStreamTaskWriter** ppWriter);
int32_t streamTaskSnapWriterClose(SStreamTaskWriter* ppWriter, int8_t rollback);
int32_t streamTaskSnapWriterClose(SStreamTaskWriter* ppWriter, int8_t rollback, int8_t loadTask);
int32_t streamTaskSnapWrite(SStreamTaskWriter* pWriter, uint8_t* pData, uint32_t nData);
int32_t streamStateSnapReaderOpen(STQ* pTq, int64_t sver, int64_t ever, SStreamStateReader** ppReader);
@ -473,6 +473,7 @@ struct SVnode {
STfs* pTfs;
int32_t diskPrimary;
SMsgCb msgCb;
bool disableWrite;
// Buffer Pool
TdThreadMutex mutex;

View File

@ -315,12 +315,14 @@ int32_t metaTbCursorNext(SMTbCursor *pTbCur, ETableType jumpTableType) {
for (;;) {
ret = tdbTbcNext((TBC *)pTbCur->pDbc, &pTbCur->pKey, &pTbCur->kLen, &pTbCur->pVal, &pTbCur->vLen);
if (ret < 0) {
return -1;
return ret;
}
tDecoderClear(&pTbCur->mr.coder);
(void)metaGetTableEntryByVersion(&pTbCur->mr, ((SUidIdxVal *)pTbCur->pVal)[0].version, *(tb_uid_t *)pTbCur->pKey);
ret = metaGetTableEntryByVersion(&pTbCur->mr, ((SUidIdxVal *)pTbCur->pVal)[0].version, *(tb_uid_t *)pTbCur->pKey);
if (ret) return ret;
if (pTbCur->mr.me.type == jumpTableType) {
continue;
}
@ -1232,6 +1234,9 @@ int32_t metaFilterTableIds(void *pVnode, SMetaFltParam *arg, SArray *pUids) {
SIdxCursor *pCursor = NULL;
pCursor = (SIdxCursor *)taosMemoryCalloc(1, sizeof(SIdxCursor));
if (!pCursor) {
TAOS_RETURN(TSDB_CODE_OUT_OF_MEMORY);
}
pCursor->pMeta = pMeta;
pCursor->suid = param->suid;
pCursor->cid = param->cid;
@ -1425,6 +1430,11 @@ int32_t metaGetTableTagsByUids(void *pVnode, int64_t suid, SArray *uidList) {
int32_t len = 0;
if (metaGetTableTagByUid(pMeta, suid, p->uid, &val, &len, false) == 0) {
p->pTagVal = taosMemoryMalloc(len);
if (!p->pTagVal) {
if (isLock) metaULock(pMeta);
TAOS_RETURN(TSDB_CODE_OUT_OF_MEMORY);
}
memcpy(p->pTagVal, val, len);
tdbFree(val);
} else {
@ -1439,6 +1449,9 @@ int32_t metaGetTableTagsByUids(void *pVnode, int64_t suid, SArray *uidList) {
int32_t metaGetTableTags(void *pVnode, uint64_t suid, SArray *pUidTagInfo) {
SMCtbCursor *pCur = metaOpenCtbCursor(pVnode, suid, 1);
if (!pCur) {
TAOS_RETURN(TSDB_CODE_OUT_OF_MEMORY);
}
// If len > 0, uids are already provided and we only want the
// tags of the specified tables, whose uids are in the uid list. Otherwise, all table tags are retrieved and kept

View File

@ -192,7 +192,7 @@ int32_t streamTaskSnapWriterOpen(STQ* pTq, int64_t sver, int64_t ever, SStreamTa
return code;
}
int32_t streamTaskSnapWriterClose(SStreamTaskWriter* pWriter, int8_t rollback) {
int32_t streamTaskSnapWriterClose(SStreamTaskWriter* pWriter, int8_t rollback, int8_t loadTask) {
int32_t code = 0;
STQ* pTq = pWriter->pTq;
@ -213,6 +213,10 @@ int32_t streamTaskSnapWriterClose(SStreamTaskWriter* pWriter, int8_t rollback) {
}
streamMetaWUnLock(pTq->pStreamMeta);
taosMemoryFree(pWriter);
if (loadTask == 1) {
streamMetaLoadAllTasks(pTq->pStreamMeta);
}
return code;
_err:

View File

@ -417,7 +417,6 @@ int32_t tqStreamTaskProcessDispatchRsp(SStreamMeta* pMeta, SRpcMsg* pMsg) {
return code;
} else {
tqDebug("vgId:%d failed to handle the dispatch rsp, since find task:0x%x failed", vgId, pRsp->upstreamTaskId);
terrno = TSDB_CODE_STREAM_TASK_NOT_EXIST;
return TSDB_CODE_STREAM_TASK_NOT_EXIST;
}
}
@ -563,7 +562,7 @@ int32_t tqStreamTaskProcessCheckpointReadyMsg(SStreamMeta* pMeta, SRpcMsg* pMsg)
pTask->id.idStr, req.downstreamTaskId, req.downstreamNodeId);
}
code = streamProcessCheckpointReadyMsg(pTask, req.checkpointId, req.downstreamTaskId, req.downstreamNodeId);
code = streamProcessCheckpointReadyMsg(pTask, req.checkpointId, req.downstreamNodeId, req.downstreamTaskId);
streamMetaReleaseTask(pMeta, pTask);
if (code) {
return code;
@ -996,7 +995,13 @@ int32_t tqStreamTaskProcessRetrieveTriggerReq(SStreamMeta* pMeta, SRpcMsg* pMsg)
int64_t checkpointId = 0;
streamTaskGetActiveCheckpointInfo(pTask, &transId, &checkpointId);
ASSERT(checkpointId == pReq->checkpointId);
if (checkpointId != pReq->checkpointId) {
tqError("s-task:%s invalid checkpoint-trigger retrieve msg from 0x%" PRIx64 ", current checkpointId:%" PRId64
" req:%" PRId64,
pTask->id.idStr, pReq->downstreamTaskId, checkpointId, pReq->checkpointId);
streamMetaReleaseTask(pMeta, pTask);
return TSDB_CODE_INVALID_MSG;
}
if (streamTaskAlreadySendTrigger(pTask, pReq->downstreamNodeId)) {
// re-send the lost checkpoint-trigger msg to downstream task
@ -1239,8 +1244,8 @@ int32_t tqStreamTaskProcessConsenChkptIdReq(SStreamMeta* pMeta, SRpcMsg* pMsg) {
// discard the rsp, since it is expired.
if (req.startTs < pTask->execInfo.created) {
tqWarn("s-task:%s vgId:%d create time:%" PRId64 " recv expired consensus checkpointId:%" PRId64
" from task createTs:%" PRId64 ", discard",
pTask->id.idStr, pMeta->vgId, pTask->execInfo.created, req.checkpointId, req.startTs);
" from task createTs:%" PRId64 " < task createTs:%" PRId64 ", discard",
pTask->id.idStr, pMeta->vgId, pTask->execInfo.created, req.checkpointId, req.startTs, pTask->execInfo.created);
streamMetaAddFailedTaskSelf(pTask, now);
streamMetaReleaseTask(pMeta, pTask);
return TSDB_CODE_SUCCESS;

View File

@ -1703,8 +1703,14 @@ int32_t tsdbCacheGetBatch(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArray, SCache
if (!remainCols) {
remainCols = taosArrayInit(num_keys, sizeof(SIdxKey));
if (!remainCols) {
TAOS_RETURN(TSDB_CODE_OUT_OF_MEMORY);
}
(void)taosArrayPush(remainCols, &(SIdxKey){i, key});
}
if (NULL == taosArrayPush(remainCols, &(SIdxKey){i, key})) {
taosArrayDestroy(remainCols);
TAOS_RETURN(TSDB_CODE_OUT_OF_MEMORY);
};
}
}

View File

@ -3519,8 +3519,10 @@ static int32_t initForFirstBlockInFile(STsdbReader* pReader, SDataBlockIter* pBl
resetTableListIndex(&pReader->status);
}
if (code == TSDB_CODE_SUCCESS) {
// set the correct start position according to the query time window
initBlockDumpInfo(pReader, pBlockIter);
}
taosArrayDestroy(pTableList);
return code;
}
@ -4706,8 +4708,7 @@ int32_t tsdbReaderOpen2(void* pVnode, SQueryTableDataCond* pCond, void* pTableLi
pReader->pSchemaMap = tSimpleHashInit(8, taosFastHash);
if (pReader->pSchemaMap == NULL) {
tsdbError("failed init schema hash for reader %s", pReader->idStr);
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
TSDB_CHECK_NULL(pReader->pSchemaMap, code, lino, _err, terrno);
}
tSimpleHashSetFreeFp(pReader->pSchemaMap, freeSchemaFunc);

View File

@ -1074,8 +1074,12 @@ int32_t doAdjustValidDataIters(SArray* pLDIterList, int32_t numOfFileObj) {
int32_t inc = numOfFileObj - size;
for (int32_t k = 0; k < inc; ++k) {
SLDataIter* pIter = taosMemoryCalloc(1, sizeof(SLDataIter));
if (!pIter) {
return terrno;
}
void* px = taosArrayPush(pLDIterList, &pIter);
if (px == NULL) {
taosMemoryFree(pIter);
return TSDB_CODE_OUT_OF_MEMORY;
}
}

View File

@ -403,6 +403,7 @@ SVnode *vnodeOpen(const char *path, int32_t diskPrimary, STfs *pTfs, SMsgCb msgC
pVnode->msgCb = msgCb;
(void)taosThreadMutexInit(&pVnode->lock, NULL);
pVnode->blocked = false;
pVnode->disableWrite = false;
(void)tsem_init(&pVnode->syncSem, 0, 0);
(void)taosThreadMutexInit(&pVnode->mutex, NULL);

View File

@ -609,7 +609,10 @@ int32_t vnodeSnapWriterOpen(SVnode *pVnode, SSnapshotParam *pParam, SVSnapWriter
int64_t sver = pParam->start;
int64_t ever = pParam->end;
// cancel and disable all bg task
// disable write, cancel and disable all bg tasks
(void)taosThreadMutexLock(&pVnode->mutex);
pVnode->disableWrite = true;
(void)taosThreadMutexUnlock(&pVnode->mutex);
(void)vnodeCancelAndDisableAllBgTask(pVnode);
// alloc
@ -722,7 +725,8 @@ int32_t vnodeSnapWriterClose(SVSnapWriter *pWriter, int8_t rollback, SSnapshot *
}
if (pWriter->pStreamTaskWriter) {
code = streamTaskSnapWriterClose(pWriter->pStreamTaskWriter, rollback);
code = streamTaskSnapWriterClose(pWriter->pStreamTaskWriter, rollback, pWriter->pStreamStateWriter == NULL ? 1 : 0);
if (code) goto _exit;
}
@ -741,6 +745,9 @@ int32_t vnodeSnapWriterClose(SVSnapWriter *pWriter, int8_t rollback, SSnapshot *
}
(void)vnodeBegin(pVnode);
(void)taosThreadMutexLock(&pVnode->mutex);
pVnode->disableWrite = false;
(void)taosThreadMutexUnlock(&pVnode->mutex);
_exit:
if (code) {

View File

@ -518,6 +518,14 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t ver, SRpcMsg
void *pReq;
int32_t len;
(void)taosThreadMutexLock(&pVnode->mutex);
if (pVnode->disableWrite) {
(void)taosThreadMutexUnlock(&pVnode->mutex);
vError("vgId:%d write is disabled for snapshot, version:%" PRId64, TD_VID(pVnode), ver);
return TSDB_CODE_VND_WRITE_DISABLED;
}
(void)taosThreadMutexUnlock(&pVnode->mutex);
if (ver <= pVnode->state.applied) {
vError("vgId:%d, duplicate write request. ver: %" PRId64 ", applied: %" PRId64 "", TD_VID(pVnode), ver,
pVnode->state.applied);
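The gate above pairs with the snapshot-writer hunks: disableWrite is set under pVnode->mutex in vnodeSnapWriterOpen and cleared in vnodeSnapWriterClose, so writers observe a consistent flag. The pattern, isolated as a sketch:
(void)taosThreadMutexLock(&pVnode->mutex);
bool disabled = pVnode->disableWrite;   // read under the same mutex the snap writer uses
(void)taosThreadMutexUnlock(&pVnode->mutex);
if (disabled) return TSDB_CODE_VND_WRITE_DISABLED;  // code 0x0538, defined earlier in this commit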

View File

@ -245,8 +245,10 @@ _error:
if (code != TSDB_CODE_SUCCESS) {
qError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
}
if (pInfo != NULL) {
pInfo->pTableList = NULL;
destroyCacheScanOperator(pInfo);
}
if (pOperator != NULL) {
pOperator->info = NULL;
destroyOperator(pOperator);

View File

@ -390,8 +390,14 @@ static int32_t initExchangeOperator(SExchangePhysiNode* pExNode, SExchangeInfo*
}
initLimitInfo(pExNode->node.pLimit, pExNode->node.pSlimit, &pInfo->limitInfo);
pInfo->self = taosAddRef(exchangeObjRefPool, pInfo);
int64_t refId = taosAddRef(exchangeObjRefPool, pInfo);
if (refId < 0) {
int32_t code = terrno;
qError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code));
return code;
} else {
pInfo->self = refId;
}
return initDataSource(numOfSources, pInfo, id);
}
@ -480,6 +486,7 @@ void freeSourceDataInfo(void* p) {
void doDestroyExchangeOperatorInfo(void* param) {
SExchangeInfo* pExInfo = (SExchangeInfo*)param;
if (pExInfo->pFetchRpcHandles) {
for (int32_t i = 0; i < pExInfo->pFetchRpcHandles->size; ++i) {
int64_t* pRpcHandle = taosArrayGet(pExInfo->pFetchRpcHandles, i);
if (*pRpcHandle > 0) {
@ -488,6 +495,7 @@ void doDestroyExchangeOperatorInfo(void* param) {
}
}
taosArrayDestroy(pExInfo->pFetchRpcHandles);
}
taosArrayDestroy(pExInfo->pSources);
taosArrayDestroyEx(pExInfo->pSourceDataInfo, freeSourceDataInfo);

View File

@ -1781,7 +1781,7 @@ int32_t createExprFromOneNode(SExprInfo* pExp, SNode* pNode, int16_t slotId) {
pExp->base.resSchema =
createResSchema(pType->type, pType->bytes, slotId, pType->scale, pType->precision, pValNode->node.aliasName);
pExp->base.pParam[0].type = FUNC_PARAM_TYPE_VALUE;
nodesValueNodeToVariant(pValNode, &pExp->base.pParam[0].param);
code = nodesValueNodeToVariant(pValNode, &pExp->base.pParam[0].param);
} else if (type == QUERY_NODE_FUNCTION) {
pExp->pExpr->nodeType = QUERY_NODE_FUNCTION;
SFunctionNode* pFuncNode = (SFunctionNode*)pNode;
@ -1811,13 +1811,11 @@ int32_t createExprFromOneNode(SExprInfo* pExp, SNode* pNode, int16_t slotId) {
if (TSDB_CODE_SUCCESS == code) {
code = nodesMakeNode(QUERY_NODE_VALUE, (SNode**)&res);
}
if (TSDB_CODE_SUCCESS != code) { // todo handle error
} else {
QUERY_CHECK_CODE(code, lino, _end);
res->node.resType = (SDataType){.bytes = sizeof(int64_t), .type = TSDB_DATA_TYPE_BIGINT};
code = nodesListAppend(pFuncNode->pParameterList, (SNode*)res);
QUERY_CHECK_CODE(code, lino, _end);
}
}
#endif
int32_t numOfParam = LIST_LENGTH(pFuncNode->pParameterList);
@ -1826,7 +1824,7 @@ int32_t createExprFromOneNode(SExprInfo* pExp, SNode* pNode, int16_t slotId) {
QUERY_CHECK_NULL(pExp->base.pParam, code, lino, _end, terrno);
pExp->base.numOfParams = numOfParam;
for (int32_t j = 0; j < numOfParam; ++j) {
for (int32_t j = 0; j < numOfParam && TSDB_CODE_SUCCESS == code; ++j) {
SNode* p1 = nodesListGetNode(pFuncNode->pParameterList, j);
QUERY_CHECK_NULL(p1, code, lino, _end, terrno);
if (p1->type == QUERY_NODE_COLUMN) {
@ -1839,7 +1837,8 @@ int32_t createExprFromOneNode(SExprInfo* pExp, SNode* pNode, int16_t slotId) {
} else if (p1->type == QUERY_NODE_VALUE) {
SValueNode* pvn = (SValueNode*)p1;
pExp->base.pParam[j].type = FUNC_PARAM_TYPE_VALUE;
nodesValueNodeToVariant(pvn, &pExp->base.pParam[j].param);
code = nodesValueNodeToVariant(pvn, &pExp->base.pParam[j].param);
QUERY_CHECK_CODE(code, lino, _end);
}
}
} else if (type == QUERY_NODE_OPERATOR) {
@ -1871,13 +1870,10 @@ int32_t createExprFromOneNode(SExprInfo* pExp, SNode* pNode, int16_t slotId) {
SLogicConditionNode* pCond = (SLogicConditionNode*)pNode;
pExp->base.pParam = taosMemoryCalloc(1, sizeof(SFunctParam));
QUERY_CHECK_NULL(pExp->base.pParam, code, lino, _end, terrno);
if (TSDB_CODE_SUCCESS == code) {
pExp->base.numOfParams = 1;
SDataType* pType = &pCond->node.resType;
pExp->base.resSchema = createResSchema(pType->type, pType->bytes, slotId, pType->scale, pType->precision, pCond->node.aliasName);
pExp->pExpr->_optrRoot.pRootNode = pNode;
}
} else {
ASSERT(0);
}

View File

@ -179,7 +179,7 @@ int32_t createProjectOperatorInfo(SOperatorInfo* downstream, SProjectPhysiNode*
return code;
_error:
destroyProjectOperatorInfo(pInfo);
if (pInfo != NULL) destroyProjectOperatorInfo(pInfo);
if (pOperator != NULL) {
pOperator->info = NULL;
destroyOperator(pOperator);
@ -531,7 +531,7 @@ int32_t createIndefinitOutputOperatorInfo(SOperatorInfo* downstream, SPhysiNode*
return code;
_error:
destroyIndefinitOperatorInfo(pInfo);
if (pInfo != NULL) destroyIndefinitOperatorInfo(pInfo);
if (pOperator != NULL) {
pOperator->info = NULL;
destroyOperator(pOperator);

View File

@ -1568,6 +1568,8 @@ void resetTableScanInfo(STableScanInfo* pTableScanInfo, STimeWindow* pWin, uint6
static SSDataBlock* readPreVersionData(SOperatorInfo* pTableScanOp, uint64_t tbUid, TSKEY startTs, TSKEY endTs,
int64_t maxVersion) {
int32_t code = TSDB_CODE_SUCCESS;
int32_t lino = 0;
STableKeyInfo tblInfo = {.uid = tbUid, .groupId = 0};
STableScanInfo* pTableScanInfo = pTableScanOp->info;
@ -1582,37 +1584,33 @@ static SSDataBlock* readPreVersionData(SOperatorInfo* pTableScanOp, uint64_t tbU
SSDataBlock* pBlock = pTableScanInfo->pResBlock;
STsdbReader* pReader = NULL;
int32_t code = pAPI->tsdReader.tsdReaderOpen(pTableScanInfo->base.readHandle.vnode, &cond, &tblInfo, 1, pBlock,
code = pAPI->tsdReader.tsdReaderOpen(pTableScanInfo->base.readHandle.vnode, &cond, &tblInfo, 1, pBlock,
(void**)&pReader, GET_TASKID(pTaskInfo), NULL);
if (code != TSDB_CODE_SUCCESS) {
terrno = code;
T_LONG_JMP(pTaskInfo->env, code);
return NULL;
}
QUERY_CHECK_CODE(code, lino, _end);
bool hasNext = false;
code = pAPI->tsdReader.tsdNextDataBlock(pReader, &hasNext);
if (code != TSDB_CODE_SUCCESS) {
terrno = code;
T_LONG_JMP(pTaskInfo->env, code);
return NULL;
}
QUERY_CHECK_CODE(code, lino, _end);
if (hasNext) {
SSDataBlock* p = NULL;
code = pAPI->tsdReader.tsdReaderRetrieveDataBlock(pReader, &p, NULL);
if (code != TSDB_CODE_SUCCESS) {
return NULL;
}
QUERY_CHECK_CODE(code, lino, _end);
doSetTagColumnData(&pTableScanInfo->base, pBlock, pTaskInfo, pBlock->info.rows);
pBlock->info.id.groupId = tableListGetTableGroupId(pTableScanInfo->base.pTableListInfo, pBlock->info.id.uid);
}
_end:
pAPI->tsdReader.tsdReaderClose(pReader);
qDebug("retrieve prev rows:%" PRId64 ", skey:%" PRId64 ", ekey:%" PRId64 " uid:%" PRIu64 ", max ver:%" PRId64
", suid:%" PRIu64,
pBlock->info.rows, startTs, endTs, tbUid, maxVersion, cond.suid);
if (code != TSDB_CODE_SUCCESS) {
qError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
terrno = code;
return NULL;
}
return pBlock->info.rows > 0 ? pBlock : NULL;
}
@ -2259,6 +2257,10 @@ static int32_t generatePartitionDelResBlock(SStreamScanInfo* pInfo, SSDataBlock*
uint64_t srcUid = srcUidData[delI];
char tbname[VARSTR_HEADER_SIZE + TSDB_TABLE_NAME_LEN] = {0};
SSDataBlock* pPreRes = readPreVersionData(pInfo->pTableScanOp, srcUid, srcStartTsCol[delI], srcEndTsCol[delI], ver);
if (!pPreRes) {
qError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(terrno));
continue;
}
code = blockDataEnsureCapacity(pDestBlock, pDestBlock->info.rows + pPreRes->info.rows);
QUERY_CHECK_CODE(code, lino, _end);
for (int32_t preJ = 0; preJ < pPreRes->info.rows; preJ++) {
@ -2331,6 +2333,10 @@ static int32_t generateDeleteResultBlockImpl(SStreamScanInfo* pInfo, SSDataBlock
if (winCode != TSDB_CODE_SUCCESS) {
SSDataBlock* pPreRes = readPreVersionData(pInfo->pTableScanOp, srcUid, srcStartTsCol[i], srcStartTsCol[i], ver);
if (!pPreRes) {
qError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(terrno));
continue;
}
printDataBlock(pPreRes, "pre res", GET_TASKID(pInfo->pStreamScanOp->pTaskInfo));
code = calBlockTbName(pInfo, pPreRes, 0);
QUERY_CHECK_CODE(code, lino, _end);
@ -5952,8 +5958,10 @@ _error:
qError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
}
pTaskInfo->code = code;
if (pInfo != NULL) {
pInfo->base.pTableListInfo = NULL;
if (pInfo != NULL) destroyTableMergeScanOperatorInfo(pInfo);
destroyTableMergeScanOperatorInfo(pInfo);
}
if (pOperator != NULL) {
pOperator->info = NULL;
destroyOperator(pOperator);

View File

@ -758,7 +758,10 @@ SFillColInfo* createFillColInfo(SExprInfo* pExpr, int32_t numOfFillExpr, SExprIn
SValueNode* pv = (SValueNode*)nodesListGetNode(pValNode->pNodeList, index);
QUERY_CHECK_NULL(pv, code, lino, _end, terrno);
nodesValueNodeToVariant(pv, &pFillCol[i].fillVal);
code = nodesValueNodeToVariant(pv, &pFillCol[i].fillVal);
}
if (TSDB_CODE_SUCCESS != code) {
goto _end;
}
}
pFillCol->numOfFillExpr = numOfFillExpr;

View File

@ -434,7 +434,7 @@ static int32_t createPartialFunction(const SFunctionNode* pSrcFunc, SFunctionNod
(*pPartialFunc)->originalFuncId = pSrcFunc->hasOriginalFunc ? pSrcFunc->originalFuncId : pSrcFunc->funcId;
char name[TSDB_FUNC_NAME_LEN + TSDB_NAME_DELIMITER_LEN + TSDB_POINTER_PRINT_BYTES + 1] = {0};
int32_t len = snprintf(name, sizeof(name) - 1, "%s.%p", (*pPartialFunc)->functionName, pSrcFunc);
(void)taosCreateMD5Hash(name, len);
(void)taosHashBinary(name, len);
(void)strncpy((*pPartialFunc)->node.aliasName, name, TSDB_COL_NAME_LEN - 1);
(*pPartialFunc)->hasPk = pSrcFunc->hasPk;
(*pPartialFunc)->pkBytes = pSrcFunc->pkBytes;

View File

@ -141,7 +141,7 @@ static int32_t callocNodeChunk(SNodeAllocator* pAllocator, SNodeMemChunk** pOutC
static int32_t nodesCallocImpl(int32_t size, void** pOut) {
if (NULL == g_pNodeAllocator) {
*pOut = taosMemoryCalloc(1, size);
if (!pOut) return TSDB_CODE_OUT_OF_MEMORY;
if (!*pOut) return TSDB_CODE_OUT_OF_MEMORY;
return TSDB_CODE_SUCCESS;
}
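The one-character fix above matters because pOut is the address of the caller's pointer and can never be NULL here; only *pOut reflects the allocation result. In miniature (illustrative):
void  *buf  = NULL;
void **pOut = &buf;
*pOut = taosMemoryCalloc(1, 64);
if (!pOut)  { /* never taken: pOut is a valid address        */ }
if (!*pOut) { /* correct: true exactly when the calloc fails */ }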
@ -2638,11 +2638,12 @@ int32_t nodesGetOutputNumFromSlotList(SNodeList* pSlots) {
return num;
}
void nodesValueNodeToVariant(const SValueNode* pNode, SVariant* pVal) {
int32_t nodesValueNodeToVariant(const SValueNode* pNode, SVariant* pVal) {
int32_t code = 0;
if (pNode->isNull) {
pVal->nType = TSDB_DATA_TYPE_NULL;
pVal->nLen = tDataTypes[TSDB_DATA_TYPE_NULL].bytes;
return;
return code;
}
pVal->nType = pNode->node.resType.type;
pVal->nLen = pNode->node.resType.bytes;
@ -2676,13 +2677,21 @@ void nodesValueNodeToVariant(const SValueNode* pNode, SVariant* pVal) {
case TSDB_DATA_TYPE_VARBINARY:
case TSDB_DATA_TYPE_GEOMETRY:
pVal->pz = taosMemoryMalloc(pVal->nLen + 1);
if (pVal->pz) {
memcpy(pVal->pz, pNode->datum.p, pVal->nLen);
pVal->pz[pVal->nLen] = 0;
} else {
code = terrno;
}
break;
case TSDB_DATA_TYPE_JSON:
pVal->nLen = getJsonValueLen(pNode->datum.p);
pVal->pz = taosMemoryMalloc(pVal->nLen);
if (pVal->pz) {
memcpy(pVal->pz, pNode->datum.p, pVal->nLen);
} else {
code = terrno;
}
break;
case TSDB_DATA_TYPE_DECIMAL:
case TSDB_DATA_TYPE_BLOB:
@ -2690,6 +2699,7 @@ void nodesValueNodeToVariant(const SValueNode* pNode, SVariant* pVal) {
default:
break;
}
return code;
}
int32_t nodesMergeConds(SNode** pDst, SNodeList** pSrc) {
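With the void to int32_t change, callers must now propagate the allocation failure from the var-length branches; the executor hunks earlier in this commit (createExprFromOneNode, createFillColInfo) follow this shape (a sketch):
code = nodesValueNodeToVariant(pValNode, &pExp->base.pParam[0].param);
if (TSDB_CODE_SUCCESS != code) return code;  // OOM while duplicating a var-length datum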

View File

@ -316,15 +316,8 @@ SNode* releaseRawExprNode(SAstCreateContext* pCxt, SNode* pNode) {
// See TS-3398.
// Len of pRawExpr->p could be larger than len of aliasName[TSDB_COL_NAME_LEN].
// If aliasName is truncated, hash value of aliasName could be the same.
T_MD5_CTX ctx;
tMD5Init(&ctx);
tMD5Update(&ctx, (uint8_t*)pRawExpr->p, pRawExpr->n);
tMD5Final(&ctx);
char* p = pExpr->aliasName;
for (uint8_t i = 0; i < tListLen(ctx.digest); ++i) {
sprintf(p, "%02x", ctx.digest[i]);
p += 2;
}
uint64_t hashVal = MurmurHash3_64(pRawExpr->p, pRawExpr->n);
sprintf(pExpr->aliasName, "%"PRIu64, hashVal);
strncpy(pExpr->userAlias, pRawExpr->p, len);
pExpr->userAlias[len] = '\0';
}

View File

@ -4805,7 +4805,7 @@ static int32_t createMultiResFunc(SFunctionNode* pSrcFunc, SExprNode* pExpr, SNo
strcpy(pFunc->node.aliasName, pCol->colName);
} else {
len = snprintf(buf, sizeof(buf) - 1, "%s(%s.%s)", pSrcFunc->functionName, pCol->tableAlias, pCol->colName);
(void)taosCreateMD5Hash(buf, len);
(void)taosHashBinary(buf, len);
strncpy(pFunc->node.aliasName, buf, TSDB_COL_NAME_LEN - 1);
len = snprintf(buf, sizeof(buf) - 1, "%s(%s)", pSrcFunc->functionName, pCol->colName);
// note: userAlias could be truncated here
@ -4813,7 +4813,7 @@ static int32_t createMultiResFunc(SFunctionNode* pSrcFunc, SExprNode* pExpr, SNo
}
} else {
len = snprintf(buf, sizeof(buf) - 1, "%s(%s)", pSrcFunc->functionName, pExpr->aliasName);
(void)taosCreateMD5Hash(buf, len);
(void)taosHashBinary(buf, len);
strncpy(pFunc->node.aliasName, buf, TSDB_COL_NAME_LEN - 1);
len = snprintf(buf, sizeof(buf) - 1, "%s(%s)", pSrcFunc->functionName, pExpr->userAlias);
// note: userAlias could be truncated here
@ -11864,7 +11864,7 @@ static int32_t buildCreateTSMAReq(STranslateContext* pCxt, SCreateTSMAStmt* pStm
if (checkRecursiveTsmaInterval(pRecursiveTsma->interval, pRecursiveTsma->unit, pInterval->datum.i,
pInterval->unit, pDbInfo.precision, true)) {
} else {
code = TSDB_CODE_TSMA_INVALID_PARA;
code = TSDB_CODE_TSMA_INVALID_INTERVAL;
}
}
}

View File

@ -3164,7 +3164,7 @@ static void partTagsSetAlias(char* pAlias, const char* pTableAlias, const char*
char name[TSDB_COL_FNAME_LEN + 1] = {0};
int32_t len = snprintf(name, TSDB_COL_FNAME_LEN, "%s.%s", pTableAlias, pColName);
(void)taosCreateMD5Hash(name, len);
(void)taosHashBinary(name, len);
strncpy(pAlias, name, TSDB_COL_NAME_LEN - 1);
}
@ -3827,7 +3827,7 @@ static int32_t rewriteUniqueOptCreateFirstFunc(SFunctionNode* pSelectValue, SNod
int64_t pointer = (int64_t)pFunc;
char name[TSDB_FUNC_NAME_LEN + TSDB_POINTER_PRINT_BYTES + TSDB_NAME_DELIMITER_LEN + 1] = {0};
int32_t len = snprintf(name, sizeof(name) - 1, "%s.%" PRId64 "", pFunc->functionName, pointer);
(void)taosCreateMD5Hash(name, len);
(void)taosHashBinary(name, len);
strncpy(pFunc->node.aliasName, name, TSDB_COL_NAME_LEN - 1);
}
SNode* pNew = NULL;
@ -7197,7 +7197,7 @@ static int32_t tsmaOptCreateWStart(int8_t precision, SFunctionNode** pWStartOut)
int64_t pointer = (int64_t)pWStart;
char name[TSDB_COL_NAME_LEN + TSDB_POINTER_PRINT_BYTES + TSDB_NAME_DELIMITER_LEN + 1] = {0};
int32_t len = snprintf(name, sizeof(name) - 1, "%s.%" PRId64 "", pWStart->functionName, pointer);
(void)taosCreateMD5Hash(name, len);
(void)taosHashBinary(name, len);
strncpy(pWStart->node.aliasName, name, TSDB_COL_NAME_LEN - 1);
pWStart->node.resType.precision = precision;

View File

@ -39,46 +39,100 @@ typedef struct SPhysiPlanContext {
bool hasSysScan;
} SPhysiPlanContext;
static int32_t getSlotKey(SNode* pNode, const char* pStmtName, char* pKey, int32_t keyBufSize) {
int32_t len = 0;
static int32_t getSlotKey(SNode* pNode, const char* pStmtName, char** ppKey, int32_t *pLen) {
int32_t code = 0;
if (QUERY_NODE_COLUMN == nodeType(pNode)) {
SColumnNode* pCol = (SColumnNode*)pNode;
if (NULL != pStmtName) {
if ('\0' != pStmtName[0]) {
len = snprintf(pKey, keyBufSize, "%s.%s", pStmtName, pCol->node.aliasName);
return taosCreateMD5Hash(pKey, len);
*ppKey = taosMemoryCalloc(1, TSDB_TABLE_NAME_LEN + 1 + TSDB_COL_NAME_LEN + 1);
if (!*ppKey) {
return terrno;
}
strcat(*ppKey, pStmtName);
strcat(*ppKey, ".");
strcat(*ppKey, pCol->node.aliasName);
*pLen = taosHashBinary(*ppKey, strlen(*ppKey));
return code;
} else {
return snprintf(pKey, keyBufSize, "%s", pCol->node.aliasName);
*ppKey = taosMemoryCalloc(1, TSDB_COL_NAME_LEN + 1);
if (!*ppKey) {
return terrno;
}
strcat(*ppKey, pCol->node.aliasName);
*pLen = strlen(*ppKey);
return code;
}
}
if ('\0' == pCol->tableAlias[0]) {
return snprintf(pKey, keyBufSize, "%s", pCol->colName);
*ppKey = taosMemoryCalloc(1, TSDB_COL_NAME_LEN + 1);
if (!*ppKey) {
return terrno;
}
strcat(*ppKey, pCol->colName);
*pLen = strlen(*ppKey);
return code;
}
len = snprintf(pKey, keyBufSize, "%s.%s", pCol->tableAlias, pCol->colName);
return taosCreateMD5Hash(pKey, len);
*ppKey = taosMemoryCalloc(1, TSDB_TABLE_NAME_LEN + 1 + TSDB_COL_NAME_LEN + 1);
if (!*ppKey) {
return terrno;
}
strcat(*ppKey, pCol->tableAlias);
strcat(*ppKey, ".");
strcat(*ppKey, pCol->colName);
*pLen = taosHashBinary(*ppKey, strlen(*ppKey));
return code;
} else if (QUERY_NODE_FUNCTION == nodeType(pNode)) {
SFunctionNode* pFunc = (SFunctionNode*)pNode;
if (FUNCTION_TYPE_TBNAME == pFunc->funcType) {
SValueNode* pVal = (SValueNode*)nodesListGetNode(pFunc->pParameterList, 0);
if (pVal) {
if (NULL != pStmtName && '\0' != pStmtName[0]) {
len = snprintf(pKey, keyBufSize, "%s.%s", pStmtName, ((SExprNode*)pNode)->aliasName);
return taosCreateMD5Hash(pKey, len);
*ppKey = taosMemoryCalloc(1, TSDB_TABLE_NAME_LEN + 1 + TSDB_COL_NAME_LEN + 1);
if (!*ppKey) {
return terrno;
}
len = snprintf(pKey, keyBufSize, "%s.%s", pVal->literal, ((SExprNode*)pNode)->aliasName);
return taosCreateMD5Hash(pKey, len);
strcat(*ppKey, pStmtName);
strcat(*ppKey, ".");
strcat(*ppKey, ((SExprNode*)pNode)->aliasName);
*pLen = taosHashBinary(*ppKey, strlen(*ppKey));
return code;
}
*ppKey = taosMemoryCalloc(1, strlen(pVal->literal) + 1 + TSDB_COL_NAME_LEN + 1);
if (!*ppKey) {
return terrno;
}
strcat(*ppKey, pVal->literal);
strcat(*ppKey, ".");
strcat(*ppKey, ((SExprNode*)pNode)->aliasName);
*pLen = taosHashBinary(*ppKey, strlen(*ppKey));
return code;
}
}
}
if (NULL != pStmtName && '\0' != pStmtName[0]) {
len = snprintf(pKey, keyBufSize, "%s.%s", pStmtName, ((SExprNode*)pNode)->aliasName);
return taosCreateMD5Hash(pKey, len);
*ppKey = taosMemoryCalloc(1, TSDB_TABLE_NAME_LEN + 1 + TSDB_COL_NAME_LEN + 1);
if (!*ppKey) {
return terrno;
}
strcat(*ppKey, pStmtName);
strcat(*ppKey, ".");
strcat(*ppKey, ((SExprNode*)pNode)->aliasName);
*pLen = taosHashBinary(*ppKey, strlen(*ppKey));
return code;
}
return snprintf(pKey, keyBufSize, "%s", ((SExprNode*)pNode)->aliasName);
*ppKey = taosMemoryCalloc(1, TSDB_COL_NAME_LEN + 1);
if (!*ppKey) {
return terrno;
}
strcat(*ppKey, ((SExprNode*)pNode)->aliasName);
*pLen = strlen(*ppKey);
return code;
}
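The rewrite above changes the getSlotKey contract: instead of printing into a caller-supplied fixed buffer, where long stmt-name/alias combinations could truncate silently, the function now allocates the key itself and reports its length through an out parameter, so every caller owns and must free the returned string. A reduced sketch of the new contract, with hypothetical names and plain calloc standing in for taosMemoryCalloc:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* callee allocates *ppKey sized to the actual contents; caller frees it */
static int getSlotKeySketch(const char *tableAlias, const char *colName,
                            char **ppKey, int *pLen) {
  size_t cap = strlen(tableAlias) + 1 + strlen(colName) + 1;
  *ppKey = calloc(1, cap);
  if (*ppKey == NULL) return -1; /* the real code returns terrno here */
  snprintf(*ppKey, cap, "%s.%s", tableAlias, colName);
  *pLen = (int)strlen(*ppKey);
  return 0;
}

int main(void) {
  char *key = NULL;
  int   len = 0;
  if (getSlotKeySketch("t1", "current", &key, &len) == 0) {
    printf("key=%s len=%d\n", key, len);
    free(key); /* callers must release, as the hunks below now do */
  }
  return 0;
}

This ownership change is why every later hunk in this file adds a taosMemoryFree(name) on both the success and the error paths.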
static SNode* createSlotDesc(SPhysiPlanContext* pCxt, const char* pName, const SNode* pNode, int16_t slotId,
bool output, bool reserve) {
@ -132,8 +186,8 @@ static int32_t putSlotToHashImpl(int16_t dataBlockId, int16_t slotId, const char
return taosHashPut(pHash, pName, len, &index, sizeof(SSlotIndex));
}
static int32_t putSlotToHash(const char* pName, int16_t dataBlockId, int16_t slotId, SNode* pNode, SHashObj* pHash) {
return putSlotToHashImpl(dataBlockId, slotId, pName, strlen(pName), pHash);
static int32_t putSlotToHash(const char* pName, int32_t len, int16_t dataBlockId, int16_t slotId, SNode* pNode, SHashObj* pHash) {
return putSlotToHashImpl(dataBlockId, slotId, pName, len, pHash);
}
static int32_t createDataBlockDescHash(SPhysiPlanContext* pCxt, int32_t capacity, int16_t dataBlockId,
@ -162,12 +216,16 @@ static int32_t buildDataBlockSlots(SPhysiPlanContext* pCxt, SNodeList* pList, SD
int16_t slotId = 0;
SNode* pNode = NULL;
FOREACH(pNode, pList) {
char name[TSDB_COL_FNAME_LEN + 1] = {0};
(void)getSlotKey(pNode, NULL, name, TSDB_COL_FNAME_LEN);
code = nodesListStrictAppend(pDataBlockDesc->pSlots, createSlotDesc(pCxt, name, pNode, slotId, true, false));
char* name = NULL;
int32_t len = 0;
code = getSlotKey(pNode, NULL, &name, &len);
if (TSDB_CODE_SUCCESS == code) {
code = putSlotToHash(name, pDataBlockDesc->dataBlockId, slotId, pNode, pHash);
code = nodesListStrictAppend(pDataBlockDesc->pSlots, createSlotDesc(pCxt, name, pNode, slotId, true, false));
}
if (TSDB_CODE_SUCCESS == code) {
code = putSlotToHash(name, len, pDataBlockDesc->dataBlockId, slotId, pNode, pHash);
}
taosMemoryFree(name);
if (TSDB_CODE_SUCCESS == code) {
pDataBlockDesc->totalRowSize += ((SExprNode*)pNode)->resType.bytes;
pDataBlockDesc->outputRowSize += ((SExprNode*)pNode)->resType.bytes;
@ -226,8 +284,10 @@ static int32_t addDataBlockSlotsImpl(SPhysiPlanContext* pCxt, SNodeList* pList,
SNode* pNode = NULL;
FOREACH(pNode, pList) {
SNode* pExpr = QUERY_NODE_ORDER_BY_EXPR == nodeType(pNode) ? ((SOrderByExprNode*)pNode)->pExpr : pNode;
char name[TSDB_COL_FNAME_LEN + 1] = {0};
int32_t len = getSlotKey(pExpr, pStmtName, name, TSDB_COL_FNAME_LEN);
char *name = NULL;
int32_t len = 0;
code = getSlotKey(pExpr, pStmtName, &name, &len);
if (TSDB_CODE_SUCCESS == code) {
SSlotIndex* pIndex = taosHashGet(pHash, name, len);
if (NULL == pIndex) {
code =
@ -244,7 +304,9 @@ static int32_t addDataBlockSlotsImpl(SPhysiPlanContext* pCxt, SNodeList* pList,
} else {
slotId = getUnsetSlotId(pIndex->pSlotIdsInfo);
}
}
taosMemoryFree(name);
if (TSDB_CODE_SUCCESS == code) {
SNode* pTarget = NULL;
code = createTarget(pNode, pDataBlockDesc->dataBlockId, slotId, &pTarget);
@ -315,8 +377,12 @@ static void dumpSlots(const char* pName, SHashObj* pHash) {
static EDealRes doSetSlotId(SNode* pNode, void* pContext) {
if (QUERY_NODE_COLUMN == nodeType(pNode) && 0 != strcmp(((SColumnNode*)pNode)->colName, "*")) {
SSetSlotIdCxt* pCxt = (SSetSlotIdCxt*)pContext;
char name[TSDB_COL_FNAME_LEN + 1] = {0};
int32_t len = getSlotKey(pNode, NULL, name, TSDB_COL_FNAME_LEN);
char *name = NULL;
int32_t len = 0;
pCxt->errCode = getSlotKey(pNode, NULL, &name, &len);
if (TSDB_CODE_SUCCESS != pCxt->errCode) {
return DEAL_RES_ERROR;
}
SSlotIndex* pIndex = taosHashGet(pCxt->pLeftHash, name, len);
if (NULL == pIndex) {
pIndex = taosHashGet(pCxt->pRightHash, name, len);
@ -327,8 +393,10 @@ static EDealRes doSetSlotId(SNode* pNode, void* pContext) {
dumpSlots("left datablock desc", pCxt->pLeftHash);
dumpSlots("right datablock desc", pCxt->pRightHash);
pCxt->errCode = TSDB_CODE_PLAN_INTERNAL_ERROR;
taosMemoryFree(name);
return DEAL_RES_ERROR;
}
taosMemoryFree(name);
((SColumnNode*)pNode)->dataBlockId = pIndex->dataBlockId;
((SColumnNode*)pNode)->slotId = ((SSlotIdInfo*)taosArrayGet(pIndex->pSlotIdsInfo, 0))->slotId;
return DEAL_RES_IGNORE_CHILD;
@ -1174,7 +1242,6 @@ static int32_t createHashJoinColList(int16_t lBlkId, int16_t rBlkId, SNode* pEq1
static int32_t sortHashJoinTargets(int16_t lBlkId, int16_t rBlkId, SHashJoinPhysiNode* pJoin) {
SNode* pNode = NULL;
char name[TSDB_COL_FNAME_LEN + 1] = {0};
SSHashObj* pHash = tSimpleHashInit(pJoin->pTargets->length, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY));
if (NULL == pHash) {
return TSDB_CODE_OUT_OF_MEMORY;
@ -1185,8 +1252,13 @@ static int32_t sortHashJoinTargets(int16_t lBlkId, int16_t rBlkId, SHashJoinPhys
if (TSDB_CODE_SUCCESS == code) {
FOREACH(pNode, pJoin->pTargets) {
SColumnNode* pCol = (SColumnNode*)pNode;
int32_t len = getSlotKey(pNode, NULL, name, TSDB_COL_FNAME_LEN);
code = tSimpleHashPut(pHash, name, len, &pCol, POINTER_BYTES);
char *pName = NULL;
int32_t len = 0;
code = getSlotKey(pNode, NULL, &pName, &len);
if (TSDB_CODE_SUCCESS == code) {
code = tSimpleHashPut(pHash, pName, len, &pCol, POINTER_BYTES);
}
taosMemoryFree(pName);
if (TSDB_CODE_SUCCESS != code) {
break;
}
@ -1197,36 +1269,44 @@ static int32_t sortHashJoinTargets(int16_t lBlkId, int16_t rBlkId, SHashJoinPhys
pJoin->pTargets = pNew;
FOREACH(pNode, pJoin->pOnLeft) {
char* pName = NULL;
SColumnNode* pCol = (SColumnNode*)pNode;
int32_t len = getSlotKey(pNode, NULL, name, TSDB_COL_FNAME_LEN);
SNode** p = tSimpleHashGet(pHash, name, len);
int32_t len = 0;
code = getSlotKey(pNode, NULL, &pName, &len);
if (TSDB_CODE_SUCCESS == code) {
SNode** p = tSimpleHashGet(pHash, pName, len);
if (p) {
code = nodesListStrictAppend(pJoin->pTargets, *p);
if (TSDB_CODE_SUCCESS == code) {
code = tSimpleHashRemove(pHash, pName, len);
}
}
}
taosMemoryFree(pName);
if (TSDB_CODE_SUCCESS != code) {
break;
}
code = tSimpleHashRemove(pHash, name, len);
if (TSDB_CODE_SUCCESS != code) {
break;
}
}
}
}
if (TSDB_CODE_SUCCESS == code) {
FOREACH(pNode, pJoin->pOnRight) {
char* pName = NULL;
SColumnNode* pCol = (SColumnNode*)pNode;
int32_t len = getSlotKey(pNode, NULL, name, TSDB_COL_FNAME_LEN);
SNode** p = tSimpleHashGet(pHash, name, len);
int32_t len = 0;
code = getSlotKey(pNode, NULL, &pName, &len);
if (TSDB_CODE_SUCCESS == code) {
SNode** p = tSimpleHashGet(pHash, pName, len);
if (p) {
code = nodesListStrictAppend(pJoin->pTargets, *p);
if (TSDB_CODE_SUCCESS == code) {
code = tSimpleHashRemove(pHash, pName, len);
}
}
}
taosMemoryFree(pName);
if (TSDB_CODE_SUCCESS != code) {
break;
}
code = tSimpleHashRemove(pHash, name, len);
if (TSDB_CODE_SUCCESS != code) {
break;
}
}
}
}
if (TSDB_CODE_SUCCESS == code) {

View File

@ -432,7 +432,7 @@ static int32_t stbSplAppendWStart(SNodeList* pFuncs, int32_t* pIndex, uint8_t pr
int64_t pointer = (int64_t)pWStart;
char name[TSDB_COL_NAME_LEN + TSDB_POINTER_PRINT_BYTES + TSDB_NAME_DELIMITER_LEN + 1] = {0};
int32_t len = snprintf(name, sizeof(name) - 1, "%s.%" PRId64 "", pWStart->functionName, pointer);
(void)taosCreateMD5Hash(name, len);
(void)taosHashBinary(name, len);
strncpy(pWStart->node.aliasName, name, TSDB_COL_NAME_LEN - 1);
pWStart->node.resType.precision = precision;
@ -464,7 +464,7 @@ static int32_t stbSplAppendWEnd(SWindowLogicNode* pWin, int32_t* pIndex) {
int64_t pointer = (int64_t)pWEnd;
char name[TSDB_COL_NAME_LEN + TSDB_POINTER_PRINT_BYTES + TSDB_NAME_DELIMITER_LEN + 1] = {0};
int32_t len = snprintf(name, sizeof(name) - 1, "%s.%" PRId64 "", pWEnd->functionName, pointer);
(void)taosCreateMD5Hash(name, len);
(void)taosHashBinary(name, len);
strncpy(pWEnd->node.aliasName, name, TSDB_COL_NAME_LEN - 1);
code = fmGetFuncInfo(pWEnd, NULL, 0);

View File

@ -631,7 +631,7 @@ SFunctionNode* createGroupKeyAggFunc(SColumnNode* pGroupCol) {
if (TSDB_CODE_SUCCESS == code) {
char name[TSDB_FUNC_NAME_LEN + TSDB_NAME_DELIMITER_LEN + TSDB_POINTER_PRINT_BYTES + 1] = {0};
int32_t len = snprintf(name, sizeof(name) - 1, "%s.%p", pFunc->functionName, pFunc);
(void)taosCreateMD5Hash(name, len);
(void)taosHashBinary(name, len);
strncpy(pFunc->node.aliasName, name, TSDB_COL_NAME_LEN - 1);
}
}

View File

@ -2908,6 +2908,7 @@ int32_t filterGenerateColRange(SFilterInfo *info, SFilterGroupCtx **gRes, int32_
info->colRangeNum = colNum;
info->colRange = taosMemoryCalloc(colNum, POINTER_BYTES);
if (info->colRange == NULL) {
info->colRangeNum = 0;
FLT_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY);
}

View File

@ -48,6 +48,13 @@ extern "C" {
#define stTrace(...) do { if (stDebugFlag & DEBUG_TRACE) { taosPrintLog("STM ", DEBUG_TRACE, stDebugFlag, __VA_ARGS__); }} while(0)
// clang-format on
typedef struct SStreamTmrInfo {
int32_t activeCounter; // make sure only one checkpoint-trigger check tmr is launched
tmr_h tmrHandle;
int64_t launchChkptId;
int8_t isActive;
} SStreamTmrInfo;
struct SActiveCheckpointInfo {
TdThreadMutex lock;
int32_t transId;
@ -59,12 +66,12 @@ struct SActiveCheckpointInfo {
SArray* pReadyMsgList; // SArray<STaskCheckpointReadyInfo*>
int8_t allUpstreamTriggerRecv;
SArray* pCheckpointReadyRecvList; // SArray<STaskDownstreamReadyInfo>
int32_t checkCounter;
tmr_h pChkptTriggerTmr;
int32_t sendReadyCheckCounter;
tmr_h pSendReadyMsgTmr;
SStreamTmrInfo chkptTriggerMsgTmr;
SStreamTmrInfo chkptReadyMsgTmr;
};
int32_t streamCleanBeforeQuitTmr(SStreamTmrInfo* pInfo, SStreamTask* pTask);
typedef struct {
int8_t type;
SSDataBlock* pBlock;
@ -222,7 +229,7 @@ int32_t streamMetaSendHbHelper(SStreamMeta* pMeta);
ECHECKPOINT_BACKUP_TYPE streamGetCheckpointBackupType();
int32_t streamTaskDownloadCheckpointData(const char* id, char* path);
int32_t streamTaskDownloadCheckpointData(const char* id, char* path, int64_t checkpointId);
int32_t streamTaskOnNormalTaskReady(SStreamTask* pTask);
int32_t streamTaskOnScanHistoryTaskReady(SStreamTask* pTask);

View File

@ -247,7 +247,7 @@ int32_t rebuildDirFromCheckpoint(const char* path, int64_t chkpId, char** dst) {
} else {
stError("failed to start stream backend at %s, reason: %s, restart from default state dir:%s", chkp,
tstrerror(TAOS_SYSTEM_ERROR(errno)), state);
tstrerror(terrno), state);
code = taosMkDir(state);
if (code != 0) {
code = TAOS_SYSTEM_ERROR(errno);
@ -447,7 +447,7 @@ int32_t rebuildFromRemoteChkp_rsync(const char* key, char* checkpointPath, int64
cleanDir(defaultPath, key);
stDebug("clear local default dir before downloading checkpoint data:%s succ", defaultPath);
code = streamTaskDownloadCheckpointData(key, checkpointPath);
code = streamTaskDownloadCheckpointData(key, checkpointPath, checkpointId);
if (code != 0) {
stError("failed to download checkpoint data:%s", key);
return code;
@ -482,7 +482,7 @@ int32_t rebuildDataFromS3(char* chkpPath, int64_t chkpId) {
int32_t rebuildFromRemoteChkp_s3(const char* key, char* chkpPath, int64_t chkpId, char* defaultPath) {
int8_t rename = 0;
int32_t code = streamTaskDownloadCheckpointData(key, chkpPath);
int32_t code = streamTaskDownloadCheckpointData(key, chkpPath, chkpId);
if (code != 0) {
return code;
}
@ -683,7 +683,7 @@ static int32_t rebuildFromLocalCheckpoint(const char* pTaskIdStr, const char* ch
defaultPath);
}
} else {
code = TSDB_CODE_FAILED;
code = terrno;
stError("%s no valid data for checkpointId:%" PRId64 " in %s", pTaskIdStr, checkpointId, checkpointPath);
}
@ -763,7 +763,7 @@ int32_t restoreCheckpointData(const char* path, const char* key, int64_t chkptId
}
if (code != 0) {
stError("failed to start stream backend at %s, restart from default defaultPath:%s, reason:%s", checkpointPath,
stError("failed to start stream backend at %s, restart from defaultPath:%s, reason:%s", checkpointPath,
defaultPath, tstrerror(code));
code = 0; // reset the error code
}
@ -1452,8 +1452,14 @@ int32_t taskDbBuildSnap(void* arg, SArray* pSnap) {
code = TSDB_CODE_OUT_OF_MEMORY;
break;
}
(void)taosArrayPush(pSnap, &snap);
if (taosArrayPush(pSnap, &snap) == NULL) {
taskDbUnRefChkp(pTaskDb, pTaskDb->chkpId);
taskDbRemoveRef(pTaskDb);
code = terrno;
break;
}
taskDbRemoveRef(pTaskDb);
pIter = taosHashIterate(pMeta->pTaskDbUnique, pIter);
}
streamMutexUnlock(&pMeta->backendMutex);

View File

@ -20,7 +20,7 @@
static int32_t downloadCheckpointDataByName(const char* id, const char* fname, const char* dstName);
static int32_t deleteCheckpointFile(const char* id, const char* name);
static int32_t streamTaskUploadCheckpoint(const char* id, const char* path);
static int32_t streamTaskUploadCheckpoint(const char* id, const char* path, int64_t checkpointId);
static int32_t deleteCheckpoint(const char* id);
static int32_t downloadCheckpointByNameS3(const char* id, const char* fname, const char* dstName);
static int32_t continueDispatchCheckpointTriggerBlock(SStreamDataBlock* pBlock, SStreamTask* pTask);
@ -297,14 +297,26 @@ int32_t streamProcessCheckpointTriggerBlock(SStreamTask* pTask, SStreamDataBlock
return code;
}
// if the previously launched timer has not started yet, do not start a new timer
// todo: fix this bug: the previously set checkpoint-trigger check tmr may still be running while we happen to try to
// launch a new checkpoint-trigger timer right now.
// And if we don't start a new timer, the loss of a checkpoint-trigger message may cause the whole checkpoint
// procedure to become stuck.
SStreamTmrInfo* pTmrInfo = &pActiveInfo->chkptTriggerMsgTmr;
int8_t old = atomic_val_compare_exchange_8(&pTmrInfo->isActive, 0, 1);
if (old == 0) {
int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1);
stDebug("s-task:%s start checkpoint-trigger monitor in 10s, ref:%d ", pTask->id.idStr, ref);
streamMetaAcquireOneTask(pTask);
if (pActiveInfo->pChkptTriggerTmr == NULL) {
pActiveInfo->pChkptTriggerTmr = taosTmrStart(checkpointTriggerMonitorFn, 100, pTask, streamTimer);
if (pTmrInfo->tmrHandle == NULL) {
pTmrInfo->tmrHandle = taosTmrStart(checkpointTriggerMonitorFn, 200, pTask, streamTimer);
} else {
streamTmrReset(checkpointTriggerMonitorFn, 100, pTask, streamTimer, &pActiveInfo->pChkptTriggerTmr, vgId, "trigger-recv-monitor");
streamTmrReset(checkpointTriggerMonitorFn, 200, pTask, streamTimer, &pTmrInfo->tmrHandle, vgId, "trigger-recv-monitor");
}
pTmrInfo->launchChkptId = pActiveInfo->activeId;
} else { // already launched, do nothing
stError("s-task:%s previous checkpoint-trigger monitor tmr is set, not start new one", pTask->id.idStr);
}
}
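The isActive flag added here turns timer activation into a compare-and-swap race: only the caller that flips it from 0 to 1 may start or reset the monitor timer, and that caller records launchChkptId so the timer can later notice it has outlived its checkpoint. A compact illustration of the guard, using C11 atomics as stand-ins for the atomic_val_compare_exchange_8 helper (all names hypothetical):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct {
  atomic_char isActive; /* 0: idle, 1: timer armed */
  long        launchChkptId;
} TmrInfoSketch;

/* returns true only for the caller that wins the right to arm the timer */
static bool tryArmTimer(TmrInfoSketch *p, long activeChkptId) {
  char expected = 0;
  if (!atomic_compare_exchange_strong(&p->isActive, &expected, 1)) {
    return false; /* already armed by an earlier checkpoint */
  }
  p->launchChkptId = activeChkptId; /* remember which checkpoint armed it */
  return true;
}

int main(void) {
  TmrInfoSketch tmr = {0};
  printf("first: %d\n", tryArmTimer(&tmr, 42));  /* 1: armed */
  printf("second: %d\n", tryArmTimer(&tmr, 43)); /* 0: refused */
  return 0;
}

The matching release is streamCleanBeforeQuitTmr further below, which stores 0 back into isActive so a later checkpoint can re-arm the monitor.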
@ -349,7 +361,6 @@ int32_t streamProcessCheckpointTriggerBlock(SStreamTask* pTask, SStreamDataBlock
(void)streamTaskBuildCheckpoint(pTask); // todo: not handle error yet
} else { // source & agg tasks need to forward the checkpoint msg downwards
stDebug("s-task:%s process checkpoint-trigger block, all %d upstreams sent, forwards to downstream", id, num);
flushStateDataInExecutor(pTask, (SStreamQueueItem*)pBlock);
// Put the checkpoint-trigger block into outputQ, to make sure all blocks with less version have been handled by
@ -364,8 +375,8 @@ int32_t streamProcessCheckpointTriggerBlock(SStreamTask* pTask, SStreamDataBlock
// only when all downstream tasks are send checkpoint rsp, we can start the checkpoint procedure for the agg task
static int32_t processCheckpointReadyHelp(SActiveCheckpointInfo* pInfo, int32_t numOfDownstream,
int32_t downstreamNodeId, int64_t streamId, int32_t downstreamTaskId,
const char* id, int32_t* pNotReady, int32_t* pTransId) {
bool received = false;
const char* id, int32_t* pNotReady, int32_t* pTransId, bool* alreadyRecv) {
*alreadyRecv = false;
int32_t size = taosArrayGetSize(pInfo->pCheckpointReadyRecvList);
for (int32_t i = 0; i < size; ++i) {
STaskDownstreamReadyInfo* p = taosArrayGet(pInfo->pCheckpointReadyRecvList, i);
@ -374,12 +385,12 @@ static int32_t processCheckpointReadyHelp(SActiveCheckpointInfo* pInfo, int32_t
}
if (p->downstreamTaskId == downstreamTaskId) {
received = true;
(*alreadyRecv) = true;
break;
}
}
if (received) {
if (*alreadyRecv) {
stDebug("s-task:%s already recv checkpoint-ready msg from downstream:0x%x, ignore. %d/%d downstream not ready", id,
downstreamTaskId, (int32_t)(numOfDownstream - taosArrayGetSize(pInfo->pCheckpointReadyRecvList)),
numOfDownstream);
@ -415,6 +426,7 @@ int32_t streamProcessCheckpointReadyMsg(SStreamTask* pTask, int64_t checkpointId
int32_t code = 0;
int32_t notReady = 0;
int32_t transId = 0;
bool alreadyHandled = false;
// 1. not in checkpoint status now
SStreamTaskState pStat = streamTaskGetStatus(pTask);
@ -433,13 +445,18 @@ int32_t streamProcessCheckpointReadyMsg(SStreamTask* pTask, int64_t checkpointId
streamMutexLock(&pInfo->lock);
code = processCheckpointReadyHelp(pInfo, total, downstreamNodeId, pTask->id.streamId, downstreamTaskId, id, &notReady,
&transId);
&transId, &alreadyHandled);
streamMutexUnlock(&pInfo->lock);
if ((notReady == 0) && (code == 0)) {
if (alreadyHandled) {
stDebug("s-task:%s checkpoint-ready msg checkpointId:%" PRId64 " from task:0x%x already handled, not handle again",
id, checkpointId, downstreamTaskId);
} else {
if ((notReady == 0) && (code == 0) && (!alreadyHandled)) {
stDebug("s-task:%s all downstream tasks have completed build checkpoint, do checkpoint for current task", id);
(void)appendCheckpointIntoInputQ(pTask, STREAM_INPUT__CHECKPOINT, checkpointId, transId, -1);
}
}
return code;
}
@ -508,8 +525,8 @@ int32_t streamTaskUpdateTaskCheckpointInfo(SStreamTask* pTask, bool restored, SV
streamMutexLock(&pTask->lock);
if (pReq->checkpointId <= pInfo->checkpointId) {
stDebug("s-task:%s vgId:%d latest checkpointId:%" PRId64 " checkpointVer:%" PRId64
" no need to update the checkpoint info, updated checkpointId:%" PRId64 " checkpointVer:%" PRId64
stDebug("s-task:%s vgId:%d latest checkpointId:%" PRId64 " Ver:%" PRId64
" no need to update checkpoint info, updated checkpointId:%" PRId64 " Ver:%" PRId64
" transId:%d ignored",
id, vgId, pInfo->checkpointId, pInfo->checkpointVer, pReq->checkpointId, pReq->checkpointVer,
pReq->transId);
@ -541,28 +558,33 @@ int32_t streamTaskUpdateTaskCheckpointInfo(SStreamTask* pTask, bool restored, SV
id, vgId, pStatus.name, pInfo->checkpointId, pReq->checkpointId, pInfo->checkpointVer, pReq->checkpointVer,
pInfo->checkpointTime, pReq->checkpointTs);
} else { // not in restore status, must be in checkpoint status
if (pStatus.state == TASK_STATUS__CK) {
stDebug("s-task:%s vgId:%d status:%s start to update the checkpoint-info, checkpointId:%" PRId64 "->%" PRId64
" checkpointVer:%" PRId64 "->%" PRId64 " checkpointTs:%" PRId64 "->%" PRId64,
id, vgId, pStatus.name, pInfo->checkpointId, pReq->checkpointId, pInfo->checkpointVer, pReq->checkpointVer,
pInfo->checkpointTime, pReq->checkpointTs);
id, vgId, pStatus.name, pInfo->checkpointId, pReq->checkpointId, pInfo->checkpointVer,
pReq->checkpointVer, pInfo->checkpointTime, pReq->checkpointTs);
} else {
stDebug("s-task:%s vgId:%d status:%s NOT update the checkpoint-info, checkpointId:%" PRId64 "->%" PRId64
" checkpointVer:%" PRId64 "->%" PRId64,
id, vgId, pStatus.name, pInfo->checkpointId, pReq->checkpointId, pInfo->checkpointVer,
pReq->checkpointVer);
}
}
ASSERT(pInfo->checkpointId <= pReq->checkpointId && pInfo->checkpointVer <= pReq->checkpointVer &&
pInfo->processedVer <= pReq->checkpointVer);
// update only it is in checkpoint status.
if (pStatus.state == TASK_STATUS__CK) {
pInfo->checkpointId = pReq->checkpointId;
pInfo->checkpointVer = pReq->checkpointVer;
pInfo->checkpointTime = pReq->checkpointTs;
streamTaskClearCheckInfo(pTask, true);
if (pStatus.state == TASK_STATUS__CK) {
// todo handle error
code = streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_CHECKPOINT_DONE);
} else {
stDebug("s-task:0x%x vgId:%d not handle checkpoint-done event, status:%s", pReq->taskId, vgId, pStatus.name);
}
streamTaskClearCheckInfo(pTask, true);
if (pReq->dropRelHTask) {
stDebug("s-task:0x%x vgId:%d drop the related fill-history task:0x%" PRIx64 " after update checkpoint",
pReq->taskId, vgId, pReq->hTaskId);
@ -670,7 +692,7 @@ int32_t uploadCheckpointData(SStreamTask* pTask, int64_t checkpointId, int64_t d
}
if (code == TSDB_CODE_SUCCESS) {
code = streamTaskUploadCheckpoint(idStr, path);
code = streamTaskUploadCheckpoint(idStr, path, checkpointId);
if (code == TSDB_CODE_SUCCESS) {
stDebug("s-task:%s upload checkpointId:%" PRId64 " to remote succ", idStr, checkpointId);
} else {
@ -810,6 +832,7 @@ void checkpointTriggerMonitorFn(void* param, void* tmrId) {
const char* id = pTask->id.idStr;
SActiveCheckpointInfo* pActiveInfo = pTask->chkInfo.pActiveInfo;
SStreamTmrInfo* pTmrInfo = &pActiveInfo->chkptTriggerMsgTmr;
if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) {
int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
@ -820,24 +843,24 @@ void checkpointTriggerMonitorFn(void* param, void* tmrId) {
// check the status every 200ms
if (streamTaskShouldStop(pTask)) {
int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask);
stDebug("s-task:%s vgId:%d quit from monitor checkpoint-trigger, ref:%d", id, vgId, ref);
streamMetaReleaseTask(pTask->pMeta, pTask);
return;
}
if (++pActiveInfo->checkCounter < 100) {
streamTmrReset(checkpointTriggerMonitorFn, 100, pTask, streamTimer, &pActiveInfo->pChkptTriggerTmr, vgId, "trigger-recv-monitor");
if (++pTmrInfo->activeCounter < 50) {
streamTmrReset(checkpointTriggerMonitorFn, 200, pTask, streamTimer, &pTmrInfo->tmrHandle, vgId, "trigger-recv-monitor");
return;
}
pActiveInfo->checkCounter = 0;
pTmrInfo->activeCounter = 0;
stDebug("s-task:%s vgId:%d checkpoint-trigger monitor in tmr, ts:%" PRId64, id, vgId, now);
streamMutexLock(&pTask->lock);
SStreamTaskState pState = streamTaskGetStatus(pTask);
if (pState.state != TASK_STATUS__CK) {
int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask);
stDebug("s-task:%s vgId:%d not in checkpoint status, quit from monitor checkpoint-trigger, ref:%d", id, vgId, ref);
streamMutexUnlock(&pTask->lock);
@ -847,7 +870,7 @@ void checkpointTriggerMonitorFn(void* param, void* tmrId) {
// checkpoint-trigger recv flag is set, quit
if (pActiveInfo->allUpstreamTriggerRecv) {
int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask);
stDebug("s-task:%s vgId:%d all checkpoint-trigger recv, quit from monitor checkpoint-trigger, ref:%d", id, vgId,
ref);
@ -867,6 +890,31 @@ void checkpointTriggerMonitorFn(void* param, void* tmrId) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
stDebug("s-task:%s start to triggerMonitor, reason:%s", id, tstrerror(terrno));
streamMutexUnlock(&pActiveInfo->lock);
stDebug("s-task:%s start to monitor checkpoint-trigger in 10s", id);
streamTmrReset(checkpointTriggerMonitorFn, 200, pTask, streamTimer, &pTmrInfo->tmrHandle, vgId, "trigger-recv-monitor");
return;
}
if ((pTmrInfo->launchChkptId != pActiveInfo->activeId) || (pActiveInfo->activeId == 0)) {
streamMutexUnlock(&pActiveInfo->lock);
int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask);
stWarn("s-task:%s vgId:%d checkpoint-trigger monitor tmr launched by previous checkpoint procedure, checkpointId:%" PRId64
", quit, ref:%d",
id, vgId, pTmrInfo->launchChkptId, ref);
streamMetaReleaseTask(pTask->pMeta, pTask);
return;
}
// active checkpoint info is cleared for now
if ((pActiveInfo->activeId == 0) || (pActiveInfo->transId == 0) || (pTask->chkInfo.startTs == 0)) {
streamMutexUnlock(&pActiveInfo->lock);
int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask);
stWarn("s-task:%s vgId:%d active checkpoint may be cleared, quit from retrieve checkpoint-trigger send tmr, ref:%d",
id, vgId, ref);
streamMetaReleaseTask(pTask->pMeta, pTask);
return;
}
@ -900,9 +948,9 @@ void checkpointTriggerMonitorFn(void* param, void* tmrId) {
// check every 200ms
if (size > 0) {
stDebug("s-task:%s start to monitor checkpoint-trigger in 10s", id);
streamTmrReset(checkpointTriggerMonitorFn, 100, pTask, streamTimer, &pActiveInfo->pChkptTriggerTmr, vgId, "trigger-recv-monitor");
streamTmrReset(checkpointTriggerMonitorFn, 200, pTask, streamTimer, &pTmrInfo->tmrHandle, vgId, "trigger-recv-monitor");
} else {
int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask);
stDebug("s-task:%s all checkpoint-trigger recved, quit from monitor checkpoint-trigger tmr, ref:%d", id, ref);
streamMetaReleaseTask(pTask->pMeta, pTask);
}
@ -1060,11 +1108,8 @@ void streamTaskInitTriggerDispatchInfo(SStreamTask* pTask) {
streamMutexUnlock(&pInfo->lock);
}
int32_t streamTaskGetNumOfConfirmed(SStreamTask* pTask) {
SActiveCheckpointInfo* pInfo = pTask->chkInfo.pActiveInfo;
int32_t streamTaskGetNumOfConfirmed(SActiveCheckpointInfo* pInfo) {
int32_t num = 0;
streamMutexLock(&pInfo->lock);
for (int32_t i = 0; i < taosArrayGetSize(pInfo->pDispatchTriggerList); ++i) {
STaskTriggerSendInfo* p = taosArrayGet(pInfo->pDispatchTriggerList, i);
if (p == NULL) {
@ -1075,7 +1120,6 @@ int32_t streamTaskGetNumOfConfirmed(SStreamTask* pTask) {
num++;
}
}
streamMutexUnlock(&pInfo->lock);
return num;
}
@ -1101,9 +1145,9 @@ void streamTaskSetTriggerDispatchConfirmed(SStreamTask* pTask, int32_t vgId) {
}
}
int32_t numOfConfirmed = streamTaskGetNumOfConfirmed(pInfo);
streamMutexUnlock(&pInfo->lock);
int32_t numOfConfirmed = streamTaskGetNumOfConfirmed(pTask);
int32_t total = streamTaskGetNumOfDownstream(pTask);
if (taskId == 0) {
stError("s-task:%s recv invalid trigger-dispatch confirm, vgId:%d", pTask->id.idStr, vgId);
@ -1198,7 +1242,7 @@ ECHECKPOINT_BACKUP_TYPE streamGetCheckpointBackupType() {
}
}
int32_t streamTaskUploadCheckpoint(const char* id, const char* path) {
int32_t streamTaskUploadCheckpoint(const char* id, const char* path, int64_t checkpointId) {
int32_t code = 0;
if (id == NULL || path == NULL || strlen(id) == 0 || strlen(path) == 0 || strlen(path) >= PATH_MAX) {
stError("invalid parameters in upload checkpoint, %s", id);
@ -1206,7 +1250,7 @@ int32_t streamTaskUploadCheckpoint(const char* id, const char* path) {
}
if (strlen(tsSnodeAddress) != 0) {
code = uploadByRsync(id, path);
code = uploadByRsync(id, path, checkpointId);
if (code != 0) {
return TAOS_SYSTEM_ERROR(errno);
}
@ -1233,14 +1277,14 @@ int32_t downloadCheckpointDataByName(const char* id, const char* fname, const ch
return 0;
}
int32_t streamTaskDownloadCheckpointData(const char* id, char* path) {
int32_t streamTaskDownloadCheckpointData(const char* id, char* path, int64_t checkpointId) {
if (id == NULL || path == NULL || strlen(id) == 0 || strlen(path) == 0 || strlen(path) >= PATH_MAX) {
stError("download checkpoint data parameters invalid");
return -1;
}
if (strlen(tsSnodeAddress) != 0) {
return downloadRsync(id, path);
return downloadByRsync(id, path, checkpointId);
} else if (tsS3StreamEnabled) {
return s3GetObjectsByPrefix(id, path);
}
@ -1281,6 +1325,8 @@ int32_t streamTaskSendRestoreChkptMsg(SStreamTask* pTask) {
const char* id = pTask->id.idStr;
streamMutexLock(&pTask->lock);
ETaskStatus p = streamTaskGetStatus(pTask).state;
if (pTask->status.sendConsensusChkptId == true) {
stDebug("s-task:%s already start to consensus-checkpointId, not start again before it completed", id);
streamMutexUnlock(&pTask->lock);
@ -1291,9 +1337,15 @@ int32_t streamTaskSendRestoreChkptMsg(SStreamTask* pTask) {
streamMutexUnlock(&pTask->lock);
if (pTask->pBackend != NULL) {
streamFreeTaskState(pTask, p);
pTask->pBackend = NULL;
}
ASSERT(pTask->pBackend == NULL);
pTask->status.requireConsensusChkptId = true;
stDebug("s-task:%s set the require consensus-checkpointId flag", id);
return 0;
}

View File

@ -823,28 +823,29 @@ static void checkpointReadyMsgSendMonitorFn(void* param, void* tmrId) {
SStreamTask* pTask = param;
int32_t vgId = pTask->pMeta->vgId;
const char* id = pTask->id.idStr;
SActiveCheckpointInfo* pActiveInfo = pTask->chkInfo.pActiveInfo;
SStreamTmrInfo* pTmrInfo = &pActiveInfo->chkptReadyMsgTmr;
// check the status every 200ms
if (streamTaskShouldStop(pTask)) {
int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask);
stDebug("s-task:%s vgId:%d status:stop, quit from monitor checkpoint-trigger, ref:%d", id, vgId, ref);
streamMetaReleaseTask(pTask->pMeta, pTask);
return;
}
SActiveCheckpointInfo* pActiveInfo = pTask->chkInfo.pActiveInfo;
if (++pActiveInfo->sendReadyCheckCounter < 100) {
streamTmrReset(checkpointReadyMsgSendMonitorFn, 100, pTask, streamTimer, &pActiveInfo->pSendReadyMsgTmr, vgId, "chkpt-ready-monitor");
if (++pTmrInfo->activeCounter < 50) {
streamTmrReset(checkpointReadyMsgSendMonitorFn, 200, pTask, streamTimer, &pTmrInfo->tmrHandle, vgId, "chkpt-ready-monitor");
return;
}
pActiveInfo->sendReadyCheckCounter = 0;
stDebug("s-task:%s in sending checkpoint-ready msg monitor timer", id);
pTmrInfo->activeCounter = 0;
stDebug("s-task:%s in sending checkpoint-ready msg monitor tmr", id);
streamMutexLock(&pTask->lock);
SStreamTaskState pState = streamTaskGetStatus(pTask);
if (pState.state != TASK_STATUS__CK) {
int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask);
stDebug("s-task:%s vgId:%d status:%s not in checkpoint, quit from monitor checkpoint-ready send, ref:%d", id, vgId,
pState.name, ref);
streamMutexUnlock(&pTask->lock);
@ -858,10 +859,21 @@ static void checkpointReadyMsgSendMonitorFn(void* param, void* tmrId) {
SArray* pList = pActiveInfo->pReadyMsgList;
int32_t num = taosArrayGetSize(pList);
// active checkpoint info is cleared for now
if ((pActiveInfo->activeId == 0) && (pActiveInfo->transId == 0) && (num == 0) && (pTask->chkInfo.startTs == 0)) {
if (pTmrInfo->launchChkptId != pActiveInfo->activeId) {
streamMutexUnlock(&pActiveInfo->lock);
int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask);
stWarn("s-task:%s vgId:%d ready-msg send tmr launched by previous checkpoint procedure, checkpointId:%" PRId64
", quit, ref:%d",
id, vgId, pTmrInfo->launchChkptId, ref);
streamMetaReleaseTask(pTask->pMeta, pTask);
return;
}
// active checkpoint info is cleared for now
if ((pActiveInfo->activeId == 0) || (pActiveInfo->transId == 0) || (num == 0) || (pTask->chkInfo.startTs == 0)) {
streamMutexUnlock(&pActiveInfo->lock);
int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask);
stWarn("s-task:%s vgId:%d active checkpoint may be cleared, quit from readyMsg send tmr, ref:%d", id, vgId, ref);
streamMetaReleaseTask(pTask->pMeta, pTask);
@ -923,10 +935,10 @@ static void checkpointReadyMsgSendMonitorFn(void* param, void* tmrId) {
}
}
streamTmrReset(checkpointReadyMsgSendMonitorFn, 100, pTask, streamTimer, &pActiveInfo->pSendReadyMsgTmr, vgId, "chkpt-ready-monitor");
streamTmrReset(checkpointReadyMsgSendMonitorFn, 200, pTask, streamTimer, &pTmrInfo->tmrHandle, vgId, "chkpt-ready-monitor");
streamMutexUnlock(&pActiveInfo->lock);
} else {
int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask);
stDebug(
"s-task:%s vgId:%d recv of checkpoint-ready msg confirmed by all upstream task(s), clear checkpoint-ready msg "
"and quit from timer, ref:%d",
@ -975,22 +987,32 @@ int32_t streamTaskSendCheckpointReadyMsg(SStreamTask* pTask) {
}
}
streamMutexUnlock(&pActiveInfo->lock);
stDebug("s-task:%s level:%d checkpoint-ready msg sent to all %d upstreams", id, pTask->info.taskLevel, num);
// start to check if checkpoint ready msg has successfully received by upstream tasks.
if (pTask->info.taskLevel == TASK_LEVEL__SINK || pTask->info.taskLevel == TASK_LEVEL__AGG) {
SStreamTmrInfo* pTmrInfo = &pActiveInfo->chkptReadyMsgTmr;
int8_t old = atomic_val_compare_exchange_8(&pTmrInfo->isActive, 0, 1);
if (old == 0) {
int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1);
stDebug("s-task:%s start checkpoint-ready monitor in 10s, ref:%d ", pTask->id.idStr, ref);
streamMetaAcquireOneTask(pTask);
if (pActiveInfo->pSendReadyMsgTmr == NULL) {
pActiveInfo->pSendReadyMsgTmr = taosTmrStart(checkpointReadyMsgSendMonitorFn, 100, pTask, streamTimer);
if (pTmrInfo->tmrHandle == NULL) {
pTmrInfo->tmrHandle = taosTmrStart(checkpointReadyMsgSendMonitorFn, 200, pTask, streamTimer);
} else {
streamTmrReset(checkpointReadyMsgSendMonitorFn, 100, pTask, streamTimer, &pActiveInfo->pSendReadyMsgTmr, vgId, "chkpt-ready-monitor");
streamTmrReset(checkpointReadyMsgSendMonitorFn, 200, pTask, streamTimer, &pTmrInfo->tmrHandle, vgId, "chkpt-ready-monitor");
}
// mark the timer monitor checkpointId
pTmrInfo->launchChkptId = pActiveInfo->activeId;
} else {
stError("s-task:%s previous checkpoint-ready monitor tmr is set, not start new one", pTask->id.idStr);
}
}
streamMutexUnlock(&pActiveInfo->lock);
return TSDB_CODE_SUCCESS;
}
@ -1061,7 +1083,7 @@ int32_t streamAddBlockIntoDispatchMsg(const SSDataBlock* pBlock, SStreamDispatch
int32_t doSendDispatchMsg(SStreamTask* pTask, const SStreamDispatchReq* pReq, int32_t vgId, SEpSet* pEpSet) {
void* buf = NULL;
int32_t code = -1;
int32_t code = 0;
SRpcMsg msg = {0};
// serialize
@ -1071,9 +1093,9 @@ int32_t doSendDispatchMsg(SStreamTask* pTask, const SStreamDispatchReq* pReq, in
goto FAIL;
}
code = -1;
buf = rpcMallocCont(sizeof(SMsgHead) + tlen);
if (buf == NULL) {
code = terrno;
goto FAIL;
}
@ -1097,6 +1119,10 @@ FAIL:
rpcFreeCont(buf);
}
if (code == -1) {
code = TSDB_CODE_INVALID_MSG;
}
return code;
}
@ -1267,16 +1293,17 @@ static int32_t handleDispatchSuccessRsp(SStreamTask* pTask, int32_t downstreamId
}
}
static bool setDispatchRspInfo(SDispatchMsgInfo* pMsgInfo, int32_t vgId, int32_t code, int64_t now, int32_t* pNotRsp, const char* id) {
static bool setDispatchRspInfo(SDispatchMsgInfo* pMsgInfo, int32_t vgId, int32_t code, int64_t now, int32_t* pNotRsp,
int32_t* pFailed, const char* id) {
int32_t numOfRsp = 0;
bool alreadySet = false;
bool updated = false;
bool allRsp = false;
*pNotRsp = 0;
int32_t numOfFailed = 0;
streamMutexLock(&pMsgInfo->lock);
bool allRsp = false;
int32_t numOfDispatchBranch = taosArrayGetSize(pMsgInfo->pSendInfo);
*pNotRsp = 0;
*pFailed = 0;
for (int32_t i = 0; i < numOfDispatchBranch; ++i) {
SDispatchEntry* pEntry = taosArrayGet(pMsgInfo->pSendInfo, i);
if (pEntry == NULL) {
@ -1295,24 +1322,34 @@ static bool setDispatchRspInfo(SDispatchMsgInfo* pMsgInfo, int32_t vgId, int32_t
}
if (pEntry->nodeId == vgId) {
ASSERT(!alreadySet);
if (pEntry->rspTs != -1) {
stDebug("s-task:%s dispatch rsp has already recved at:%" PRId64 ", ignore this rsp, msgId:%d", id,
pEntry->rspTs, pMsgInfo->msgId);
allRsp = false;
} else {
pEntry->rspTs = now;
pEntry->status = code;
alreadySet = true;
updated = true;
numOfRsp += 1;
allRsp = (numOfRsp == numOfDispatchBranch);
stDebug("s-task:%s record the rsp recv, ts:%" PRId64 " code:%d, idx:%d, total recv:%d/%d", id, now, code, j,
numOfRsp, numOfDispatchBranch);
}
break;
}
}
// the recorded status may be an error code.
for (int32_t i = 0; i < numOfDispatchBranch; ++i) {
SDispatchEntry* pEntry = taosArrayGet(pMsgInfo->pSendInfo, i);
if (pEntry->status != TSDB_CODE_SUCCESS || isDispatchRspTimeout(pEntry, now)) {
numOfFailed += 1;
}
}
*pFailed = numOfFailed;
*pNotRsp = numOfDispatchBranch - numOfRsp;
allRsp = (numOfRsp == numOfDispatchBranch);
streamMutexUnlock(&pMsgInfo->lock);
ASSERT(updated);
return allRsp;
}
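setDispatchRspInfo now derives all three outputs (the allRsp return, *pNotRsp, and *pFailed) inside the same critical section that records the response, rather than letting callers re-scan the send list after the lock is dropped; that is what allows streamProcessDispatchRsp below to retire its separate getFailedDispatchInfo pass. A toy model of the single-pass accounting, with hypothetical types and timeout detection omitted:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct { int nodeId; long rspTs; int status; } EntrySketch;

/* record one response and compute every counter under a single lock */
static bool recordRsp(EntrySketch *e, int n, pthread_mutex_t *lock, int vgId,
                      int code, long now, int *pNotRsp, int *pFailed) {
  int numOfRsp = 0;
  pthread_mutex_lock(lock);
  for (int i = 0; i < n; ++i) {
    if (e[i].nodeId == vgId && e[i].rspTs == -1) {
      e[i].rspTs = now; /* only the first response from this node counts */
      e[i].status = code;
    }
    if (e[i].rspTs != -1) numOfRsp++;
  }
  *pFailed = 0;
  for (int i = 0; i < n; ++i) {
    if (e[i].rspTs != -1 && e[i].status != 0) (*pFailed)++;
  }
  *pNotRsp = n - numOfRsp;
  bool allRsp = (numOfRsp == n);
  pthread_mutex_unlock(lock);
  return allRsp;
}

int main(void) {
  EntrySketch     list[2] = {{2, -1, 0}, {3, -1, 0}};
  pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
  int             notRsp = 0, failed = 0;
  bool all = recordRsp(list, 2, &lock, 2, 0, 1000, &notRsp, &failed);
  printf("allRsp:%d notRsp:%d failed:%d\n", all, notRsp, failed);
  return 0;
}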
@ -1345,15 +1382,23 @@ int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, i
int64_t now = taosGetTimestampMs();
bool allRsp = false;
int32_t notRsp = 0;
int32_t numOfFailed = 0;
bool triggerDispatchRsp = false;
// we only set the dispatch msg info for current checkpoint trans
streamMutexLock(&pTask->lock);
triggerDispatchRsp = (streamTaskGetStatus(pTask).state == TASK_STATUS__CK) &&
(pTask->chkInfo.pActiveInfo->activeId == pMsgInfo->checkpointId);
streamMutexUnlock(&pTask->lock);
streamMutexLock(&pMsgInfo->lock);
int32_t msgId = pMsgInfo->msgId;
streamMutexUnlock(&pMsgInfo->lock);
// follower not handle the dispatch rsp
if ((pTask->pMeta->role == NODE_ROLE_FOLLOWER) || (pTask->status.downstreamReady != 1)) {
stError("s-task:%s vgId:%d is follower or task just re-launched, not handle the dispatch rsp, discard it", id,
vgId);
streamMutexUnlock(&pMsgInfo->lock);
return TSDB_CODE_STREAM_TASK_NOT_EXIST;
}
@ -1362,6 +1407,7 @@ int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, i
stError("s-task:%s vgId:%d not expect rsp, expected: msgId:%d, stage:%" PRId64 " actual msgId:%d, stage:%" PRId64
" discard it",
id, vgId, msgId, pTask->pMeta->stage, pRsp->msgId, pRsp->stage);
streamMutexUnlock(&pMsgInfo->lock);
return TSDB_CODE_INVALID_MSG;
}
@ -1373,18 +1419,18 @@ int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, i
if (code == TSDB_CODE_STREAM_TASK_NOT_EXIST) { // destination task does not exist, not retry anymore
stError("s-task:%s failed to dispatch msg to task:0x%x(vgId:%d), msgId:%d no retry, since task destroyed already",
id, pRsp->downstreamTaskId, pRsp->downstreamNodeId, msgId);
allRsp = setDispatchRspInfo(pMsgInfo, pRsp->downstreamNodeId, TSDB_CODE_SUCCESS, now, &notRsp, id);
allRsp = setDispatchRspInfo(pMsgInfo, pRsp->downstreamNodeId, TSDB_CODE_SUCCESS, now, &notRsp, &numOfFailed, id);
} else {
stError("s-task:%s failed to dispatch msgId:%d to task:0x%x(vgId:%d), code:%s, add to retry list", id, msgId,
pRsp->downstreamTaskId, pRsp->downstreamNodeId, tstrerror(code));
allRsp = setDispatchRspInfo(pMsgInfo, pRsp->downstreamNodeId, code, now, &notRsp, id);
allRsp = setDispatchRspInfo(pMsgInfo, pRsp->downstreamNodeId, code, now, &notRsp, &numOfFailed, id);
}
} else { // code == 0
if (pRsp->inputStatus == TASK_INPUT_STATUS__BLOCKED) {
pTask->inputq.status = TASK_INPUT_STATUS__BLOCKED;
// block the input of current task, to push pressure to upstream
allRsp = setDispatchRspInfo(pMsgInfo, pRsp->downstreamNodeId, pRsp->inputStatus, now, &notRsp, id);
allRsp = setDispatchRspInfo(pMsgInfo, pRsp->downstreamNodeId, pRsp->inputStatus, now, &notRsp, &numOfFailed, id);
stTrace("s-task:%s inputQ of downstream task:0x%x(vgId:%d) is full, wait for retry dispatch", id,
pRsp->downstreamTaskId, pRsp->downstreamNodeId);
} else {
@ -1396,15 +1442,13 @@ int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, i
id, pRsp->downstreamTaskId, pRsp->downstreamNodeId);
}
allRsp = setDispatchRspInfo(pMsgInfo, pRsp->downstreamNodeId, TSDB_CODE_SUCCESS, now, &notRsp, id);
allRsp = setDispatchRspInfo(pMsgInfo, pRsp->downstreamNodeId, TSDB_CODE_SUCCESS, now, &notRsp, &numOfFailed, id);
{
bool delayDispatch = (pMsgInfo->dispatchMsgType == STREAM_INPUT__CHECKPOINT_TRIGGER);
if (delayDispatch) {
streamMutexLock(&pTask->lock);
// we only set the dispatch msg info for current checkpoint trans
if (streamTaskGetStatus(pTask).state == TASK_STATUS__CK &&
pTask->chkInfo.pActiveInfo->activeId == pMsgInfo->checkpointId) {
if (triggerDispatchRsp) {
ASSERT(pTask->chkInfo.pActiveInfo->transId == pMsgInfo->transId);
stDebug("s-task:%s checkpoint-trigger msg to 0x%x rsp for checkpointId:%" PRId64 " transId:%d confirmed",
pTask->id.idStr, pRsp->downstreamTaskId, pMsgInfo->checkpointId, pMsgInfo->transId);
@ -1415,12 +1459,13 @@ int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, i
" transId:%d discard, since expired",
pTask->id.idStr, pMsgInfo->checkpointId, pMsgInfo->transId);
}
streamMutexUnlock(&pTask->lock);
}
}
}
}
streamMutexUnlock(&pMsgInfo->lock);
if (pTask->outputInfo.type == TASK_OUTPUT__SHUFFLE_DISPATCH) {
if (!allRsp) {
stDebug(
@ -1439,16 +1484,12 @@ int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, i
}
// all msg rsp already, continue
if (allRsp) {
ASSERT(pTask->outputq.status == TASK_OUTPUT_STATUS__WAIT);
// we need to re-try send dispatch msg to downstream tasks
int32_t numOfFailed = getFailedDispatchInfo(pMsgInfo, now);
if (numOfFailed == 0) { // this message has been sent successfully, let's try next one.
if (allRsp && (numOfFailed == 0)) {
// trans-state msg has been sent to downstream successfully. let's transfer the fill-history task state
if (pMsgInfo->dispatchMsgType == STREAM_INPUT__TRANS_STATE) {
stDebug("s-task:%s dispatch trans-state msgId:%d to downstream successfully, start to prepare transfer state",
id, msgId);
stDebug("s-task:%s dispatch trans-state msgId:%d to downstream successfully, start to prepare transfer state", id,
msgId);
ASSERT(pTask->info.fillHistory == 1);
code = streamTransferStatePrepare(pTask);
@ -1460,10 +1501,10 @@ int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, i
// now ready for next data output
atomic_store_8(&pTask->outputq.status, TASK_OUTPUT_STATUS__NORMAL);
} else {
// this message has been sent successfully, let's try next one.
code = handleDispatchSuccessRsp(pTask, pRsp->downstreamTaskId, pRsp->downstreamNodeId);
}
}
}
return code;
}

View File

@ -142,11 +142,12 @@ int32_t streamMetaSendHbHelper(SStreamMeta* pMeta) {
}
SStreamHbMsg* pMsg = &pInfo->hbMsg;
stDebug("vgId:%d build stream hbMsg, leader:%d msgId:%d", pMeta->vgId, (pMeta->role == NODE_ROLE_LEADER),
pMeta->pHbInfo->hbCount);
pMsg->vgId = pMeta->vgId;
pMsg->msgId = pMeta->pHbInfo->hbCount;
pMsg->ts = taosGetTimestampMs();
stDebug("vgId:%d build stream hbMsg, leader:%d HbMsgId:%d, HbMsgTs:%" PRId64, pMeta->vgId,
(pMeta->role == NODE_ROLE_LEADER), pMsg->msgId, pMsg->ts);
pMsg->pTaskStatus = taosArrayInit(numOfTasks, sizeof(STaskStatusEntry));
pMsg->pUpdateNodes = taosArrayInit(numOfTasks, sizeof(int32_t));
@ -292,14 +293,14 @@ void streamMetaHbToMnode(void* param, void* tmrId) {
streamMetaRLock(pMeta);
code = streamMetaSendHbHelper(pMeta);
if (code) {
stError("vgId:%d failed to send hmMsg to mnode, try again in 5s, code:%s", pMeta->vgId, strerror(code));
stError("vgId:%d failed to send hmMsg to mnode, try again in 5s, code:%s", pMeta->vgId, tstrerror(code));
}
streamMetaRUnLock(pMeta);
streamTmrReset(streamMetaHbToMnode, META_HB_CHECK_INTERVAL, param, streamTimer, &pMeta->pHbInfo->hbTmr, pMeta->vgId,
"meta-hb-tmr");
code = taosReleaseRef(streamMetaId, rid);
if (code) {
stError("vgId:%d in meta timer, failed to release the meta rid:%" PRId64, pMeta->vgId, rid);
}

View File

@ -318,7 +318,19 @@ int32_t streamTaskSetDb(SStreamMeta* pMeta, SStreamTask* pTask, const char* key)
pBackend->pTask = pTask;
pBackend->pMeta = pMeta;
if (processVer != -1) pTask->chkInfo.processedVer = processVer;
if (processVer != -1) {
if (pTask->chkInfo.processedVer != processVer) {
stWarn("s-task:%s vgId:%d update checkpointVer:%" PRId64 "->%" PRId64 " for checkpointId:%" PRId64,
pTask->id.idStr, pTask->pMeta->vgId, pTask->chkInfo.processedVer, processVer, pTask->chkInfo.checkpointId);
pTask->chkInfo.processedVer = processVer;
pTask->chkInfo.checkpointVer = processVer;
pTask->chkInfo.nextProcessVer = processVer + 1;
} else {
stInfo("s-task:%s vgId:%d processedVer:%" PRId64
" in task meta equals to data in checkpoint data for checkpointId:%" PRId64,
pTask->id.idStr, pTask->pMeta->vgId, pTask->chkInfo.processedVer, pTask->chkInfo.checkpointId);
}
}
code = taosHashPut(pMeta->pTaskDbUnique, key, strlen(key), &pBackend, sizeof(void*));
if (code) {
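The new branch in streamTaskSetDb realigns the task's progress markers with whatever version the restored backend actually holds: processedVer and checkpointVer snap to the backend's value and nextProcessVer resumes one past it, so the task cannot restart ahead of the data that survived the restore. A tiny model of the realignment (the field names mirror the hunk, but the struct itself is hypothetical):

#include <stdio.h>

typedef struct {
  long processedVer, checkpointVer, nextProcessVer;
} ChkInfoSketch;

static void alignToBackend(ChkInfoSketch *c, long backendVer) {
  if (backendVer == -1 || c->processedVer == backendVer) return;
  c->processedVer   = backendVer;     /* trust the restored checkpoint */
  c->checkpointVer  = backendVer;
  c->nextProcessVer = backendVer + 1; /* resume right after it */
}

int main(void) {
  ChkInfoSketch c = {.processedVer = 120, .checkpointVer = 100,
                     .nextProcessVer = 121};
  alignToBackend(&c, 100); /* backend restored an older checkpoint */
  printf("processed:%ld next:%ld\n", c.processedVer, c.nextProcessVer);
  return 0;
}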
@ -1407,7 +1419,6 @@ int32_t streamMetaStartAllTasks(SStreamMeta* pMeta) {
}
// negotiate the consensus checkpoint id for current task
ASSERT(pTask->pBackend == NULL);
code = streamTaskSendRestoreChkptMsg(pTask);
// this task may have no checkpoint, but other tasks may have generated a checkpoint already?

View File

@ -398,6 +398,7 @@ int32_t tEncodeStreamHbMsg(SEncoder* pEncoder, const SStreamHbMsg* pReq) {
}
if (tEncodeI32(pEncoder, pReq->msgId) < 0) return -1;
if (tEncodeI64(pEncoder, pReq->ts) < 0) return -1;
tEndEncode(pEncoder);
return pEncoder->pos;
}
@ -470,6 +471,7 @@ int32_t tDecodeStreamHbMsg(SDecoder* pDecoder, SStreamHbMsg* pReq) {
}
if (tDecodeI32(pDecoder, &pReq->msgId) < 0) return -1;
if (tDecodeI64(pDecoder, &pReq->ts) < 0) return -1;
tEndDecode(pDecoder);
return 0;

View File

@ -107,7 +107,7 @@ void streamTaskResumeHelper(void* param, void* tmrId) {
int32_t code = streamTaskSchedTask(pTask->pMsgCb, pTask->info.nodeId, pId->streamId, pId->taskId, STREAM_EXEC_T_RESUME_TASK);
int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
if (code) {
stError("s-task:%s sched task failed, code:%s, ref:%d", pId->idStr, strerror(code), ref);
stError("s-task:%s sched task failed, code:%s, ref:%d", pId->idStr, tstrerror(code), ref);
} else {
stDebug("trigger to resume s-task:%s after being idled for %dms, ref:%d", pId->idStr, pTask->status.schedIdleTime,
ref);

View File

@ -1140,14 +1140,16 @@ void streamTaskDestroyActiveChkptInfo(SActiveCheckpointInfo* pInfo) {
taosArrayDestroy(pInfo->pCheckpointReadyRecvList);
pInfo->pCheckpointReadyRecvList = NULL;
if (pInfo->pChkptTriggerTmr != NULL) {
(void) taosTmrStop(pInfo->pChkptTriggerTmr);
pInfo->pChkptTriggerTmr = NULL;
SStreamTmrInfo* pTriggerTmr = &pInfo->chkptTriggerMsgTmr;
if (pTriggerTmr->tmrHandle != NULL) {
(void) taosTmrStop(pTriggerTmr->tmrHandle);
pTriggerTmr->tmrHandle = NULL;
}
if (pInfo->pSendReadyMsgTmr != NULL) {
(void) taosTmrStop(pInfo->pSendReadyMsgTmr);
pInfo->pSendReadyMsgTmr = NULL;
SStreamTmrInfo* pReadyTmr = &pInfo->chkptReadyMsgTmr;
if (pReadyTmr->tmrHandle != NULL) {
(void) taosTmrStop(pReadyTmr->tmrHandle);
pReadyTmr->tmrHandle = NULL;
}
taosMemoryFree(pInfo);

View File

@ -96,12 +96,6 @@ static int32_t attachWaitedEvent(SStreamTask* pTask, SFutureHandleEventInfo* pEv
}
}
static int32_t stopTaskSuccFn(SStreamTask* pTask) {
SStreamTaskSM* pSM = pTask->status.pSM;
streamFreeTaskState(pTask,pSM->current.state == TASK_STATUS__DROPPING ? 1 : 0);
return TSDB_CODE_SUCCESS;
}
int32_t streamTaskInitStatus(SStreamTask* pTask) {
pTask->execInfo.checkTs = taosGetTimestampMs();
stDebug("s-task:%s start init, and check downstream tasks, set the init ts:%" PRId64, pTask->id.idStr,
@ -698,21 +692,21 @@ void doInitStateTransferTable(void) {
// resume is completed by restore status of state-machine
// stop related event
trans = createStateTransform(TASK_STATUS__READY, TASK_STATUS__STOP, TASK_EVENT_STOP, NULL, stopTaskSuccFn, NULL);
trans = createStateTransform(TASK_STATUS__READY, TASK_STATUS__STOP, TASK_EVENT_STOP, NULL, NULL, NULL);
CHECK_RET_VAL(taosArrayPush(streamTaskSMTrans, &trans));
trans = createStateTransform(TASK_STATUS__DROPPING, TASK_STATUS__STOP, TASK_EVENT_STOP, NULL, stopTaskSuccFn, NULL);
trans = createStateTransform(TASK_STATUS__DROPPING, TASK_STATUS__STOP, TASK_EVENT_STOP, NULL, NULL, NULL);
CHECK_RET_VAL(taosArrayPush(streamTaskSMTrans, &trans));
trans = createStateTransform(TASK_STATUS__UNINIT, TASK_STATUS__STOP, TASK_EVENT_STOP, NULL, stopTaskSuccFn, NULL);
trans = createStateTransform(TASK_STATUS__UNINIT, TASK_STATUS__STOP, TASK_EVENT_STOP, NULL, NULL, NULL);
CHECK_RET_VAL(taosArrayPush(streamTaskSMTrans, &trans));
trans = createStateTransform(TASK_STATUS__STOP, TASK_STATUS__STOP, TASK_EVENT_STOP, NULL, stopTaskSuccFn, NULL);
trans = createStateTransform(TASK_STATUS__STOP, TASK_STATUS__STOP, TASK_EVENT_STOP, NULL, NULL, NULL);
CHECK_RET_VAL(taosArrayPush(streamTaskSMTrans, &trans));
trans = createStateTransform(TASK_STATUS__SCAN_HISTORY, TASK_STATUS__STOP, TASK_EVENT_STOP, NULL, stopTaskSuccFn, NULL);
trans = createStateTransform(TASK_STATUS__SCAN_HISTORY, TASK_STATUS__STOP, TASK_EVENT_STOP, NULL, NULL, NULL);
CHECK_RET_VAL(taosArrayPush(streamTaskSMTrans, &trans));
trans = createStateTransform(TASK_STATUS__HALT, TASK_STATUS__STOP, TASK_EVENT_STOP, NULL, stopTaskSuccFn, NULL);
trans = createStateTransform(TASK_STATUS__HALT, TASK_STATUS__STOP, TASK_EVENT_STOP, NULL, NULL, NULL);
CHECK_RET_VAL(taosArrayPush(streamTaskSMTrans, &trans));
trans = createStateTransform(TASK_STATUS__PAUSE, TASK_STATUS__STOP, TASK_EVENT_STOP, NULL, stopTaskSuccFn, NULL);
trans = createStateTransform(TASK_STATUS__PAUSE, TASK_STATUS__STOP, TASK_EVENT_STOP, NULL, NULL, NULL);
CHECK_RET_VAL(taosArrayPush(streamTaskSMTrans, &trans));
trans = createStateTransform(TASK_STATUS__CK, TASK_STATUS__STOP, TASK_EVENT_STOP, NULL, stopTaskSuccFn, NULL);
trans = createStateTransform(TASK_STATUS__CK, TASK_STATUS__STOP, TASK_EVENT_STOP, NULL, NULL, NULL);
CHECK_RET_VAL(taosArrayPush(streamTaskSMTrans, &trans));
// dropping related event

View File

@ -50,3 +50,13 @@ void streamTmrReset(TAOS_TMR_CALLBACK fp, int32_t mseconds, void* param, void* h
// stError("vgId:%d failed to reset tmr: %s, try again", vgId, pMsg);
// }
}
int32_t streamCleanBeforeQuitTmr(SStreamTmrInfo* pInfo, SStreamTask* pTask) {
pInfo->activeCounter = 0;
pInfo->launchChkptId = 0;
atomic_store_8(&pInfo->isActive, 0);
int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
ASSERT(ref >= 0);
return ref;
}
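streamCleanBeforeQuitTmr centralizes the quit path that the monitor callbacks above previously open-coded as bare atomic_sub_fetch_32 calls: reset the cadence counter, forget the arming checkpointId, clear the isActive flag so a later checkpoint can re-arm the timer, and only then drop the task-level timer reference. A toy mirror of the teardown in C11 atomics (hypothetical types):

#include <stdatomic.h>
#include <stdio.h>

typedef struct {
  int         activeCounter;
  long        launchChkptId;
  atomic_char isActive;
} TmrSketch;

/* clear per-timer state, drop the armed flag, release the task-level ref */
static int cleanBeforeQuitTmr(TmrSketch *t, atomic_int *pTimerActive) {
  t->activeCounter = 0;
  t->launchChkptId = 0;
  atomic_store(&t->isActive, 0);
  return atomic_fetch_sub(pTimerActive, 1) - 1; /* remaining references */
}

int main(void) {
  TmrSketch  tmr = {.activeCounter = 3, .launchChkptId = 7};
  atomic_int timerActive = 1; /* taken when the timer was armed */
  atomic_store(&tmr.isActive, 1);
  printf("ref after quit:%d\n", cleanBeforeQuitTmr(&tmr, &timerActive));
  return 0;
}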

View File

@ -1407,7 +1407,8 @@ int32_t syncNodeRestore(SSyncNode* pSyncNode) {
pSyncNode->commitIndex = TMAX(pSyncNode->commitIndex, commitIndex);
sInfo("vgId:%d, restore sync until commitIndex:%" PRId64, pSyncNode->vgId, pSyncNode->commitIndex);
if (pSyncNode->fsmState != SYNC_FSM_STATE_INCOMPLETE && (code = syncLogBufferCommit(pSyncNode->pLogBuf, pSyncNode, pSyncNode->commitIndex)) < 0) {
if (pSyncNode->fsmState != SYNC_FSM_STATE_INCOMPLETE &&
(code = syncLogBufferCommit(pSyncNode->pLogBuf, pSyncNode, pSyncNode->commitIndex)) < 0) {
TAOS_RETURN(code);
}
@ -2187,7 +2188,7 @@ void syncNodeCandidate2Leader(SSyncNode* pSyncNode) {
}
SyncIndex lastIndex = pSyncNode->pLogStore->syncLogLastIndex(pSyncNode->pLogStore);
ASSERT(lastIndex >= 0);
// ASSERT(lastIndex >= 0);
sInfo("vgId:%d, become leader. term:%" PRId64 ", commit index:%" PRId64 ", last index:%" PRId64 "", pSyncNode->vgId,
raftStoreGetTerm(pSyncNode), pSyncNode->commitIndex, lastIndex);
}

View File

@ -892,7 +892,7 @@ int32_t syncLogReplRecover(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncAppendEn
if (pMsg->matchIndex < pNode->pLogBuf->matchIndex) {
code = syncLogReplGetPrevLogTerm(pMgr, pNode, index + 1, &term);
if (term < 0 && (errno == ENFILE || errno == EMFILE)) {
if (term < 0 && (errno == ENFILE || errno == EMFILE || errno == ENOENT)) {
sError("vgId:%d, failed to get prev log term since %s. index:%" PRId64, pNode->vgId, tstrerror(code), index + 1);
TAOS_RETURN(code);
}

View File

@ -1620,15 +1620,18 @@ static void cliHandleFreeById(SCliMsg* pMsg, SCliThrd* pThrd) {
if (size == 0) {
// already recv, and notify upper layer
TAOS_CHECK_GOTO(TSDB_CODE_REF_INVALID_ID, NULL, _exception);
return;
} else {
while (T_REF_VAL_GET(conn) >= 1) transUnrefCliHandle(conn);
while (T_REF_VAL_GET(conn) >= 1) {
transUnrefCliHandle(conn);
}
return;
}
_exception:
tDebug("already free conn %p by id %" PRId64"", conn, refId);
(void)transReleaseExHandle(transGetRefMgt(), refId);
(void)transReleaseExHandle(transGetRefMgt(), refId);
(void)transRemoveExHandle(transGetRefMgt(), refId);
destroyCmsg(pMsg);
}
@ -2225,6 +2228,11 @@ static FORCE_INLINE void destroyCmsgAndAhandle(void* param) {
pThrd->destroyAhandleFp(pMsg->ctx->ahandle);
}
if (pMsg->msg.info.handle !=0) {
(void)transReleaseExHandle(transGetRefMgt(), (int64_t)pMsg->msg.info.handle);
(void)transRemoveExHandle(transGetRefMgt(), (int64_t)pMsg->msg.info.handle);
}
transDestroyConnCtx(pMsg->ctx);
transFreeMsg(pMsg->msg.pCont);
taosMemoryFree(pMsg);

View File

@ -871,14 +871,14 @@ void taosGetSystemTimezone(char *outTimezoneStr, enum TdTimezone *tsTimezone) {
{
int n = readlink("/etc/localtime", buf, sizeof(buf));
if (n < 0) {
printf("read /etc/localtime error, reason:%s", strerror(errno));
printf("read /etc/localtime error, reason:%s\n", strerror(errno));
return;
}
buf[n] = '\0';
char *zi = strstr(buf, "zoneinfo");
if (!zi) {
printf("parsing /etc/localtime failed");
printf("parsing /etc/localtime failed\n");
return;
}
tz = zi + strlen("zoneinfo") + 1;
@ -893,7 +893,7 @@ void taosGetSystemTimezone(char *outTimezoneStr, enum TdTimezone *tsTimezone) {
// }
// }
// if (!tz || 0 == strchr(tz, '/')) {
// printf("parsing /etc/localtime failed");
// printf("parsing /etc/localtime failed\n");
// return;
// }
@ -927,7 +927,7 @@ void taosGetSystemTimezone(char *outTimezoneStr, enum TdTimezone *tsTimezone) {
{
int n = readlink("/etc/localtime", buf, sizeof(buf)-1);
if (n < 0) {
(void)printf("read /etc/localtime error, reason:%s", strerror(errno));
(void)printf("read /etc/localtime error, reason:%s\n", strerror(errno));
if (taosCheckExistFile("/etc/timezone")) {
/*
@ -947,7 +947,7 @@ void taosGetSystemTimezone(char *outTimezoneStr, enum TdTimezone *tsTimezone) {
int len = taosReadFile(pFile, buf, 64);
if (len < 0) {
(void)taosCloseFile(&pFile);
(void)printf("read /etc/timezone error, reason:%s", strerror(errno));
(void)printf("read /etc/timezone error, reason:%s\n", strerror(errno));
return;
}
@ -994,7 +994,7 @@ void taosGetSystemTimezone(char *outTimezoneStr, enum TdTimezone *tsTimezone) {
char *zi = strstr(buf, "zoneinfo");
if (!zi) {
(void)printf("parsing /etc/localtime failed");
(void)printf("parsing /etc/localtime failed\n");
return;
}
tz = zi + strlen("zoneinfo") + 1;

View File

@ -409,6 +409,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_VND_ALREADY_IS_VOTER, "Vnode already is a vo
TAOS_DEFINE_ERROR(TSDB_CODE_VND_DIR_ALREADY_EXIST, "Vnode directory already exist")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_META_DATA_UNSAFE_DELETE, "Single replica vnode data will be lost permanently after this operation, if you are sure about this, please use drop dnode <id> unsafe to execute")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_ARB_NOT_SYNCED, "Vgroup peer is not synced")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_WRITE_DISABLED, "Vnode write is disabled for snapshot")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_COLUMN_COMPRESS_ALREADY_EXIST,"Same with old param")
@ -753,7 +754,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TSMA_INVALID_STAT, "Invalid tsma state"
TAOS_DEFINE_ERROR(TSDB_CODE_TSMA_INVALID_PTR, "Invalid tsma pointer")
TAOS_DEFINE_ERROR(TSDB_CODE_TSMA_INVALID_PARA, "Invalid tsma parameters")
TAOS_DEFINE_ERROR(TSDB_CODE_TSMA_INVALID_TB, "Invalid table to create tsma, only stable or normal table allowed")
TAOS_DEFINE_ERROR(TSDB_CODE_TSMA_INVALID_INTERVAL, "Invalid tsma interval, 1m ~ 1h is allowed")
TAOS_DEFINE_ERROR(TSDB_CODE_TSMA_INVALID_INTERVAL, "Invalid tsma interval, 1m ~ 1y is allowed")
TAOS_DEFINE_ERROR(TSDB_CODE_TSMA_INVALID_FUNC_PARAM, "Invalid tsma func param, only one non-tag column allowed")
TAOS_DEFINE_ERROR(TSDB_CODE_TSMA_UNSUPPORTED_FUNC, "Tsma func not supported")
TAOS_DEFINE_ERROR(TSDB_CODE_TSMA_MUST_BE_DROPPED, "Tsma must be dropped first")

View File

@ -384,6 +384,9 @@ static LRUStatus taosLRUCacheShardInsertEntry(SLRUCacheShard *shard, SLRUEntry *
bool freeOnFail) {
LRUStatus status = TAOS_LRU_STATUS_OK;
SArray *lastReferenceList = taosArrayInit(16, POINTER_BYTES);
if (!lastReferenceList) {
return TAOS_LRU_STATUS_FAIL;
}
(void)taosThreadMutexLock(&shard->mutex);

View File

@ -33,7 +33,7 @@ int32_t tScalableBfInit(uint64_t expectedEntries, double errorRate, SScalableBf*
int32_t lino = 0;
const uint32_t defaultSize = 8;
if (expectedEntries < 1 || errorRate <= 0 || errorRate >= 1.0) {
code = TSDB_CODE_FAILED;
code = TSDB_CODE_INVALID_PARA;
QUERY_CHECK_CODE(code, lino, _error);
}
SScalableBf* pSBf = taosMemoryCalloc(1, sizeof(SScalableBf));
@ -71,7 +71,7 @@ int32_t tScalableBfPutNoCheck(SScalableBf* pSBf, const void* keyBuf, uint32_t le
int32_t code = TSDB_CODE_SUCCESS;
int32_t lino = 0;
if (pSBf->status == SBF_INVALID) {
code = TSDB_CODE_FAILED;
code = TSDB_CODE_OUT_OF_BUFFER;
QUERY_CHECK_CODE(code, lino, _error);
}
int32_t size = taosArrayGetSize(pSBf->bfArray);
@ -92,7 +92,7 @@ int32_t tScalableBfPutNoCheck(SScalableBf* pSBf, const void* keyBuf, uint32_t le
_error:
if (code != TSDB_CODE_SUCCESS) {
uError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
uDebug("%s failed at line %d since %s", __func__, lino, tstrerror(code));
}
return code;
}
@ -101,7 +101,7 @@ int32_t tScalableBfPut(SScalableBf* pSBf, const void* keyBuf, uint32_t len, int3
int32_t code = TSDB_CODE_SUCCESS;
int32_t lino = 0;
if (pSBf->status == SBF_INVALID) {
code = TSDB_CODE_FAILED;
code = TSDB_CODE_OUT_OF_BUFFER;
QUERY_CHECK_CODE(code, lino, _end);
}
uint64_t h1 = (uint64_t)pSBf->hashFn1(keyBuf, len);
@ -153,7 +153,7 @@ static int32_t tScalableBfAddFilter(SScalableBf* pSBf, uint64_t expectedEntries,
int32_t code = TSDB_CODE_SUCCESS;
int32_t lino = 0;
if (taosArrayGetSize(pSBf->bfArray) >= pSBf->maxBloomFilters) {
code = TSDB_CODE_FAILED;
code = TSDB_CODE_OUT_OF_BUFFER;
QUERY_CHECK_CODE(code, lino, _error);
}
@ -163,7 +163,7 @@ static int32_t tScalableBfAddFilter(SScalableBf* pSBf, uint64_t expectedEntries,
if (taosArrayPush(pSBf->bfArray, &pNormalBf) == NULL) {
tBloomFilterDestroy(pNormalBf);
code = TSDB_CODE_OUT_OF_MEMORY;
code = terrno;
QUERY_CHECK_CODE(code, lino, _error);
}
pSBf->numBits += pNormalBf->numBits;
@ -217,7 +217,7 @@ int32_t tScalableBfDecode(SDecoder* pDecoder, SScalableBf** ppSBf) {
pSBf->bfArray = NULL;
int32_t size = 0;
if (tDecodeI32(pDecoder, &size) < 0) {
code = TSDB_CODE_FAILED;
code = terrno;
QUERY_CHECK_CODE(code, lino, _error);
}
if (size == 0) {
@ -242,19 +242,19 @@ int32_t tScalableBfDecode(SDecoder* pDecoder, SScalableBf** ppSBf) {
}
}
if (tDecodeU32(pDecoder, &pSBf->growth) < 0) {
code = TSDB_CODE_FAILED;
code = terrno;
QUERY_CHECK_CODE(code, lino, _error);
}
if (tDecodeU64(pDecoder, &pSBf->numBits) < 0) {
code = TSDB_CODE_FAILED;
code = terrno;
QUERY_CHECK_CODE(code, lino, _error);
}
if (tDecodeU32(pDecoder, &pSBf->maxBloomFilters) < 0) {
code = TSDB_CODE_FAILED;
code = terrno;
QUERY_CHECK_CODE(code, lino, _error);
}
if (tDecodeI8(pDecoder, &pSBf->status) < 0) {
code = TSDB_CODE_FAILED;
code = terrno;
QUERY_CHECK_CODE(code, lino, _error);
}
(*ppSBf) = pSBf;
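The hunks above replace the catch-all TSDB_CODE_FAILED with the most specific code available: TSDB_CODE_INVALID_PARA for bad arguments, TSDB_CODE_OUT_OF_BUFFER when the filter chain is full, and terrno to propagate the decoder's or allocator's own error. A minimal sketch of the check-and-goto idiom these hunks rely on follows; the macro expansion shown is an assumption for illustration, the real definition lives in the TDengine headers.

/* Assumed expansion of QUERY_CHECK_CODE, shown only to make the error
 * paths above readable; requires TDengine's internal headers. */
#define QUERY_CHECK_CODE(CODE, LINO, LABEL) \
  do {                                      \
    if ((CODE) != TSDB_CODE_SUCCESS) {      \
      (LINO) = __LINE__;                    \
      goto LABEL;                           \
    }                                       \
  } while (0)

static int32_t decodeExample(SDecoder* pDecoder, uint32_t* pVal) {
  int32_t code = TSDB_CODE_SUCCESS;
  int32_t lino = 0;
  if (tDecodeU32(pDecoder, pVal) < 0) {
    code = terrno; /* propagate the decoder's error, not a generic failure */
    QUERY_CHECK_CODE(code, lino, _error);
  }
  return code;
_error:
  uError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
  return code;
}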

View File

@ -792,6 +792,16 @@ bool tQueryAutoQWorkerTryRecycleWorker(SQueryAutoQWorkerPool *pPool, SQueryAutoQ
int32_t tQueryAutoQWorkerInit(SQueryAutoQWorkerPool *pool) {
int32_t code;
(void)taosThreadMutexInit(&pool->poolLock, NULL);
(void)taosThreadMutexInit(&pool->backupLock, NULL);
(void)taosThreadMutexInit(&pool->waitingAfterBlockLock, NULL);
(void)taosThreadMutexInit(&pool->waitingBeforeProcessMsgLock, NULL);
(void)taosThreadCondInit(&pool->waitingBeforeProcessMsgCond, NULL);
(void)taosThreadCondInit(&pool->waitingAfterBlockCond, NULL);
(void)taosThreadCondInit(&pool->backupCond, NULL);
code = taosOpenQset(&pool->qset);
if (code) return terrno = code;
pool->workers = tdListNew(sizeof(SQueryAutoQWorker));
@ -802,14 +812,6 @@ int32_t tQueryAutoQWorkerInit(SQueryAutoQWorkerPool *pool) {
if (!pool->exitedWorkers) return TSDB_CODE_OUT_OF_MEMORY;
pool->maxInUse = pool->max * 2 + 2;
(void)taosThreadMutexInit(&pool->poolLock, NULL);
(void)taosThreadMutexInit(&pool->backupLock, NULL);
(void)taosThreadMutexInit(&pool->waitingAfterBlockLock, NULL);
(void)taosThreadMutexInit(&pool->waitingBeforeProcessMsgLock, NULL);
(void)taosThreadCondInit(&pool->waitingBeforeProcessMsgCond, NULL);
(void)taosThreadCondInit(&pool->waitingAfterBlockCond, NULL);
(void)taosThreadCondInit(&pool->backupCond, NULL);
if (!pool->pCb) {
pool->pCb = taosMemoryCalloc(1, sizeof(SQueryAutoQWorkerPoolCB));
@ -824,13 +826,17 @@ int32_t tQueryAutoQWorkerInit(SQueryAutoQWorkerPool *pool) {
void tQueryAutoQWorkerCleanup(SQueryAutoQWorkerPool *pPool) {
(void)taosThreadMutexLock(&pPool->poolLock);
pPool->exit = true;
int32_t size = listNEles(pPool->workers);
int32_t size = 0;
if (pPool->workers) {
size = listNEles(pPool->workers);
}
if (pPool->backupWorkers) {
size += listNEles(pPool->backupWorkers);
}
if (pPool->qset) {
for (int32_t i = 0; i < size; ++i) {
taosQsetThreadResume(pPool->qset);
}
size = listNEles(pPool->backupWorkers);
for (int32_t i = 0; i < size; ++i) {
taosQsetThreadResume(pPool->qset);
}
(void)taosThreadMutexUnlock(&pPool->poolLock);
@ -848,7 +854,7 @@ void tQueryAutoQWorkerCleanup(SQueryAutoQWorkerPool *pPool) {
int32_t idx = 0;
SQueryAutoQWorker *worker = NULL;
while (true) {
while (pPool->workers) {
(void)taosThreadMutexLock(&pPool->poolLock);
if (listNEles(pPool->workers) == 0) {
(void)taosThreadMutexUnlock(&pPool->poolLock);
@ -864,7 +870,7 @@ void tQueryAutoQWorkerCleanup(SQueryAutoQWorkerPool *pPool) {
taosMemoryFree(pNode);
}
while (listNEles(pPool->backupWorkers) > 0) {
while (pPool->backupWorkers && listNEles(pPool->backupWorkers) > 0) {
SListNode *pNode = tdListPopHead(pPool->backupWorkers);
worker = (SQueryAutoQWorker *)pNode->data;
if (worker && taosCheckPthreadValid(worker->thread)) {
@ -874,7 +880,7 @@ void tQueryAutoQWorkerCleanup(SQueryAutoQWorkerPool *pPool) {
taosMemoryFree(pNode);
}
while (listNEles(pPool->exitedWorkers) > 0) {
while (pPool->exitedWorkers && listNEles(pPool->exitedWorkers) > 0) {
SListNode *pNode = tdListPopHead(pPool->exitedWorkers);
worker = (SQueryAutoQWorker *)pNode->data;
if (worker && taosCheckPthreadValid(worker->thread)) {
@ -935,7 +941,6 @@ STaosQueue *tQueryAutoQWorkerAllocQueue(SQueryAutoQWorkerPool *pool, void *ahand
if (taosThreadCreate(&pWorker->thread, &thAttr, (ThreadFp)tQueryAutoQWorkerThreadFp, pWorker) != 0) {
taosCloseQueue(queue);
terrno = TSDB_CODE_OUT_OF_MEMORY;
queue = NULL;
break;
}
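Two fixes are visible in this file: tQueryAutoQWorkerInit now initializes every mutex and condition variable before the first early return, and tQueryAutoQWorkerCleanup NULL-guards the worker lists, so a pool whose init bailed out half-way can still be cleaned up safely. A reduced sketch of the pattern, with hypothetical names, is below.

/* Sketch only: sync primitives are set up first, so cleanup may lock
 * unconditionally; list pointers are guarded because they may still be
 * NULL after a failed init. Names are hypothetical. */
typedef struct {
  TdThreadMutex lock;
  SList        *workers; /* stays NULL if init fails early */
} PoolSketch;

static int32_t poolInit(PoolSketch *p) {
  (void)taosThreadMutexInit(&p->lock, NULL); /* before any early return */
  p->workers = tdListNew(sizeof(int32_t));
  if (p->workers == NULL) return TSDB_CODE_OUT_OF_MEMORY; /* lock already valid */
  return 0;
}

static void poolCleanup(PoolSketch *p) {
  (void)taosThreadMutexLock(&p->lock);
  int32_t n = (p->workers != NULL) ? listNEles(p->workers) : 0; /* guard partial init */
  (void)taosThreadMutexUnlock(&p->lock);
  (void)n; /* the real pool resumes and joins the n workers here */
}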

View File

@ -60,7 +60,7 @@ class TDTestCase(TBase):
"enableCoreFile 1",
"fqdn 127.0.0.1",
"firstEp 127.0.0.1",
"locale ENG",
"locale en_US.UTF-8",
"metaCacheMaxSize 10000",
"minimalTmpDirGB 5",
"minimalLogDirGB 1",

View File

@ -113,6 +113,34 @@ class TDTestCase(TBase):
tdSql.checkData(0, 0, f"nihao{num + 2}")
tdSql.checkData(0, 1, f"{11*i}")
def FIX_TS_5239(self):
tdLog.info("check bug TS_5239 ...\n")
sqls = [
"drop database if exists ts_5239",
"create database ts_5239 cachemodel 'both' stt_trigger 1;",
"use ts_5239;",
"CREATE STABLE st (ts timestamp, c1 int) TAGS (groupId int);",
"CREATE TABLE ct1 USING st TAGS (1);"
]
tdSql.executes(sqls)
# 2024-07-03 06:00:00.000
start_ts = 1719957600000
# insert 100 rows
sql = "insert into ct1 values "
for i in range(100):
sql += f"('{start_ts+i * 100}', {i+1})"
sql += ";"
tdSql.execute(sql)
tdSql.execute("flush database ts_5239;")
tdSql.execute("alter database ts_5239 stt_trigger 3;")
tdSql.execute(f"insert into ct1(ts) values({start_ts - 100 * 100})")
tdSql.execute("flush database ts_5239;")
tdSql.execute(f"insert into ct1(ts) values({start_ts + 100 * 200})")
tdSql.execute("flush database ts_5239;")
tdSql.query("select count(*) from ct1;")
tdSql.checkRows(1)
tdSql.checkData(0, 0, 102)
# run
def run(self):
tdLog.debug(f"start to execute {__file__}")
@ -123,6 +151,7 @@ class TDTestCase(TBase):
# TS BUGS
self.FIX_TS_5105()
self.FIX_TS_5143()
self.FIX_TS_5239()
tdLog.success(f"{__file__} successfully executed")

View File

@ -0,0 +1,98 @@
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
from util.common import *
import json
import random
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug(f"start to execute {__file__}")
tdSql.init(conn.cursor(), True)
def checksql(self, sql):
result = os.popen(f"taos -s \"{sql}\" ")
res = result.read()
print(res)
if "Query OK" in res:
tdLog.info("checksql success")
else:
tdLog.exit("checksql error")
def generate_random_str(self,randomlength=32):
"""
Generate a random string of the specified length
"""
random_str = ''
base_str = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890'
#base_str = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
length = len(base_str) - 1
count = 0
for i in range(randomlength):
count = count + 1
random_str += base_str[random.randint(0, length)]
return random_str
def check(self):
# tdSql.execute("create database db" )
# tdSql.execute("create table db.jtable (ts timestamp, c1 VARCHAR(64000))",queryTimes=2)
# with open('./1-insert/temp.json', 'r') as f:
# data = json.load(f)
# json_str=json.dumps(data)
# print(data,type(data),type(json_str))
# json_str=json_str.replace('"','\\"')
# # sql = f"insert into db.jtable values(now,\"{json_str}\") "
# # os.system(f"taos -s {sql} ")
# rowNum = 100
# step = 1000
# self.ts = 1537146000000
# for j in range(1000):
# sql = "insert into db.jtable values"
# for k in range(rowNum):
# self.ts += step
# sql += f"({self.ts},\"{json_str}\") "
# tdSql.execute(sql,queryTimes=2)
# tdSql.execute("flush database db",queryTimes=2)
tdSql.execute("create database db1" )
tdSql.execute("create table db1.jtable (ts timestamp, c1 VARCHAR(6400) compress 'zstd')",queryTimes=2)
# with open('./1-insert/seedStr.json', 'r') as f:
# data = f.read()
# json_str=str(data)
# print(data,type(data),type(json_str))
# json_str=json_str.replace('"','\\"')
rowNum = 100
step = 1000
self.ts = 1657146000000
f=self.generate_random_str(5750)
json_str=f.replace('"','\\"')
for j in range(1000):
sql = "insert into db1.jtable values"
# f=self.generate_random_str(5750)
# json_str=f.replace('"','\\"')
for k in range(rowNum):
self.ts += step
f=self.generate_random_str(5750)
json_str=f.replace('"','\\"')
sql += f"({self.ts},\"{json_str}\") "
#print(sql)
tdSql.execute(sql,queryTimes=2)
tdSql.execute("flush database db1",queryTimes=2)
def run(self):
self.check()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

tests/army/tmq/tmqBugs.py Normal file
View File

@ -0,0 +1,98 @@
import taos
import sys
import time
import socket
import os
import threading
from frame.log import *
from frame.cases import *
from frame.sql import *
from frame.caseBase import *
from frame import *
from taos.tmq import *
import frame.etool
class TDTestCase:
updatecfgDict = {'debugFlag': 135, 'asynclog': 0}
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug(f"start to execute {__file__}")
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
def td_31283_test(self):
tdSql.execute(f'create database if not exists d1 vgroups 1')
tdSql.execute(f'use d1')
tdSql.execute(f'create table st(ts timestamp, i int) tags(t int)')
tdSql.execute(f'insert into t1 using st tags(1) values(now, 1) (now+1s, 2)')
tdSql.execute(f'insert into t2 using st tags(2) values(now, 1) (now+1s, 2)')
tdSql.execute(f'insert into t3 using st tags(3) values(now, 1) (now+1s, 2)')
tdSql.execute(f'insert into t1 using st tags(1) values(now+5s, 11) (now+10s, 12)')
tdSql.query("select * from st")
tdSql.checkRows(8)
tdSql.error(f'create topic t1 with meta as database d2', expectErrInfo="Database not exist")
tdSql.error(f'create topic t1 as database d2', expectErrInfo="Database not exist")
tdSql.error(f'create topic t2 as select * from st2', expectErrInfo="Fail to get table info, error: Table does not exist")
tdSql.error(f'create topic t3 as stable st2', expectErrInfo="STable not exist")
tdSql.error(f'create topic t3 with meta as stable st2', expectErrInfo="STable not exist")
tdSql.execute(f'create topic t1 with meta as database d1')
consumer_dict = {
"group.id": "g1",
"td.connect.user": "root",
"td.connect.pass": "taosdata",
"auto.offset.reset": "earliest",
# "msg.enable.batchmeta": "true",
"experimental.snapshot.enable": "true",
}
consumer1 = Consumer(consumer_dict)
try:
consumer1.subscribe(["t1"])
except TmqError:
tdLog.exit(f"subscribe error")
index = 0
try:
while True:
res = consumer1.poll(1)
if not res:
if index != 1:
tdLog.exit("consume error")
break
val = res.value()
if val is None:
continue
cnt = 0
for block in val:
cnt += len(block.fetchall())
if cnt != 8:
tdLog.exit("consume error")
index += 1
finally:
consumer1.close()
tdSql.query(f'show consumers')
tdSql.checkRows(0)
tdSql.execute(f'drop topic t1')
tdSql.execute(f'drop database d1')
def run(self):
self.td_31283_test()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -27,14 +27,15 @@
,,y,army,./pytest.sh python3 ./test.py -f insert/insert_basic.py -N 3
,,y,army,./pytest.sh python3 ./test.py -f cluster/splitVgroupByLearner.py -N 3
,,y,army,./pytest.sh python3 ./test.py -f authorith/authBasic.py -N 3
# ,,n,army,python3 ./test.py -f cmdline/fullopt.py
,,n,army,python3 ./test.py -f query/show.py -N 3
,,n,army,python3 ./test.py -f alter/alterConfig.py -N 3
,,n,army,python3 ./test.py -f cmdline/fullopt.py
,,y,army,./pytest.sh python3 ./test.py -f query/show.py -N 3
,,y,army,./pytest.sh python3 ./test.py -f alter/alterConfig.py -N 3
,,y,army,./pytest.sh python3 ./test.py -f query/subquery/subqueryBugs.py -N 3
,,y,army,./pytest.sh python3 ./test.py -f storage/oneStageComp.py -N 3 -L 3 -D 1
,,y,army,./pytest.sh python3 ./test.py -f storage/compressBasic.py -N 3
,,y,army,./pytest.sh python3 ./test.py -f grant/grantBugs.py -N 3
,,y,army,./pytest.sh python3 ./test.py -f query/queryBugs.py -N 3
,,y,army,./pytest.sh python3 ./test.py -f tmq/tmqBugs.py -N 3
#
# system test
@ -162,6 +163,7 @@
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/stt_blocks_check.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/out_of_order.py -Q 3
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/out_of_order.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/agg_null.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/insert_null_none.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/insert_null_none.py -R
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/insert_null_none.py -Q 2
@ -279,8 +281,8 @@
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmq3mnodeSwitch.py -N 6 -M 3 -n 3 -i True
,,y,system-test,./pytest.sh python3 test.py -f 7-tmq/tmqVnodeTransform-db-removewal.py -N 2 -n 1
,,y,system-test,./pytest.sh python3 test.py -f 7-tmq/tmqVnodeTransform-stb-removewal.py -N 6 -n 3
#,,y,system-test,./pytest.sh python3 test.py -f 7-tmq/tmqVnodeTransform-stb.py -N 2 -n 1
#,,y,system-test,./pytest.sh python3 test.py -f 7-tmq/tmqVnodeTransform-stb.py -N 6 -n 3
,,y,system-test,./pytest.sh python3 test.py -f 7-tmq/tmqVnodeTransform-stb.py -N 2 -n 1
,,y,system-test,./pytest.sh python3 test.py -f 7-tmq/tmqVnodeTransform-stb.py -N 6 -n 3
#,,y,system-test,./pytest.sh python3 test.py -f 7-tmq/tmqVnodeTransform-db.py -N 6 -n 3
,,y,system-test,./pytest.sh python3 test.py -f 7-tmq/tmqVnodeSplit-stb-select.py -N 2 -n 1
,,y,system-test,./pytest.sh python3 test.py -f 7-tmq/tmqVnodeSplit-stb-select-duplicatedata.py -N 3 -n 3
@ -340,7 +342,7 @@
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/splitVGroup.py -N 3 -n 3
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/splitVGroupWal.py -N 3 -n 3
,,n,system-test,python3 ./test.py -f 0-others/timeRangeWise.py -N 3
#,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/delete_check.py
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/delete_check.py
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/test_hot_refresh_configurations.py
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/subscribe_stream_privilege.py
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/empty_identifier.py

View File

@ -9,3 +9,4 @@ requests
pexpect
faker
pyopenssl
hyperloglog

View File

@ -27,8 +27,8 @@ DLL_EXPORT int32_t bit_and(SUdfDataBlock* block, SUdfColumn* resultCol) {
for (int32_t i = 0; i < block->numOfRows; ++i) {
if (udfColDataIsNull(block->udfCols[0], i)) {
udfTrace("block:%p, row:%d result is null since col:0 is null", block, i);
udfColDataSetNull(resultCol, i);
udfTrace("block:%p, row:%d result is null since col:0 is null", block, i);
continue;
}
@ -38,8 +38,8 @@ DLL_EXPORT int32_t bit_and(SUdfDataBlock* block, SUdfColumn* resultCol) {
int32_t j = 1;
for (; j < block->numOfCols; ++j) {
if (udfColDataIsNull(block->udfCols[j], i)) {
udfTrace("block:%p, row:%d result is null since col:%d is null", block, i, j);
udfColDataSetNull(resultCol, i);
udfTrace("block:%p, row:%d result is null since col:%d is null", block, i, j);
break;
}
@ -55,7 +55,7 @@ DLL_EXPORT int32_t bit_and(SUdfDataBlock* block, SUdfColumn* resultCol) {
}
resultData->numOfRows = block->numOfRows;
udfTrace("block:%p, processing completed, rows:%d, cols:%d,", block, block->numOfRows, block->numOfCols);
udfTrace("block:%p, processing completed", block);
return TSDB_CODE_SUCCESS;
}

View File

@ -1,52 +1,63 @@
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "taosudf.h"
DLL_EXPORT int32_t l2norm_init() {
return 0;
}
DLL_EXPORT int32_t l2norm_init() { return 0; }
DLL_EXPORT int32_t l2norm_destroy() {
return 0;
}
DLL_EXPORT int32_t l2norm_destroy() { return 0; }
DLL_EXPORT int32_t l2norm_start(SUdfInterBuf* buf) {
int32_t bufLen = sizeof(double);
if (buf->bufLen < bufLen) {
udfError("failed to execute udf since input buflen:%d < %d", buf->bufLen, bufLen);
return TSDB_CODE_UDF_INVALID_BUFSIZE;
}
udfTrace("start aggregation, buflen:%d used:%d", buf->bufLen, bufLen);
*(int64_t*)(buf->buf) = 0;
buf->bufLen = sizeof(double);
buf->numOfResult = 1;
buf->bufLen = bufLen;
buf->numOfResult = 0;
return 0;
}
DLL_EXPORT int32_t l2norm(SUdfDataBlock* block, SUdfInterBuf* interBuf, SUdfInterBuf* newInterBuf) {
double sumSquares = *(double*)interBuf->buf;
int8_t numNotNull = 0;
udfTrace("block:%p, processing begins, cols:%d rows:%d", block, block->numOfCols, block->numOfRows);
for (int32_t i = 0; i < block->numOfCols; ++i) {
SUdfColumn* col = block->udfCols[i];
if (!(col->colMeta.type == TSDB_DATA_TYPE_INT ||
col->colMeta.type == TSDB_DATA_TYPE_DOUBLE)) {
if (col->colMeta.type != TSDB_DATA_TYPE_INT && col->colMeta.type != TSDB_DATA_TYPE_DOUBLE) {
udfError("block:%p, col:%d type:%d should be int(%d) or double(%d)", block, i, col->colMeta.type,
TSDB_DATA_TYPE_INT, TSDB_DATA_TYPE_DOUBLE);
return TSDB_CODE_UDF_INVALID_INPUT;
}
}
double sumSquares = *(double*)interBuf->buf;
int8_t numNotNull = 0;
for (int32_t i = 0; i < block->numOfCols; ++i) {
for (int32_t j = 0; j < block->numOfRows; ++j) {
SUdfColumn* col = block->udfCols[i];
if (udfColDataIsNull(col, j)) {
udfTrace("block:%p, col:%d row:%d is null", block, i, j);
continue;
}
switch (col->colMeta.type) {
case TSDB_DATA_TYPE_INT: {
char* cell = udfColDataGetData(col, j);
int32_t num = *(int32_t*)cell;
sumSquares += (double)num * num;
udfTrace("block:%p, col:%d row:%d data:%d", block, i, j, num);
break;
}
case TSDB_DATA_TYPE_DOUBLE: {
char* cell = udfColDataGetData(col, j);
double num = *(double*)cell;
sumSquares += num * num;
udfTrace("block:%p, col:%d row:%d data:%f", block, i, j, num);
break;
}
default:
@ -54,11 +65,14 @@ DLL_EXPORT int32_t l2norm(SUdfDataBlock* block, SUdfInterBuf *interBuf, SUdfInte
}
++numNotNull;
}
udfTrace("block:%p, col:%d result is %f", block, i, sumSquares);
}
*(double*)(newInterBuf->buf) = sumSquares;
newInterBuf->bufLen = sizeof(double);
newInterBuf->numOfResult = 1;
udfTrace("block:%p, result is %f", block, sumSquares);
return 0;
}
@ -67,5 +81,7 @@ DLL_EXPORT int32_t l2norm_finish(SUdfInterBuf* buf, SUdfInterBuf *resultData) {
*(double*)(resultData->buf) = sqrt(sumSquares);
resultData->bufLen = sizeof(double);
resultData->numOfResult = 1;
udfTrace("end aggregation, result is %f", *(double*)(resultData->buf));
return 0;
}

View File

@ -1,55 +1,53 @@
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "taosudf.h"
#define STR_MAX_LEN 256 // inter buffer length
// init
DLL_EXPORT int32_t max_vol_init()
{
return 0;
DLL_EXPORT int32_t max_vol_init() { return 0; }
DLL_EXPORT int32_t max_vol_destroy() { return 0; }
DLL_EXPORT int32_t max_vol_start(SUdfInterBuf *buf) {
int32_t bufLen = sizeof(float) + STR_MAX_LEN;
if (buf->bufLen < bufLen) {
udfError("failed to execute udf since input buflen:%d < %d", buf->bufLen, bufLen);
return TSDB_CODE_UDF_INVALID_BUFSIZE;
}
// destroy
DLL_EXPORT int32_t max_vol_destroy()
{
return 0;
}
// start
DLL_EXPORT int32_t max_vol_start(SUdfInterBuf *buf)
{
udfTrace("start aggregation, buflen:%d used:%d", buf->bufLen, bufLen);
memset(buf->buf, 0, sizeof(float) + STR_MAX_LEN);
// set init value
*((float*)buf->buf) = -10000000;
buf->bufLen = sizeof(float) + STR_MAX_LEN;
*((float *)buf->buf) = INT32_MIN;
buf->bufLen = bufLen;
buf->numOfResult = 0;
return 0;
}
DLL_EXPORT int32_t max_vol(SUdfDataBlock *block, SUdfInterBuf *interBuf, SUdfInterBuf *newInterBuf) {
udfTrace("block:%p, processing begins, cols:%d rows:%d", block, block->numOfCols, block->numOfRows);
float maxValue = *(float *)interBuf->buf;
char strBuff[STR_MAX_LEN] = "inter1buf";
if (block->numOfCols < 2)
{
if (block->numOfCols < 2) {
udfError("block:%p, cols:%d needs to be greater than 2", block, block->numOfCols);
return TSDB_CODE_UDF_INVALID_INPUT;
}
// check data type
for (int32_t i = 0; i < block->numOfCols; ++i)
{
for (int32_t i = 0; i < block->numOfCols; ++i) {
SUdfColumn *col = block->udfCols[i];
if (i == block->numOfCols - 1) {
// last column is device id, must be varchar
if (col->colMeta.type != TSDB_DATA_TYPE_VARCHAR) {
udfError("block:%p, col:%d type:%d should be varchar(%d)", block, i, col->colMeta.type, TSDB_DATA_TYPE_VARCHAR);
return TSDB_CODE_UDF_INVALID_INPUT;
}
} else {
if (col->colMeta.type != TSDB_DATA_TYPE_FLOAT) {
udfError("block:%p, col:%d type:%d should be float(%d)", block, i, col->colMeta.type, TSDB_DATA_TYPE_FLOAT);
return TSDB_CODE_UDF_INVALID_INPUT;
}
}
@ -57,33 +55,45 @@ DLL_EXPORT int32_t max_vol(SUdfDataBlock *block, SUdfInterBuf *interBuf, SUdfInt
// calc max voltage
SUdfColumn *lastCol = block->udfCols[block->numOfCols - 1];
for (int32_t i = 0; i < (block->numOfCols - 1); ++i) {
for (int32_t i = 0; i < block->numOfCols - 1; ++i) {
for (int32_t j = 0; j < block->numOfRows; ++j) {
SUdfColumn *col = block->udfCols[i];
if (udfColDataIsNull(col, j)) {
udfTrace("block:%p, col:%d row:%d is null", block, i, j);
continue;
}
char *data = udfColDataGetData(col, j);
float voltage = *(float *)data;
if (voltage > maxValue) {
if (voltage <= maxValue) {
udfTrace("block:%p, col:%d row:%d data:%f", block, i, j, voltage);
} else {
maxValue = voltage;
char *valData = udfColDataGetData(lastCol, j);
int32_t valDataLen = udfColDataGetDataLen(lastCol, j);
// get device id
char *deviceId = valData + sizeof(uint16_t);
sprintf(strBuff, "%s_(%d,%d)_%f", deviceId, j, i, maxValue);
int32_t deviceIdLen = valDataLen < (STR_MAX_LEN - 1) ? valDataLen : (STR_MAX_LEN - 1);
strncpy(strBuff, deviceId, deviceIdLen);
snprintf(strBuff + deviceIdLen, STR_MAX_LEN - deviceIdLen, "_(%d,%d)_%f", j, i, maxValue);
udfTrace("block:%p, col:%d row:%d data:%f, as max_val:%s", block, i, j, voltage, strBuff);
}
}
}
*(float *)newInterBuf->buf = maxValue;
strcpy(newInterBuf->buf + sizeof(float), strBuff);
strncpy(newInterBuf->buf + sizeof(float), strBuff, STR_MAX_LEN);
newInterBuf->bufLen = sizeof(float) + strlen(strBuff) + 1;
newInterBuf->numOfResult = 1;
udfTrace("block:%p, result is %s", block, strBuff);
return 0;
}
DLL_EXPORT int32_t max_vol_finish(SUdfInterBuf *buf, SUdfInterBuf *resultData)
{
DLL_EXPORT int32_t max_vol_finish(SUdfInterBuf *buf, SUdfInterBuf *resultData) {
char *str = buf->buf + sizeof(float);
// copy to des
char *des = resultData->buf + sizeof(uint16_t);
@ -97,5 +107,7 @@ DLL_EXPORT int32_t max_vol_finish(SUdfInterBuf *buf, SUdfInterBuf *resultData)
resultData->bufLen = len + sizeof(uint16_t);
// set row count
resultData->numOfResult = 1;
udfTrace("end aggregation, result is %s", str);
return 0;
}
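The max_vol changes above are largely about bounded string handling: the old code sprintf'd a column-supplied device id into a 256-byte stack buffer, while the new code clamps the copy length and lets snprintf terminate the string. A standalone sketch of that fix, with hypothetical names, follows.

/* Sketch of the bounded-copy fix, assuming src points at column data of
 * length srcLen that is not NUL-terminated. Names are hypothetical. */
#include <stdio.h>
#include <string.h>

#define STR_MAX_LEN 256

static void formatTag(char dst[STR_MAX_LEN], const char *src, int srcLen,
                      int row, int col, float val) {
  int n = srcLen < STR_MAX_LEN - 1 ? srcLen : STR_MAX_LEN - 1; /* clamp first */
  strncpy(dst, src, n);                            /* bounded copy, may omit NUL */
  snprintf(dst + n, STR_MAX_LEN - n, "_(%d,%d)_%f", row, col, val); /* terminates */
}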

Some files were not shown because too many files have changed in this diff.