Merge branch '3.0' of github.com:taosdata/TDengine into szhou/feature/sma
commit 589ecd75ba
@@ -62,7 +62,7 @@ The main features of TDengine are as follows:

 <figure>

-
+

 </figure>
 <center>Figure 1. TDengine Technical Ecosystem</center>
@@ -11,7 +11,7 @@ TDengine supports aggregate queries over time-window partitions of the data, for example

 The INTERVAL clause produces windows of equal duration, and SLIDING specifies the step by which a window slides forward. Each execution of the query covers one time window, and the window slides forward as time passes. When defining a continuous query, both the time window size and the forward sliding step must be specified. As shown in the figure, [t0s, t0e], [t1s, t1e], and [t2s, t2e] are the time windows of three consecutive executions, and the sliding step is marked as `sliding time`. Filtering, aggregation, and other operations are performed independently within each time window. When SLIDING equals INTERVAL, the sliding window becomes a tumbling window.

-
+

 The INTERVAL and SLIDING clauses must be used together with aggregate and selection functions, so the following SQL statement is illegal:
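A valid counterpart pairs the window clauses with an aggregate function. A minimal sketch, assuming the `meters` super table defined later in this commit (table and column names are illustrative):

```sql
-- Average current per 10-minute window, recomputed every 5 minutes
SELECT AVG(current) FROM meters INTERVAL(10m) SLIDING(5m);
```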
@@ -33,7 +33,7 @@ _ Since version 2.1.5.0, the shortest interval allowed by the INTERVAL clause has been adjusted to

 An integer (or boolean) or string value identifies the status of the device when a record is produced. Consecutive records with the same status value belong to the same status window, and the window closes when the value changes. As shown in the figure below, the status windows determined by the status value are [2019-04-28 14:22:07, 2019-04-28 14:22:10] and [2019-04-28 14:22:11, 2019-04-28 14:22:12]. (Status windows are not yet supported on super tables.)

-
+

 Use STATE_WINDOW to specify the column on which status windows are determined. For example:
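The example statement, which also appears as diff context just below, counts the rows and takes the first timestamp inside each status window:

```sql
SELECT COUNT(*), FIRST(ts), status FROM temp_tb_1 STATE_WINDOW(status);
```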
@@ -45,7 +45,7 @@ SELECT COUNT(*), FIRST(ts), status FROM temp_tb_1 STATE_WINDOW(status);

 Session windows are determined by the value of the timestamp primary key. As shown in the figure below, if the maximum gap between consecutive timestamps is set to 12 seconds, the following 6 records form 2 session windows, [2019-04-28 14:22:10, 2019-04-28 14:22:30] and [2019-04-28 14:23:10, 2019-04-28 14:23:30], because the gap between 2019-04-28 14:22:30 and 2019-04-28 14:23:10 is 40 seconds, which exceeds the 12-second limit.

-
+

 Records whose timestamps fall within the tol_val gap belong to the same window; once two consecutive records are further apart than tol_val, a new window starts automatically. (Session windows are not yet supported on super tables.)
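A concrete sketch of the `SESSION(ts, tol_val)` form shown above, using the 12-second gap from the figure:

```sql
SELECT COUNT(*), FIRST(ts) FROM temp_tb_1 SESSION(ts, 12s);
```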
@@ -4,7 +4,7 @@ title: Connector

 TDengine provides a rich set of application development interfaces. To help users develop applications quickly, TDengine supports connectors for multiple programming languages, with official connectors for C/C++, Java, Python, Go, Node.js, C#, and Rust. These connectors can connect to a TDengine cluster through the native interface (taosc) or the REST interface (not yet supported in some languages). Community developers have also contributed several unofficial connectors, such as the ADO.NET connector, the Lua connector, and the PHP connector.

-
+

 ## Supported platforms
@@ -11,7 +11,7 @@ import TabItem from '@theme/TabItem';

 `taos-jdbcdriver` is the official Java connector for TDengine, through which Java developers can build applications that access TDengine. `taos-jdbcdriver` implements the standard JDBC driver interface and provides two kinds of connections: a native connection to a TDengine instance through the TDengine client driver (taosc), which supports data writing, querying, subscription, the schemaless interface, and the parameter binding interface; and a REST connection to a TDengine instance through the REST interface provided by taosAdapter (version 2.4.0.0 and later). The feature set of the REST connection differs slightly from that of the native connection.

-
+

 The figure above shows the two ways a Java application can access TDengine through the connector:
@@ -24,7 +24,7 @@ taosAdapter provides the following features:

 ## taosAdapter architecture diagram

-
+

 ## taosAdapter Deployment Method
@@ -233,25 +233,25 @@ sudo systemctl enable grafana-server

 Point to the **Configurations** -> **Data Sources** menu, then click the **Add data source** button.

-
+

 Search for and select **TDengine**.

-
+

 Configure the TDengine data source.

-
+

 Save and test; under normal circumstances it reports 'TDengine Data source is working'.

-
+

 ### Importing the dashboard

 Point to **+** / **Create** - **import** (or the `/dashboard/import` URL).

-
+

 Type the dashboard ID `15167` in **Import via grafana.com** and click **Load**.
@@ -259,7 +259,7 @@ sudo systemctl enable grafana-server

 Once the import is complete, the full page view of TDinsight looks as follows.

-
+

 ## TDinsight dashboard details
@@ -269,7 +269,7 @@ The TDinsight dashboard is designed to provide usage of TDengine-related resources [dnodes, mnodes

 ### Cluster status

-
+

 This section shows the cluster's current information and status; alert information is also shown here (ordered left to right, top to bottom).
@@ -289,7 +289,7 @@ The TDinsight dashboard is designed to provide usage of TDengine-related resources [dnodes, mnodes

 ### DNodes status

-
+

 - **DNodes Status**: a simple table view of `show dnodes`.
 - **DNodes Lifetime**: the time elapsed since the dnode was created.
@@ -298,14 +298,14 @@ The TDinsight dashboard is designed to provide usage of TDengine-related resources [dnodes, mnodes

 ### MNode overview

-
+

 1. **MNodes Status**: a simple table view of `show mnodes`.
 2. **MNodes Number**: like `DNodes Number`, the change in the number of MNodes over time.

 ### Requests

-
+

 1. **Requests Rate (Inserts per Second)**: average number of inserts per second.
 2. **Requests (Selects)**: number of query requests and their rate of change (count per second).
@@ -313,7 +313,7 @@ The TDinsight dashboard is designed to provide usage of TDengine-related resources [dnodes, mnodes

 ### Database

-
+

 Database usage, repeated in a set of rows for each value of the variable `$database`, i.e. one set per database.
@@ -325,7 +325,7 @@ The TDinsight dashboard is designed to provide usage of TDengine-related resources [dnodes, mnodes

 ### DNode resource usage

-
+

 Data node resource usage, repeated in a set of rows for each value of the variable `$fqdn`, i.e. one set per data node. Includes:
@@ -346,13 +346,13 @@ The TDinsight dashboard is designed to provide usage of TDengine-related resources [dnodes, mnodes

 ### Login history

-
+

 Currently, only the number of logins per minute is reported.

 ### Monitoring taosAdapter

-
+

 Monitoring of taosAdapter request statistics and status details is supported. Includes:
@@ -64,15 +64,15 @@ GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS=tdengine-datasource

 Users can log in to the Grafana server directly at http://localhost:3000 (username/password: admin/admin) and add a data source through `Configuration -> Data Sources` on the left, as shown below:

-
+

 Click `Add data source` to open the new data source page, and type TDengine in the search box to select and add it, as shown below:

-
+

 Enter the data source configuration page and adjust the configuration following the default prompts:

-
+

 - Host: the IP address of the server in the TDengine cluster that provides the REST service (offered by taosd before 2.4 and by taosAdapter since 2.4), together with the TDengine REST service port (6041); defaults to http://localhost:6041.
 - User: the TDengine user name.
@@ -80,13 +80,13 @@ GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS=tdengine-datasource

 Click `Save & Test`; on success the following prompt appears:

-
+

 ### Create Dashboard

 Go back to the main interface to create a dashboard, and click Add Query to enter the panel query page:

-
+

 As shown above, select the `TDengine` data source under Query, then enter the SQL to run in the query box below. Details:
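As a sketch of such a panel query (the `log.dn` table is an assumption based on TDengine's default monitoring database; `$from`, `$to`, and `$interval` are variables the Grafana plugin fills in):

```sql
-- Hypothetical panel query: average system memory usage per interval
SELECT AVG(mem_system) FROM log.dn WHERE ts >= $from AND ts < $to INTERVAL($interval)
```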
@@ -96,7 +96,7 @@ GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS=tdengine-datasource

 Following the default prompts, querying the average system memory usage over the specified interval on the server hosting the current TDengine deployment looks like this:

-
+

 > For how to build monitoring views with Grafana and for more information on using Grafana, refer to the official Grafana [documentation](https://grafana.com/docs/).
@@ -45,25 +45,25 @@ MQTT is a popular IoT data transfer protocol, and [EMQX](https://github.com/emqx/em

 Open http://IP:18083 in a browser and log in to the EMQX Dashboard. The initial username is `admin` and the password is `public`.

-
+

 ### Creating a Rule

 Select "Rule" under "Rule Engine" on the left and click the "Create" button:

-
+

 ### Editing the SQL field

-
+

 ### Adding an "action handler"

-
+

 ### Adding a "Resource"

-
+

 Select "Data to Web Service" and click the "New Resource" button:
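The SQL field decides which MQTT messages the rule matches. A minimal sketch, assuming the simulated devices publish to the `sensor/data` topic (the topic name is an assumption):

```sql
SELECT payload FROM "sensor/data"
```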
@@ -71,13 +71,13 @@ MQTT is a popular IoT data transfer protocol, and [EMQX](https://github.com/emqx/em

 Select "Data to Web Service" and fill in the request URL with the address and port of the server running taosAdapter (default 6041). Leave the other properties at their default values.

-
+

 ### Editing the "action"

 Edit the resource configuration to add the key/value pair for Authorization; see the [TDengine REST API documentation](https://docs.taosdata.com/reference/rest-api/) for details. Enter the rule-engine substitution template in the message body.

-
+

 ## Writing a mock test program
@@ -164,7 +164,7 @@ MQTT is a popular IoT data transfer protocol, and [EMQX](https://github.com/emqx/em

 Note: set CLIENT_NUM in the code to a relatively small value at the start of testing, to avoid overwhelming the hardware with a large number of concurrent clients.

-
+

 ## Running the test to simulate sending MQTT data
@@ -173,19 +173,19 @@ npm install mqtt mockjs --save --registry=https://registry.npm.taobao.org

 node mock.js
 ```

-
+

 ## Verifying that EMQX receives the data

 Refresh the rule engine page in the EMQX Dashboard to see how many records were received correctly:

-
+

 ## Verifying that the data is written to TDengine

 Use the TDengine CLI to log in and query the database and tables to verify that the data was written to TDengine correctly:

-
+

 For details on using TDengine, refer to the [TDengine official documentation](https://docs.taosdata.com/).
 For details on using EMQX, refer to the [EMQX official documentation](https://www.emqx.io/docs/zh/v4.4/rule/rule-engine.html).
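A sketch of that check in the TDengine CLI (the `test` database and `sensor_data` table names are assumptions; use the names your rule's message template writes to):

```sql
USE test;
SELECT COUNT(*) FROM sensor_data;
```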
@@ -9,11 +9,11 @@ TDengine Kafka Connector contains two plugins: TDengine Source Connector and TDeng

 Kafka Connect is a component of Apache Kafka that makes it easy to connect other systems, such as databases, cloud services, and file systems, to Kafka. Data can flow from other systems to Kafka, and from Kafka to other systems, through Kafka Connect. Plugins that read data from other systems are called Source Connectors; plugins that write data to other systems are called Sink Connectors. Neither connects to the Kafka broker directly: a Source Connector hands its data to Kafka Connect, and a Sink Connector receives its data from Kafka Connect.

-
+

 TDengine Source Connector reads data out of TDengine in real time and sends it to Kafka Connect. TDengine Sink Connector receives data from Kafka Connect and writes it to TDengine.

-
+

 ## What is Confluent?
@@ -26,7 +26,7 @@ Confluent adds many extensions on top of Kafka, including:

 5. A GUI for managing and monitoring Kafka - Confluent Control Center

 Some of these extensions are included in the community edition of Confluent; others are available only in the enterprise edition.
-
+

 Confluent Enterprise Edition provides the `confluent` command-line tool for managing the components.
@@ -11,7 +11,7 @@ The design of TDengine is based on the assumption that no single piece of hardware or software is reliable and that no

 The logical structure of TDengine's distributed architecture is shown below:

-
+

 <center> Figure 1: TDengine architecture diagram </center>
@@ -63,7 +63,7 @@ The logical structure of TDengine's distributed architecture is shown below:

 To explain the relationship between vnode, mnode, taosc, and the application, and the role each plays, the following dissects the typical flow of a data write.

-
+

 <center> Figure 2: Typical operation flow in TDengine </center>
@@ -135,7 +135,7 @@ Besides sharding by vnode, TDengine also partitions time-series data by time range

 A master vnode follows this write flow:

-
+

 <center> Figure 3: TDengine master write flow </center>
@@ -150,7 +150,7 @@ A master vnode follows this write flow:

 For a slave vnode, the write flow is:

-
+

 <center> Figure 4: TDengine slave write flow </center>
@@ -284,7 +284,7 @@ SELECT COUNT(*) FROM d1001 WHERE ts >= '2017-7-14 00:00:00' AND ts < '2017-7-14

 TDengine creates a separate table for each data collection point, but in practice data from different collection points often needs to be aggregated. To do this efficiently, TDengine introduces the super table (STable) concept. A super table represents one specific type of data collection point: it is a set of tables whose schemas are identical but each of which carries its own static tags. There can be multiple tags, and they can be added, deleted, and modified at any time. By specifying tag filter conditions, an application can aggregate or run statistics over all or a subset of the tables under one STable, which greatly simplifies application development. The flow is shown in the figure below:

-
+

 <center> Figure 5: Multi-table aggregation query </center>
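As a sketch of such a tag-filtered aggregation, assuming the `meters` super table with a `groupId` tag that appears later in this commit:

```sql
-- Hourly average current across all tables under the super table whose groupId tag is 2
SELECT AVG(current) FROM meters WHERE groupId = 2 INTERVAL(1h);
```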
@@ -16,7 +16,7 @@ IT operations monitoring data is usually sensitive to time characteristics, for example

 This article shows how to quickly build an IT operations monitoring system based on TDengine + Telegraf + Grafana without writing a single line of code, just by modifying a few lines in configuration files. The architecture is shown below:

-
+

 ## Installation steps
@@ -75,7 +75,7 @@ sudo systemctl start telegraf

 Click the gear icon on the left and select `Plugins`; you should find the TDengine data source plugin icon.
 Click the plus icon on the left and select `Import`; download the dashboard JSON file from `https://github.com/taosdata/grafanaplugin/blob/master/examples/telegraf/grafana/dashboards/telegraf-dashboard-v0.1.0.json` and import it. You should then see a dashboard like this:

-![IT-DevOps-Solutions-telegraf-dashboard.webp]./IT-DevOps-Solutions-telegraf-dashboard.webp)
+

 ## Summary
@@ -16,7 +16,7 @@ IT operations monitoring data is usually sensitive to time characteristics, for example

 This article shows how to quickly build an IT operations monitoring system based on TDengine + collectd / StatsD + Grafana without writing a single line of code, just by modifying a few lines in configuration files. The architecture is shown below:

-
+

 ## Installation steps
@@ -81,12 +81,12 @@ In the repeater section, add { host:'<TDengine server/cluster host>', port: <port for S

 Download the dashboard JSON file from https://github.com/taosdata/grafanaplugin/blob/master/examples/collectd/grafana/dashboards/collect-metrics-with-tdengine-v0.1.0.json, click the plus icon on the left, select `Import`, and import the JSON file as prompted. You should then see a dashboard like this:

-
+

 #### Importing the StatsD dashboard

 Download the dashboard JSON file from `https://github.com/taosdata/grafanaplugin/blob/master/examples/statsd/dashboards/statsd-with-tdengine-v0.1.0.json`, click the plus icon on the left, select `Import`, and import the JSON file as prompted. You should then see a dashboard like this:
-
+

 ## Summary
@@ -27,7 +27,7 @@ title: Best practices for migrating OpenTSDB applications to TDengine

 The overall architecture of a typical DevOps application scenario is shown below (Figure 1).

 **Figure 1. Typical architecture in a DevOps scenario**
-
+

 This scenario includes Agent tools deployed in the application environment to collect machine metrics, network metrics, and application metrics; a data collector that aggregates the information collected by the agents; a system for persistent data storage and management; and monitoring data visualization tools (e.g. Grafana).
@@ -70,7 +70,7 @@ LoadPlugin write_tsdb

 TDengine provides two default Dashboard templates; users only need to import the templates from the Grafana directory into Grafana to activate them.

 **Figure 2. Importing Grafana templates**
-
+

 With the steps above, the migration from OpenTSDB to TDengine is complete. The whole process is very simple: no code needs to be written, and only a few configuration files need to be adjusted.
@@ -83,7 +83,7 @@ TDengine provides two default Dashboard templates; users only need to import the templates from the Grafana

 If your application is particularly complex, or your domain is not a DevOps scenario, read on for a more comprehensive and in-depth look at the advanced topics of migrating OpenTSDB applications to TDengine.

 **Figure 3. System architecture after migration**
-
+

 ## Migration evaluation and strategy for other scenarios
@@ -54,7 +54,7 @@ With TDengine, the total cost of ownership of your time-series data platform can

 ## Technical Ecosystem

 This is how TDengine would be situated in a typical time-series data processing platform:

-
+

 <center>Figure 1. TDengine Technical Ecosystem</center>
@@ -10,7 +10,7 @@ Window related clauses are used to divide the data set to be queried into subset

 The `INTERVAL` clause is used to generate time windows of equal duration, and `SLIDING` specifies the step by which the window slides forward. The query is performed on one time window each time, and the window moves forward with time. When defining a continuous query, both the time window size and the forward sliding step need to be specified. As shown in the figure below, [t0s, t0e], [t1s, t1e], and [t2s, t2e] are the time ranges of three windows on which continuous queries are executed. The sliding step is marked as `sliding time`. Query, filter, and aggregate operations are executed independently on each time window. When the step specified by `SLIDING` is the same as the interval specified by `INTERVAL`, the sliding window is actually a tumbling (flip) window.

-
+

 `INTERVAL` and `SLIDING` should be used with aggregate functions and selection functions. The SQL statement below is illegal because no aggregate or selection function is used with `INTERVAL`.
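A legal counterpart pairs the window clauses with an aggregate, for example (a sketch reusing the `temp_tb_1` table from the session-window example below):

```sql
-- Row count per 1-hour window, recomputed every 30 minutes
SELECT COUNT(*) FROM temp_tb_1 INTERVAL(1h) SLIDING(30m);
```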
@@ -30,7 +30,7 @@ When the time length specified by `SLIDING` is the same as that specified by `IN

 In case of using an integer, bool, or string to represent the device status at a moment, consecutive rows with the same status value belong to the same status window; once the status changes, the window closes. As shown in the following figure, there are two status windows according to status, [2019-04-28 14:22:07, 2019-04-28 14:22:10] and [2019-04-28 14:22:11, 2019-04-28 14:22:12]. Status windows are not applicable to STables for now.

-
+

 `STATE_WINDOW` is used to specify the column on which status windows are defined, for example:
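The statement this refers to (it also appears as diff context earlier in this commit) is:

```sql
SELECT COUNT(*), FIRST(ts), status FROM temp_tb_1 STATE_WINDOW(status);
```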
@@ -46,7 +46,7 @@ SELECT COUNT(*), FIRST(ts) FROM temp_tb_1 SESSION(ts, tol_val);

 The primary key, i.e. the timestamp, is used to determine which session window a row belongs to. If the time interval between two adjacent rows is within the range specified by `tol_val`, they belong to the same session window; otherwise they belong to two different windows. As shown in the figure below, if the session gap limit is specified as 12 seconds, the 6 rows in the figure constitute 2 session windows, [2019-04-28 14:22:10, 2019-04-28 14:22:30] and [2019-04-28 14:23:10, 2019-04-28 14:23:30], because the difference between 2019-04-28 14:22:30 and 2019-04-28 14:23:10 is 40 seconds, which exceeds the 12-second limit.

-
+

 If the gap between two consecutive rows is within `tol_val`, they belong to the same session window; otherwise a new session window starts automatically. Session windows are not supported on STables for now.
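Filling in the `tol_val` placeholder from the diff context above with the 12-second gap used in the figure gives:

```sql
SELECT COUNT(*), FIRST(ts) FROM temp_tb_1 SESSION(ts, 12s);
```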
@@ -4,7 +4,7 @@ title: Connector

 TDengine provides a rich set of APIs (application development interfaces). To facilitate users to develop their applications quickly, TDengine supports connectors for multiple programming languages, including official connectors for C/C++, Java, Python, Go, Node.js, C#, and Rust. These connectors support connecting to TDengine clusters using both native interfaces (taosc) and REST interfaces (not supported in a few languages yet). Community developers have also contributed several unofficial connectors, such as the ADO.NET connector, the Lua connector, and the PHP connector.

-
+

 ## Supported platforms
@@ -11,7 +11,7 @@ import TabItem from '@theme/TabItem';

 `taos-jdbcdriver` is TDengine's official Java language connector, which allows Java developers to develop applications that access the TDengine database. `taos-jdbcdriver` implements the interface of the JDBC driver standard and provides two forms of connectors: one connects to a TDengine instance natively through the TDengine client driver (taosc), supporting data writing, querying, subscription, schemaless writing, and the bind interface; the other connects to a TDengine instance through the REST interface provided by taosAdapter (2.4.0.0 and later). The feature set implemented by REST connections differs slightly from that of native connections.

-
+

 The preceding diagram shows the two ways for a Java app to access TDengine via the connector:
@@ -24,7 +24,7 @@ taosAdapter provides the following features.

 ## taosAdapter architecture diagram

-
+

 ## taosAdapter Deployment Method
@@ -233,33 +233,33 @@ The default username/password is `admin`. Grafana will require a password change

 Point to the **Configurations** -> **Data Sources** menu, and click the **Add data source** button.

-
+

 Search for and select **TDengine**.

-
+

 Configure the TDengine datasource.

-
+

 Save and test. It will report 'TDengine Data source is working' under normal circumstances.

-
+

 ### Importing dashboards

 Point to **+** / **Create** - **import** (or `/dashboard/import` url).

-
+

 Type the dashboard ID `15167` in the **Import via grafana.com** location and **Load**.

-
+

 Once the import is complete, the full page view of TDinsight is shown below.

-
+

 ## TDinsight dashboard details
@@ -269,7 +269,7 @@ Details of the metrics are as follows.

 ### Cluster Status

-
+

 This section contains the current information and status of the cluster; the alert information is also here (from left to right, top to bottom).
@@ -289,7 +289,7 @@ This section contains the current information and status of the cluster, the ale

 ### DNodes Status

-
+

 - **DNodes Status**: simple table view of `show dnodes`.
 - **DNodes Lifetime**: the time elapsed since the dnode was created.
@@ -298,14 +298,14 @@ This section contains the current information and status of the cluster, the ale

 ### MNode Overview

-
+

 1. **MNodes Status**: a simple table view of `show mnodes`.
 2. **MNodes Number**: similar to `DNodes Number`, the change in the number of MNodes over time.

 ### Request

-
+

 1. **Requests Rate(Inserts per Second)**: average number of inserts per second.
 2. **Requests (Selects)**: number of query requests and change rate (count per second).
@@ -313,7 +313,7 @@ This section contains the current information and status of the cluster, the ale

 ### Database

-
+

 Database usage, repeated for each value of the variable `$database`, i.e. multiple rows per database.
@@ -325,7 +325,7 @@ Database usage, repeated for each value of the variable `$database` i.e. multipl

 ### DNode Resource Usage

-
+

 Data node resource usage display, with multiple rows repeated for the variable `$fqdn`, i.e. per data node. Includes:
@@ -346,13 +346,13 @@ Data node resource usage display with repeated multiple rows for the variable `$

 ### Login History

-
+

 Currently, only the number of logins per minute is reported.

 ### Monitoring taosAdapter

-
+

 Monitoring of taosAdapter request statistics and status details is supported. Includes:
@@ -62,15 +62,15 @@ GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS=tdengine-datasource

 Users can log in to the Grafana server (username/password: admin/admin) directly through the URL `http://localhost:3000` and add a datasource through `Configuration -> Data Sources` on the left side, as shown in the following figure.

-
+

 Click `Add data source` to enter the Add data source page, and enter TDengine in the query box to add it, as shown in the following figure.

-
+

 Enter the datasource configuration page, and follow the default prompts to modify the corresponding configuration.

-
+

 - Host: IP address of the server where the components of the TDengine cluster provide REST service (offered by taosd before 2.4 and by taosAdapter since 2.4) and the port number of the TDengine REST service (6041), by default use `http://localhost:6041`.
 - User: TDengine user name.
|
||||||
|
|
||||||
Click `Save & Test` to test. Follows are a success.
|
Click `Save & Test` to test. Follows are a success.
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
### Create Dashboard
|
### Create Dashboard
|
||||||
|
|
||||||
Go back to the main interface to create the Dashboard, click Add Query to enter the panel query page:
|
Go back to the main interface to create the Dashboard, click Add Query to enter the panel query page:
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
As shown above, select the `TDengine` data source in the `Query` and enter the corresponding SQL in the query box below for query.
|
As shown above, select the `TDengine` data source in the `Query` and enter the corresponding SQL in the query box below for query.
|
||||||
|
|
||||||
|
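A sketch of such a panel query (assuming TDengine's default monitoring database `log` and its `dn` table; `$from`, `$to`, and `$interval` are variables supplied by the Grafana plugin):

```sql
SELECT AVG(mem_system) FROM log.dn WHERE ts >= $from AND ts < $to INTERVAL($interval)
```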
@@ -94,7 +94,7 @@ As shown above, select the `TDengine` data source in the `Query` and enter the c

 Following the default prompt, querying the average system memory usage for the specified interval on the server where the current TDengine deployment is located looks as follows.

-
+

 > For more information on how to use Grafana to create the appropriate monitoring interface and for more details on using Grafana, refer to the official Grafana [documentation](https://grafana.com/docs/).
@@ -44,25 +44,25 @@ Since the configuration interface of EMQX differs from version to version, here

 Use your browser to open the URL `http://IP:18083` and log in to the EMQX Dashboard. The initial installation username is `admin` and the password is `public`.

-
+

 ### Creating a Rule

 Select "Rule" in the "Rule Engine" on the left and click the "Create" button:

-
+

 ### Edit SQL fields

-
+

 ### Add "action handler"

-
+

 ### Add "Resource"

-
+

 Select "Data to Web Service" and click the "New Resource" button.
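The SQL field determines which MQTT messages the rule matches; as a minimal sketch, assuming the mock clients publish to the `sensor/data` topic (an assumed topic name):

```sql
SELECT payload FROM "sensor/data"
```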
@@ -70,13 +70,13 @@ Select "Data to Web Service" and click the "New Resource" button.

 Select "Data to Web Service" and fill in the request URL as the address and port of the server running taosAdapter (default is 6041). Leave the other properties at their default values.

-
+

 ### Edit "action"

 Edit the resource configuration to add the key/value pair for Authorization. Please refer to the [TDengine REST API documentation](https://docs.taosdata.com/reference/rest-api/) for details on authorization. Enter the rule engine replacement template in the message body.

-
+

 ## Compose program to mock data
@@ -163,7 +163,7 @@ Edit the resource configuration to add the key/value pairing for Authorization.

 Note: `CLIENT_NUM` in the code can be set to a smaller value at the beginning of the test, to avoid overwhelming the hardware with a large number of concurrent clients.

-
+

 ## Execute tests to simulate sending MQTT data
@@ -172,19 +172,19 @@ npm install mqtt mockjs --save ---registry=https://registry.npm.taobao.org

 node mock.js
 ```

-
+

 ## Verify that EMQX is receiving data

 Refresh the EMQX Dashboard rule engine interface to see how many records were received correctly:

-
+

 ## Verify that data is written to TDengine

 Use the TDengine CLI program to log in and query the appropriate databases and tables to verify that the data is being written to TDengine correctly:

-
+

 Please refer to the [TDengine official documentation](https://docs.taosdata.com/) for more details on how to use TDengine.
 Please refer to the [EMQX official documentation](https://www.emqx.io/docs/en/v4.4/rule/rule-engine.html) for details on how to use EMQX.
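A sketch of that verification in the TDengine CLI (the `test` database and `sensor_data` table names are assumptions; substitute whatever names your rule's message template writes to):

```sql
USE test;
SELECT COUNT(*) FROM sensor_data;
```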
@@ -9,11 +9,11 @@ TDengine Kafka Connector contains two plugins: TDengine Source Connector and TDe

 Kafka Connect is a component of Apache Kafka that enables other systems, such as databases, cloud services, and file systems, to connect to Kafka easily. Data can flow from other software to Kafka, and from Kafka to other systems, via Kafka Connect. Plugins that read data from other software are called Source Connectors, and plugins that write data to other software are called Sink Connectors. Neither Source Connector nor Sink Connector connects directly to the Kafka broker: the Source Connector transfers data to Kafka Connect, and the Sink Connector receives data from Kafka Connect.

-
+

 TDengine Source Connector is used to read data from TDengine in real time and send it to Kafka Connect. Users can use the TDengine Sink Connector to receive data from Kafka Connect and write it to TDengine.

-
+

 ## What is Confluent?
@@ -26,7 +26,7 @@ Confluent adds many extensions to Kafka. include:

 5. GUI for managing and monitoring Kafka - Confluent Control Center

 Some of these extensions are available in the community version of Confluent. Some are only available in the enterprise version.
-
+

 Confluent Enterprise Edition provides the `confluent` command-line tool to manage various components.
@@ -11,7 +11,7 @@ The design of TDengine is based on the assumption that any hardware or software

 The logical structure diagram of TDengine's distributed architecture is as follows:

-
+
 <center> Figure 1: TDengine architecture diagram </center>

 A complete TDengine system runs on one or more physical nodes. Logically, it includes data nodes (dnode), the TDengine client driver (TAOSC), and applications (app). There are one or more data nodes in the system, which form a cluster. The application interacts with the TDengine cluster through TAOSC's API. The following is a brief introduction to each logical unit.
@@ -54,7 +54,7 @@ A complete TDengine system runs on one or more physical nodes. Logically, it inc

 To explain the relationship between vnode, mnode, TAOSC, and application, and their respective roles, the following is an analysis of a typical data writing process.

-
+
 <center> Figure 2: Typical process of TDengine </center>

 1. Application initiates a request to insert data through JDBC, ODBC, or other APIs.
@@ -123,7 +123,7 @@ If a database has N replicas, thus a virtual node group has N virtual nodes, but

 The master vnode uses the following writing process:

-
+
 <center> Figure 3: TDengine Master writing process </center>

 1. Master vnode receives the application data insertion request, verifies it, and moves to the next step;
@@ -137,7 +137,7 @@ Master Vnode uses a writing process as follows:

 For a slave vnode, the write process is as follows:

-
+
 <center> Figure 4: TDengine Slave Writing Process </center>

 1. Slave vnode receives a data insertion request forwarded by the master vnode;
@@ -267,7 +267,7 @@ For the data collected by device D1001, the number of records per hour is counte

 TDengine creates a separate table for each data collection point, but in practical applications it is often necessary to aggregate data from different data collection points. In order to perform aggregation operations efficiently, TDengine introduces the concept of STable. A STable is used to represent a specific type of data collection point. It is a table set containing multiple tables. The schema of each table in the set is the same, but each table has its own static tags. There can be multiple tags, which can be added, deleted, and modified at any time. Applications can aggregate or statistically operate on all or a subset of tables under a STable by specifying tag filters, thus greatly simplifying the development of applications. The process is shown in the following figure:

-
+
 <center> Figure 5: Diagram of multi-table aggregation query </center>

 1. Application sends a query condition to system;
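A sketch of step 1's query condition in SQL form, assuming the `meters` STable with a `groupId` tag defined elsewhere in this commit:

```sql
-- Hourly average current over every table under the STable whose groupId tag is 2
SELECT AVG(current) FROM meters WHERE groupId = 2 INTERVAL(1h);
```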
@@ -16,7 +16,7 @@ Current mainstream IT DevOps system usually include a data collection module, a

 This article introduces how to quickly build a TDengine + Telegraf + Grafana based IT DevOps visualization system without writing even a single line of code and by simply modifying a few lines of configuration files. The architecture is as follows.

-
+

 ## Installation steps
@@ -73,9 +73,9 @@ sudo systemctl start telegraf

 Log in to the Grafana interface using a web browser at `IP:3000`, with the system's initial username and password being `admin/admin`.
 Click on the gear icon on the left and select `Plugins`; you should find the TDengine data source plugin icon.
-Click on the plus icon on the left and select `Import` to get the data from `https://github.com/taosdata/grafanaplugin/blob/master/examples/telegraf/grafana/dashboards/telegraf-dashboard- v0.1.0.json`, download the dashboard JSON file and import it. You will then see the dashboard in the following screen.
+Click on the plus icon on the left and select `Import` to get the data from `https://github.com/taosdata/grafanaplugin/blob/master/examples/telegraf/grafana/dashboards/telegraf-dashboard-v0.1.0.json`, download the dashboard JSON file and import it. You will then see the dashboard in the following screen.

-
+

 ## Wrap-up
@@ -17,7 +17,7 @@ The new version of TDengine supports multiple data protocols and can accept data

 This article introduces how to quickly build an IT DevOps visualization system based on TDengine + collectd / StatsD + Grafana without writing even a single line of code but by simply modifying a few lines of configuration files. The architecture is shown in the following figure.

-
+

 ## Installation Steps
@@ -83,19 +83,19 @@ Click on the gear icon on the left and select `Plugins`, you should find the TDe

 Download the dashboard json from `https://github.com/taosdata/grafanaplugin/blob/master/examples/collectd/grafana/dashboards/collect-metrics-with-tdengine-v0.1.0.json`, click the plus icon on the left and select Import, and follow the instructions to import the JSON file. After that, the dashboard can be seen in the following screen.

-
+

 #### Import the collectd dashboard

 Download the dashboard json file from `https://github.com/taosdata/grafanaplugin/blob/master/examples/collectd/grafana/dashboards/collect-metrics-with-tdengine-v0.1.0.json`, click the plus icon on the left side and select `Import`, and follow the interface prompts to select the JSON file to import. After that, the dashboard can be seen in the following interface.

-
+

 #### Importing the StatsD dashboard

 Download the dashboard json from `https://github.com/taosdata/grafanaplugin/blob/master/examples/statsd/dashboards/statsd-with-tdengine-v0.1.0.json`. Click on the plus icon on the left and select `Import`, and follow the interface prompts to import the JSON file. You will then see the dashboard in the following screen.
-
+

 ## Wrap-up
@@ -32,7 +32,7 @@ We will explain how to migrate OpenTSDB applications to TDengine quickly, secure
 The following figure (Figure 1) shows the system's overall architecture for a typical DevOps application scenario.
 
 **Figure 1. Typical architecture in a DevOps scenario**
 
 
 In this application scenario, there are Agent tools deployed in the application environment to collect machine metrics, network metrics, and application metrics; data collectors that aggregate the information collected by the agents; systems for persistent data storage and management; and tools for visualizing the monitoring data (e.g., Grafana).
 
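Since the migration amounts to repointing the collectors' write endpoint, it may help to see what a single write against TDengine's OpenTSDB-compatible interface looks like. This is a hedged sketch, not part of the commit: the taosAdapter port (6041), the `/opentsdb/v1/put/json/<db>` path, and the `root`/`taosdata` credentials are assumed defaults; verify them against your deployment.

```python
# Hypothetical one-datapoint write through taosAdapter's OpenTSDB-style JSON API.
import base64
import json
import urllib.request

datapoint = {
    "metric": "meters.current",
    "timestamp": 1648432611249,
    "value": 10.3,
    "tags": {"location": "California.SanFrancisco", "groupid": 2},
}

auth = base64.b64encode(b"root:taosdata").decode()  # assumed default credentials
req = urllib.request.Request(
    "http://localhost:6041/opentsdb/v1/put/json/test",  # assumed endpoint and db
    data=json.dumps([datapoint]).encode(),
    headers={"Authorization": f"Basic {auth}", "Content-Type": "application/json"},
    method="POST",
)
with urllib.request.urlopen(req) as resp:
    print(resp.status)
```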
@@ -75,7 +75,7 @@ After writing the data to TDengine properly, you can adapt Grafana to visualize
 TDengine provides two sets of Dashboard templates by default, and users only need to import the templates from the Grafana directory into Grafana to activate their use.
 
 **Figure 2. Importing Grafana Templates**
 
 
 After the above steps, you have completed the migration to replace OpenTSDB with TDengine. You can see that the whole process is straightforward: there is no need to write any code, and only some configuration files need to be adjusted to complete the migration.
 
@@ -88,7 +88,7 @@ In most DevOps scenarios, if you have a small OpenTSDB cluster (3 or fewer nodes
 If your application is particularly complex, or the application domain is not a DevOps scenario, you can continue reading subsequent chapters for a more comprehensive and in-depth look at the advanced topics of migrating an OpenTSDB application to TDengine.
 
 **Figure 3. System architecture after migration**
 
 
 ## Migration evaluation and strategy for other scenarios
 
@@ -36,10 +36,10 @@ int main() {
 executeSQL(taos, "CREATE DATABASE power");
 executeSQL(taos, "USE power");
 executeSQL(taos, "CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)");
-executeSQL(taos, "INSERT INTO d1001 USING meters TAGS(Beijing.Chaoyang, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)"
-"d1002 USING meters TAGS(Beijing.Chaoyang, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)"
-"d1003 USING meters TAGS(Beijing.Haidian, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)"
-"d1004 USING meters TAGS(Beijing.Haidian, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)");
+executeSQL(taos, "INSERT INTO d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)"
+"d1002 USING meters TAGS(California.SanFrancisco, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)"
+"d1003 USING meters TAGS(California.LosAngeles, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)"
+"d1004 USING meters TAGS(California.LosAngeles, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)");
 taos_close(taos);
 taos_cleanup();
 }
@@ -29,11 +29,11 @@ int main() {
 executeSQL(taos, "USE test");
 char *line =
 "[{\"metric\": \"meters.current\", \"timestamp\": 1648432611249, \"value\": 10.3, \"tags\": {\"location\": "
-"\"Beijing.Chaoyang\", \"groupid\": 2}},{\"metric\": \"meters.voltage\", \"timestamp\": 1648432611249, "
-"\"value\": 219, \"tags\": {\"location\": \"Beijing.Haidian\", \"groupid\": 1}},{\"metric\": \"meters.current\", "
-"\"timestamp\": 1648432611250, \"value\": 12.6, \"tags\": {\"location\": \"Beijing.Chaoyang\", \"groupid\": "
+"\"California.SanFrancisco\", \"groupid\": 2}},{\"metric\": \"meters.voltage\", \"timestamp\": 1648432611249, "
+"\"value\": 219, \"tags\": {\"location\": \"California.LosAngeles\", \"groupid\": 1}},{\"metric\": \"meters.current\", "
+"\"timestamp\": 1648432611250, \"value\": 12.6, \"tags\": {\"location\": \"California.SanFrancisco\", \"groupid\": "
 "2}},{\"metric\": \"meters.voltage\", \"timestamp\": 1648432611250, \"value\": 221, \"tags\": {\"location\": "
-"\"Beijing.Haidian\", \"groupid\": 1}}]";
+"\"California.LosAngeles\", \"groupid\": 1}}]";
 
 char *lines[] = {line};
 TAOS_RES *res = taos_schemaless_insert(taos, lines, 1, TSDB_SML_JSON_PROTOCOL, TSDB_SML_TIMESTAMP_NOT_CONFIGURED);
@@ -27,10 +27,10 @@ int main() {
 executeSQL(taos, "DROP DATABASE IF EXISTS test");
 executeSQL(taos, "CREATE DATABASE test");
 executeSQL(taos, "USE test");
-char *lines[] = {"meters,location=Beijing.Haidian,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249",
-"meters,location=Beijing.Haidian,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250",
-"meters,location=Beijing.Haidian,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249",
-"meters,location=Beijing.Haidian,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250"};
+char *lines[] = {"meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249",
+"meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250",
+"meters,location=California.LosAngeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249",
+"meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250"};
 TAOS_RES *res = taos_schemaless_insert(taos, lines, 4, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_MILLI_SECONDS);
 if (taos_errno(res) != 0) {
 printf("failed to insert schema-less data, reason: %s\n", taos_errstr(res));
@@ -52,7 +52,7 @@ void insertData(TAOS *taos) {
 checkErrorCode(stmt, code, "failed to execute taos_stmt_prepare");
 // bind table name and tags
 TAOS_BIND tags[2];
-char *location = "Beijing.Chaoyang";
+char *location = "California.SanFrancisco";
 int groupId = 2;
 tags[0].buffer_type = TSDB_DATA_TYPE_BINARY;
 tags[0].buffer_length = strlen(location);
@@ -139,5 +139,5 @@ int main() {
 
 // output:
 // ts current voltage phase location groupid
-// 1648432611249 10.300000 219 0.310000 Beijing.Chaoyang 2
-// 1648432611749 12.600000 218 0.330000 Beijing.Chaoyang 2
+// 1648432611249 10.300000 219 0.310000 California.SanFrancisco 2
+// 1648432611749 12.600000 218 0.330000 California.SanFrancisco 2
@@ -59,7 +59,7 @@ void insertData(TAOS *taos) {
 checkErrorCode(stmt, code, "failed to execute taos_stmt_prepare");
 // bind table name and tags
 TAOS_BIND tags[2];
-char* location = "Beijing.Chaoyang";
+char* location = "California.SanFrancisco";
 int groupId = 2;
 tags[0].buffer_type = TSDB_DATA_TYPE_BINARY;
 tags[0].buffer_length = strlen(location);
@@ -28,14 +28,14 @@ int main() {
 executeSQL(taos, "CREATE DATABASE test");
 executeSQL(taos, "USE test");
 char *lines[] = {
-"meters.current 1648432611249 10.3 location=Beijing.Chaoyang groupid=2",
-"meters.current 1648432611250 12.6 location=Beijing.Chaoyang groupid=2",
-"meters.current 1648432611249 10.8 location=Beijing.Haidian groupid=3",
-"meters.current 1648432611250 11.3 location=Beijing.Haidian groupid=3",
-"meters.voltage 1648432611249 219 location=Beijing.Chaoyang groupid=2",
-"meters.voltage 1648432611250 218 location=Beijing.Chaoyang groupid=2",
-"meters.voltage 1648432611249 221 location=Beijing.Haidian groupid=3",
-"meters.voltage 1648432611250 217 location=Beijing.Haidian groupid=3",
+"meters.current 1648432611249 10.3 location=California.SanFrancisco groupid=2",
+"meters.current 1648432611250 12.6 location=California.SanFrancisco groupid=2",
+"meters.current 1648432611249 10.8 location=California.LosAngeles groupid=3",
+"meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3",
+"meters.voltage 1648432611249 219 location=California.SanFrancisco groupid=2",
+"meters.voltage 1648432611250 218 location=California.SanFrancisco groupid=2",
+"meters.voltage 1648432611249 221 location=California.LosAngeles groupid=3",
+"meters.voltage 1648432611250 217 location=California.LosAngeles groupid=3",
 };
 TAOS_RES *res = taos_schemaless_insert(taos, lines, 8, TSDB_SML_TELNET_PROTOCOL, TSDB_SML_TIMESTAMP_NOT_CONFIGURED);
 if (taos_errno(res) != 0) {
@@ -9,10 +9,10 @@ namespace TDengineExample
 IntPtr conn = GetConnection();
 PrepareDatabase(conn);
 string[] lines = {
-"meters,location=Beijing.Haidian,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249",
-"meters,location=Beijing.Haidian,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250",
-"meters,location=Beijing.Haidian,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249",
-"meters,location=Beijing.Haidian,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250"
+"meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249",
+"meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250",
+"meters,location=California.LosAngeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249",
+"meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250"
 };
 IntPtr res = TDengine.SchemalessInsert(conn, lines, lines.Length, (int)TDengineSchemalessProtocol.TSDB_SML_LINE_PROTOCOL, (int)TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_MILLI_SECONDS);
 if (TDengine.ErrorNo(res) != 0)
@@ -8,10 +8,10 @@ namespace TDengineExample
 {
 IntPtr conn = GetConnection();
 PrepareDatabase(conn);
-string[] lines = { "[{\"metric\": \"meters.current\", \"timestamp\": 1648432611249, \"value\": 10.3, \"tags\": {\"location\": \"Beijing.Chaoyang\", \"groupid\": 2}}," +
-" {\"metric\": \"meters.voltage\", \"timestamp\": 1648432611249, \"value\": 219, \"tags\": {\"location\": \"Beijing.Haidian\", \"groupid\": 1}}, " +
-"{\"metric\": \"meters.current\", \"timestamp\": 1648432611250, \"value\": 12.6, \"tags\": {\"location\": \"Beijing.Chaoyang\", \"groupid\": 2}}," +
-" {\"metric\": \"meters.voltage\", \"timestamp\": 1648432611250, \"value\": 221, \"tags\": {\"location\": \"Beijing.Haidian\", \"groupid\": 1}}]"
+string[] lines = { "[{\"metric\": \"meters.current\", \"timestamp\": 1648432611249, \"value\": 10.3, \"tags\": {\"location\": \"California.SanFrancisco\", \"groupid\": 2}}," +
+" {\"metric\": \"meters.voltage\", \"timestamp\": 1648432611249, \"value\": 219, \"tags\": {\"location\": \"California.LosAngeles\", \"groupid\": 1}}, " +
+"{\"metric\": \"meters.current\", \"timestamp\": 1648432611250, \"value\": 12.6, \"tags\": {\"location\": \"California.SanFrancisco\", \"groupid\": 2}}," +
+" {\"metric\": \"meters.voltage\", \"timestamp\": 1648432611250, \"value\": 221, \"tags\": {\"location\": \"California.LosAngeles\", \"groupid\": 1}}]"
 };
 
 IntPtr res = TDengine.SchemalessInsert(conn, lines, 1, (int)TDengineSchemalessProtocol.TSDB_SML_JSON_PROTOCOL, (int)TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_NOT_CONFIGURED);
@@ -9,14 +9,14 @@ namespace TDengineExample
 IntPtr conn = GetConnection();
 PrepareDatabase(conn);
 string[] lines = {
-"meters.current 1648432611249 10.3 location=Beijing.Chaoyang groupid=2",
-"meters.current 1648432611250 12.6 location=Beijing.Chaoyang groupid=2",
-"meters.current 1648432611249 10.8 location=Beijing.Haidian groupid=3",
-"meters.current 1648432611250 11.3 location=Beijing.Haidian groupid=3",
-"meters.voltage 1648432611249 219 location=Beijing.Chaoyang groupid=2",
-"meters.voltage 1648432611250 218 location=Beijing.Chaoyang groupid=2",
-"meters.voltage 1648432611249 221 location=Beijing.Haidian groupid=3",
-"meters.voltage 1648432611250 217 location=Beijing.Haidian groupid=3",
+"meters.current 1648432611249 10.3 location=California.SanFrancisco groupid=2",
+"meters.current 1648432611250 12.6 location=California.SanFrancisco groupid=2",
+"meters.current 1648432611249 10.8 location=California.LosAngeles groupid=3",
+"meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3",
+"meters.voltage 1648432611249 219 location=California.SanFrancisco groupid=2",
+"meters.voltage 1648432611250 218 location=California.SanFrancisco groupid=2",
+"meters.voltage 1648432611249 221 location=California.LosAngeles groupid=3",
+"meters.voltage 1648432611250 217 location=California.LosAngeles groupid=3",
 };
 IntPtr res = TDengine.SchemalessInsert(conn, lines, lines.Length, (int)TDengineSchemalessProtocol.TSDB_SML_TELNET_PROTOCOL, (int)TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_NOT_CONFIGURED);
 if (TDengine.ErrorNo(res) != 0)
@@ -158,5 +158,5 @@ namespace TDengineExample
 // Connect to TDengine success
 // fieldCount=6
 // ts current voltage phase location groupid
-// 1648432611249 10.3 219 0.31 Beijing.Chaoyang 2
-// 1648432611749 12.6 218 0.33 Beijing.Chaoyang 2
+// 1648432611249 10.3 219 0.31 California.SanFrancisco 2
+// 1648432611749 12.6 218 0.33 California.SanFrancisco 2
@@ -15,10 +15,10 @@ namespace TDengineExample
 CheckRes(conn, res, "failed to change database");
 res = TDengine.Query(conn, "CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)");
 CheckRes(conn, res, "failed to create stable");
-var sql = "INSERT INTO d1001 USING meters TAGS(Beijing.Chaoyang, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) " +
-"d1002 USING power.meters TAGS(Beijing.Chaoyang, 3) VALUES('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) " +
-"d1003 USING power.meters TAGS(Beijing.Haidian, 2) VALUES('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000)('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) " +
-"d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000)('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)";
+var sql = "INSERT INTO d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) " +
+"d1002 USING power.meters TAGS(California.SanFrancisco, 3) VALUES('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) " +
+"d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000)('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) " +
+"d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000)('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)";
 res = TDengine.Query(conn, sql);
 CheckRes(conn, res, "failed to insert data");
 int affectedRows = TDengine.AffectRows(res);
@@ -21,7 +21,7 @@ namespace TDengineExample
 CheckStmtRes(res, "failed to prepare stmt");
 
 // 2. bind table name and tags
-TAOS_BIND[] tags = new TAOS_BIND[2] { TaosBind.BindBinary("Beijing.Chaoyang"), TaosBind.BindInt(2) };
+TAOS_BIND[] tags = new TAOS_BIND[2] { TaosBind.BindBinary("California.SanFrancisco"), TaosBind.BindInt(2) };
 res = TDengine.StmtSetTbnameTags(stmt, "d1001", tags);
 CheckStmtRes(res, "failed to bind table name and tags");
 
@@ -25,10 +25,10 @@ func main() {
 defer conn.Close()
 prepareDatabase(conn)
 
-payload := `[{"metric": "meters.current", "timestamp": 1648432611249, "value": 10.3, "tags": {"location": "Beijing.Chaoyang", "groupid": 2}},
-{"metric": "meters.voltage", "timestamp": 1648432611249, "value": 219, "tags": {"location": "Beijing.Haidian", "groupid": 1}},
-{"metric": "meters.current", "timestamp": 1648432611250, "value": 12.6, "tags": {"location": "Beijing.Chaoyang", "groupid": 2}},
-{"metric": "meters.voltage", "timestamp": 1648432611250, "value": 221, "tags": {"location": "Beijing.Haidian", "groupid": 1}}]`
+payload := `[{"metric": "meters.current", "timestamp": 1648432611249, "value": 10.3, "tags": {"location": "California.SanFrancisco", "groupid": 2}},
+{"metric": "meters.voltage", "timestamp": 1648432611249, "value": 219, "tags": {"location": "California.LosAngeles", "groupid": 1}},
+{"metric": "meters.current", "timestamp": 1648432611250, "value": 12.6, "tags": {"location": "California.SanFrancisco", "groupid": 2}},
+{"metric": "meters.voltage", "timestamp": 1648432611250, "value": 221, "tags": {"location": "California.LosAngeles", "groupid": 1}}]`
 
 err = conn.OpenTSDBInsertJsonPayload(payload)
 if err != nil {
@@ -25,10 +25,10 @@ func main() {
 defer conn.Close()
 prepareDatabase(conn)
 var lines = []string{
-"meters,location=Beijing.Haidian,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249",
-"meters,location=Beijing.Haidian,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250",
-"meters,location=Beijing.Haidian,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249",
-"meters,location=Beijing.Haidian,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250",
+"meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249",
+"meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250",
+"meters,location=California.LosAngeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249",
+"meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250",
 }
 
 err = conn.InfluxDBInsertLines(lines, "ms")
@@ -19,10 +19,10 @@ func createStable(taos *sql.DB) {
 }
 
 func insertData(taos *sql.DB) {
-sql := `INSERT INTO power.d1001 USING power.meters TAGS(Beijing.Chaoyang, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)
-power.d1002 USING power.meters TAGS(Beijing.Chaoyang, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)
-power.d1003 USING power.meters TAGS(Beijing.Haidian, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)
-power.d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)`
+sql := `INSERT INTO power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)
+power.d1002 USING power.meters TAGS(California.SanFrancisco, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)
+power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)
+power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)`
 result, err := taos.Exec(sql)
 if err != nil {
 fmt.Println("failed to insert, err:", err)
@@ -37,7 +37,7 @@ func main() {
 checkErr(err, "failed to create prepare statement")
 
 // bind table name and tags
-tagParams := param.NewParam(2).AddBinary([]byte("Beijing.Chaoyang")).AddInt(2)
+tagParams := param.NewParam(2).AddBinary([]byte("California.SanFrancisco")).AddInt(2)
 err = stmt.SetTableNameWithTags("d1001", tagParams)
 checkErr(err, "failed to execute SetTableNameWithTags")
 
@@ -25,14 +25,14 @@ func main() {
 defer conn.Close()
 prepareDatabase(conn)
 var lines = []string{
-"meters.current 1648432611249 10.3 location=Beijing.Chaoyang groupid=2",
-"meters.current 1648432611250 12.6 location=Beijing.Chaoyang groupid=2",
-"meters.current 1648432611249 10.8 location=Beijing.Haidian groupid=3",
-"meters.current 1648432611250 11.3 location=Beijing.Haidian groupid=3",
-"meters.voltage 1648432611249 219 location=Beijing.Chaoyang groupid=2",
-"meters.voltage 1648432611250 218 location=Beijing.Chaoyang groupid=2",
-"meters.voltage 1648432611249 221 location=Beijing.Haidian groupid=3",
-"meters.voltage 1648432611250 217 location=Beijing.Haidian groupid=3",
+"meters.current 1648432611249 10.3 location=California.SanFrancisco groupid=2",
+"meters.current 1648432611250 12.6 location=California.SanFrancisco groupid=2",
+"meters.current 1648432611249 10.8 location=California.LosAngeles groupid=3",
+"meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3",
+"meters.voltage 1648432611249 219 location=California.SanFrancisco groupid=2",
+"meters.voltage 1648432611250 218 location=California.SanFrancisco groupid=2",
+"meters.voltage 1648432611249 221 location=California.LosAngeles groupid=3",
+"meters.voltage 1648432611250 217 location=California.LosAngeles groupid=3",
 }
 
 err = conn.OpenTSDBInsertTelnetLines(lines)
@@ -23,10 +23,10 @@ public class JSONProtocolExample {
 }
 
 private static String getJSONData() {
-return "[{\"metric\": \"meters.current\", \"timestamp\": 1648432611249, \"value\": 10.3, \"tags\": {\"location\": \"Beijing.Chaoyang\", \"groupid\": 2}}," +
-" {\"metric\": \"meters.voltage\", \"timestamp\": 1648432611249, \"value\": 219, \"tags\": {\"location\": \"Beijing.Haidian\", \"groupid\": 1}}, " +
-"{\"metric\": \"meters.current\", \"timestamp\": 1648432611250, \"value\": 12.6, \"tags\": {\"location\": \"Beijing.Chaoyang\", \"groupid\": 2}}," +
-" {\"metric\": \"meters.voltage\", \"timestamp\": 1648432611250, \"value\": 221, \"tags\": {\"location\": \"Beijing.Haidian\", \"groupid\": 1}}]";
+return "[{\"metric\": \"meters.current\", \"timestamp\": 1648432611249, \"value\": 10.3, \"tags\": {\"location\": \"California.SanFrancisco\", \"groupid\": 2}}," +
+" {\"metric\": \"meters.voltage\", \"timestamp\": 1648432611249, \"value\": 219, \"tags\": {\"location\": \"California.LosAngeles\", \"groupid\": 1}}, " +
+"{\"metric\": \"meters.current\", \"timestamp\": 1648432611250, \"value\": 12.6, \"tags\": {\"location\": \"California.SanFrancisco\", \"groupid\": 2}}," +
+" {\"metric\": \"meters.voltage\", \"timestamp\": 1648432611250, \"value\": 221, \"tags\": {\"location\": \"California.LosAngeles\", \"groupid\": 1}}]";
 }
 
 public static void main(String[] args) throws SQLException {
@@ -12,11 +12,11 @@ import java.sql.Statement;
 public class LineProtocolExample {
 // format: measurement,tag_set field_set timestamp
 private static String[] lines = {
-"meters,location=Beijing.Haidian,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249000", // micro
+"meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249000", // micro
 // seconds
-"meters,location=Beijing.Haidian,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611249500",
-"meters,location=Beijing.Haidian,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249300",
-"meters,location=Beijing.Haidian,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611249800",
+"meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611249500",
+"meters,location=California.LosAngeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249300",
+"meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611249800",
 };
 
 private static Connection getConnection() throws SQLException {
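The `// format: measurement,tag_set field_set timestamp` comment in the hunk above is the whole contract of the InfluxDB line protocol these examples emit. As a quick illustration of how such a line decomposes (not part of the commit; a toy sketch that ignores escaping rules and assumes well-formed input):

```python
# Toy decomposition of one InfluxDB line-protocol record; illustration only.
def parse_line(line: str):
    head, fields_part, ts = line.rsplit(" ", 2)      # split off field_set and timestamp
    measurement, *tag_pairs = head.split(",")        # measurement, then tag_set
    tags = dict(p.split("=", 1) for p in tag_pairs)
    fields = dict(p.split("=", 1) for p in fields_part.split(","))
    return measurement, tags, fields, int(ts)

print(parse_line(
    "meters,location=California.LosAngeles,groupid=2 "
    "current=11.8,voltage=221,phase=0.28 1648432611249000"
))
```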
@@ -16,28 +16,28 @@ public class RestInsertExample {
 
 private static List<String> getRawData() {
 return Arrays.asList(
-"d1001,2018-10-03 14:38:05.000,10.30000,219,0.31000,Beijing.Chaoyang,2",
-"d1001,2018-10-03 14:38:15.000,12.60000,218,0.33000,Beijing.Chaoyang,2",
-"d1001,2018-10-03 14:38:16.800,12.30000,221,0.31000,Beijing.Chaoyang,2",
-"d1002,2018-10-03 14:38:16.650,10.30000,218,0.25000,Beijing.Chaoyang,3",
-"d1003,2018-10-03 14:38:05.500,11.80000,221,0.28000,Beijing.Haidian,2",
-"d1003,2018-10-03 14:38:16.600,13.40000,223,0.29000,Beijing.Haidian,2",
-"d1004,2018-10-03 14:38:05.000,10.80000,223,0.29000,Beijing.Haidian,3",
-"d1004,2018-10-03 14:38:06.500,11.50000,221,0.35000,Beijing.Haidian,3"
+"d1001,2018-10-03 14:38:05.000,10.30000,219,0.31000,California.SanFrancisco,2",
+"d1001,2018-10-03 14:38:15.000,12.60000,218,0.33000,California.SanFrancisco,2",
+"d1001,2018-10-03 14:38:16.800,12.30000,221,0.31000,California.SanFrancisco,2",
+"d1002,2018-10-03 14:38:16.650,10.30000,218,0.25000,California.SanFrancisco,3",
+"d1003,2018-10-03 14:38:05.500,11.80000,221,0.28000,California.LosAngeles,2",
+"d1003,2018-10-03 14:38:16.600,13.40000,223,0.29000,California.LosAngeles,2",
+"d1004,2018-10-03 14:38:05.000,10.80000,223,0.29000,California.LosAngeles,3",
+"d1004,2018-10-03 14:38:06.500,11.50000,221,0.35000,California.LosAngeles,3"
 );
 }
 
 
 /**
 * The generated SQL is:
-* INSERT INTO power.d1001 USING power.meters TAGS(Beijing.Chaoyang, 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000)
-* power.d1001 USING power.meters TAGS(Beijing.Chaoyang, 2) VALUES('2018-10-03 14:38:15.000',12.60000,218,0.33000)
-* power.d1001 USING power.meters TAGS(Beijing.Chaoyang, 2) VALUES('2018-10-03 14:38:16.800',12.30000,221,0.31000)
-* power.d1002 USING power.meters TAGS(Beijing.Chaoyang, 3) VALUES('2018-10-03 14:38:16.650',10.30000,218,0.25000)
-* power.d1003 USING power.meters TAGS(Beijing.Haidian, 2) VALUES('2018-10-03 14:38:05.500',11.80000,221,0.28000)
-* power.d1003 USING power.meters TAGS(Beijing.Haidian, 2) VALUES('2018-10-03 14:38:16.600',13.40000,223,0.29000)
-* power.d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES('2018-10-03 14:38:05.000',10.80000,223,0.29000)
-* power.d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES('2018-10-03 14:38:06.500',11.50000,221,0.35000)
+* INSERT INTO power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000)
+* power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:15.000',12.60000,218,0.33000)
+* power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:16.800',12.30000,221,0.31000)
+* power.d1002 USING power.meters TAGS(California.SanFrancisco, 3) VALUES('2018-10-03 14:38:16.650',10.30000,218,0.25000)
+* power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES('2018-10-03 14:38:05.500',11.80000,221,0.28000)
+* power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES('2018-10-03 14:38:16.600',13.40000,223,0.29000)
+* power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 14:38:05.000',10.80000,223,0.29000)
+* power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 14:38:06.500',11.50000,221,0.35000)
 */
 private static String getSQL() {
 StringBuilder sb = new StringBuilder("INSERT INTO ");
@@ -51,5 +51,5 @@ public class RestQueryExample {
 
 // possible output:
 // avg(voltage) location
-// 222.0 Beijing.Haidian
-// 219.0 Beijing.Chaoyang
+// 222.0 California.LosAngeles
+// 219.0 California.SanFrancisco
@@ -30,14 +30,14 @@ public class StmtInsertExample {
 
 private static List<String> getRawData() {
 return Arrays.asList(
-"d1001,2018-10-03 14:38:05.000,10.30000,219,0.31000,Beijing.Chaoyang,2",
-"d1001,2018-10-03 14:38:15.000,12.60000,218,0.33000,Beijing.Chaoyang,2",
-"d1001,2018-10-03 14:38:16.800,12.30000,221,0.31000,Beijing.Chaoyang,2",
-"d1002,2018-10-03 14:38:16.650,10.30000,218,0.25000,Beijing.Chaoyang,3",
-"d1003,2018-10-03 14:38:05.500,11.80000,221,0.28000,Beijing.Haidian,2",
-"d1003,2018-10-03 14:38:16.600,13.40000,223,0.29000,Beijing.Haidian,2",
-"d1004,2018-10-03 14:38:05.000,10.80000,223,0.29000,Beijing.Haidian,3",
-"d1004,2018-10-03 14:38:06.500,11.50000,221,0.35000,Beijing.Haidian,3"
+"d1001,2018-10-03 14:38:05.000,10.30000,219,0.31000,California.SanFrancisco,2",
+"d1001,2018-10-03 14:38:15.000,12.60000,218,0.33000,California.SanFrancisco,2",
+"d1001,2018-10-03 14:38:16.800,12.30000,221,0.31000,California.SanFrancisco,2",
+"d1002,2018-10-03 14:38:16.650,10.30000,218,0.25000,California.SanFrancisco,3",
+"d1003,2018-10-03 14:38:05.500,11.80000,221,0.28000,California.LosAngeles,2",
+"d1003,2018-10-03 14:38:16.600,13.40000,223,0.29000,California.LosAngeles,2",
+"d1004,2018-10-03 14:38:05.000,10.80000,223,0.29000,California.LosAngeles,3",
+"d1004,2018-10-03 14:38:06.500,11.50000,221,0.35000,California.LosAngeles,3"
 );
 }
 
@@ -11,14 +11,14 @@ import java.sql.Statement;
 
 public class TelnetLineProtocolExample {
 // format: <metric> <timestamp> <value> <tagk_1>=<tagv_1>[ <tagk_n>=<tagv_n>]
-private static String[] lines = { "meters.current 1648432611249 10.3 location=Beijing.Chaoyang groupid=2",
-"meters.current 1648432611250 12.6 location=Beijing.Chaoyang groupid=2",
-"meters.current 1648432611249 10.8 location=Beijing.Haidian groupid=3",
-"meters.current 1648432611250 11.3 location=Beijing.Haidian groupid=3",
-"meters.voltage 1648432611249 219 location=Beijing.Chaoyang groupid=2",
-"meters.voltage 1648432611250 218 location=Beijing.Chaoyang groupid=2",
-"meters.voltage 1648432611249 221 location=Beijing.Haidian groupid=3",
-"meters.voltage 1648432611250 217 location=Beijing.Haidian groupid=3",
+private static String[] lines = { "meters.current 1648432611249 10.3 location=California.SanFrancisco groupid=2",
+"meters.current 1648432611250 12.6 location=California.SanFrancisco groupid=2",
+"meters.current 1648432611249 10.8 location=California.LosAngeles groupid=3",
+"meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3",
+"meters.voltage 1648432611249 219 location=California.SanFrancisco groupid=2",
+"meters.voltage 1648432611250 218 location=California.SanFrancisco groupid=2",
+"meters.voltage 1648432611249 221 location=California.LosAngeles groupid=3",
+"meters.voltage 1648432611250 217 location=California.LosAngeles groupid=3",
 };
 
 private static Connection getConnection() throws SQLException {
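Likewise, the `// format: <metric> <timestamp> <value> <tagk_1>=<tagv_1>[ <tagk_n>=<tagv_n>]` comment above fully describes the OpenTSDB telnet lines used here. A toy decomposition sketch (again illustrative only, not part of the commit, assuming well-formed input):

```python
# Toy decomposition of one OpenTSDB telnet record; illustration only.
def parse_telnet(line: str):
    metric, ts, value, *tag_pairs = line.split(" ")
    tags = dict(p.split("=", 1) for p in tag_pairs)
    return metric, int(ts), float(value), tags

print(parse_telnet(
    "meters.current 1648432611249 10.3 location=California.SanFrancisco groupid=2"
))
```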
@@ -23,16 +23,16 @@ public class TestAll {
 String jdbcUrl = "jdbc:TAOS://localhost:6030?user=root&password=taosdata";
 try (Connection conn = DriverManager.getConnection(jdbcUrl)) {
 try (Statement stmt = conn.createStatement()) {
-String sql = "INSERT INTO power.d1001 USING power.meters TAGS(Beijing.Chaoyang, 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000)\n" +
-" power.d1001 USING power.meters TAGS(Beijing.Chaoyang, 2) VALUES('2018-10-03 15:38:15.000',12.60000,218,0.33000)\n" +
-" power.d1001 USING power.meters TAGS(Beijing.Chaoyang, 2) VALUES('2018-10-03 15:38:16.800',12.30000,221,0.31000)\n" +
-" power.d1002 USING power.meters TAGS(Beijing.Chaoyang, 3) VALUES('2018-10-03 15:38:16.650',10.30000,218,0.25000)\n" +
-" power.d1003 USING power.meters TAGS(Beijing.Haidian, 2) VALUES('2018-10-03 15:38:05.500',11.80000,221,0.28000)\n" +
-" power.d1003 USING power.meters TAGS(Beijing.Haidian, 2) VALUES('2018-10-03 15:38:16.600',13.40000,223,0.29000)\n" +
-" power.d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES('2018-10-03 15:38:05.000',10.80000,223,0.29000)\n" +
-" power.d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES('2018-10-03 15:38:06.000',10.80000,223,0.29000)\n" +
-" power.d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES('2018-10-03 15:38:07.000',10.80000,223,0.29000)\n" +
-" power.d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES('2018-10-03 15:38:08.500',11.50000,221,0.35000)";
+String sql = "INSERT INTO power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000)\n" +
+" power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 15:38:15.000',12.60000,218,0.33000)\n" +
+" power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 15:38:16.800',12.30000,221,0.31000)\n" +
+" power.d1002 USING power.meters TAGS(California.SanFrancisco, 3) VALUES('2018-10-03 15:38:16.650',10.30000,218,0.25000)\n" +
+" power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES('2018-10-03 15:38:05.500',11.80000,221,0.28000)\n" +
+" power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES('2018-10-03 15:38:16.600',13.40000,223,0.29000)\n" +
+" power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 15:38:05.000',10.80000,223,0.29000)\n" +
+" power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 15:38:06.000',10.80000,223,0.29000)\n" +
+" power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 15:38:07.000',10.80000,223,0.29000)\n" +
+" power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 15:38:08.500',11.50000,221,0.35000)";
 
 stmt.execute(sql);
 }
@@ -13,10 +13,10 @@ function createDatabase() {
 
 function insertData() {
 const lines = [
-"meters,location=Beijing.Haidian,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249",
-"meters,location=Beijing.Haidian,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250",
-"meters,location=Beijing.Haidian,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249",
-"meters,location=Beijing.Haidian,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250",
+"meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249",
+"meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250",
+"meters,location=California.LosAngeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249",
+"meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250",
 ];
 cursor.schemalessInsert(
 lines,
@@ -11,10 +11,10 @@ try {
 cursor.execute(
 "CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)"
 );
-var sql = `INSERT INTO power.d1001 USING power.meters TAGS(Beijing.Chaoyang, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)
-power.d1002 USING power.meters TAGS(Beijing.Chaoyang, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)
-power.d1003 USING power.meters TAGS(Beijing.Haidian, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)
-power.d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)`;
+var sql = `INSERT INTO power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)
+power.d1002 USING power.meters TAGS(California.SanFrancisco, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)
+power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)
+power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)`;
 cursor.execute(sql);
 } finally {
 cursor.close();
@@ -25,7 +25,7 @@ function insertData() {
 
 // bind table name and tags
 let tagBind = new taos.TaosBind(2);
-tagBind.bindBinary("Beijing.Chaoyang");
+tagBind.bindBinary("California.SanFrancisco");
 tagBind.bindInt(2);
 cursor.stmtSetTbnameTags("d1001", tagBind.getBind());
 
@@ -17,25 +17,25 @@ function insertData() {
 metric: "meters.current",
 timestamp: 1648432611249,
 value: 10.3,
-tags: { location: "Beijing.Chaoyang", groupid: 2 },
+tags: { location: "California.SanFrancisco", groupid: 2 },
 },
 {
 metric: "meters.voltage",
 timestamp: 1648432611249,
 value: 219,
-tags: { location: "Beijing.Haidian", groupid: 1 },
+tags: { location: "California.LosAngeles", groupid: 1 },
 },
 {
 metric: "meters.current",
 timestamp: 1648432611250,
 value: 12.6,
-tags: { location: "Beijing.Chaoyang", groupid: 2 },
+tags: { location: "California.SanFrancisco", groupid: 2 },
 },
 {
 metric: "meters.voltage",
 timestamp: 1648432611250,
 value: 221,
-tags: { location: "Beijing.Haidian", groupid: 1 },
+tags: { location: "California.LosAngeles", groupid: 1 },
 },
 ];
 
@@ -13,14 +13,14 @@ function createDatabase() {
 
 function insertData() {
 const lines = [
-"meters.current 1648432611249 10.3 location=Beijing.Chaoyang groupid=2",
-"meters.current 1648432611250 12.6 location=Beijing.Chaoyang groupid=2",
-"meters.current 1648432611249 10.8 location=Beijing.Haidian groupid=3",
-"meters.current 1648432611250 11.3 location=Beijing.Haidian groupid=3",
-"meters.voltage 1648432611249 219 location=Beijing.Chaoyang groupid=2",
-"meters.voltage 1648432611250 218 location=Beijing.Chaoyang groupid=2",
-"meters.voltage 1648432611249 221 location=Beijing.Haidian groupid=3",
-"meters.voltage 1648432611250 217 location=Beijing.Haidian groupid=3",
+"meters.current 1648432611249 10.3 location=California.SanFrancisco groupid=2",
+"meters.current 1648432611250 12.6 location=California.SanFrancisco groupid=2",
+"meters.current 1648432611249 10.8 location=California.LosAngeles groupid=3",
+"meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3",
+"meters.voltage 1648432611249 219 location=California.SanFrancisco groupid=2",
+"meters.voltage 1648432611250 218 location=California.SanFrancisco groupid=2",
+"meters.voltage 1648432611249 221 location=California.LosAngeles groupid=3",
+"meters.voltage 1648432611250 217 location=California.LosAngeles groupid=3",
 ];
 cursor.schemalessInsert(
 lines,
@@ -24,7 +24,7 @@ function insertData() {
 
 // bind table name and tags
 let tagBind = new taos.TaosBind(2);
-tagBind.bindBinary("Beijing.Chaoyang");
+tagBind.bindBinary("California.SanFrancisco");
 tagBind.bindInt(2);
 cursor.stmtSetTbnameTags("d1001", tagBind.getBind());
 
@@ -4,7 +4,7 @@ use TDengine\Connection
 use TDengine\Exception\TDengineException;
 
 try {
-// 实例化
+// instantiate
 $host = 'localhost';
 $port = 6030;
 $username = 'root';
@@ -12,9 +12,9 @@ try {
 $dbname = null;
 $connection = new Connection($host, $port, $username, $password, $dbname);
 
-// 连接
+// connect
 $connection->connect();
 } catch (TDengineException $e) {
-// 连接失败捕获异常
+// throw exception
 throw $e;
 }
@@ -4,7 +4,7 @@ use TDengine\Connection
 use TDengine\Exception\TDengineException;
 
 try {
-// 实例化
+// instantiate
 $host = 'localhost';
 $port = 6030;
 $username = 'root';
@@ -12,22 +12,22 @@ try {
 $dbname = 'power';
 $connection = new Connection($host, $port, $username, $password, $dbname);
 
-// 连接
+// connect
 $connection->connect();
 
-// 插入
+// insert
 $connection->query('CREATE DATABASE if not exists power');
 $connection->query('CREATE STABLE if not exists meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)');
 $resource = $connection->query(<<<'SQL'
-INSERT INTO power.d1001 USING power.meters TAGS(Beijing.Chaoyang, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)
-power.d1002 USING power.meters TAGS(Beijing.Chaoyang, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)
-power.d1003 USING power.meters TAGS(Beijing.Haidian, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)
-power.d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)
+INSERT INTO power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)
+power.d1002 USING power.meters TAGS(California.SanFrancisco, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)
+power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)
+power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)
 SQL);
 
-// 影响行数
+// get affected rows
 var_dump($resource->affectedRows());
 } catch (TDengineException $e) {
-// 捕获异常
+// throw exception
 throw $e;
 }
@@ -4,7 +4,7 @@ use TDengine\Connection
 use TDengine\Exception\TDengineException;
 
 try {
-// 实例化
+// instantiate
 $host = 'localhost';
 $port = 6030;
 $username = 'root';
@@ -12,18 +12,18 @@ try {
 $dbname = 'power';
 $connection = new Connection($host, $port, $username, $password, $dbname);
 
-// 连接
+// connect
 $connection->connect();
 
-// 插入
+// insert
 $connection->query('CREATE DATABASE if not exists power');
 $connection->query('CREATE STABLE if not exists meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)');
 $stmt = $connection->prepare('INSERT INTO ? USING meters TAGS(?, ?) VALUES(?, ?, ?, ?)');
 
-// 设置表名和标签
+// set table name and tags
 $stmt->setTableNameTags('d1001', [
 // same format as parameter binding
-[TDengine\TSDB_DATA_TYPE_BINARY, 'Beijing.Chaoyang'],
+[TDengine\TSDB_DATA_TYPE_BINARY, 'California.SanFrancisco'],
 [TDengine\TSDB_DATA_TYPE_INT, 2],
 ]);
 
@@ -41,9 +41,9 @@ try {
 ]);
 $resource = $stmt->execute();
 
-// 影响行数
+// get affected rows
 var_dump($resource->affectedRows());
 } catch (TDengineException $e) {
-// 捕获异常
+// throw exception
 throw $e;
 }
@@ -4,7 +4,7 @@ use TDengine\Connection
 use TDengine\Exception\TDengineException;
 
 try {
-// 实例化
+// instantiate
 $host = 'localhost';
 $port = 6030;
 $username = 'root';
@@ -12,12 +12,12 @@ try {
 $dbname = 'power';
 $connection = new Connection($host, $port, $username, $password, $dbname);
 
-// 连接
+// connect
 $connection->connect();
 
 $resource = $connection->query('SELECT ts, current FROM meters LIMIT 2');
 var_dump($resource->fetch());
 } catch (TDengineException $e) {
-// 捕获异常
+// throw exception
 throw $e;
 }
@@ -2,14 +2,14 @@ import taos
 from datetime import datetime
 
 # note: lines have already been sorted by table name
-lines = [('d1001', '2018-10-03 14:38:05.000', 10.30000, 219, 0.31000, 'Beijing.Chaoyang', 2),
-('d1001', '2018-10-03 14:38:15.000', 12.60000, 218, 0.33000, 'Beijing.Chaoyang', 2),
-('d1001', '2018-10-03 14:38:16.800', 12.30000, 221, 0.31000, 'Beijing.Chaoyang', 2),
-('d1002', '2018-10-03 14:38:16.650', 10.30000, 218, 0.25000, 'Beijing.Chaoyang', 3),
-('d1003', '2018-10-03 14:38:05.500', 11.80000, 221, 0.28000, 'Beijing.Haidian', 2),
-('d1003', '2018-10-03 14:38:16.600', 13.40000, 223, 0.29000, 'Beijing.Haidian', 2),
-('d1004', '2018-10-03 14:38:05.000', 10.80000, 223, 0.29000, 'Beijing.Haidian', 3),
-('d1004', '2018-10-03 14:38:06.500', 11.50000, 221, 0.35000, 'Beijing.Haidian', 3)]
+lines = [('d1001', '2018-10-03 14:38:05.000', 10.30000, 219, 0.31000, 'California.SanFrancisco', 2),
+('d1001', '2018-10-03 14:38:15.000', 12.60000, 218, 0.33000, 'California.SanFrancisco', 2),
+('d1001', '2018-10-03 14:38:16.800', 12.30000, 221, 0.31000, 'California.SanFrancisco', 2),
+('d1002', '2018-10-03 14:38:16.650', 10.30000, 218, 0.25000, 'California.SanFrancisco', 3),
+('d1003', '2018-10-03 14:38:05.500', 11.80000, 221, 0.28000, 'California.LosAngeles', 2),
+('d1003', '2018-10-03 14:38:16.600', 13.40000, 223, 0.29000, 'California.LosAngeles', 2),
+('d1004', '2018-10-03 14:38:05.000', 10.80000, 223, 0.29000, 'California.LosAngeles', 3),
+('d1004', '2018-10-03 14:38:06.500', 11.50000, 221, 0.35000, 'California.LosAngeles', 3)]
 
 
 def get_ts(ts: str):
@@ -16,10 +16,10 @@ cursor.execute("CREATE DATABASE power")
 cursor.execute("CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)")
 
 # insert data
-cursor.execute("""INSERT INTO power.d1001 USING power.meters TAGS(Beijing.Chaoyang, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)
-    power.d1002 USING power.meters TAGS(Beijing.Chaoyang, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)
-    power.d1003 USING power.meters TAGS(Beijing.Haidian, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)
-    power.d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)""")
+cursor.execute("""INSERT INTO power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)
+    power.d1002 USING power.meters TAGS(California.SanFrancisco, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)
+    power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)
+    power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)""")
 print("inserted row count:", cursor.rowcount)
 
 # query data
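The multi-table INSERT above binds several child tables to the super table in one statement via the USING ... TAGS clause. A minimal C sketch of issuing the same kind of statement through the native client, assuming a local server with the default root/taosdata credentials (the tag value is quoted explicitly here; taos_connect/taos_query/taos_affected_rows are the standard taos.h entry points):

    #include <stdio.h>
    #include <taos.h>  // TDengine C client

    int main() {
      TAOS *conn = taos_connect("localhost", "root", "taosdata", NULL, 6030);
      if (conn == NULL) {
        fprintf(stderr, "connect failed: %s\n", taos_errstr(NULL));
        return 1;
      }
      TAOS_RES *res = taos_query(conn,
          "INSERT INTO power.d1001 USING power.meters TAGS('California.SanFrancisco', 2) "
          "VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000)");
      if (taos_errno(res) != 0) {
        fprintf(stderr, "insert failed: %s\n", taos_errstr(res));
      } else {
        printf("inserted rows: %d\n", taos_affected_rows(res));
      }
      taos_free_result(res);
      taos_close(conn);
      return 0;
    }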
@@ -3,12 +3,12 @@ import json
 import taos
 from taos import SmlProtocol, SmlPrecision
 
-lines = [{"metric": "meters.current", "timestamp": 1648432611249, "value": 10.3, "tags": {"location": "Beijing.Chaoyang", "groupid": 2}},
+lines = [{"metric": "meters.current", "timestamp": 1648432611249, "value": 10.3, "tags": {"location": "California.SanFrancisco", "groupid": 2}},
          {"metric": "meters.voltage", "timestamp": 1648432611249, "value": 219,
-          "tags": {"location": "Beijing.Haidian", "groupid": 1}},
+          "tags": {"location": "California.LosAngeles", "groupid": 1}},
          {"metric": "meters.current", "timestamp": 1648432611250, "value": 12.6,
-          "tags": {"location": "Beijing.Chaoyang", "groupid": 2}},
-         {"metric": "meters.voltage", "timestamp": 1648432611250, "value": 221, "tags": {"location": "Beijing.Haidian", "groupid": 1}}]
+          "tags": {"location": "California.SanFrancisco", "groupid": 2}},
+         {"metric": "meters.voltage", "timestamp": 1648432611250, "value": 221, "tags": {"location": "California.LosAngeles", "groupid": 1}}]
 
 
 def get_connection():
@@ -1,10 +1,10 @@
 import taos
 from taos import SmlProtocol, SmlPrecision
 
-lines = ["meters,location=Beijing.Haidian,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249000",
-         "meters,location=Beijing.Haidian,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611249500",
-         "meters,location=Beijing.Haidian,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249300",
-         "meters,location=Beijing.Haidian,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611249800",
+lines = ["meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249000",
+         "meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611249500",
+         "meters,location=California.LosAngeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249300",
+         "meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611249800",
          ]
@@ -3,10 +3,10 @@ from datetime import datetime
 
 # ANCHOR: bind_batch
 table_tags = {
-    "d1001": ('Beijing.Chaoyang', 2),
-    "d1002": ('Beijing.Chaoyang', 3),
-    "d1003": ('Beijing.Haidian', 2),
-    "d1004": ('Beijing.Haidian', 3)
+    "d1001": ('California.SanFrancisco', 2),
+    "d1002": ('California.SanFrancisco', 3),
+    "d1003": ('California.LosAngeles', 2),
+    "d1004": ('California.LosAngeles', 3)
 }
 
 table_values = {
@@ -1,13 +1,13 @@
 import taos
 
-lines = ["d1001,2018-10-03 14:38:05.000,10.30000,219,0.31000,Beijing.Chaoyang,2",
-         "d1004,2018-10-03 14:38:05.000,10.80000,223,0.29000,Beijing.Haidian,3",
-         "d1003,2018-10-03 14:38:05.500,11.80000,221,0.28000,Beijing.Haidian,2",
-         "d1004,2018-10-03 14:38:06.500,11.50000,221,0.35000,Beijing.Haidian,3",
-         "d1002,2018-10-03 14:38:16.650,10.30000,218,0.25000,Beijing.Chaoyang,3",
-         "d1001,2018-10-03 14:38:15.000,12.60000,218,0.33000,Beijing.Chaoyang,2",
-         "d1001,2018-10-03 14:38:16.800,12.30000,221,0.31000,Beijing.Chaoyang,2",
-         "d1003,2018-10-03 14:38:16.600,13.40000,223,0.29000,Beijing.Haidian,2"]
+lines = ["d1001,2018-10-03 14:38:05.000,10.30000,219,0.31000,California.SanFrancisco,2",
+         "d1004,2018-10-03 14:38:05.000,10.80000,223,0.29000,California.LosAngeles,3",
+         "d1003,2018-10-03 14:38:05.500,11.80000,221,0.28000,California.LosAngeles,2",
+         "d1004,2018-10-03 14:38:06.500,11.50000,221,0.35000,California.LosAngeles,3",
+         "d1002,2018-10-03 14:38:16.650,10.30000,218,0.25000,California.SanFrancisco,3",
+         "d1001,2018-10-03 14:38:15.000,12.60000,218,0.33000,California.SanFrancisco,2",
+         "d1001,2018-10-03 14:38:16.800,12.30000,221,0.31000,California.SanFrancisco,2",
+         "d1003,2018-10-03 14:38:16.600,13.40000,223,0.29000,California.LosAngeles,2"]
 
 
 def get_connection() -> taos.TaosConnection:
@@ -25,10 +25,10 @@ def create_stable(conn: taos.TaosConnection):
 
 # The generated SQL is:
-# INSERT INTO d1001 USING meters TAGS(Beijing.Chaoyang, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)
-# d1002 USING meters TAGS(Beijing.Chaoyang, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)
-# d1003 USING meters TAGS(Beijing.Haidian, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)
-# d1004 USING meters TAGS(Beijing.Haidian, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)
+# INSERT INTO d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)
+# d1002 USING meters TAGS(California.SanFrancisco, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)
+# d1003 USING meters TAGS(California.LosAngeles, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)
+# d1004 USING meters TAGS(California.LosAngeles, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)
 
 def get_sql():
     global lines
@@ -14,8 +14,8 @@ def query_api_demo(conn: taos.TaosConnection):
 # field count: 7
 # meta of files[1]: {name: ts, type: 9, bytes: 8}
 # ======================Iterate on result=========================
-# ('d1001', datetime.datetime(2018, 10, 3, 14, 38, 5), 10.300000190734863, 219, 0.3100000023841858, 'Beijing.Chaoyang', 2)
-# ('d1001', datetime.datetime(2018, 10, 3, 14, 38, 15), 12.600000381469727, 218, 0.33000001311302185, 'Beijing.Chaoyang', 2)
+# ('d1001', datetime.datetime(2018, 10, 3, 14, 38, 5), 10.300000190734863, 219, 0.3100000023841858, 'California.SanFrancisco', 2)
+# ('d1001', datetime.datetime(2018, 10, 3, 14, 38, 15), 12.600000381469727, 218, 0.33000001311302185, 'California.SanFrancisco', 2)
 # ANCHOR_END: iter
 
 # ANCHOR: fetch_all
@@ -2,14 +2,14 @@ import taos
 from taos import SmlProtocol, SmlPrecision
 
 # format: <metric> <timestamp> <value> <tagk_1>=<tagv_1>[ <tagk_n>=<tagv_n>]
-lines = ["meters.current 1648432611249 10.3 location=Beijing.Chaoyang groupid=2",
-         "meters.current 1648432611250 12.6 location=Beijing.Chaoyang groupid=2",
-         "meters.current 1648432611249 10.8 location=Beijing.Haidian groupid=3",
-         "meters.current 1648432611250 11.3 location=Beijing.Haidian groupid=3",
-         "meters.voltage 1648432611249 219 location=Beijing.Chaoyang groupid=2",
-         "meters.voltage 1648432611250 218 location=Beijing.Chaoyang groupid=2",
-         "meters.voltage 1648432611249 221 location=Beijing.Haidian groupid=3",
-         "meters.voltage 1648432611250 217 location=Beijing.Haidian groupid=3",
+lines = ["meters.current 1648432611249 10.3 location=California.SanFrancisco groupid=2",
+         "meters.current 1648432611250 12.6 location=California.SanFrancisco groupid=2",
+         "meters.current 1648432611249 10.8 location=California.LosAngeles groupid=3",
+         "meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3",
+         "meters.voltage 1648432611249 219 location=California.SanFrancisco groupid=2",
+         "meters.voltage 1648432611250 218 location=California.SanFrancisco groupid=2",
+         "meters.voltage 1648432611249 221 location=California.LosAngeles groupid=3",
+         "meters.voltage 1648432611250 217 location=California.LosAngeles groupid=3",
         ]
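The same OpenTSDB-telnet lines can also be fed through the C client's schemaless entry point. A hedged sketch assuming the 3.0 taos_schemaless_insert() API with the TSDB_SML_* constants from taos.h and a pre-created database named test:

    #include <stdio.h>
    #include <taos.h>

    int main() {
      TAOS *conn = taos_connect("localhost", "root", "taosdata", "test", 6030);
      if (conn == NULL) return 1;

      char *lines[] = {
          "meters.current 1648432611249 10.3 location=California.SanFrancisco groupid=2",
          "meters.voltage 1648432611249 219 location=California.SanFrancisco groupid=2",
      };
      // the telnet protocol carries the timestamp inside each line
      TAOS_RES *res = taos_schemaless_insert(conn, lines, 2, TSDB_SML_TELNET_PROTOCOL,
                                             TSDB_SML_TIMESTAMP_MILLI_SECONDS);
      if (taos_errno(res) != 0) {
        fprintf(stderr, "schemaless insert failed: %s\n", taos_errstr(res));
      }
      taos_free_result(res);
      taos_close(conn);
      return 0;
    }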
@@ -12,7 +12,7 @@ async fn main() -> Result<(), Error> {
     stmt.set_tbname_tags(
         "d1001",
         [
-            Field::Binary(BString::from("Beijing.Chaoyang")),
+            Field::Binary(BString::from("California.SanFrancisco")),
             Field::Int(2),
         ],
     )?;
@@ -5,10 +5,10 @@ async fn main() -> Result<(), Error> {
     let taos = TaosCfg::default().connect().expect("fail to connect");
     taos.create_database("power").await?;
     taos.exec("CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)").await?;
-    let sql = "INSERT INTO power.d1001 USING power.meters TAGS(Beijing.Chaoyang, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)
-               power.d1002 USING power.meters TAGS(Beijing.Chaoyang, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)
-               power.d1003 USING power.meters TAGS(Beijing.Haidian, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)
-               power.d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)";
+    let sql = "INSERT INTO power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)
+               power.d1002 USING power.meters TAGS(California.SanFrancisco, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)
+               power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)
+               power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)";
     let result = taos.query(sql).await?;
     println!("{:?}", result);
     Ok(())
@@ -5,10 +5,10 @@ fn main() {
     let taos = TaosCfg::default().connect().expect("fail to connect");
     taos.raw_query("CREATE DATABASE test").unwrap();
     taos.raw_query("USE test").unwrap();
-    let lines = ["meters,location=Beijing.Haidian,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249",
-                 "meters,location=Beijing.Haidian,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250",
-                 "meters,location=Beijing.Haidian,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249",
-                 "meters,location=Beijing.Haidian,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250"];
+    let lines = ["meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249",
+                 "meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250",
+                 "meters,location=California.LosAngeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249",
+                 "meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250"];
     let affected_rows = taos
         .schemaless_insert(
             &lines,
@@ -6,10 +6,10 @@ fn main() {
     taos.raw_query("CREATE DATABASE test").unwrap();
     taos.raw_query("USE test").unwrap();
     let lines = [
-        r#"[{"metric": "meters.current", "timestamp": 1648432611249, "value": 10.3, "tags": {"location": "Beijing.Chaoyang", "groupid": 2}},
-            {"metric": "meters.voltage", "timestamp": 1648432611249, "value": 219, "tags": {"location": "Beijing.Haidian", "groupid": 1}},
-            {"metric": "meters.current", "timestamp": 1648432611250, "value": 12.6, "tags": {"location": "Beijing.Chaoyang", "groupid": 2}},
-            {"metric": "meters.voltage", "timestamp": 1648432611250, "value": 221, "tags": {"location": "Beijing.Haidian", "groupid": 1}}]"#,
+        r#"[{"metric": "meters.current", "timestamp": 1648432611249, "value": 10.3, "tags": {"location": "California.SanFrancisco", "groupid": 2}},
+            {"metric": "meters.voltage", "timestamp": 1648432611249, "value": 219, "tags": {"location": "California.LosAngeles", "groupid": 1}},
+            {"metric": "meters.current", "timestamp": 1648432611250, "value": 12.6, "tags": {"location": "California.SanFrancisco", "groupid": 2}},
+            {"metric": "meters.voltage", "timestamp": 1648432611250, "value": 221, "tags": {"location": "California.LosAngeles", "groupid": 1}}]"#,
     ];
 
     let affected_rows = taos
@@ -6,14 +6,14 @@ fn main() {
     taos.raw_query("CREATE DATABASE test").unwrap();
     taos.raw_query("USE test").unwrap();
     let lines = [
-        "meters.current 1648432611249 10.3 location=Beijing.Chaoyang groupid=2",
-        "meters.current 1648432611250 12.6 location=Beijing.Chaoyang groupid=2",
-        "meters.current 1648432611249 10.8 location=Beijing.Haidian groupid=3",
-        "meters.current 1648432611250 11.3 location=Beijing.Haidian groupid=3",
-        "meters.voltage 1648432611249 219 location=Beijing.Chaoyang groupid=2",
-        "meters.voltage 1648432611250 218 location=Beijing.Chaoyang groupid=2",
-        "meters.voltage 1648432611249 221 location=Beijing.Haidian groupid=3",
-        "meters.voltage 1648432611250 217 location=Beijing.Haidian groupid=3",
+        "meters.current 1648432611249 10.3 location=California.SanFrancisco groupid=2",
+        "meters.current 1648432611250 12.6 location=California.SanFrancisco groupid=2",
+        "meters.current 1648432611249 10.8 location=California.LosAngeles groupid=3",
+        "meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3",
+        "meters.voltage 1648432611249 219 location=California.SanFrancisco groupid=2",
+        "meters.voltage 1648432611250 218 location=California.SanFrancisco groupid=2",
+        "meters.voltage 1648432611249 221 location=California.LosAngeles groupid=3",
+        "meters.voltage 1648432611250 217 location=California.LosAngeles groupid=3",
     ];
     let affected_rows = taos
         .schemaless_insert(
@@ -479,12 +479,8 @@ int32_t tDecodeSEpSet(SDecoder* pDecoder, SEpSet* pEp);
 int32_t taosEncodeSEpSet(void** buf, const SEpSet* pEp);
 void*   taosDecodeSEpSet(const void* buf, SEpSet* pEp);
 
-typedef struct {
-  SEpSet epSet;
-} SMEpSet;
-
-int32_t tSerializeSMEpSet(void* buf, int32_t bufLen, SMEpSet* pReq);
-int32_t tDeserializeSMEpSet(void* buf, int32_t buflen, SMEpSet* pReq);
+int32_t tSerializeSEpSet(void* buf, int32_t bufLen, const SEpSet* pEpset);
+int32_t tDeserializeSEpSet(void* buf, int32_t buflen, SEpSet* pEpset);
 
 typedef struct {
   int8_t connType;
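The one-member SMEpSet wrapper is gone: callers now serialize an SEpSet directly. A sketch of the intended round trip, following the size-then-encode pattern the rest of this commit uses (the transport hand-off is elided, and this helper itself is illustrative, not part of the commit):

    // illustrative caller of the new helpers
    static int32_t packEpSet(const SEpSet *pEpSet, void **pBuf, int32_t *pLen) {
      int32_t len = tSerializeSEpSet(NULL, 0, pEpSet);  // pass 1: compute size
      void   *buf = rpcMallocCont(len);
      if (buf == NULL) return -1;
      tSerializeSEpSet(buf, len, pEpSet);               // pass 2: encode
      *pBuf = buf;
      *pLen = len;
      return 0;
    }

    static int32_t unpackEpSet(void *buf, int32_t len, SEpSet *pEpSet) {
      return tDeserializeSEpSet(buf, len, pEpSet);      // decode into caller's struct
    }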
@@ -656,6 +652,9 @@ typedef struct {
 
 typedef struct {
   int32_t code;
+  char    tbFName[TSDB_TABLE_FNAME_LEN];
+  int32_t sversion;
+  int32_t tversion;
 } SQueryTableRsp;
 
 int32_t tSerializeSQueryTableRsp(void* buf, int32_t bufLen, SQueryTableRsp* pRsp);
@@ -1729,9 +1728,9 @@ int32_t tDecodeSVDropStbReq(SDecoder* pCoder, SVDropStbReq* pReq);
 #define TD_CREATE_IF_NOT_EXISTS 0x1
 typedef struct SVCreateTbReq {
   int32_t  flags;
+  char*    name;
   tb_uid_t uid;
   int64_t  ctime;
-  char*    name;
   int32_t  ttl;
   int8_t   type;
   union {
@@ -182,8 +182,6 @@ enum {
   TD_DEF_MSG_TYPE(TDMT_VND_MQ_DISCONNECT, "vnode-mq-disconnect", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_VND_MQ_VG_CHANGE, "vnode-mq-vg-change", SMqRebVgReq, SMqRebVgRsp)
   TD_DEF_MSG_TYPE(TDMT_VND_MQ_VG_DELETE, "vnode-mq-vg-delete", SMqVDeleteReq, SMqVDeleteRsp)
-  TD_DEF_MSG_TYPE(TDMT_VND_RES_READY, "vnode-res-ready", NULL, NULL)
-  TD_DEF_MSG_TYPE(TDMT_VND_TASKS_STATUS, "vnode-tasks-status", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_VND_CANCEL_TASK, "vnode-cancel-task", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_VND_DROP_TASK, "vnode-drop-task", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_VND_CREATE_TOPIC, "vnode-create-topic", NULL, NULL)
@@ -61,7 +61,7 @@ qTaskInfo_t qCreateStreamExecTaskInfo(void* msg, void* streamReadHandle);
  * @param type
  * @return
  */
-int32_t qSetStreamInput(qTaskInfo_t tinfo, const void* input, int32_t type);
+int32_t qSetStreamInput(qTaskInfo_t tinfo, const void* input, int32_t type, bool assignUid);
 
 /**
  * Set multiple input data blocks for the stream scan.
@@ -71,7 +71,7 @@ int32_t qSetStreamInput(qTaskInfo_t tinfo, const void* input, int32_t type);
  * @param type
  * @return
  */
-int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numOfBlocks, int32_t type);
+int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numOfBlocks, int32_t type, bool assignUid);
 
 /**
  * Update the table id list, add or remove.
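Both stream-input setters gain a trailing bool. A minimal call-site sketch, under the assumption that assignUid asks the executor to overwrite the UIDs carried by the submitted blocks (the exact semantics are not spelled out in this header, and STREAM_DATA_TYPE_SUBMIT_BLOCK is assumed here):

    // hypothetical call site showing the widened signature
    int32_t feedStream(qTaskInfo_t task, const void *pSubmitBlock) {
      // old: qSetStreamInput(task, pSubmitBlock, STREAM_DATA_TYPE_SUBMIT_BLOCK);
      return qSetStreamInput(task, pSubmitBlock, STREAM_DATA_TYPE_SUBMIT_BLOCK,
                             false /* keep the UIDs already in the block */);
    }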
@@ -322,21 +322,22 @@ typedef enum EQueryExecMode {
 } EQueryExecMode;
 
 typedef struct SQuery {
   ENodeType               type;
   EQueryExecMode          execMode;
   bool                    haveResultSet;
   SNode*                  pRoot;
   int32_t                 numOfResCols;
   SSchema*                pResSchema;
   int8_t                  precision;
   SCmdMsgInfo*            pCmdMsg;
   int32_t                 msgType;
   SArray*                 pDbList;
   SArray*                 pTableList;
   bool                    showRewrite;
   int32_t                 placeholderNum;
   SArray*                 pPlaceholderValues;
   SNode*                  pPrepareRoot;
+  struct SParseMetaCache* pMetaCache;
 } SQuery;
 
 void nodesWalkSelectStmt(SSelectStmt* pSelect, ESqlClause clause, FNodeWalker walker, void* pContext);
@@ -23,6 +23,9 @@ extern "C" {
 #include "query.h"
 #include "querynodes.h"
 
+struct SCatalogReq;
+struct SMetaData;
+
 typedef struct SStmtCallback {
   TAOS_STMT* pStmt;
   int32_t (*getTbNameFn)(TAOS_STMT*, char**);
@@ -45,11 +48,17 @@ typedef struct SParseContext {
   SStmtCallback*   pStmtCb;
   const char*      pUser;
   bool             isSuperUser;
+  bool             async;
 } SParseContext;
 
 int32_t qParseSql(SParseContext* pCxt, SQuery** pQuery);
 bool    qIsInsertSql(const char* pStr, size_t length);
 
+// for async mode
+int32_t qSyntaxParseSql(SParseContext* pCxt, SQuery** pQuery, struct SCatalogReq* pCatalogReq);
+int32_t qSemanticAnalysisSql(SParseContext* pCxt, const struct SCatalogReq* pCatalogReq,
+                             const struct SMetaData* pMetaData, SQuery* pQuery);
+
 void qDestroyQuery(SQuery* pQueryNode);
 
 int32_t qExtractResultSchema(const SNode* pRoot, int32_t* numOfCols, SSchema** pSchema);
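The two new entry points split qParseSql into phases so metadata can be fetched asynchronously between them. A sketch of the flow they imply; the catalog-fetch step in the middle is an assumption, and the pointers stand in for structs whose definitions live in the catalog headers, so this fragment illustrates ordering rather than compiling on its own:

    SQuery             *pQuery = NULL;
    struct SCatalogReq *pCatalogReq = NULL;  // filled in by the syntax phase
    struct SMetaData   *pMetaData = NULL;    // produced by the (elided) catalog fetch

    int32_t code = qSyntaxParseSql(pCxt, &pQuery, pCatalogReq);
    if (code == TSDB_CODE_SUCCESS) {
      // ... hand pCatalogReq to the catalog module, wait for pMetaData ...
      code = qSemanticAnalysisSql(pCxt, pCatalogReq, pMetaData, pQuery);
    }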
@@ -56,12 +56,6 @@ int32_t qWorkerProcessQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg);
 
 int32_t qWorkerProcessCQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg);
 
-int32_t qWorkerProcessDataSinkMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg);
-
-int32_t qWorkerProcessReadyMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg);
-
-int32_t qWorkerProcessStatusMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg);
-
 int32_t qWorkerProcessFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg);
 
 int32_t qWorkerProcessFetchRsp(void *node, void *qWorkerMgmt, SRpcMsg *pMsg);
@@ -72,10 +66,6 @@ int32_t qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg);
 
 int32_t qWorkerProcessHbMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg);
 
-int32_t qWorkerProcessShowMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg);
-
-int32_t qWorkerProcessShowFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg);
-
 void qWorkerDestroy(void **qWorkerMgmt);
 
 #ifdef __cplusplus
@@ -124,6 +124,7 @@ void rpcSendRedirectRsp(void *pConn, const SEpSet *pEpSet);
 void    rpcSendRequestWithCtx(void *thandle, const SEpSet *pEpSet, SRpcMsg *pMsg, int64_t *rid, SRpcCtx *ctx);
 int32_t rpcGetConnInfo(void *thandle, SRpcConnInfo *pInfo);
 void    rpcSendRecv(void *shandle, SEpSet *pEpSet, SRpcMsg *pReq, SRpcMsg *pRsp);
+void    rpcSetDefaultAddr(void *thandle, const char *ip, const char *fqdn);
 
 #ifdef __cplusplus
 }
@@ -643,6 +643,7 @@ int32_t* taosGetErrno();
 #define TSDB_CODE_PAR_INVALID_DROP_COL TAOS_DEF_ERROR_CODE(0, 0x2651)
 #define TSDB_CODE_PAR_INVALID_COL_JSON TAOS_DEF_ERROR_CODE(0, 0x2652)
 #define TSDB_CODE_PAR_VALUE_TOO_LONG   TAOS_DEF_ERROR_CODE(0, 0x2653)
+#define TSDB_CODE_PAR_INTERNAL_ERROR   TAOS_DEF_ERROR_CODE(0, 0x2654)
 
 //planner
 #define TSDB_CODE_PLAN_INTERNAL_ERROR  TAOS_DEF_ERROR_CODE(0, 0x2700)
@@ -247,7 +247,7 @@ typedef enum ELogicConditionType {
 #define TSDB_EP_LEN             (TSDB_FQDN_LEN + 6)
 #define TSDB_IPv4ADDR_LEN       16
 #define TSDB_FILENAME_LEN       128
-#define TSDB_SHOW_SQL_LEN       512
+#define TSDB_SHOW_SQL_LEN       1024
 #define TSDB_SLOW_QUERY_SQL_LEN 512
 #define TSDB_SHOW_SUBQUERY_LEN  1000
@@ -62,6 +62,7 @@ extern int32_t fsDebugFlag;
 extern int32_t metaDebugFlag;
 extern int32_t fnDebugFlag;
 extern int32_t smaDebugFlag;
+extern int32_t idxDebugFlag;
 
 int32_t taosInitLog(const char *logName, int32_t maxFiles);
 void    taosCloseLog();
@@ -394,8 +394,8 @@ int32_t validateSversion(SRequestObj* pRequest, void* res) {
       if (NULL == blk->tblFName || 0 == blk->tblFName[0]) {
         continue;
       }
 
       STbSVersion tbSver = {.tbFName = blk->tblFName, .sver = blk->sver};
       taosArrayPush(pArray, &tbSver);
     }
   } else if (TDMT_VND_QUERY == pRequest->type) {
|
||||||
|
|
||||||
int32_t removeMeta(STscObj* pTscObj, SArray* tbList) {
|
int32_t removeMeta(STscObj* pTscObj, SArray* tbList) {
|
||||||
SCatalog* pCatalog = NULL;
|
SCatalog* pCatalog = NULL;
|
||||||
int32_t tbNum = taosArrayGetSize(tbList);
|
int32_t tbNum = taosArrayGetSize(tbList);
|
||||||
int32_t code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog);
|
int32_t code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog);
|
||||||
if (code != TSDB_CODE_SUCCESS) {
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
return code;
|
return code;
|
||||||
}
|
}
|
||||||
|
|
||||||
for (int32_t i = 0; i < tbNum; ++i) {
|
for (int32_t i = 0; i < tbNum; ++i) {
|
||||||
SName* pTbName = taosArrayGet(tbList, i);
|
SName* pTbName = taosArrayGet(tbList, i);
|
||||||
catalogRemoveTableMeta(pCatalog, pTbName);
|
catalogRemoveTableMeta(pCatalog, pTbName);
|
||||||
|
@ -566,7 +566,6 @@ int32_t removeMeta(STscObj* pTscObj, SArray* tbList) {
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
SRequestObj* execQuery(STscObj* pTscObj, const char* sql, int sqlLen) {
|
SRequestObj* execQuery(STscObj* pTscObj, const char* sql, int sqlLen) {
|
||||||
SRequestObj* pRequest = NULL;
|
SRequestObj* pRequest = NULL;
|
||||||
int32_t retryNum = 0;
|
int32_t retryNum = 0;
|
||||||
|
@ -589,7 +588,7 @@ SRequestObj* execQuery(STscObj* pTscObj, const char* sql, int sqlLen) {
|
||||||
if (NEED_CLIENT_RM_TBLMETA_REQ(pRequest->type)) {
|
if (NEED_CLIENT_RM_TBLMETA_REQ(pRequest->type)) {
|
||||||
removeMeta(pTscObj, pRequest->tableList);
|
removeMeta(pTscObj, pRequest->tableList);
|
||||||
}
|
}
|
||||||
|
|
||||||
return pRequest;
|
return pRequest;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -730,11 +729,6 @@ static void destroySendMsgInfo(SMsgSendInfo* pMsgBody) {
|
||||||
taosMemoryFreeClear(pMsgBody);
|
taosMemoryFreeClear(pMsgBody);
|
||||||
}
|
}
|
||||||
|
|
||||||
bool persistConnForSpecificMsg(void* parenct, tmsg_t msgType) {
|
|
||||||
return msgType == TDMT_VND_QUERY_RSP || msgType == TDMT_VND_FETCH_RSP || msgType == TDMT_VND_RES_READY_RSP ||
|
|
||||||
msgType == TDMT_VND_QUERY_HEARTBEAT_RSP;
|
|
||||||
}
|
|
||||||
|
|
||||||
void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) {
|
void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) {
|
||||||
SMsgSendInfo* pSendInfo = (SMsgSendInfo*)pMsg->info.ahandle;
|
SMsgSendInfo* pSendInfo = (SMsgSendInfo*)pMsg->info.ahandle;
|
||||||
assert(pMsg->info.ahandle != NULL);
|
assert(pMsg->info.ahandle != NULL);
|
||||||
|
|
|
@@ -58,7 +58,12 @@ int32_t processConnectRsp(void* param, const SDataBuf* pMsg, int32_t code) {
     return code;
   }
 
-  if (connectRsp.dnodeNum > 1 && !isEpsetEqual(&pTscObj->pAppInfo->mgmtEp.epSet, &connectRsp.epSet)) {
+  if (connectRsp.dnodeNum == 1) {
+    SEpSet srcEpSet = getEpSet_s(&pTscObj->pAppInfo->mgmtEp);
+    SEpSet dstEpSet = connectRsp.epSet;
+    rpcSetDefaultAddr(pTscObj->pAppInfo->pTransporter, srcEpSet.eps[srcEpSet.inUse].fqdn,
+                      dstEpSet.eps[dstEpSet.inUse].fqdn);
+  } else if (connectRsp.dnodeNum > 1 && !isEpsetEqual(&pTscObj->pAppInfo->mgmtEp.epSet, &connectRsp.epSet)) {
     updateEpSet_s(&pTscObj->pAppInfo->mgmtEp, &connectRsp.epSet);
   }
 
@@ -126,9 +131,10 @@ int32_t processUseDbRsp(void* param, const SDataBuf* pMsg, int32_t code) {
 
   if (usedbRsp.vgVersion >= 0) {
     uint64_t clusterId = pRequest->pTscObj->pAppInfo->clusterId;
     int32_t  code1 = catalogGetHandle(clusterId, &pCatalog);
     if (code1 != TSDB_CODE_SUCCESS) {
-      tscWarn("0x%" PRIx64 "catalogGetHandle failed, clusterId:%" PRIx64 ", error:%s", pRequest->requestId, clusterId, tstrerror(code1));
+      tscWarn("0x%" PRIx64 "catalogGetHandle failed, clusterId:%" PRIx64 ", error:%s", pRequest->requestId, clusterId,
+              tstrerror(code1));
     } else {
       catalogRemoveDB(pCatalog, usedbRsp.db, usedbRsp.uid);
     }
@@ -158,7 +164,7 @@ int32_t processUseDbRsp(void* param, const SDataBuf* pMsg, int32_t code) {
     if (output.dbVgroup) taosHashCleanup(output.dbVgroup->vgHash);
     taosMemoryFreeClear(output.dbVgroup);
 
-    tscError("0x%" PRIx64" failed to build use db output since %s", pRequest->requestId, terrstr());
+    tscError("0x%" PRIx64 " failed to build use db output since %s", pRequest->requestId, terrstr());
   } else if (output.dbVgroup) {
     struct SCatalog* pCatalog = NULL;
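Why the new dnodeNum == 1 branch exists: with a single dnode there is no second endpoint to fail over to, so instead of updating the endpoint list the client rebinds the transport's default address when the server reports a different FQDN. Condensed and annotated from the hunk above:

    if (connectRsp.dnodeNum == 1) {
      SEpSet src = getEpSet_s(&pTscObj->pAppInfo->mgmtEp);  // locally configured endpoint
      SEpSet dst = connectRsp.epSet;                        // endpoint reported by the server
      rpcSetDefaultAddr(pTscObj->pAppInfo->pTransporter,
                        src.eps[src.inUse].fqdn,            // address to replace
                        dst.eps[dst.inUse].fqdn);           // new default address
    }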
@@ -125,11 +125,15 @@ static const SSysDbTableSchema userStbsSchema[] = {
 
 static const SSysDbTableSchema streamSchema[] = {
     {.name = "stream_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
-    {.name = "user_name", .bytes = 23, .type = TSDB_DATA_TYPE_VARCHAR},
-    {.name = "dest_table", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
     {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
-    {.name = "sql", .bytes = 1024, .type = TSDB_DATA_TYPE_VARCHAR},
-};
+    {.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
+    {.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
+    {.name = "source_db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
+    {.name = "target_db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
+    {.name = "target_table", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
+    {.name = "watermark", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
+    {.name = "trigger", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
+};
 
 static const SSysDbTableSchema userTblsSchema[] = {
     {.name = "table_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
@@ -79,9 +79,10 @@ uint16_t tsTelemPort = 80;
 
 // schemaless
 char tsSmlTagName[TSDB_COL_NAME_LEN] = "_tag_null";
-char tsSmlChildTableName[TSDB_TABLE_NAME_LEN] = "";  //user defined child table name can be specified in tag value.
-//If set to empty system will generate table name using MD5 hash.
-bool tsSmlDataFormat = true;  // true means that the name and order of cols in each line are the same(only for influx protocol)
+char tsSmlChildTableName[TSDB_TABLE_NAME_LEN] = "";  // user defined child table name can be specified in tag value.
+                                                     // If set to empty system will generate table name using MD5 hash.
+bool tsSmlDataFormat =
+    true;  // true means that the name and order of cols in each line are the same(only for influx protocol)
 
 // query
 int32_t tsQueryPolicy = 1;
@@ -292,6 +293,7 @@ int32_t taosAddClientLogCfg(SConfig *pCfg) {
   if (cfgAddInt32(pCfg, "jniDebugFlag", jniDebugFlag, 0, 255, 1) != 0) return -1;
   if (cfgAddInt32(pCfg, "simDebugFlag", 143, 0, 255, 1) != 0) return -1;
   if (cfgAddInt32(pCfg, "debugFlag", 0, 0, 255, 1) != 0) return -1;
+  if (cfgAddInt32(pCfg, "idxDebugFlag", 0, 0, 255, 1) != 0) return -1;
   return 0;
 }
 
@@ -307,6 +309,7 @@ static int32_t taosAddServerLogCfg(SConfig *pCfg) {
   if (cfgAddInt32(pCfg, "fsDebugFlag", fsDebugFlag, 0, 255, 0) != 0) return -1;
   if (cfgAddInt32(pCfg, "fnDebugFlag", fnDebugFlag, 0, 255, 0) != 0) return -1;
   if (cfgAddInt32(pCfg, "smaDebugFlag", smaDebugFlag, 0, 255, 0) != 0) return -1;
+  if (cfgAddInt32(pCfg, "idxDebugFlag", idxDebugFlag, 0, 255, 0) != 0) return -1;
   return 0;
 }
 
@@ -479,6 +482,7 @@ static void taosSetClientLogCfg(SConfig *pCfg) {
  rpcDebugFlag = cfgGetItem(pCfg, "rpcDebugFlag")->i32;
  tmrDebugFlag = cfgGetItem(pCfg, "tmrDebugFlag")->i32;
  jniDebugFlag = cfgGetItem(pCfg, "jniDebugFlag")->i32;
+  idxDebugFlag = cfgGetItem(pCfg, "idxDebugFlag")->i32;
 }
 
 static void taosSetServerLogCfg(SConfig *pCfg) {
@@ -493,6 +497,7 @@ static void taosSetServerLogCfg(SConfig *pCfg) {
   fsDebugFlag = cfgGetItem(pCfg, "fsDebugFlag")->i32;
   fnDebugFlag = cfgGetItem(pCfg, "fnDebugFlag")->i32;
   smaDebugFlag = cfgGetItem(pCfg, "smaDebugFlag")->i32;
+  idxDebugFlag = cfgGetItem(pCfg, "idxDebugFlag")->i32;
 }
 
 static int32_t taosSetClientCfg(SConfig *pCfg) {
@@ -665,22 +665,24 @@ void tFreeSMAltertbReq(SMAlterStbReq *pReq) {
   taosArrayDestroy(pReq->pFields);
   pReq->pFields = NULL;
 }
-int32_t tSerializeSMEpSet(void *buf, int32_t bufLen, SMEpSet *pReq) {
+
+int32_t tSerializeSEpSet(void *buf, int32_t bufLen, const SEpSet *pEpset) {
   SEncoder encoder = {0};
   tEncoderInit(&encoder, buf, bufLen);
   if (tStartEncode(&encoder) < 0) return -1;
-  if (tEncodeSEpSet(&encoder, &pReq->epSet) < 0) return -1;
+  if (tEncodeSEpSet(&encoder, pEpset) < 0) return -1;
 
   tEndEncode(&encoder);
   int32_t tlen = encoder.pos;
   tEncoderClear(&encoder);
   return tlen;
 }
-int32_t tDeserializeSMEpSet(void *buf, int32_t bufLen, SMEpSet *pReq) {
+
+int32_t tDeserializeSEpSet(void *buf, int32_t bufLen, SEpSet *pEpset) {
   SDecoder decoder = {0};
   tDecoderInit(&decoder, buf, bufLen);
   if (tStartDecode(&decoder) < 0) return -1;
-  if (tDecodeSEpSet(&decoder, &pReq->epSet) < 0) return -1;
+  if (tDecodeSEpSet(&decoder, pEpset) < 0) return -1;
 
   tEndDecode(&decoder);
   tDecoderClear(&decoder);
|
||||||
|
|
||||||
void tFreeSSchedulerHbRsp(SSchedulerHbRsp *pRsp) { taosArrayDestroy(pRsp->taskStatus); }
|
void tFreeSSchedulerHbRsp(SSchedulerHbRsp *pRsp) { taosArrayDestroy(pRsp->taskStatus); }
|
||||||
|
|
||||||
int32_t tSerializeSQueryTableRsp(void *buf, int32_t bufLen, SQueryTableRsp *pRsp) {
|
|
||||||
SEncoder encoder = {0};
|
|
||||||
tEncoderInit(&encoder, buf, bufLen);
|
|
||||||
|
|
||||||
if (tStartEncode(&encoder) < 0) return -1;
|
|
||||||
if (tEncodeI32(&encoder, pRsp->code) < 0) return -1;
|
|
||||||
tEndEncode(&encoder);
|
|
||||||
|
|
||||||
int32_t tlen = encoder.pos;
|
|
||||||
tEncoderClear(&encoder);
|
|
||||||
return tlen;
|
|
||||||
}
|
|
||||||
|
|
||||||
int32_t tDeserializeSQueryTableRsp(void *buf, int32_t bufLen, SQueryTableRsp *pRsp) {
|
|
||||||
SDecoder decoder = {0};
|
|
||||||
tDecoderInit(&decoder, buf, bufLen);
|
|
||||||
|
|
||||||
if (tStartDecode(&decoder) < 0) return -1;
|
|
||||||
if (tDecodeI32(&decoder, &pRsp->code) < 0) return -1;
|
|
||||||
tEndDecode(&decoder);
|
|
||||||
|
|
||||||
tDecoderClear(&decoder);
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
int32_t tSerializeSVCreateTbBatchRsp(void *buf, int32_t bufLen, SVCreateTbBatchRsp *pRsp) {
|
int32_t tSerializeSVCreateTbBatchRsp(void *buf, int32_t bufLen, SVCreateTbBatchRsp *pRsp) {
|
||||||
// SEncoder encoder = {0};
|
// SEncoder encoder = {0};
|
||||||
// tEncoderInit(&encoder, buf, bufLen);
|
// tEncoderInit(&encoder, buf, bufLen);
|
||||||
|
@ -3861,10 +3838,9 @@ int tEncodeSVCreateTbReq(SEncoder *pCoder, const SVCreateTbReq *pReq) {
|
||||||
if (tStartEncode(pCoder) < 0) return -1;
|
if (tStartEncode(pCoder) < 0) return -1;
|
||||||
|
|
||||||
if (tEncodeI32v(pCoder, pReq->flags) < 0) return -1;
|
if (tEncodeI32v(pCoder, pReq->flags) < 0) return -1;
|
||||||
|
if (tEncodeCStr(pCoder, pReq->name) < 0) return -1;
|
||||||
if (tEncodeI64(pCoder, pReq->uid) < 0) return -1;
|
if (tEncodeI64(pCoder, pReq->uid) < 0) return -1;
|
||||||
if (tEncodeI64(pCoder, pReq->ctime) < 0) return -1;
|
if (tEncodeI64(pCoder, pReq->ctime) < 0) return -1;
|
||||||
|
|
||||||
if (tEncodeCStr(pCoder, pReq->name) < 0) return -1;
|
|
||||||
if (tEncodeI32(pCoder, pReq->ttl) < 0) return -1;
|
if (tEncodeI32(pCoder, pReq->ttl) < 0) return -1;
|
||||||
if (tEncodeI8(pCoder, pReq->type) < 0) return -1;
|
if (tEncodeI8(pCoder, pReq->type) < 0) return -1;
|
||||||
|
|
||||||
|
@ -3887,10 +3863,9 @@ int tDecodeSVCreateTbReq(SDecoder *pCoder, SVCreateTbReq *pReq) {
|
||||||
if (tStartDecode(pCoder) < 0) return -1;
|
if (tStartDecode(pCoder) < 0) return -1;
|
||||||
|
|
||||||
if (tDecodeI32v(pCoder, &pReq->flags) < 0) return -1;
|
if (tDecodeI32v(pCoder, &pReq->flags) < 0) return -1;
|
||||||
|
if (tDecodeCStr(pCoder, &pReq->name) < 0) return -1;
|
||||||
if (tDecodeI64(pCoder, &pReq->uid) < 0) return -1;
|
if (tDecodeI64(pCoder, &pReq->uid) < 0) return -1;
|
||||||
if (tDecodeI64(pCoder, &pReq->ctime) < 0) return -1;
|
if (tDecodeI64(pCoder, &pReq->ctime) < 0) return -1;
|
||||||
|
|
||||||
if (tDecodeCStr(pCoder, &pReq->name) < 0) return -1;
|
|
||||||
if (tDecodeI32(pCoder, &pReq->ttl) < 0) return -1;
|
if (tDecodeI32(pCoder, &pReq->ttl) < 0) return -1;
|
||||||
if (tDecodeI8(pCoder, &pReq->type) < 0) return -1;
|
if (tDecodeI8(pCoder, &pReq->type) < 0) return -1;
|
||||||
|
|
||||||
|
|
|
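Field order is the wire format for these hand-rolled encoders, so the name move (matching the SVCreateTbReq struct change earlier in this commit) only works because encoder and decoder changed in lockstep; an old writer paired with a new reader would misparse. A standalone toy round trip, not TDengine code, illustrating the invariant:

    #include <stdio.h>
    #include <string.h>

    // toy encoder: the order of writes defines the format
    static size_t enc(char *buf, int flags, const char *name, long uid) {
      size_t off = 0;
      memcpy(buf + off, &flags, sizeof flags); off += sizeof flags;
      size_t n = strlen(name) + 1;
      memcpy(buf + off, name, n);              off += n;             // name now precedes uid
      memcpy(buf + off, &uid, sizeof uid);     off += sizeof uid;
      return off;
    }

    int main() {
      char buf[64];
      size_t len = enc(buf, 1, "d1001", 42);
      // a decoder that still reads uid before name would consume string bytes as an integer
      printf("encoded %zu bytes\n", len);
      return 0;
    }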
@@ -101,8 +101,6 @@ SArray *qmGetMsgHandles() {
   if (dmSetMgmtHandle(pArray, TDMT_VND_FETCH_RSP, qmPutNodeMsgToFetchQueue, 1) == NULL) goto _OVER;
   if (dmSetMgmtHandle(pArray, TDMT_VND_QUERY_HEARTBEAT, qmPutNodeMsgToFetchQueue, 1) == NULL) goto _OVER;
 
-  if (dmSetMgmtHandle(pArray, TDMT_VND_RES_READY, qmPutNodeMsgToFetchQueue, 1) == NULL) goto _OVER;
-  if (dmSetMgmtHandle(pArray, TDMT_VND_TASKS_STATUS, qmPutNodeMsgToFetchQueue, 1) == NULL) goto _OVER;
   if (dmSetMgmtHandle(pArray, TDMT_VND_CANCEL_TASK, qmPutNodeMsgToFetchQueue, 1) == NULL) goto _OVER;
   if (dmSetMgmtHandle(pArray, TDMT_VND_DROP_TASK, qmPutNodeMsgToFetchQueue, 1) == NULL) goto _OVER;
 
@@ -292,8 +292,6 @@ SArray *vmGetMsgHandles() {
   if (dmSetMgmtHandle(pArray, TDMT_VND_MQ_CONNECT, vmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER;
   if (dmSetMgmtHandle(pArray, TDMT_VND_MQ_DISCONNECT, vmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER;
   // if (dmSetMgmtHandle(pArray, TDMT_VND_MQ_SET_CUR, vmPutNodeMsgToWriteQueue, 0)== NULL) goto _OVER;
-  if (dmSetMgmtHandle(pArray, TDMT_VND_RES_READY, vmPutNodeMsgToFetchQueue, 0) == NULL) goto _OVER;
-  if (dmSetMgmtHandle(pArray, TDMT_VND_TASKS_STATUS, vmPutNodeMsgToFetchQueue, 0) == NULL) goto _OVER;
   if (dmSetMgmtHandle(pArray, TDMT_VND_CANCEL_TASK, vmPutNodeMsgToFetchQueue, 0) == NULL) goto _OVER;
   if (dmSetMgmtHandle(pArray, TDMT_VND_DROP_TASK, vmPutNodeMsgToFetchQueue, 0) == NULL) goto _OVER;
   if (dmSetMgmtHandle(pArray, TDMT_VND_CREATE_STB, vmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER;
@@ -64,6 +64,8 @@ static void dmProcessRpcMsg(SDnode *pDnode, SRpcMsg *pRpc, SEpSet *pEpSet) {
   } else if (pRpc->msgType == TDMT_MND_SYSTABLE_RETRIEVE_RSP || pRpc->msgType == TDMT_VND_FETCH_RSP) {
     qWorkerProcessFetchRsp(NULL, NULL, pRpc);
     return;
+  } else if (pRpc->msgType == TDMT_MND_STATUS_RSP && pEpSet != NULL) {
+    dmSetMnodeEpSet(&pDnode->data, pEpSet);
   } else {
   }
 
@@ -204,29 +206,28 @@ static inline void dmSendRsp(SRpcMsg *pMsg) {
 }
 
 static void dmBuildMnodeRedirectRsp(SDnode *pDnode, SRpcMsg *pMsg) {
-  SMEpSet msg = {0};
-  dmGetMnodeEpSetForRedirect(&pDnode->data, pMsg, &msg.epSet);
+  SEpSet epSet = {0};
+  dmGetMnodeEpSetForRedirect(&pDnode->data, pMsg, &epSet);
 
-  int32_t contLen = tSerializeSMEpSet(NULL, 0, &msg);
+  int32_t contLen = tSerializeSEpSet(NULL, 0, &epSet);
   pMsg->pCont = rpcMallocCont(contLen);
   if (pMsg->pCont == NULL) {
     pMsg->code = TSDB_CODE_OUT_OF_MEMORY;
   } else {
-    tSerializeSMEpSet(pMsg->pCont, contLen, &msg);
+    tSerializeSEpSet(pMsg->pCont, contLen, &epSet);
     pMsg->contLen = contLen;
   }
 }
 
 static inline void dmSendRedirectRsp(SRpcMsg *pMsg, const SEpSet *pNewEpSet) {
   SRpcMsg rsp = {.code = TSDB_CODE_RPC_REDIRECT, .info = pMsg->info};
-  SMEpSet msg = {.epSet = *pNewEpSet};
-  int32_t contLen = tSerializeSMEpSet(NULL, 0, &msg);
+  int32_t contLen = tSerializeSEpSet(NULL, 0, pNewEpSet);
 
   rsp.pCont = rpcMallocCont(contLen);
   if (rsp.pCont == NULL) {
     terrno = TSDB_CODE_OUT_OF_MEMORY;
   } else {
-    tSerializeSMEpSet(rsp.pCont, contLen, &msg);
+    tSerializeSEpSet(rsp.pCont, contLen, pNewEpSet);
     rsp.contLen = contLen;
   }
   dmSendRsp(&rsp);
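With the wrapper removed, the redirect payload is now a bare serialized SEpSet, so whoever receives TSDB_CODE_RPC_REDIRECT decodes it directly. A hedged sketch of the receiving side, which is not shown in this commit:

    // illustrative receiver of a redirect response
    static int32_t handleRedirect(SRpcMsg *pRsp, SEpSet *pNewEpSet) {
      if (pRsp->code != TSDB_CODE_RPC_REDIRECT) return -1;
      return tDeserializeSEpSet(pRsp->pCont, pRsp->contLen, pNewEpSet);
    }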
@@ -326,6 +326,7 @@ void dmGetMnodeEpSetForRedirect(SDnodeData *pData, SRpcMsg *pMsg, SEpSet *pEpSet
 }
 
 void dmSetMnodeEpSet(SDnodeData *pData, SEpSet *pEpSet) {
+  if (memcmp(pEpSet, &pData->mnodeEps, sizeof(SEpSet)) == 0) return;
   taosThreadRwlockWrlock(&pData->lock);
   pData->mnodeEps = *pEpSet;
   taosThreadRwlockUnlock(&pData->lock);
@@ -124,6 +124,11 @@ typedef enum {
   TRN_POLICY_RETRY = 1,
 } ETrnPolicy;
 
+typedef enum {
+  TRN_EXEC_PARALLEL = 0,
+  TRN_EXEC_ONE_BY_ONE = 1,
+} ETrnExecType;
+
 typedef enum {
   DND_REASON_ONLINE = 0,
   DND_REASON_STATUS_MSG_TIMEOUT,
@@ -152,6 +157,7 @@ typedef struct {
   ETrnStage      stage;
   ETrnPolicy     policy;
   ETrnType       type;
+  ETrnExecType   parallel;
   int32_t        code;
   int32_t        failedTimes;
   SRpcHandleInfo rpcInfo;
@@ -81,6 +81,7 @@ typedef struct {
   bool    standby;
   bool    restored;
   int32_t errCode;
+  int32_t transId;
 } SSyncMgmt;
 
 typedef struct {
@@ -25,7 +25,7 @@ extern "C" {
 int32_t mndInitSync(SMnode *pMnode);
 void    mndCleanupSync(SMnode *pMnode);
 bool    mndIsMaster(SMnode *pMnode);
-int32_t mndSyncPropose(SMnode *pMnode, SSdbRaw *pRaw);
+int32_t mndSyncPropose(SMnode *pMnode, SSdbRaw *pRaw, int32_t transId);
 void    mndSyncStart(SMnode *pMnode);
 void    mndSyncStop(SMnode *pMnode);
@@ -57,6 +57,7 @@ int32_t mndTransAppendUndoAction(STrans *pTrans, STransAction *pAction);
 void    mndTransSetRpcRsp(STrans *pTrans, void *pCont, int32_t contLen);
 void    mndTransSetCb(STrans *pTrans, ETrnFuncType startFunc, ETrnFuncType stopFunc, void *param, int32_t paramLen);
 void    mndTransSetDbInfo(STrans *pTrans, SDbObj *pDb);
+void    mndTransSetExecOneByOne(STrans *pTrans);
 
 int32_t mndTransPrepare(SMnode *pMnode, STrans *pTrans);
 void    mndTransProcessRsp(SRpcMsg *pRsp);
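The new hook pairs with the ETrnExecType enum added to mndDef.h: flipping a transaction to TRN_EXEC_ONE_BY_ONE forces its redo actions to run serially instead of in parallel. A sketch of the usage pattern this commit adopts for mnode create/drop; mndTransCreate's exact signature and the TRN_TYPE_CREATE_MNODE constant are assumptions here:

    STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_TYPE_CREATE_MNODE, pReq);
    if (pTrans != NULL) {
      mndTransSetExecOneByOne(pTrans);  // serial execution: mnode reconfig steps must not race
      // ... append redo/commit logs and redo actions, then mndTransPrepare(pMnode, pTrans) ...
    }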
@@ -20,6 +20,7 @@
 #include "mndShow.h"
 #include "mndTrans.h"
 #include "mndUser.h"
+#include "mndSync.h"
 
 #define MNODE_VER_NUMBER   1
 #define MNODE_RESERVE_SIZE 64
|
||||||
}
|
}
|
||||||
|
|
||||||
void mndGetMnodeEpSet(SMnode *pMnode, SEpSet *pEpSet) {
|
void mndGetMnodeEpSet(SMnode *pMnode, SEpSet *pEpSet) {
|
||||||
SSdb *pSdb = pMnode->pSdb;
|
SSdb *pSdb = pMnode->pSdb;
|
||||||
pEpSet->numOfEps = 0;
|
int32_t totalMnodes = sdbGetSize(pSdb, SDB_MNODE);
|
||||||
|
void *pIter = NULL;
|
||||||
|
|
||||||
void *pIter = NULL;
|
|
||||||
while (1) {
|
while (1) {
|
||||||
SMnodeObj *pObj = NULL;
|
SMnodeObj *pObj = NULL;
|
||||||
pIter = sdbFetch(pSdb, SDB_MNODE, pIter, (void **)&pObj);
|
pIter = sdbFetch(pSdb, SDB_MNODE, pIter, (void **)&pObj);
|
||||||
if (pIter == NULL) break;
|
if (pIter == NULL) break;
|
||||||
if (pObj->pDnode == NULL) {
|
|
||||||
mError("mnode:%d, no corresponding dnode exists", pObj->id);
|
if (pObj->id == pMnode->selfDnodeId) {
|
||||||
} else {
|
if (mndIsMaster(pMnode)) {
|
||||||
if (pObj->id == pMnode->selfDnodeId || pObj->state == TAOS_SYNC_STATE_LEADER) {
|
|
||||||
pEpSet->inUse = pEpSet->numOfEps;
|
pEpSet->inUse = pEpSet->numOfEps;
|
||||||
|
} else {
|
||||||
|
pEpSet->inUse = (pEpSet->numOfEps + 1) % totalMnodes;
|
||||||
}
|
}
|
||||||
addEpIntoEpSet(pEpSet, pObj->pDnode->fqdn, pObj->pDnode->port);
|
|
||||||
sdbRelease(pSdb, pObj);
|
|
||||||
}
|
}
|
||||||
|
addEpIntoEpSet(pEpSet, pObj->pDnode->fqdn, pObj->pDnode->port);
|
||||||
|
sdbRelease(pSdb, pObj);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -312,25 +314,6 @@ static int32_t mndSetCreateMnodeRedoActions(SMnode *pMnode, STrans *pTrans, SDno
|
||||||
createEpset.eps[0].port = pDnode->port;
|
createEpset.eps[0].port = pDnode->port;
|
||||||
memcpy(createEpset.eps[0].fqdn, pDnode->fqdn, TSDB_FQDN_LEN);
|
memcpy(createEpset.eps[0].fqdn, pDnode->fqdn, TSDB_FQDN_LEN);
|
||||||
|
|
||||||
{
|
|
||||||
int32_t contLen = tSerializeSDCreateMnodeReq(NULL, 0, &alterReq);
|
|
||||||
void *pReq = taosMemoryMalloc(contLen);
|
|
||||||
tSerializeSDCreateMnodeReq(pReq, contLen, &alterReq);
|
|
||||||
|
|
||||||
STransAction action = {
|
|
||||||
.epSet = alterEpset,
|
|
||||||
.pCont = pReq,
|
|
||||||
.contLen = contLen,
|
|
||||||
.msgType = TDMT_DND_ALTER_MNODE,
|
|
||||||
.acceptableCode = 0,
|
|
||||||
};
|
|
||||||
|
|
||||||
if (mndTransAppendRedoAction(pTrans, &action) != 0) {
|
|
||||||
taosMemoryFree(pReq);
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
{
|
{
|
||||||
int32_t contLen = tSerializeSDCreateMnodeReq(NULL, 0, &createReq);
|
int32_t contLen = tSerializeSDCreateMnodeReq(NULL, 0, &createReq);
|
||||||
void *pReq = taosMemoryMalloc(contLen);
|
void *pReq = taosMemoryMalloc(contLen);
|
||||||
|
@ -350,6 +333,25 @@ static int32_t mndSetCreateMnodeRedoActions(SMnode *pMnode, STrans *pTrans, SDno
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
{
|
||||||
|
int32_t contLen = tSerializeSDCreateMnodeReq(NULL, 0, &alterReq);
|
||||||
|
void *pReq = taosMemoryMalloc(contLen);
|
||||||
|
tSerializeSDCreateMnodeReq(pReq, contLen, &alterReq);
|
||||||
|
|
||||||
|
STransAction action = {
|
||||||
|
.epSet = alterEpset,
|
||||||
|
.pCont = pReq,
|
||||||
|
.contLen = contLen,
|
||||||
|
.msgType = TDMT_DND_ALTER_MNODE,
|
||||||
|
.acceptableCode = 0,
|
||||||
|
};
|
||||||
|
|
||||||
|
if (mndTransAppendRedoAction(pTrans, &action) != 0) {
|
||||||
|
taosMemoryFree(pReq);
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
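The tSerializeSDCreateMnodeReq(NULL, 0, ...) / malloc / serialize-again sequence in both blocks is the classic two-pass pattern: the first call only measures the encoding, the second fills the buffer. A generic standalone sketch of the idiom (the toy serializer is invented, only the pattern matches the diff):

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* toy serializer: returns the required size when buf is NULL, else writes */
static int serialize(char *buf, int bufLen, const char *payload) {
  int need = (int)strlen(payload) + 1;
  if (buf == NULL) return need;   /* pass 1: size only */
  if (bufLen < need) return -1;
  memcpy(buf, payload, need);     /* pass 2: actual encoding */
  return need;
}

int main(void) {
  int   contLen = serialize(NULL, 0, "create-mnode-req");
  char *pReq = malloc(contLen);
  serialize(pReq, contLen, "create-mnode-req");
  printf("serialized %d bytes: %s\n", contLen, pReq);
  free(pReq);
  return 0;
}
```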
@@ -365,6 +367,7 @@ static int32_t mndCreateMnode(SMnode *pMnode, SRpcMsg *pReq, SDnodeObj *pDnode,
   if (pTrans == NULL) goto _OVER;
 
   mDebug("trans:%d, used to create mnode:%d", pTrans->id, pCreate->dnodeId);
+  mndTransSetExecOneByOne(pTrans);
   if (mndSetCreateMnodeRedoLogs(pMnode, pTrans, &mnodeObj) != 0) goto _OVER;
   if (mndSetCreateMnodeCommitLogs(pMnode, pTrans, &mnodeObj) != 0) goto _OVER;
   if (mndSetCreateMnodeRedoActions(pMnode, pTrans, pDnode, &mnodeObj) != 0) goto _OVER;
@@ -536,7 +539,8 @@ static int32_t mndDropMnode(SMnode *pMnode, SRpcMsg *pReq, SMnodeObj *pObj) {
   if (pTrans == NULL) goto _OVER;
 
   mDebug("trans:%d, used to drop mnode:%d", pTrans->id, pObj->id);
+  mndTransSetExecOneByOne(pTrans);
   if (mndSetDropMnodeRedoLogs(pMnode, pTrans, pObj) != 0) goto _OVER;
   if (mndSetDropMnodeCommitLogs(pMnode, pTrans, pObj) != 0) goto _OVER;
   if (mndSetDropMnodeRedoActions(pMnode, pTrans, pObj->pDnode, pObj) != 0) goto _OVER;
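Both the create-mnode and drop-mnode transactions are now marked exec-one-by-one right after creation. Assuming the name means what it says (the mndTrans.c implementation is outside this excerpt), this makes the sdb redo logs and the dnode RPC actions run strictly in order, so together with the action reordering above the TDMT_DND_ALTER_MNODE request is only sent once the preceding create action has completed.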
@@ -701,14 +704,17 @@ static int32_t mndProcessAlterMnodeReq(SRpcMsg *pReq) {
     }
   }
 
+  mTrace("trans:-1, sync reconfig will be proposed");
+
   SSyncMgmt *pMgmt = &pMnode->syncMgmt;
   pMgmt->standby = 0;
   int32_t code = syncReconfig(pMgmt->sync, &cfg);
   if (code != 0) {
-    mError("failed to alter mnode sync since %s", terrstr());
+    mError("trans:-1, failed to propose sync reconfig since %s", terrstr());
     return code;
   } else {
     pMgmt->errCode = 0;
+    pMgmt->transId = -1;
     tsem_wait(&pMgmt->syncSem);
     mInfo("alter mnode sync result:%s", tstrerror(pMgmt->errCode));
     terrno = pMgmt->errCode;
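A sync reconfig has no backing sdb transaction, so the sentinel id -1 is stored in pMgmt->transId before the thread blocks on the semaphore; mndReConfig in the mndSync.c hunks below posts the semaphore only when it sees that sentinel. A standalone sketch of the whole handshake follows the mndSyncCommitMsg hunk below.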
mndSma.c
@@ -507,6 +507,7 @@ static int32_t mndCreateSma(SMnode *pMnode, SRpcMsg *pReq, SMCreateSmaReq *pCrea
 
   mDebug("trans:%d, used to create sma:%s", pTrans->id, pCreate->name);
   mndTransSetDbInfo(pTrans, pDb);
+  mndTransSetExecOneByOne(pTrans);
 
   if (mndSetCreateSmaRedoLogs(pMnode, pTrans, &smaObj) != 0) goto _OVER;
   if (mndSetCreateSmaVgroupRedoLogs(pMnode, pTrans, &streamObj.fixedSinkVg) != 0) goto _OVER;
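The same serialization switch is applied to SMA creation: its transaction touches both sdb logs and vgroup actions, which now also execute one by one rather than in parallel.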
mndSync.c
@@ -28,16 +28,26 @@ int32_t mndSyncEqMsg(const SMsgCb *msgcb, SRpcMsg *pMsg) {
 int32_t mndSyncSendMsg(const SEpSet *pEpSet, SRpcMsg *pMsg) { return tmsgSendReq(pEpSet, pMsg); }
 
 void mndSyncCommitMsg(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta) {
   SMnode *pMnode = pFsm->data;
-  SSdbRaw *pRaw = pMsg->pCont;
+  SSyncMgmt *pMgmt = &pMnode->syncMgmt;
+  SSdbRaw *pRaw = pMsg->pCont;
 
-  mTrace("raw:%p, apply to sdb, ver:%" PRId64 " term:%" PRId64 " role:%s", pRaw, cbMeta.index, cbMeta.term,
-         syncStr(cbMeta.state));
-  sdbWriteWithoutFree(pMnode->pSdb, pRaw);
-  sdbSetApplyIndex(pMnode->pSdb, cbMeta.index);
-  sdbSetApplyTerm(pMnode->pSdb, cbMeta.term);
-  if (cbMeta.state == TAOS_SYNC_STATE_LEADER) {
-    tsem_post(&pMnode->syncMgmt.syncSem);
+  int32_t transId = sdbGetIdFromRaw(pRaw);
+  pMgmt->errCode = cbMeta.code;
+  mTrace("trans:%d, is proposed, savedTransId:%d code:0x%x, ver:%" PRId64 " term:%" PRId64 " role:%s raw:%p", transId,
+         pMgmt->transId, cbMeta.code, cbMeta.index, cbMeta.term, syncStr(cbMeta.state), pRaw);
+
+  if (pMgmt->errCode == 0) {
+    sdbWriteWithoutFree(pMnode->pSdb, pRaw);
+    sdbSetApplyIndex(pMnode->pSdb, cbMeta.index);
+    sdbSetApplyTerm(pMnode->pSdb, cbMeta.term);
+  }
+
+  if (pMgmt->transId == transId) {
+    if (pMgmt->errCode != 0) {
+      mError("trans:%d, failed to propose since %s", transId, tstrerror(pMgmt->errCode));
+    }
+    tsem_post(&pMgmt->syncSem);
   }
 }
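This is the heart of the change: the commit callback no longer posts the semaphore whenever the node is leader, it parses the transaction id out of the committed raw entry and wakes the proposer only if that id matches the one saved by mndSyncPropose. A standalone sketch of the handshake, where POSIX threads and semaphores stand in for the raft callbacks and none of the names are TDengine API:

```c
#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

/* toy stand-in for SSyncMgmt */
typedef struct {
  sem_t syncSem;
  int   transId; /* id the proposer is waiting for */
  int   errCode; /* set by the "commit callback" */
} ToySyncMgmt;

static ToySyncMgmt mgmt;

/* plays the role of mndSyncCommitMsg */
static void *commitCallback(void *arg) {
  int committedId = *(int *)arg;     /* id parsed from the committed entry */
  mgmt.errCode = 0;                  /* apply succeeded */
  if (mgmt.transId == committedId) { /* wake only the matching proposer */
    sem_post(&mgmt.syncSem);
  }
  return NULL;
}

int main(void) {
  sem_init(&mgmt.syncSem, 0, 0);
  mgmt.transId = 7; /* "mndSyncPropose" records the id ... */

  pthread_t tid;
  int       committedId = 7;
  pthread_create(&tid, NULL, commitCallback, &committedId);

  sem_wait(&mgmt.syncSem); /* ... and blocks until that id commits */
  printf("trans:%d committed, errCode:%d\n", mgmt.transId, mgmt.errCode);

  pthread_join(tid, NULL);
  sem_destroy(&mgmt.syncSem);
  return 0;
}
```

Note also that the sdb apply is now gated on cbMeta.code being zero, so a failed proposal no longer writes to the sdb; the reconfig path below does the same matching against the sentinel -1.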
@@ -78,11 +88,19 @@ int32_t mndSnapshotApply(struct SSyncFSM* pFsm, const SSnapshot* pSnapshot, char
 }
 
 void mndReConfig(struct SSyncFSM *pFsm, SSyncCfg newCfg, SReConfigCbMeta cbMeta) {
-  mInfo("mndReConfig cbMeta.code:%d, cbMeta.currentTerm:%" PRId64 ", cbMeta.term:%" PRId64 ", cbMeta.index:%" PRId64,
-        cbMeta.code, cbMeta.currentTerm, cbMeta.term, cbMeta.index);
-  SMnode *pMnode = pFsm->data;
-  pMnode->syncMgmt.errCode = cbMeta.code;
-  tsem_post(&pMnode->syncMgmt.syncSem);
+  SMnode *pMnode = pFsm->data;
+  SSyncMgmt *pMgmt = &pMnode->syncMgmt;
+
+  pMgmt->errCode = cbMeta.code;
+  mInfo("trans:-1, sync reconfig is proposed, savedTransId:%d code:0x%x, curTerm:%" PRId64 " term:%" PRId64,
+        pMgmt->transId, cbMeta.code, cbMeta.index, cbMeta.term);
+
+  if (pMgmt->transId == -1) {
+    if (pMgmt->errCode != 0) {
+      mError("trans:-1, failed to propose sync reconfig since %s", tstrerror(pMgmt->errCode));
+    }
+    tsem_post(&pMgmt->syncSem);
+  }
 }
 
 SSyncFSM *mndSyncMakeFsm(SMnode *pMnode) {
@@ -165,15 +183,17 @@ void mndCleanupSync(SMnode *pMnode) {
   memset(pMgmt, 0, sizeof(SSyncMgmt));
 }
 
-int32_t mndSyncPropose(SMnode *pMnode, SSdbRaw *pRaw) {
+int32_t mndSyncPropose(SMnode *pMnode, SSdbRaw *pRaw, int32_t transId) {
   SSyncMgmt *pMgmt = &pMnode->syncMgmt;
-  pMgmt->errCode = 0;
-
   SRpcMsg rsp = {.code = TDMT_MND_APPLY_MSG, .contLen = sdbGetRawTotalSize(pRaw)};
   rsp.pCont = rpcMallocCont(rsp.contLen);
   if (rsp.pCont == NULL) return -1;
   memcpy(rsp.pCont, pRaw, rsp.contLen);
 
+  pMgmt->errCode = 0;
+  pMgmt->transId = transId;
+  mTrace("trans:%d, will be proposed", pMgmt->transId);
+
   const bool isWeak = false;
   int32_t code = syncPropose(pMgmt->sync, &rsp, isWeak);
   if (code == 0) {
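The extra transId parameter is what the commit callback above matches against; the call site that supplies the id is not in this excerpt, but it would presumably be the transaction layer passing its own pTrans->id when it syncs an sdb raw entry.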
@@ -187,7 +207,11 @@ int32_t mndSyncPropose(SMnode *pMnode, SSdbRaw *pRaw) {
   }
 
   rpcFreeCont(rsp.pCont);
-  if (code != 0) return code;
+  if (code != 0) {
+    mError("trans:%d, failed to propose, code:0x%x", pMgmt->transId, code);
+    return code;
+  }
+
   return pMgmt->errCode;
 }
@@ -212,6 +236,17 @@ void mndSyncStop(SMnode *pMnode) {}
 
 bool mndIsMaster(SMnode *pMnode) {
   SSyncMgmt *pMgmt = &pMnode->syncMgmt;
 
   ESyncState state = syncGetMyRole(pMgmt->sync);
-  return (state == TAOS_SYNC_STATE_LEADER) && (pMnode->syncMgmt.restored);
+  if (state != TAOS_SYNC_STATE_LEADER) {
+    terrno = TSDB_CODE_SYN_NOT_LEADER;
+    return false;
+  }
+
+  if (!pMgmt->restored) {
+    terrno = TSDB_CODE_APP_NOT_READY;
+    return false;
+  }
+
+  return true;
 }
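mndIsMaster keeps the same boolean contract but now parks the reason for a false answer in terrno, so callers can distinguish "not the leader" from "leader but not yet restored". A standalone toy of this errno-style reporting (the enum values and names are invented):

```c
#include <stdbool.h>
#include <stdio.h>

enum { TOY_OK = 0, TOY_NOT_LEADER = 1, TOY_NOT_READY = 2 };
static int toyErrno = TOY_OK; /* stand-in for TDengine's terrno */

static bool isMaster(bool leader, bool restored) {
  if (!leader)   { toyErrno = TOY_NOT_LEADER; return false; }
  if (!restored) { toyErrno = TOY_NOT_READY;  return false; }
  return true;
}

int main(void) {
  if (!isMaster(true, false)) {
    /* caller can now report a precise reason instead of a bare failure */
    printf("not master, reason code:%d\n", toyErrno); /* prints 2 */
  }
  return 0;
}
```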