Merge branch 'develop' into cxs
This commit is contained in:
commit
fd8362222d
|
@ -133,8 +133,10 @@ IF (TD_LINUX)
|
||||||
|
|
||||||
IF (TD_MEMORY_SANITIZER)
|
IF (TD_MEMORY_SANITIZER)
|
||||||
SET(DEBUG_FLAGS "-fsanitize=address -fsanitize=undefined -fno-sanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=null -fno-sanitize=alignment -static-libasan -O0 -g3 -DDEBUG")
|
SET(DEBUG_FLAGS "-fsanitize=address -fsanitize=undefined -fno-sanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=null -fno-sanitize=alignment -static-libasan -O0 -g3 -DDEBUG")
|
||||||
|
MESSAGE(STATUS "memory sanitizer detected as true")
|
||||||
ELSE ()
|
ELSE ()
|
||||||
SET(DEBUG_FLAGS "-O0 -g3 -DDEBUG")
|
SET(DEBUG_FLAGS "-O0 -g3 -DDEBUG")
|
||||||
|
MESSAGE(STATUS "memory sanitizer detected as false")
|
||||||
ENDIF ()
|
ENDIF ()
|
||||||
SET(RELEASE_FLAGS "-O3 -Wno-error")
|
SET(RELEASE_FLAGS "-O3 -Wno-error")
|
||||||
|
|
||||||
|
|
|
@ -86,7 +86,7 @@ ENDIF ()
|
||||||
MESSAGE(STATUS "============= compile version parameter information start ============= ")
|
MESSAGE(STATUS "============= compile version parameter information start ============= ")
|
||||||
MESSAGE(STATUS "ver number:" ${TD_VER_NUMBER})
|
MESSAGE(STATUS "ver number:" ${TD_VER_NUMBER})
|
||||||
MESSAGE(STATUS "compatible ver number:" ${TD_VER_COMPATIBLE})
|
MESSAGE(STATUS "compatible ver number:" ${TD_VER_COMPATIBLE})
|
||||||
MESSAGE(STATUS "communit commit id:" ${TD_VER_GIT})
|
MESSAGE(STATUS "community commit id:" ${TD_VER_GIT})
|
||||||
MESSAGE(STATUS "internal commit id:" ${TD_VER_GIT_INTERNAL})
|
MESSAGE(STATUS "internal commit id:" ${TD_VER_GIT_INTERNAL})
|
||||||
MESSAGE(STATUS "build date:" ${TD_VER_DATE})
|
MESSAGE(STATUS "build date:" ${TD_VER_DATE})
|
||||||
MESSAGE(STATUS "ver type:" ${TD_VER_VERTYPE})
|
MESSAGE(STATUS "ver type:" ${TD_VER_VERTYPE})
|
||||||
|
|
|
@ -43,7 +43,7 @@ CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAG
|
||||||
|
|
||||||
每一种类型的数据采集点需要建立一个超级表,因此一个物联网系统,往往会有多个超级表。对于电网,我们就需要对智能电表、变压器、母线、开关等都建立一个超级表。在物联网中,一个设备就可能有多个数据采集点(比如一台风力发电的风机,有的采集点采集电流、电压等电参数,有的采集点采集温度、湿度、风向等环境参数),这个时候,对这一类型的设备,需要建立多张超级表。一张超级表里包含的采集物理量必须是同时采集的(时间戳是一致的)。
|
每一种类型的数据采集点需要建立一个超级表,因此一个物联网系统,往往会有多个超级表。对于电网,我们就需要对智能电表、变压器、母线、开关等都建立一个超级表。在物联网中,一个设备就可能有多个数据采集点(比如一台风力发电的风机,有的采集点采集电流、电压等电参数,有的采集点采集温度、湿度、风向等环境参数),这个时候,对这一类型的设备,需要建立多张超级表。一张超级表里包含的采集物理量必须是同时采集的(时间戳是一致的)。
|
||||||
|
|
||||||
一张超级表最多容许1024列,如果一个采集点采集的物理量个数超过1024,需要建多张超级表来处理。一个系统可以有多个DB,一个DB里可以有一到多个超级表。
|
一张超级表最多容许 1024 列,如果一个采集点采集的物理量个数超过 1024,需要建多张超级表来处理。一个系统可以有多个 DB,一个 DB 里可以有一到多个超级表。(从 2.1.7.0 版本开始,列数限制由 1024 列放宽到了 4096 列。)
|
||||||
|
|
||||||
## <a class="anchor" id="create-table"></a>创建表
|
## <a class="anchor" id="create-table"></a>创建表
|
||||||
|
|
||||||
|
|
|
@ -64,7 +64,10 @@ TDengine提供了丰富的应用程序开发接口,其中包括C/C++、Java、
|
||||||
|
|
||||||
编辑taos.cfg文件(默认路径/etc/taos/taos.cfg),将firstEP修改为TDengine服务器的End Point,例如:h1.taos.com:6030
|
编辑taos.cfg文件(默认路径/etc/taos/taos.cfg),将firstEP修改为TDengine服务器的End Point,例如:h1.taos.com:6030
|
||||||
|
|
||||||
**提示: 如本机没有部署TDengine服务,仅安装了应用驱动,则taos.cfg中仅需配置firstEP,无需配置FQDN。**
|
**提示: **
|
||||||
|
|
||||||
|
1. **如本机没有部署TDengine服务,仅安装了应用驱动,则taos.cfg中仅需配置firstEP,无需配置FQDN。**
|
||||||
|
2. **为防止与服务器端连接时出现“unable to resolve FQDN”错误,建议确认客户端的hosts文件已经配置正确的FQDN值。**
|
||||||
|
|
||||||
**Windows x64/x86**
|
**Windows x64/x86**
|
||||||
|
|
||||||
|
@ -96,7 +99,7 @@ TDengine提供了丰富的应用程序开发接口,其中包括C/C++、Java、
|
||||||
**提示:**
|
**提示:**
|
||||||
|
|
||||||
1. **如利用FQDN连接服务器,必须确认本机网络环境DNS已配置好,或在hosts文件中添加FQDN寻址记录,如编辑C:\Windows\system32\drivers\etc\hosts,添加如下的记录:`192.168.1.99 h1.taos.com` **
|
1. **如利用FQDN连接服务器,必须确认本机网络环境DNS已配置好,或在hosts文件中添加FQDN寻址记录,如编辑C:\Windows\system32\drivers\etc\hosts,添加如下的记录:`192.168.1.99 h1.taos.com` **
|
||||||
2.**卸载:运行unins000.exe可卸载TDengine应用驱动。**
|
2. **卸载:运行unins000.exe可卸载TDengine应用驱动。**
|
||||||
|
|
||||||
### 安装验证
|
### 安装验证
|
||||||
|
|
||||||
|
|
|
@ -119,9 +119,14 @@ taos>
|
||||||
|
|
||||||
上面已经介绍如何从零开始搭建集群。集群组建完后,还可以随时添加新的数据节点进行扩容,或删除数据节点,并检查集群当前状态。
|
上面已经介绍如何从零开始搭建集群。集群组建完后,还可以随时添加新的数据节点进行扩容,或删除数据节点,并检查集群当前状态。
|
||||||
|
|
||||||
|
|
||||||
|
**提示:**
|
||||||
|
|
||||||
|
- 以下所有执行命令的操作需要先登陆进TDengine系统,必要时请使用root权限。
|
||||||
|
|
||||||
### 添加数据节点
|
### 添加数据节点
|
||||||
|
|
||||||
执行CLI程序taos,使用root账号登录进系统,执行:
|
执行CLI程序taos,执行:
|
||||||
|
|
||||||
```
|
```
|
||||||
CREATE DNODE "fqdn:port";
|
CREATE DNODE "fqdn:port";
|
||||||
|
@ -131,7 +136,7 @@ CREATE DNODE "fqdn:port";
|
||||||
|
|
||||||
### 删除数据节点
|
### 删除数据节点
|
||||||
|
|
||||||
执行CLI程序taos,使用root账号登录进TDengine系统,执行:
|
执行CLI程序taos,执行:
|
||||||
|
|
||||||
```mysql
|
```mysql
|
||||||
DROP DNODE "fqdn:port | dnodeID";
|
DROP DNODE "fqdn:port | dnodeID";
|
||||||
|
@ -153,7 +158,7 @@ DROP DNODE "fqdn:port | dnodeID";
|
||||||
|
|
||||||
手动将某个vnode迁移到指定的dnode。
|
手动将某个vnode迁移到指定的dnode。
|
||||||
|
|
||||||
执行CLI程序taos,使用root账号登录进TDengine系统,执行:
|
执行CLI程序taos,执行:
|
||||||
|
|
||||||
```mysql
|
```mysql
|
||||||
ALTER DNODE <source-dnodeId> BALANCE "VNODE:<vgId>-DNODE:<dest-dnodeId>";
|
ALTER DNODE <source-dnodeId> BALANCE "VNODE:<vgId>-DNODE:<dest-dnodeId>";
|
||||||
|
@ -169,7 +174,7 @@ ALTER DNODE <source-dnodeId> BALANCE "VNODE:<vgId>-DNODE:<dest-dnodeId>";
|
||||||
|
|
||||||
### 查看数据节点
|
### 查看数据节点
|
||||||
|
|
||||||
执行CLI程序taos,使用root账号登录进TDengine系统,执行:
|
执行CLI程序taos,执行:
|
||||||
```mysql
|
```mysql
|
||||||
SHOW DNODES;
|
SHOW DNODES;
|
||||||
```
|
```
|
||||||
|
@ -180,8 +185,9 @@ SHOW DNODES;
|
||||||
|
|
||||||
为充分利用多核技术,并提供scalability,数据需要分片处理。因此TDengine会将一个DB的数据切分成多份,存放在多个vnode里。这些vnode可能分布在多个数据节点dnode里,这样就实现了水平扩展。一个vnode仅仅属于一个DB,但一个DB可以有多个vnode。vnode的是mnode根据当前系统资源的情况,自动进行分配的,无需任何人工干预。
|
为充分利用多核技术,并提供scalability,数据需要分片处理。因此TDengine会将一个DB的数据切分成多份,存放在多个vnode里。这些vnode可能分布在多个数据节点dnode里,这样就实现了水平扩展。一个vnode仅仅属于一个DB,但一个DB可以有多个vnode。vnode的是mnode根据当前系统资源的情况,自动进行分配的,无需任何人工干预。
|
||||||
|
|
||||||
执行CLI程序taos,使用root账号登录进TDengine系统,执行:
|
执行CLI程序taos,执行:
|
||||||
```mysql
|
```mysql
|
||||||
|
USE SOME_DATABASE;
|
||||||
SHOW VGROUPS;
|
SHOW VGROUPS;
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
|
@ -652,7 +652,7 @@ rmtaos
|
||||||
- 表名:不能包含“.”以及特殊字符,与所属数据库名一起,不能超过 192 个字符,每行数据最大长度 16k 个字符
|
- 表名:不能包含“.”以及特殊字符,与所属数据库名一起,不能超过 192 个字符,每行数据最大长度 16k 个字符
|
||||||
- 表的列名:不能包含特殊字符,不能超过 64 个字符
|
- 表的列名:不能包含特殊字符,不能超过 64 个字符
|
||||||
- 数据库名、表名、列名,都不能以数字开头,合法的可用字符集是“英文字符、数字和下划线”
|
- 数据库名、表名、列名,都不能以数字开头,合法的可用字符集是“英文字符、数字和下划线”
|
||||||
- 表的列数:不能超过 1024 列,最少需要 2 列,第一列必须是时间戳
|
- 表的列数:不能超过 1024 列,最少需要 2 列,第一列必须是时间戳(从 2.1.7.0 版本开始,改为最多支持 4096 列)
|
||||||
- 记录的最大长度:包括时间戳 8 byte,不能超过 16KB(每个 BINARY/NCHAR 类型的列还会额外占用 2 个 byte 的存储位置)
|
- 记录的最大长度:包括时间戳 8 byte,不能超过 16KB(每个 BINARY/NCHAR 类型的列还会额外占用 2 个 byte 的存储位置)
|
||||||
- 单条 SQL 语句默认最大字符串长度:65480 byte,但可通过系统配置参数 maxSQLLength 修改,最长可配置为 1048576 byte
|
- 单条 SQL 语句默认最大字符串长度:65480 byte,但可通过系统配置参数 maxSQLLength 修改,最长可配置为 1048576 byte
|
||||||
- 数据库副本数:不能超过 3
|
- 数据库副本数:不能超过 3
|
||||||
|
@ -665,7 +665,7 @@ rmtaos
|
||||||
- 库的个数:仅受节点个数限制
|
- 库的个数:仅受节点个数限制
|
||||||
- 单个库上虚拟节点个数:不能超过 64 个
|
- 单个库上虚拟节点个数:不能超过 64 个
|
||||||
- 库的数目,超级表的数目、表的数目,系统不做限制,仅受系统资源限制
|
- 库的数目,超级表的数目、表的数目,系统不做限制,仅受系统资源限制
|
||||||
- SELECT 语句的查询结果,最多允许返回 1024 列(语句中的函数调用可能也会占用一些列空间),超限时需要显式指定较少的返回数据列,以避免语句执行报错。
|
- SELECT 语句的查询结果,最多允许返回 1024 列(语句中的函数调用可能也会占用一些列空间),超限时需要显式指定较少的返回数据列,以避免语句执行报错。(从 2.1.7.0 版本开始,改为最多允许 4096 列)
|
||||||
|
|
||||||
目前 TDengine 有将近 200 个内部保留关键字,这些关键字无论大小写均不可以用作库名、表名、STable 名、数据列名及标签列名等。这些关键字列表如下:
|
目前 TDengine 有将近 200 个内部保留关键字,这些关键字无论大小写均不可以用作库名、表名、STable 名、数据列名及标签列名等。这些关键字列表如下:
|
||||||
|
|
||||||
|
|
|
@ -233,7 +233,7 @@ TDengine 缺省的时间戳是毫秒精度,但通过在 CREATE DATABASE 时传
|
||||||
```
|
```
|
||||||
说明:
|
说明:
|
||||||
|
|
||||||
1) 列的最大个数为1024,最小个数为2;
|
1) 列的最大个数为1024,最小个数为2;(从 2.1.7.0 版本开始,改为最多允许 4096 列)
|
||||||
|
|
||||||
2) 列名最大长度为64。
|
2) 列名最大长度为64。
|
||||||
|
|
||||||
|
@ -1064,7 +1064,7 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
||||||
```mysql
|
```mysql
|
||||||
SELECT LAST(field_name) FROM { tb_name | stb_name } [WHERE clause];
|
SELECT LAST(field_name) FROM { tb_name | stb_name } [WHERE clause];
|
||||||
```
|
```
|
||||||
功能说明:统计表/超级表中某列的值最后写入的非NULL值。
|
功能说明:统计表/超级表中某列的值最后写入的非 NULL 值。
|
||||||
|
|
||||||
返回结果数据类型:同应用的字段。
|
返回结果数据类型:同应用的字段。
|
||||||
|
|
||||||
|
@ -1074,9 +1074,11 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
||||||
|
|
||||||
说明:
|
说明:
|
||||||
|
|
||||||
1)如果要返回各个列的最后(时间戳最大)一个非NULL值,可以使用LAST(\*);
|
1)如果要返回各个列的最后(时间戳最大)一个非 NULL 值,可以使用 LAST(\*);
|
||||||
|
|
||||||
2)如果结果集中的某列全部为NULL值,则该列的返回结果也是NULL;如果结果集中所有列全部为NULL值,则不返回结果。
|
2)如果结果集中的某列全部为 NULL 值,则该列的返回结果也是 NULL;如果结果集中所有列全部为 NULL 值,则不返回结果。
|
||||||
|
|
||||||
|
3)在用于超级表时,时间戳完全一样且同为最大的数据行可能有多个,那么会从中随机返回一条,而并不保证多次运行所挑选的数据行必然一致。
|
||||||
|
|
||||||
示例:
|
示例:
|
||||||
```mysql
|
```mysql
|
||||||
|
@ -1225,7 +1227,9 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
||||||
|
|
||||||
适用于:**表、超级表**。
|
适用于:**表、超级表**。
|
||||||
|
|
||||||
限制:LAST_ROW()不能与INTERVAL一起使用。
|
限制:LAST_ROW() 不能与 INTERVAL 一起使用。
|
||||||
|
|
||||||
|
说明:在用于超级表时,时间戳完全一样且同为最大的数据行可能有多个,那么会从中随机返回一条,而并不保证多次运行所挑选的数据行必然一致。
|
||||||
|
|
||||||
示例:
|
示例:
|
||||||
```mysql
|
```mysql
|
||||||
|
@ -1454,10 +1458,10 @@ SELECT AVG(current), MAX(current), LEASTSQUARES(current, start_val, step_val), P
|
||||||
|
|
||||||
- 数据库名最大长度为 32。
|
- 数据库名最大长度为 32。
|
||||||
- 表名最大长度为 192,每行数据最大长度 16k 个字符(注意:数据行内每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)。
|
- 表名最大长度为 192,每行数据最大长度 16k 个字符(注意:数据行内每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)。
|
||||||
- 列名最大长度为 64,最多允许 1024 列,最少需要 2 列,第一列必须是时间戳。
|
- 列名最大长度为 64,最多允许 1024 列,最少需要 2 列,第一列必须是时间戳。(从 2.1.7.0 版本开始,改为最多允许 4096 列)
|
||||||
- 标签名最大长度为 64,最多允许 128 个,可以 1 个,一个表中标签值的总长度不超过 16k 个字符。
|
- 标签名最大长度为 64,最多允许 128 个,可以 1 个,一个表中标签值的总长度不超过 16k 个字符。
|
||||||
- SQL 语句最大长度 65480 个字符,但可通过系统配置参数 maxSQLLength 修改,最长可配置为 1M。
|
- SQL 语句最大长度 65480 个字符,但可通过系统配置参数 maxSQLLength 修改,最长可配置为 1M。
|
||||||
- SELECT 语句的查询结果,最多允许返回 1024 列(语句中的函数调用可能也会占用一些列空间),超限时需要显式指定较少的返回数据列,以避免语句执行报错。
|
- SELECT 语句的查询结果,最多允许返回 1024 列(语句中的函数调用可能也会占用一些列空间),超限时需要显式指定较少的返回数据列,以避免语句执行报错。(从 2.1.7.0 版本开始,改为最多允许 4096 列)
|
||||||
- 库的数目,超级表的数目、表的数目,系统不做限制,仅受系统资源限制。
|
- 库的数目,超级表的数目、表的数目,系统不做限制,仅受系统资源限制。
|
||||||
|
|
||||||
## TAOS SQL 其他约定
|
## TAOS SQL 其他约定
|
||||||
|
|
|
@ -98,7 +98,7 @@ TDengine 目前尚不支持删除功能,未来根据用户需求可能会支
|
||||||
|
|
||||||
## 10. 我怎么创建超过1024列的表?
|
## 10. 我怎么创建超过1024列的表?
|
||||||
|
|
||||||
使用2.0及其以上版本,默认支持1024列;2.0之前的版本,TDengine最大允许创建250列的表。但是如果确实超过限值,建议按照数据特性,逻辑地将这个宽表分解成几个小表。
|
使用 2.0 及其以上版本,默认支持 1024 列;2.0 之前的版本,TDengine 最大允许创建 250 列的表。但是如果确实超过限值,建议按照数据特性,逻辑地将这个宽表分解成几个小表。(从 2.1.7.0 版本开始,表的最大列数增加到了 4096 列。)
|
||||||
|
|
||||||
## 11. 最有效的写入数据的方法是什么?
|
## 11. 最有效的写入数据的方法是什么?
|
||||||
|
|
||||||
|
|
|
@ -4,7 +4,7 @@
|
||||||
|
|
||||||
### A Typical IoT Scenario
|
### A Typical IoT Scenario
|
||||||
|
|
||||||
In typical industry IoT, Internet of Vehicles and Operation Monitoring scenarios, there are often many different types of data collecting devices that collect one or more different physical metrics. However, for the data collection devices of the same type, there are often many specific collection devices distributed in places. Big Data processing system aims to collect all kinds of data, and then store and analyze them. For the same kind of devices, the data collected are very structured. Taking smart meters as an example, assuming that each smart meter collects three metrics of current, voltage and phase, the collected data are similar to the following table:
|
In typical industry IoT, Internet of Vehicles and Operation Monitoring scenarios, there are often many different types of data collecting devices that collect one or more different physical metrics. However, for the data collection devices of the same type, there are often many specific collection devices distributed in places. Big Data processing system aims to collect all kinds of data, then store and analyze them. For the same kind of devices, the data collected are very structured. Taking smart meters as an example, assuming that each smart meter collects three metrics of current, voltage and phase, the collected data are similar to the following table:
|
||||||
|
|
||||||
<figure><table>
|
<figure><table>
|
||||||
<thead><tr>
|
<thead><tr>
|
||||||
|
@ -126,13 +126,13 @@ Since time-series data is most likely to be structured data, TDengine adopts the
|
||||||
|
|
||||||
### One Table for One Data Collection Point
|
### One Table for One Data Collection Point
|
||||||
|
|
||||||
To utilize this time-series and other data features, TDengine requires the user to create a table for each data collection point to store collected time-series data. For example, if there are over 10 millions smart meters, it means 10 millions tables shall be created. For the table above, 4 tables shall be created for devices D1001, D1002, D1003, and D1004 to store the data collected. This design has several advantages:
|
To utilize this time-series and other data features, TDengine requires the user to create a table for each data collection point to store collected time-series data. For example, if there are over 10 million smart meters, it means 10 million tables shall be created. For the table above, 4 tables shall be created for devices D1001, D1002, D1003, and D1004 to store the data collected. This design has several advantages:
|
||||||
|
|
||||||
1. Guarantee that all data from a data collection point can be saved in a continuous memory/hard disk space block by block. If queries are applied only on one data collection point in a time range, this design will reduce the random read latency significantly, thus increase read and query speed by orders of magnitude.
|
1. Guarantee that all data from a data collection point can be saved in a continuous memory/hard disk space block by block. If queries are applied only on one data collection point in a time range, this design will reduce the random read latency significantly, thus increase read and query speed by orders of magnitude.
|
||||||
2. Since the data generation process of each data collection device is completely independent, and each data collection point has its unique data source, thus writes can be carried out in a lock-free manner to greatly improve the performance.
|
2. Since the data generation process of each data collection device is completely independent, and each data collection point has its unique data source, thus writes can be carried out in a lock-free manner to greatly improve the performance.
|
||||||
3. Write latency can be significantly reduced too as the data points generated by the same device will arrive in time order, the new data point will be simply appended to a block.
|
3. Write latency can be significantly reduced too as the data points generated by the same device will arrive in time order, the new data point will be simply appended to a block.
|
||||||
|
|
||||||
If the data of multiple devices are written into a table in the traditional way, due to the uncontrollable network delay, the timing of the data from different devices arriving at the server cannot be guaranteed, the writing operation must be protected by locks, and the data of one device cannot be guaranteed to be continuously stored together. **One table for each data collection point can ensure the optimal performance of insert and query of a single data collection point to the greatest extent.**
|
If the data of multiple devices are traditionally written into a table, due to the uncontrollable network delay, the timing of the data from different devices arriving at the server cannot be guaranteed, the writing operation must be protected by locks, and the data of one device cannot be guaranteed to be continuously stored together. **One table for each data collection point can ensure the optimal performance of insert and query of a single data collection point to the greatest extent.**
|
||||||
|
|
||||||
TDengine suggests using data collection point ID as the table name (like D1001 in the above table). Each point may collect one or more metrics (like the current, voltage, phase as above). Each metric has a column in the table. The data type for a column can be int, float, string and others. In addition, the first column in the table must be a timestamp. TDengine uses the time stamp as the index, and won’t build the index on any metrics stored. All data will be stored in columns.
|
TDengine suggests using data collection point ID as the table name (like D1001 in the above table). Each point may collect one or more metrics (like the current, voltage, phase as above). Each metric has a column in the table. The data type for a column can be int, float, string and others. In addition, the first column in the table must be a timestamp. TDengine uses the time stamp as the index, and won’t build the index on any metrics stored. All data will be stored in columns.
|
||||||
|
|
||||||
|
@ -144,7 +144,7 @@ STable is an abstract set for a type of data collection point. A STable contains
|
||||||
|
|
||||||
In the design of TDengine, **a table is used to represent a specific data collection point, and STable is used to represent a set of data collection points of the same type**. When creating a table for a specific data collection point, the user uses the definition of STable as a template and specifies the tag value of the specific collection point (table). Compared with the traditional relational database, the table (a data collection point) has static tags, and these tags can be added, deleted, and modified afterward. **A STable contains multiple tables with the same time-series data schema but different tag values.**
|
In the design of TDengine, **a table is used to represent a specific data collection point, and STable is used to represent a set of data collection points of the same type**. When creating a table for a specific data collection point, the user uses the definition of STable as a template and specifies the tag value of the specific collection point (table). Compared with the traditional relational database, the table (a data collection point) has static tags, and these tags can be added, deleted, and modified afterward. **A STable contains multiple tables with the same time-series data schema but different tag values.**
|
||||||
|
|
||||||
When aggregating multiple data collection points with the same data type, TDengine will first find out the tables that meet the tag filter conditions from the STables, and then scan the time-series data of these tables to perform aggregation operation, which can greatly reduce the data sets to be scanned, thus greatly improving the performance of data aggregation.
|
When aggregating multiple data collection points with the same data type, TDengine will first find out the tables that meet the tag filter conditions from the STables, then scan the time-series data of these tables to perform aggregation operation, which can greatly reduce the data sets to be scanned, thus greatly improving the performance of data aggregation.
|
||||||
|
|
||||||
## <a class="anchor" id="cluster"></a> Cluster and Primary Logic Unit
|
## <a class="anchor" id="cluster"></a> Cluster and Primary Logic Unit
|
||||||
|
|
||||||
|
@ -161,21 +161,21 @@ Logical structure diagram of TDengine distributed architecture as following:
|
||||||
|
|
||||||
A complete TDengine system runs on one or more physical nodes. Logically, it includes data node (dnode), TDEngine application driver (TAOSC) and application (app). There are one or more data nodes in the system, which form a cluster. The application interacts with the TDengine cluster through TAOSC's API. The following is a brief introduction to each logical unit.
|
A complete TDengine system runs on one or more physical nodes. Logically, it includes data node (dnode), TDEngine application driver (TAOSC) and application (app). There are one or more data nodes in the system, which form a cluster. The application interacts with the TDengine cluster through TAOSC's API. The following is a brief introduction to each logical unit.
|
||||||
|
|
||||||
**Physical node (pnode)**: A pnode is a computer that runs independently and has its own computing, storage and network capabilities. It can be a physical machine, virtual machine or Docker container installed with OS. The physical node is identified by its configured FQDN (Fully Qualified Domain Name). TDengine relies entirely on FQDN for network communication. If you don't know about FQDN, please read the blog post "[All about FQDN of TDengine](https://www.taosdata.com/blog/2020/09/11/1824.html)".
|
**Physical node (pnode)**: A pnode is a computer that runs independently and has its own computing, storage and network capabilities. It can be a physical machine, virtual machine, or Docker container installed with OS. The physical node is identified by its configured FQDN (Fully Qualified Domain Name). TDengine relies entirely on FQDN for network communication. If you don't know about FQDN, please read the blog post "[All about FQDN of TDengine](https://www.taosdata.com/blog/2020/09/11/1824.html)".
|
||||||
|
|
||||||
**Data node (dnode):** A dnode is a running instance of the TDengine server-side execution code taosd on a physical node. A working system must have at least one data node. A dnode contains zero to multiple logical virtual nodes (VNODE), zero or at most one logical management node (mnode). The unique identification of a dnode in the system is determined by the instance's End Point (EP). EP is a combination of FQDN (Fully Qualified Domain Name) of the physical node where the dnode is located and the network port number (Port) configured by the system. By configuring different ports, a physical node (a physical machine, virtual machine or container) can run multiple instances or have multiple data nodes.
|
**Data node (dnode):** A dnode is a running instance of the TDengine server-side execution code taosd on a physical node. A working system must have at least one data node. A dnode contains zero to multiple logical virtual nodes (VNODE), zero or at most one logical management node (mnode). The unique identification of a dnode in the system is determined by the instance's End Point (EP). EP is a combination of FQDN (Fully Qualified Domain Name) of the physical node where the dnode is located and the network port number (Port) configured by the system. By configuring different ports, a physical node (a physical machine, virtual machine or container) can run multiple instances or have multiple data nodes.
|
||||||
|
|
||||||
**Virtual node (vnode)**: In order to better support data sharding, load balancing and prevent data from overheating or skewing, data nodes are virtualized into multiple virtual nodes (vnode, V2, V3, V4, etc. in the figure). Each vnode is a relatively independent work unit, which is the basic unit of time-series data storage, and has independent running threads, memory space and persistent storage path. A vnode contains a certain number of tables (data collection points). When a new table is created, the system checks whether a new vnode needs to be created. The number of vnodes that can be created on a data node depends on the hardware capacities of the physical node where the data node is located. A vnode belongs to only one DB, but a DB can have multiple vnodes. In addition to the stored time-series data, a vnode also stores the schema and tag values of the included tables. A virtual node is uniquely identified in the system by the EP of the data node and the VGroup ID to which it belongs, and is created and managed by the management node.
|
**Virtual node (vnode)**: To better support data sharding, load balancing and prevent data from overheating or skewing, data nodes are virtualized into multiple virtual nodes (vnode, V2, V3, V4, etc. in the figure). Each vnode is a relatively independent work unit, which is the basic unit of time-series data storage and has independent running threads, memory space and persistent storage path. A vnode contains a certain number of tables (data collection points). When a new table is created, the system checks whether a new vnode needs to be created. The number of vnodes that can be created on a data node depends on the hardware capacities of the physical node where the data node is located. A vnode belongs to only one DB, but a DB can have multiple vnodes. In addition to the stored time-series data, a vnode also stores the schema and tag values of the included tables. A virtual node is uniquely identified in the system by the EP of the data node and the VGroup ID to which it belongs and is created and managed by the management node.
|
||||||
|
|
||||||
**Management node (mnode)**: A virtual logical unit responsible for monitoring and maintaining the running status of all data nodes and load balancing among nodes (M in figure). At the same time, the management node is also responsible for the storage and management of metadata (including users, databases, tables, static tags, etc.), so it is also called Meta Node. Multiple (up to 5) mnodes can be configured in a TDengine cluster, and they are automatically constructed into a virtual management node group (M0, M1, M2 in the figure). The master/slave mechanism is adopted for the mnode group and the data synchronization is carried out in a strong consistent way. Any data update operation can only be executed on the master. The creation of mnode cluster is completed automatically by the system without manual intervention. There is at most one mnode on each dnode, which is uniquely identified by the EP of the data node to which it belongs. Each dnode automatically obtains the EP of the dnode where all mnodes in the whole cluster are located through internal messaging interaction.
|
**Management node (mnode)**: A virtual logical unit responsible for monitoring and maintaining the running status of all data nodes and load balancing among nodes (M in the figure). At the same time, the management node is also responsible for the storage and management of metadata (including users, databases, tables, static tags, etc.), so it is also called Meta Node. Multiple (up to 5) mnodes can be configured in a TDengine cluster, and they are automatically constructed into a virtual management node group (M0, M1, M2 in the figure). The master/slave mechanism is adopted for the mnode group and the data synchronization is carried out in a strongly consistent way. Any data update operation can only be executed on the master. The creation of mnode cluster is completed automatically by the system without manual intervention. There is at most one mnode on each dnode, which is uniquely identified by the EP of the data node to which it belongs. Each dnode automatically obtains the EP of the dnode where all mnodes in the whole cluster are located through internal messaging interaction.
|
||||||
|
|
||||||
**Virtual node group (VGroup)**: Vnodes on different data nodes can form a virtual node group to ensure the high availability of the system. The virtual node group is managed in a master/slave mechanism. Write operations can only be performed on the master vnode, and then replicated to slave vnodes, thus ensuring that one single replica of data is copied on multiple physical nodes. The number of virtual nodes in a vgroup equals the number of data replicas. If the number of replicas of a DB is N, the system must have at least N data nodes. The number of replicas can be specified by the parameter `replica` when creating DB, and the default is 1. Using the multi-replication feature of TDengine, the same high data reliability can be achieved without the need for expensive storage devices such as disk arrays. Virtual node group is created and managed by management node, and the management node assigns a system unique ID, aka VGroup ID. If two virtual nodes has the same vnode group ID, means that they belong to the same group and the data is backed up to each other. The number of virtual nodes in a virtual node group can be dynamically changed, allowing only one, that is, no data replication. VGroup ID is never changed. Even if a virtual node group is deleted, its ID will not be reused.
|
**Virtual node group (VGroup)**: Vnodes on different data nodes can form a virtual node group to ensure the high availability of the system. The virtual node group is managed in a master/slave mechanism. Write operations can only be performed on the master vnode, and then replicated to slave vnodes, thus ensuring that one single replica of data is copied on multiple physical nodes. The number of virtual nodes in a vgroup equals the number of data replicas. If the number of replicas of a DB is N, the system must have at least N data nodes. The number of replicas can be specified by the parameter `replica` when creating DB, and the default is 1. Using the multi-replication feature of TDengine, the same high data reliability can be achieved without the need for expensive storage devices such as disk arrays. Virtual node group is created and managed by the management node, and the management node assigns a system unique ID, aka VGroup ID. If two virtual nodes have the same vnode group ID, means that they belong to the same group and the data is backed up to each other. The number of virtual nodes in a virtual node group can be dynamically changed, allowing only one, that is, no data replication. VGroup ID is never changed. Even if a virtual node group is deleted, its ID will not be reused.
|
||||||
|
|
||||||
**TAOSC**: TAOSC is the driver provided by TDengine to applications, which is responsible for dealing with the interaction between application and cluster, and provides the native interface of C/C++ language, which is embedded in JDBC, C #, Python, Go, Node.js language connection libraries. Applications interact with the whole cluster through TAOSC instead of directly connecting to data nodes in the cluster. This module is responsible for obtaining and caching metadata; forwarding requests for insertion, query, etc. to the correct data node; when returning the results to the application, TAOSC also need to be responsible for the final level of aggregation, sorting, filtering and other operations. For JDBC, C/C++/C #/Python/Go/Node.js interfaces, this module runs on the physical node where the application is located. At the same time, in order to support the fully distributed RESTful interface, TAOSC has a running instance on each dnode of TDengine cluster.
|
**TAOSC**: TAOSC is the driver provided by TDengine to applications, which is responsible for dealing with the interaction between application and cluster, and provides the native interface of C/C++ language, which is embedded in JDBC, C #, Python, Go, Node.js language connection libraries. Applications interact with the whole cluster through TAOSC instead of directly connecting to data nodes in the cluster. This module is responsible for obtaining and caching metadata; forwarding requests for insertion, query, etc. to the correct data node; when returning the results to the application, TAOSC also needs to be responsible for the final level of aggregation, sorting, filtering and other operations. For JDBC, C/C++/C #/Python/Go/Node.js interfaces, this module runs on the physical node where the application is located. At the same time, in order to support the fully distributed RESTful interface, TAOSC has a running instance on each dnode of TDengine cluster.
|
||||||
|
|
||||||
### Node Communication
|
### Node Communication
|
||||||
|
|
||||||
**Communication mode**: The communication among each data node of TDengine system, and among application driver and each data node is carried out through TCP/UDP. Considering an IoT scenario, the data writing packets are generally not large, so TDengine uses UDP in addition to TCP for transmission, because UDP is more efficient and is not limited by the number of connections. TDengine implements its own timeout, retransmission, confirmation and other mechanisms to ensure reliable transmission of UDP. For packets with a data volume of less than 15K, UDP is adopted for transmission, and TCP is automatically adopted for transmission of packets with a data volume of more than 15K or query operations. At the same time, TDengine will automatically compress/decompress the data, digital sign/authenticate the data according to the configuration and data packet. For data replication among data nodes, only TCP is used for data transportation.
|
**Communication mode**: The communication among each data node of TDengine system, and among the application driver and each data node is carried out through TCP/UDP. Considering an IoT scenario, the data writing packets are generally not large, so TDengine uses UDP in addition to TCP for transmission, because UDP is more efficient and is not limited by the number of connections. TDengine implements its own timeout, retransmission, confirmation and other mechanisms to ensure reliable transmission of UDP. For packets with a data volume of less than 15K, UDP is adopted for transmission, and TCP is automatically adopted for transmission of packets with a data volume of more than 15K or query operations. At the same time, TDengine will automatically compress/decompress the data, digital sign/authenticate the data according to the configuration and data packet. For data replication among data nodes, only TCP is used for data transportation.
|
||||||
|
|
||||||
**FQDN configuration:** A data node has one or more FQDNs, which can be specified in the system configuration file taos.cfg with the parameter "fqdn". If it is not specified, the system will automatically use the hostname of the computer as its FQDN. If the node is not configured with FQDN, you can directly set the configuration parameter fqdn of the node to its IP address. However, IP is not recommended because IP address may be changed, and once it changes, the cluster will not work properly. The EP (End Point) of a data node consists of FQDN + Port. With FQDN, it is necessary to ensure the DNS service is running, or hosts files on nodes are configured properly.
|
**FQDN configuration:** A data node has one or more FQDNs, which can be specified in the system configuration file taos.cfg with the parameter "fqdn". If it is not specified, the system will automatically use the hostname of the computer as its FQDN. If the node is not configured with FQDN, you can directly set the configuration parameter fqdn of the node to its IP address. However, IP is not recommended because IP address may be changed, and once it changes, the cluster will not work properly. The EP (End Point) of a data node consists of FQDN + Port. With FQDN, it is necessary to ensure the DNS service is running, or hosts files on nodes are configured properly.
|
||||||
|
|
||||||
|
@ -187,13 +187,13 @@ A complete TDengine system runs on one or more physical nodes. Logically, it inc
|
||||||
|
|
||||||
1. Check whether the mnodeEpList file exists, if it does not exist or cannot be opened normally to obtain EP information of the mnode, skip to the second step;
|
1. Check whether the mnodeEpList file exists, if it does not exist or cannot be opened normally to obtain EP information of the mnode, skip to the second step;
|
||||||
2. Check the system configuration file taos.cfg to obtain node configuration parameters firstEp and secondEp (the node specified by these two parameters can be a normal node without mnode, in this case, the node will try to redirect to the mnode node when connected). If these two configuration parameters do not exist in taos.cfg, or are invalid, skip to the third step;
|
2. Check the system configuration file taos.cfg to obtain node configuration parameters firstEp and secondEp (the node specified by these two parameters can be a normal node without mnode, in this case, the node will try to redirect to the mnode node when connected). If these two configuration parameters do not exist in taos.cfg, or are invalid, skip to the third step;
|
||||||
3. Set your own EP as a mnode EP and run it independently. After obtaining the mnode EP list, the data node initiates the connection. It will successfully join the working cluster after connected. If not successful, it will try the next item in the mnode EP list. If all attempts are made, but the connection still fails, sleep for a few seconds before trying again.
|
3. Set your own EP as a mnode EP and run it independently. After obtaining the mnode EP list, the data node initiates the connection. It will successfully join the working cluster after connection. If not successful, it will try the next item in the mnode EP list. If all attempts are made, but the connection still fails, sleep for a few seconds before trying again.
|
||||||
|
|
||||||
**The choice of MNODE**: TDengine logically has a management node, but there is no separated execution code. The server side only has a set of execution code taosd. So which data node will be the management node? This is determined automatically by the system without any manual intervention. The principle is as follows: when a data node starts, it will check its End Point and compare it with the obtained mnode EP List. If its EP exists in it, the data node shall start the mnode module and become a mnode. If your own EP is not in the mnode EP List, the mnode module will not start. During the system operation, due to load balancing, downtime and other reasons, mnode may migrate to the new dnode, while totally transparent without manual intervention. The modification of configuration parameters is the decision made by mnode itself according to resources usage.
|
**The choice of MNODE**: TDengine logically has a management node, but there is no separated execution code. The server-side only has a set of execution code taosd. So which data node will be the management node? This is determined automatically by the system without any manual intervention. The principle is as follows: when a data node starts, it will check its End Point and compare it with the obtained mnode EP List. If its EP exists in it, the data node shall start the mnode module and become a mnode. If your own EP is not in the mnode EP List, the mnode module will not start. During the system operation, due to load balancing, downtime and other reasons, mnode may migrate to the new dnode, while totally transparent without manual intervention. The modification of configuration parameters is the decision made by mnode itself according to resources usage.
|
||||||
|
|
||||||
**Add new data nodes:** After the system has a data node, it has become a working system. There are two steps to add a new node into the cluster. Step1: Connect to the existing working data node using TDengine CLI, and then add the End Point of the new data node with the command "create dnode"; Step 2: In the system configuration parameter file taos.cfg of the new data node, set the firstEp and secondEp parameters to the EP of any two data nodes in the existing cluster. Please refer to the detailed user tutorial for detailed steps. In this way, the cluster will be established step by step.
|
**Add new data nodes:** After the system has a data node, it has become a working system. There are two steps to add a new node into the cluster. Step1: Connect to the existing working data node using TDengine CLI, and then add the End Point of the new data node with the command "create dnode"; Step 2: In the system configuration parameter file taos.cfg of the new data node, set the firstEp and secondEp parameters to the EP of any two data nodes in the existing cluster. Please refer to the detailed user tutorial for detailed steps. In this way, the cluster will be established step by step.
|
||||||
|
|
||||||
**Redirection**: No matter about dnode or TAOSC, the connection to the mnode shall be initiated first, but the mnode is automatically created and maintained by the system, so user does not know which dnode is running the mnode. TDengine only requires a connection to any working dnode in the system. Because any running dnode maintains the currently running mnode EP List, when receiving a connecting request from the newly started dnode or TAOSC, if it’s not an mnode by self, it will reply the mnode EP List back. After receiving this list, TAOSC or the newly started dnode will try to establish the connection again. When the mnode EP List changes, each data node quickly obtains the latest list and notifies TAOSC through messaging interaction among nodes.
|
**Redirection**: No matter about dnode or TAOSC, the connection to the mnode shall be initiated first, but the mnode is automatically created and maintained by the system, so the user does not know which dnode is running the mnode. TDengine only requires a connection to any working dnode in the system. Because any running dnode maintains the currently running mnode EP List, when receiving a connecting request from the newly started dnode or TAOSC, if it’s not a mnode by self, it will reply to the mnode EP List back. After receiving this list, TAOSC or the newly started dnode will try to establish the connection again. When the mnode EP List changes, each data node quickly obtains the latest list and notifies TAOSC through messaging interaction among nodes.
|
||||||
|
|
||||||
### A Typical Data Writing Process
|
### A Typical Data Writing Process
|
||||||
|
|
||||||
|
@ -209,9 +209,9 @@ To explain the relationship between vnode, mnode, TAOSC and application and thei
|
||||||
5. After vnode inserts the data, it gives a reply to TAOSC, indicating that the insertion is successful. If TAOSC doesn't get a response from vnode for a long time, TAOSC will treat this node as offline. In this case, if there are multiple replicas of the inserted database, TAOSC will issue an insert request to the next vnode in vgroup.
|
5. After vnode inserts the data, it gives a reply to TAOSC, indicating that the insertion is successful. If TAOSC doesn't get a response from vnode for a long time, TAOSC will treat this node as offline. In this case, if there are multiple replicas of the inserted database, TAOSC will issue an insert request to the next vnode in vgroup.
|
||||||
6. TAOSC notifies APP that writing is successful.
|
6. TAOSC notifies APP that writing is successful.
|
||||||
|
|
||||||
For Step 2 and 3, when TAOSC starts, it does not know the End Point of mnode, so it will directly initiate a request to the configured serving End Point of the cluster. If the dnode that receives the request does not have an mnode configured, it will inform the mnode EP list in a reply message, so that TAOSC will re-issue a request to obtain meta-data to the EP of another new mnode.
|
For Step 2 and 3, when TAOSC starts, it does not know the End Point of mnode, so it will directly initiate a request to the configured serving End Point of the cluster. If the dnode that receives the request does not have a mnode configured, it will inform the mnode EP list in a reply message, so that TAOSC will re-issue a request to obtain meta-data to the EP of another new mnode.
|
||||||
|
|
||||||
For Step 4 and 5, without caching, TAOSC can't recognize the master in the virtual node group, so assumes that the first vnode is the master and sends a request to it. If this vnode is not the master, it will reply the actual master as a new target where TAOSC shall send a request to. Once the reply of successful insertion is obtained, TAOSC will cache the information of master node.
|
For Step 4 and 5, without caching, TAOSC can't recognize the master in the virtual node group, so assumes that the first vnode is the master and sends a request to it. If this vnode is not the master, it will reply to the actual master as a new target where TAOSC shall send a request to. Once the reply of successful insertion is obtained, TAOSC will cache the information of master node.
|
||||||
|
|
||||||
The above is the process of inserting data, and the processes of querying and computing are the same. TAOSC encapsulates and hides all these complicated processes, and it is transparent to applications.
|
The above is the process of inserting data, and the processes of querying and computing are the same. TAOSC encapsulates and hides all these complicated processes, and it is transparent to applications.
|
||||||
|
|
||||||
|
@ -223,20 +223,20 @@ Through TAOSC caching mechanism, mnode needs to be accessed only when a table is
|
||||||
|
|
||||||
The data stored by TDengine include collected time-series data, metadata related to database and tables, tag data, etc. These data are specifically divided into three parts:
|
The data stored by TDengine include collected time-series data, metadata related to database and tables, tag data, etc. These data are specifically divided into three parts:
|
||||||
|
|
||||||
- Time-series data: stored in vnode and composed of data, head and last files. The amount of data is large and query amount depends on the application scenario. Out-of-order writing is allowed, but delete operation is not supported for the time being, and update operation is only allowed when database update parameter is set to 1. By adopting the model with one table for each data collection point, the data of a given time period is continuously stored, and the writing against one single table is a simple appending operation. Multiple records can be read at one time, thus ensuring the insert and query operation of a single data collection point with best performance.
|
- Time-series data: stored in vnode and composed of data, head and last files. The amount of data is large and query amount depends on the application scenario. Out-of-order writing is allowed, but delete operation is not supported for the time being, and update operation is only allowed when database update parameter is set to 1. By adopting the model with one table for each data collection point, the data of a given time period is continuously stored, and the writing against one single table is a simple appending operation. Multiple records can be read at one time, thus ensuring the insert and query operation of a single data collection point with the best performance.
|
||||||
- Tag data: meta files stored in vnode. Four standard operations of create, read, update and delete are supported. The amount of data is not large. If there are N tables, there are N records, so all can be stored in memory. To make tag filtering efficient, TDengine supports multi-core and multi-threaded concurrent queries. As long as the computing resources are sufficient, even in face of millions of tables, the tag filtering results will return in milliseconds.
|
- Tag data: meta files stored in vnode. Four standard operations of create, read, update and delete are supported. The amount of data is not large. If there are N tables, there are N records, so all can be stored in memory. To make tag filtering efficient, TDengine supports multi-core and multi-threaded concurrent queries. As long as the computing resources are sufficient, even in face of millions of tables, the tag filtering results will return in milliseconds.
|
||||||
- Metadata: stored in mnode, including system node, user, DB, Table Schema and other information. Four standard operations of create, delete, update and read are supported. The amount of these data are not large and can be stored in memory, moreover the query amount is not large because of the client cache. Therefore, TDengine uses centralized storage management, however, there will be no performance bottleneck.
|
- Metadata: stored in mnode, including system node, user, DB, Table Schema and other information. Four standard operations of create, delete, update and read are supported. The amount of these data are not large and can be stored in memory, moreover, the query amount is not large because of the client cache. Therefore, TDengine uses centralized storage management, however, there will be no performance bottleneck.
|
||||||
|
|
||||||
Compared with the typical NoSQL storage model, TDengine stores tag data and time-series data completely separately, which has two major advantages:
|
Compared with the typical NoSQL storage model, TDengine stores tag data and time-series data completely separately, which has two major advantages:
|
||||||
|
|
||||||
- Greatly reduce the redundancy of tag data storage: general NoSQL database or time-series database adopts K-V storage, in which Key includes timestamp, device ID and various tags. Each record carries these duplicated tags, so storage space is wasted. Moreover, if the application needs to add, modify or delete tags on historical data, it has to traverse the data and rewrite them again, which is extremely expensive to operate.
|
- Greatly reduce the redundancy of tag data storage: general NoSQL database or time-series database adopts K-V storage, in which Key includes a timestamp, a device ID and various tags. Each record carries these duplicated tags, so storage space is wasted. Moreover, if the application needs to add, modify or delete tags on historical data, it has to traverse the data and rewrite them again, which is extremely expensive to operate.
|
||||||
- Aggregate data efficiently between multiple tables: when aggregating data between multiple tables, it firstly finds out the tables which satisfy the filtering conditions, and then find out the corresponding data blocks of these tables to greatly reduce the data sets to be scanned, thus greatly improving the aggregation efficiency. Moreover, tag data is managed and maintained in a full-memory structure, and tag data queries in tens of millions can return in milliseconds.
|
- Aggregate data efficiently between multiple tables: when aggregating data between multiple tables, it first finds out the tables which satisfy the filtering conditions, and then find out the corresponding data blocks of these tables to greatly reduce the data sets to be scanned, thus greatly improving the aggregation efficiency. Moreover, tag data is managed and maintained in a full-memory structure, and tag data queries in tens of millions can return in milliseconds.
|
||||||
|
|
||||||
### Data Sharding
|
### Data Sharding
|
||||||
|
|
||||||
For large-scale data management, to achieve scale-out, it is generally necessary to adopt the Partitioning or Sharding strategy. TDengine implements data sharding via vnode, and time-series data partitioning via one data file for each time range.
|
For large-scale data management, to achieve scale-out, it is generally necessary to adopt the Partitioning or Sharding strategy. TDengine implements data sharding via vnode, and time-series data partitioning via one data file for each time range.
|
||||||
|
|
||||||
VNode (Virtual Data Node) is responsible for providing writing, query and computing functions for collected time-series data. To facilitate load balancing, data recovery and support heterogeneous environments, TDengine splits a data node into multiple vnodes according to its computing and storage resources. The management of these vnodes is done automatically by TDengine and completely transparent to the application.
|
VNode (Virtual Data Node) is responsible for providing writing, query and computing functions for collected time-series data. To facilitate load balancing, data recovery and support heterogeneous environments, TDengine splits a data node into multiple vnodes according to its computing and storage resources. The management of these vnodes is done automatically by TDengine and is completely transparent to the application.
|
||||||
|
|
||||||
For a single data collection point, regardless of the amount of data, a vnode (or vnode group, if the number of replicas is greater than 1) has enough computing resource and storage resource to process (if a 16-byte record is generated per second, the original data generated in one year will be less than 0.5 G), so TDengine stores all the data of a table (a data collection point) in one vnode instead of distributing the data to two or more dnodes. Moreover, a vnode can store data from multiple data collection points (tables), and the upper limit of the tables’ quantity for a vnode is one million. By design, all tables in a vnode belong to the same DB. On a data node, unless specially configured, the number of vnodes owned by a DB will not exceed the number of system cores.
|
For a single data collection point, regardless of the amount of data, a vnode (or vnode group, if the number of replicas is greater than 1) has enough computing resource and storage resource to process (if a 16-byte record is generated per second, the original data generated in one year will be less than 0.5 G), so TDengine stores all the data of a table (a data collection point) in one vnode instead of distributing the data to two or more dnodes. Moreover, a vnode can store data from multiple data collection points (tables), and the upper limit of the tables’ quantity for a vnode is one million. By design, all tables in a vnode belong to the same DB. On a data node, unless specially configured, the number of vnodes owned by a DB will not exceed the number of system cores.
|
||||||
|
|
||||||
|
|
|
@ -66,7 +66,11 @@ Run install_client.sh to install.
|
||||||
|
|
||||||
Edit the taos.cfg file (default path/etc/taos/taos.cfg) and change firstEP to End Point of the TDengine server, for example: [h1.taos.com](http://h1.taos.com/):6030.
|
Edit the taos.cfg file (default path/etc/taos/taos.cfg) and change firstEP to End Point of the TDengine server, for example: [h1.taos.com](http://h1.taos.com/):6030.
|
||||||
|
|
||||||
**Tip: If no TDengine service is deployed on this machine, but only the application driver is installed, only firstEP needs to be configured in taos.cfg, and FQDN does not.**
|
**Tip: **
|
||||||
|
|
||||||
|
**1. If no TDengine service is deployed on this machine, but only the application driver is installed, only firstEP needs to be configured in taos.cfg, and FQDN does not.**
|
||||||
|
|
||||||
|
**2. To prevent “unable to resolve FQDN” error when connecting to the server, ensure that the hosts file of the client has the correct FQDN value.**
|
||||||
|
|
||||||
**Windows x64/x86**
|
**Windows x64/x86**
|
||||||
|
|
||||||
|
|
|
@ -0,0 +1,70 @@
|
||||||
|
/*
|
||||||
|
* Copyright (c) 2021 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||||
|
*
|
||||||
|
* This program is free software: you can use, redistribute, and/or modify
|
||||||
|
* it under the terms of the GNU Affero General Public License, version 3
|
||||||
|
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||||
|
*
|
||||||
|
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||||
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||||
|
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU Affero General Public License
|
||||||
|
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef TDENGINE_TSCPARSELINE_H
|
||||||
|
#define TDENGINE_TSCPARSELINE_H
|
||||||
|
|
||||||
|
#ifdef __cplusplus
|
||||||
|
extern "C" {
|
||||||
|
#endif
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
char* key;
|
||||||
|
uint8_t type;
|
||||||
|
int16_t length;
|
||||||
|
char* value;
|
||||||
|
} TAOS_SML_KV;
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
char* stableName;
|
||||||
|
|
||||||
|
char* childTableName;
|
||||||
|
TAOS_SML_KV* tags;
|
||||||
|
int32_t tagNum;
|
||||||
|
|
||||||
|
// first kv must be timestamp
|
||||||
|
TAOS_SML_KV* fields;
|
||||||
|
int32_t fieldNum;
|
||||||
|
} TAOS_SML_DATA_POINT;
|
||||||
|
|
||||||
|
typedef enum {
|
||||||
|
SML_TIME_STAMP_NOW,
|
||||||
|
SML_TIME_STAMP_SECONDS,
|
||||||
|
SML_TIME_STAMP_MILLI_SECONDS,
|
||||||
|
SML_TIME_STAMP_MICRO_SECONDS,
|
||||||
|
SML_TIME_STAMP_NANO_SECONDS
|
||||||
|
} SMLTimeStampType;
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
uint64_t id;
|
||||||
|
SHashObj* smlDataToSchema;
|
||||||
|
} SSmlLinesInfo;
|
||||||
|
|
||||||
|
int tscSmlInsert(TAOS* taos, TAOS_SML_DATA_POINT* points, int numPoint, SSmlLinesInfo* info);
|
||||||
|
bool checkDuplicateKey(char *key, SHashObj *pHash, SSmlLinesInfo* info);
|
||||||
|
int32_t isValidChildTableName(const char *pTbName, int16_t len);
|
||||||
|
|
||||||
|
bool convertSmlValueType(TAOS_SML_KV *pVal, char *value,
|
||||||
|
uint16_t len, SSmlLinesInfo* info);
|
||||||
|
int32_t convertSmlTimeStamp(TAOS_SML_KV *pVal, char *value,
|
||||||
|
uint16_t len, SSmlLinesInfo* info);
|
||||||
|
|
||||||
|
void destroySmlDataPoint(TAOS_SML_DATA_POINT* point);
|
||||||
|
|
||||||
|
#ifdef __cplusplus
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#endif // TDENGINE_TSCPARSELINE_H
|
|
@ -17,6 +17,7 @@
|
||||||
#include "tscLog.h"
|
#include "tscLog.h"
|
||||||
|
|
||||||
#include "taos.h"
|
#include "taos.h"
|
||||||
|
#include "tscParseLine.h"
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
char sTableName[TSDB_TABLE_NAME_LEN];
|
char sTableName[TSDB_TABLE_NAME_LEN];
|
||||||
|
@ -27,38 +28,6 @@ typedef struct {
|
||||||
uint8_t precision;
|
uint8_t precision;
|
||||||
} SSmlSTableSchema;
|
} SSmlSTableSchema;
|
||||||
|
|
||||||
typedef struct {
|
|
||||||
char* key;
|
|
||||||
uint8_t type;
|
|
||||||
int16_t length;
|
|
||||||
char* value;
|
|
||||||
} TAOS_SML_KV;
|
|
||||||
|
|
||||||
typedef struct {
|
|
||||||
char* stableName;
|
|
||||||
|
|
||||||
char* childTableName;
|
|
||||||
TAOS_SML_KV* tags;
|
|
||||||
int32_t tagNum;
|
|
||||||
|
|
||||||
// first kv must be timestamp
|
|
||||||
TAOS_SML_KV* fields;
|
|
||||||
int32_t fieldNum;
|
|
||||||
} TAOS_SML_DATA_POINT;
|
|
||||||
|
|
||||||
typedef enum {
|
|
||||||
SML_TIME_STAMP_NOW,
|
|
||||||
SML_TIME_STAMP_SECONDS,
|
|
||||||
SML_TIME_STAMP_MILLI_SECONDS,
|
|
||||||
SML_TIME_STAMP_MICRO_SECONDS,
|
|
||||||
SML_TIME_STAMP_NANO_SECONDS
|
|
||||||
} SMLTimeStampType;
|
|
||||||
|
|
||||||
typedef struct {
|
|
||||||
uint64_t id;
|
|
||||||
SHashObj* smlDataToSchema;
|
|
||||||
} SSmlLinesInfo;
|
|
||||||
|
|
||||||
//=================================================================================================
|
//=================================================================================================
|
||||||
|
|
||||||
static uint64_t linesSmlHandleId = 0;
|
static uint64_t linesSmlHandleId = 0;
|
||||||
|
@ -1565,8 +1534,8 @@ static bool convertStrToNumber(TAOS_SML_KV *pVal, char*str, SSmlLinesInfo* info)
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
//len does not include '\0' from value.
|
//len does not include '\0' from value.
|
||||||
static bool convertSmlValueType(TAOS_SML_KV *pVal, char *value,
|
bool convertSmlValueType(TAOS_SML_KV *pVal, char *value,
|
||||||
uint16_t len, SSmlLinesInfo* info) {
|
uint16_t len, SSmlLinesInfo* info) {
|
||||||
if (len <= 0) {
|
if (len <= 0) {
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
@ -1708,7 +1677,7 @@ static int32_t getTimeStampValue(char *value, uint16_t len,
|
||||||
if (len >= 2) {
|
if (len >= 2) {
|
||||||
for (int i = 0; i < len - 2; ++i) {
|
for (int i = 0; i < len - 2; ++i) {
|
||||||
if(!isdigit(value[i])) {
|
if(!isdigit(value[i])) {
|
||||||
return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
|
return TSDB_CODE_TSC_INVALID_TIME_STAMP;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1743,20 +1712,20 @@ static int32_t getTimeStampValue(char *value, uint16_t len,
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
default: {
|
default: {
|
||||||
return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
|
return TSDB_CODE_TSC_INVALID_TIME_STAMP;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int32_t convertSmlTimeStamp(TAOS_SML_KV *pVal, char *value,
|
int32_t convertSmlTimeStamp(TAOS_SML_KV *pVal, char *value,
|
||||||
uint16_t len, SSmlLinesInfo* info) {
|
uint16_t len, SSmlLinesInfo* info) {
|
||||||
int32_t ret;
|
int32_t ret;
|
||||||
SMLTimeStampType type;
|
SMLTimeStampType type;
|
||||||
int64_t tsVal;
|
int64_t tsVal;
|
||||||
|
|
||||||
if (!isTimeStamp(value, len, &type)) {
|
if (!isTimeStamp(value, len, &type)) {
|
||||||
return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
|
return TSDB_CODE_TSC_INVALID_TIME_STAMP;
|
||||||
}
|
}
|
||||||
|
|
||||||
ret = getTimeStampValue(value, len, type, &tsVal);
|
ret = getTimeStampValue(value, len, type, &tsVal);
|
||||||
|
@ -1805,7 +1774,7 @@ static int32_t parseSmlTimeStamp(TAOS_SML_KV **pTS, const char **index, SSmlLine
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
static bool checkDuplicateKey(char *key, SHashObj *pHash, SSmlLinesInfo* info) {
|
bool checkDuplicateKey(char *key, SHashObj *pHash, SSmlLinesInfo* info) {
|
||||||
char *val = NULL;
|
char *val = NULL;
|
||||||
char *cur = key;
|
char *cur = key;
|
||||||
char keyLower[TSDB_COL_NAME_LEN];
|
char keyLower[TSDB_COL_NAME_LEN];
|
||||||
|
@ -1842,7 +1811,7 @@ static int32_t parseSmlKey(TAOS_SML_KV *pKV, const char **index, SHashObj *pHash
|
||||||
while (*cur != '\0') {
|
while (*cur != '\0') {
|
||||||
if (len > TSDB_COL_NAME_LEN) {
|
if (len > TSDB_COL_NAME_LEN) {
|
||||||
tscError("SML:0x%"PRIx64" Key field cannot exceeds 65 characters", info->id);
|
tscError("SML:0x%"PRIx64" Key field cannot exceeds 65 characters", info->id);
|
||||||
return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
|
return TSDB_CODE_TSC_INVALID_COLUMN_LENGTH;
|
||||||
}
|
}
|
||||||
//unescaped '=' identifies a tag key
|
//unescaped '=' identifies a tag key
|
||||||
if (*cur == '=' && *(cur - 1) != '\\') {
|
if (*cur == '=' && *(cur - 1) != '\\') {
|
||||||
|
@ -1902,7 +1871,7 @@ static bool parseSmlValue(TAOS_SML_KV *pKV, const char **index,
|
||||||
free(pKV->key);
|
free(pKV->key);
|
||||||
pKV->key = NULL;
|
pKV->key = NULL;
|
||||||
free(value);
|
free(value);
|
||||||
return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
|
return TSDB_CODE_TSC_INVALID_VALUE;
|
||||||
}
|
}
|
||||||
free(value);
|
free(value);
|
||||||
|
|
||||||
|
@ -1931,7 +1900,7 @@ static int32_t parseSmlMeasurement(TAOS_SML_DATA_POINT *pSml, const char **index
|
||||||
tscError("SML:0x%"PRIx64" Measurement field cannot exceeds 193 characters", info->id);
|
tscError("SML:0x%"PRIx64" Measurement field cannot exceeds 193 characters", info->id);
|
||||||
free(pSml->stableName);
|
free(pSml->stableName);
|
||||||
pSml->stableName = NULL;
|
pSml->stableName = NULL;
|
||||||
return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
|
return TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH;
|
||||||
}
|
}
|
||||||
//first unescaped comma or space identifies measurement
|
//first unescaped comma or space identifies measurement
|
||||||
//if space detected first, meaning no tag in the input
|
//if space detected first, meaning no tag in the input
|
||||||
|
@ -1958,7 +1927,7 @@ static int32_t parseSmlMeasurement(TAOS_SML_DATA_POINT *pSml, const char **index
|
||||||
}
|
}
|
||||||
|
|
||||||
//Table name can only contain digits(0-9),alphebet(a-z),underscore(_)
|
//Table name can only contain digits(0-9),alphebet(a-z),underscore(_)
|
||||||
static int32_t isValidChildTableName(const char *pTbName, int16_t len) {
|
int32_t isValidChildTableName(const char *pTbName, int16_t len) {
|
||||||
const char *cur = pTbName;
|
const char *cur = pTbName;
|
||||||
for (int i = 0; i < len; ++i) {
|
for (int i = 0; i < len; ++i) {
|
||||||
if(!isdigit(cur[i]) && !isalpha(cur[i]) && (cur[i] != '_')) {
|
if(!isdigit(cur[i]) && !isalpha(cur[i]) && (cur[i] != '_')) {
|
||||||
|
@ -2146,14 +2115,14 @@ int32_t tscParseLines(char* lines[], int numLines, SArray* points, SArray* faile
|
||||||
if (code != TSDB_CODE_SUCCESS) {
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
tscError("SML:0x%"PRIx64" data point line parse failed. line %d : %s", info->id, i, lines[i]);
|
tscError("SML:0x%"PRIx64" data point line parse failed. line %d : %s", info->id, i, lines[i]);
|
||||||
destroySmlDataPoint(&point);
|
destroySmlDataPoint(&point);
|
||||||
return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
|
return code;
|
||||||
} else {
|
} else {
|
||||||
tscDebug("SML:0x%"PRIx64" data point line parse success. line %d", info->id, i);
|
tscDebug("SML:0x%"PRIx64" data point line parse success. line %d", info->id, i);
|
||||||
}
|
}
|
||||||
|
|
||||||
taosArrayPush(points, &point);
|
taosArrayPush(points, &point);
|
||||||
}
|
}
|
||||||
return 0;
|
return TSDB_CODE_SUCCESS;
|
||||||
}
|
}
|
||||||
|
|
||||||
int taos_insert_lines(TAOS* taos, char* lines[], int numLines) {
|
int taos_insert_lines(TAOS* taos, char* lines[], int numLines) {
|
||||||
|
|
|
@ -0,0 +1,424 @@
|
||||||
|
#include <ctype.h>
|
||||||
|
#include <stdio.h>
|
||||||
|
#include <stdlib.h>
|
||||||
|
#include <string.h>
|
||||||
|
|
||||||
|
#include "hash.h"
|
||||||
|
#include "taos.h"
|
||||||
|
|
||||||
|
#include "tscUtil.h"
|
||||||
|
#include "tsclient.h"
|
||||||
|
#include "tscLog.h"
|
||||||
|
|
||||||
|
#include "tscParseLine.h"
|
||||||
|
|
||||||
|
#define MAX_TELNET_FILEDS_NUM 2
|
||||||
|
#define OTS_TIMESTAMP_COLUMN_NAME "ts"
|
||||||
|
#define OTS_METRIC_VALUE_COLUMN_NAME "value"
|
||||||
|
|
||||||
|
/* telnet style API parser */
|
||||||
|
static uint64_t HandleId = 0;
|
||||||
|
|
||||||
|
static uint64_t genUID() {
|
||||||
|
uint64_t id;
|
||||||
|
|
||||||
|
do {
|
||||||
|
id = atomic_add_fetch_64(&HandleId, 1);
|
||||||
|
} while (id == 0);
|
||||||
|
|
||||||
|
return id;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int32_t parseTelnetMetric(TAOS_SML_DATA_POINT *pSml, const char **index, SSmlLinesInfo* info) {
|
||||||
|
const char *cur = *index;
|
||||||
|
uint16_t len = 0;
|
||||||
|
|
||||||
|
pSml->stableName = tcalloc(TSDB_TABLE_NAME_LEN + 1, 1); // +1 to avoid 1772 line over write
|
||||||
|
if (pSml->stableName == NULL){
|
||||||
|
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||||
|
}
|
||||||
|
if (isdigit(*cur)) {
|
||||||
|
tscError("OTD:0x%"PRIx64" Metric cannnot start with digit", info->id);
|
||||||
|
tfree(pSml->stableName);
|
||||||
|
return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
|
||||||
|
}
|
||||||
|
|
||||||
|
while (*cur != '\0') {
|
||||||
|
if (len > TSDB_TABLE_NAME_LEN) {
|
||||||
|
tscError("OTD:0x%"PRIx64" Metric cannot exceeds 193 characters", info->id);
|
||||||
|
tfree(pSml->stableName);
|
||||||
|
return TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (*cur == ' ') {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
pSml->stableName[len] = *cur;
|
||||||
|
cur++;
|
||||||
|
len++;
|
||||||
|
}
|
||||||
|
if (len == 0 || *cur == '\0') {
|
||||||
|
tfree(pSml->stableName);
|
||||||
|
return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
|
||||||
|
}
|
||||||
|
|
||||||
|
pSml->stableName[len] = '\0';
|
||||||
|
*index = cur + 1;
|
||||||
|
tscDebug("OTD:0x%"PRIx64" Stable name in metric:%s|len:%d", info->id, pSml->stableName, len);
|
||||||
|
|
||||||
|
return TSDB_CODE_SUCCESS;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int32_t parseTelnetTimeStamp(TAOS_SML_KV **pTS, int *num_kvs, const char **index, SSmlLinesInfo* info) {
|
||||||
|
//Timestamp must be the first KV to parse
|
||||||
|
assert(*num_kvs == 0);
|
||||||
|
|
||||||
|
const char *start, *cur;
|
||||||
|
int32_t ret = TSDB_CODE_SUCCESS;
|
||||||
|
int len = 0;
|
||||||
|
char key[] = OTS_TIMESTAMP_COLUMN_NAME;
|
||||||
|
char *value = NULL;
|
||||||
|
|
||||||
|
start = cur = *index;
|
||||||
|
//allocate fields for timestamp and value
|
||||||
|
*pTS = tcalloc(MAX_TELNET_FILEDS_NUM, sizeof(TAOS_SML_KV));
|
||||||
|
|
||||||
|
while(*cur != '\0') {
|
||||||
|
if (*cur == ' ') {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
cur++;
|
||||||
|
len++;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (len > 0 && *cur != '\0') {
|
||||||
|
value = tcalloc(len + 1, 1);
|
||||||
|
memcpy(value, start, len);
|
||||||
|
} else {
|
||||||
|
tfree(*pTS);
|
||||||
|
return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
|
||||||
|
}
|
||||||
|
|
||||||
|
ret = convertSmlTimeStamp(*pTS, value, len, info);
|
||||||
|
if (ret) {
|
||||||
|
tfree(value);
|
||||||
|
tfree(*pTS);
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
tfree(value);
|
||||||
|
|
||||||
|
(*pTS)->key = tcalloc(sizeof(key), 1);
|
||||||
|
memcpy((*pTS)->key, key, sizeof(key));
|
||||||
|
|
||||||
|
*num_kvs += 1;
|
||||||
|
*index = cur + 1;
|
||||||
|
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int32_t parseTelnetMetricValue(TAOS_SML_KV **pKVs, int *num_kvs, const char **index, SSmlLinesInfo* info) {
|
||||||
|
//skip timestamp
|
||||||
|
TAOS_SML_KV *pVal = *pKVs + 1;
|
||||||
|
const char *start, *cur;
|
||||||
|
int32_t ret = TSDB_CODE_SUCCESS;
|
||||||
|
int len = 0;
|
||||||
|
char key[] = OTS_METRIC_VALUE_COLUMN_NAME;
|
||||||
|
char *value = NULL;
|
||||||
|
|
||||||
|
start = cur = *index;
|
||||||
|
|
||||||
|
while(*cur != '\0') {
|
||||||
|
if (*cur == ' ') {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
cur++;
|
||||||
|
len++;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (len > 0 && *cur != '\0') {
|
||||||
|
value = tcalloc(len + 1, 1);
|
||||||
|
memcpy(value, start, len);
|
||||||
|
} else {
|
||||||
|
return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!convertSmlValueType(pVal, value, len, info)) {
|
||||||
|
tscError("OTD:0x%"PRIx64" Failed to convert metric value string(%s) to any type",
|
||||||
|
info->id, value);
|
||||||
|
tfree(value);
|
||||||
|
return TSDB_CODE_TSC_INVALID_VALUE;
|
||||||
|
}
|
||||||
|
tfree(value);
|
||||||
|
|
||||||
|
pVal->key = tcalloc(sizeof(key), 1);
|
||||||
|
memcpy(pVal->key, key, sizeof(key));
|
||||||
|
*num_kvs += 1;
|
||||||
|
|
||||||
|
*index = cur + 1;
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int32_t parseTelnetTagKey(TAOS_SML_KV *pKV, const char **index, SHashObj *pHash, SSmlLinesInfo* info) {
|
||||||
|
const char *cur = *index;
|
||||||
|
char key[TSDB_COL_NAME_LEN + 1]; // +1 to avoid key[len] over write
|
||||||
|
uint16_t len = 0;
|
||||||
|
|
||||||
|
//key field cannot start with digit
|
||||||
|
if (isdigit(*cur)) {
|
||||||
|
tscError("OTD:0x%"PRIx64" Tag key cannnot start with digit", info->id);
|
||||||
|
return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
|
||||||
|
}
|
||||||
|
while (*cur != '\0') {
|
||||||
|
if (len > TSDB_COL_NAME_LEN) {
|
||||||
|
tscError("OTD:0x%"PRIx64" Tag key cannot exceeds 65 characters", info->id);
|
||||||
|
return TSDB_CODE_TSC_INVALID_COLUMN_LENGTH;
|
||||||
|
}
|
||||||
|
if (*cur == '=') {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
key[len] = *cur;
|
||||||
|
cur++;
|
||||||
|
len++;
|
||||||
|
}
|
||||||
|
if (len == 0 || *cur == '\0') {
|
||||||
|
return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
|
||||||
|
}
|
||||||
|
key[len] = '\0';
|
||||||
|
|
||||||
|
if (checkDuplicateKey(key, pHash, info)) {
|
||||||
|
return TSDB_CODE_TSC_DUP_TAG_NAMES;
|
||||||
|
}
|
||||||
|
|
||||||
|
pKV->key = tcalloc(len + 1, 1);
|
||||||
|
memcpy(pKV->key, key, len + 1);
|
||||||
|
//tscDebug("OTD:0x%"PRIx64" Key:%s|len:%d", info->id, pKV->key, len);
|
||||||
|
*index = cur + 1;
|
||||||
|
return TSDB_CODE_SUCCESS;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
static int32_t parseTelnetTagValue(TAOS_SML_KV *pKV, const char **index,
|
||||||
|
bool *is_last_kv, SSmlLinesInfo* info) {
|
||||||
|
const char *start, *cur;
|
||||||
|
char *value = NULL;
|
||||||
|
uint16_t len = 0;
|
||||||
|
start = cur = *index;
|
||||||
|
|
||||||
|
while (1) {
|
||||||
|
// ',' or '\0' identifies a value
|
||||||
|
if (*cur == ',' || *cur == '\0') {
|
||||||
|
// '\0' indicates end of value
|
||||||
|
*is_last_kv = (*cur == '\0') ? true : false;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
cur++;
|
||||||
|
len++;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (len == 0) {
|
||||||
|
tfree(pKV->key);
|
||||||
|
return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
|
||||||
|
}
|
||||||
|
|
||||||
|
value = tcalloc(len + 1, 1);
|
||||||
|
memcpy(value, start, len);
|
||||||
|
value[len] = '\0';
|
||||||
|
if (!convertSmlValueType(pKV, value, len, info)) {
|
||||||
|
tscError("OTD:0x%"PRIx64" Failed to convert sml value string(%s) to any type",
|
||||||
|
info->id, value);
|
||||||
|
//free previous alocated key field
|
||||||
|
tfree(pKV->key);
|
||||||
|
tfree(value);
|
||||||
|
return TSDB_CODE_TSC_INVALID_VALUE;
|
||||||
|
}
|
||||||
|
tfree(value);
|
||||||
|
|
||||||
|
*index = (*cur == '\0') ? cur : cur + 1;
|
||||||
|
return TSDB_CODE_SUCCESS;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int32_t parseTelnetTagKvs(TAOS_SML_KV **pKVs, int *num_kvs,
|
||||||
|
const char **index, char **childTableName,
|
||||||
|
SHashObj *pHash, SSmlLinesInfo* info) {
|
||||||
|
const char *cur = *index;
|
||||||
|
int32_t ret = TSDB_CODE_SUCCESS;
|
||||||
|
TAOS_SML_KV *pkv;
|
||||||
|
bool is_last_kv = false;
|
||||||
|
|
||||||
|
int32_t capacity = 4;
|
||||||
|
*pKVs = tcalloc(capacity, sizeof(TAOS_SML_KV));
|
||||||
|
pkv = *pKVs;
|
||||||
|
|
||||||
|
while (*cur != '\0') {
|
||||||
|
ret = parseTelnetTagKey(pkv, &cur, pHash, info);
|
||||||
|
if (ret) {
|
||||||
|
tscError("OTD:0x%"PRIx64" Unable to parse key", info->id);
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
ret = parseTelnetTagValue(pkv, &cur, &is_last_kv, info);
|
||||||
|
if (ret) {
|
||||||
|
tscError("OTD:0x%"PRIx64" Unable to parse value", info->id);
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
if ((strcasecmp(pkv->key, "ID") == 0) && pkv->type == TSDB_DATA_TYPE_BINARY) {
|
||||||
|
ret = isValidChildTableName(pkv->value, pkv->length);
|
||||||
|
if (ret) {
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
*childTableName = malloc(pkv->length + 1);
|
||||||
|
memcpy(*childTableName, pkv->value, pkv->length);
|
||||||
|
(*childTableName)[pkv->length] = '\0';
|
||||||
|
tfree(pkv->key);
|
||||||
|
tfree(pkv->value);
|
||||||
|
} else {
|
||||||
|
*num_kvs += 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (is_last_kv) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
//reallocate addtional memory for more kvs
|
||||||
|
if ((*num_kvs + 1) > capacity) {
|
||||||
|
TAOS_SML_KV *more_kvs = NULL;
|
||||||
|
capacity *= 3; capacity /= 2;
|
||||||
|
more_kvs = realloc(*pKVs, capacity * sizeof(TAOS_SML_KV));
|
||||||
|
if (!more_kvs) {
|
||||||
|
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||||
|
}
|
||||||
|
*pKVs = more_kvs;
|
||||||
|
}
|
||||||
|
|
||||||
|
//move pKV points to next TAOS_SML_KV block
|
||||||
|
pkv = *pKVs + *num_kvs;
|
||||||
|
}
|
||||||
|
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t tscParseTelnetLine(const char* line, TAOS_SML_DATA_POINT* smlData, SSmlLinesInfo* info) {
|
||||||
|
const char* index = line;
|
||||||
|
int32_t ret = TSDB_CODE_SUCCESS;
|
||||||
|
|
||||||
|
//Parse metric
|
||||||
|
ret = parseTelnetMetric(smlData, &index, info);
|
||||||
|
if (ret) {
|
||||||
|
tscError("OTD:0x%"PRIx64" Unable to parse metric", info->id);
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
tscDebug("OTD:0x%"PRIx64" Parse metric finished", info->id);
|
||||||
|
|
||||||
|
//Parse timestamp
|
||||||
|
ret = parseTelnetTimeStamp(&smlData->fields, &smlData->fieldNum, &index, info);
|
||||||
|
if (ret) {
|
||||||
|
tscError("OTD:0x%"PRIx64" Unable to parse timestamp", info->id);
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
tscDebug("OTD:0x%"PRIx64" Parse timestamp finished", info->id);
|
||||||
|
|
||||||
|
//Parse value
|
||||||
|
ret = parseTelnetMetricValue(&smlData->fields, &smlData->fieldNum, &index, info);
|
||||||
|
if (ret) {
|
||||||
|
tscError("OTD:0x%"PRIx64" Unable to parse metric value", info->id);
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
tscDebug("OTD:0x%"PRIx64" Parse metric value finished", info->id);
|
||||||
|
|
||||||
|
//Parse tagKVs
|
||||||
|
SHashObj *keyHashTable = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, false);
|
||||||
|
ret = parseTelnetTagKvs(&smlData->tags, &smlData->tagNum, &index, &smlData->childTableName, keyHashTable, info);
|
||||||
|
if (ret) {
|
||||||
|
tscError("OTD:0x%"PRIx64" Unable to parse tags", info->id);
|
||||||
|
taosHashCleanup(keyHashTable);
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
tscDebug("OTD:0x%"PRIx64" Parse tags finished", info->id);
|
||||||
|
taosHashCleanup(keyHashTable);
|
||||||
|
|
||||||
|
|
||||||
|
return TSDB_CODE_SUCCESS;
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t tscParseTelnetLines(char* lines[], int numLines, SArray* points, SArray* failedLines, SSmlLinesInfo* info) {
|
||||||
|
for (int32_t i = 0; i < numLines; ++i) {
|
||||||
|
TAOS_SML_DATA_POINT point = {0};
|
||||||
|
int32_t code = tscParseTelnetLine(lines[i], &point, info);
|
||||||
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
|
tscError("OTD:0x%"PRIx64" data point line parse failed. line %d : %s", info->id, i, lines[i]);
|
||||||
|
destroySmlDataPoint(&point);
|
||||||
|
return code;
|
||||||
|
} else {
|
||||||
|
tscDebug("OTD:0x%"PRIx64" data point line parse success. line %d", info->id, i);
|
||||||
|
}
|
||||||
|
|
||||||
|
taosArrayPush(points, &point);
|
||||||
|
}
|
||||||
|
return TSDB_CODE_SUCCESS;
|
||||||
|
}
|
||||||
|
|
||||||
|
int taos_insert_telnet_lines(TAOS* taos, char* lines[], int numLines) {
|
||||||
|
int32_t code = 0;
|
||||||
|
|
||||||
|
SSmlLinesInfo* info = tcalloc(1, sizeof(SSmlLinesInfo));
|
||||||
|
info->id = genUID();
|
||||||
|
|
||||||
|
if (numLines <= 0 || numLines > 65536) {
|
||||||
|
tscError("OTD:0x%"PRIx64" taos_insert_telnet_lines numLines should be between 1 and 65536. numLines: %d", info->id, numLines);
|
||||||
|
tfree(info);
|
||||||
|
code = TSDB_CODE_TSC_APP_ERROR;
|
||||||
|
return code;
|
||||||
|
}
|
||||||
|
|
||||||
|
for (int i = 0; i < numLines; ++i) {
|
||||||
|
if (lines[i] == NULL) {
|
||||||
|
tscError("OTD:0x%"PRIx64" taos_insert_telnet_lines line %d is NULL", info->id, i);
|
||||||
|
tfree(info);
|
||||||
|
code = TSDB_CODE_TSC_APP_ERROR;
|
||||||
|
return code;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
SArray* lpPoints = taosArrayInit(numLines, sizeof(TAOS_SML_DATA_POINT));
|
||||||
|
if (lpPoints == NULL) {
|
||||||
|
tscError("OTD:0x%"PRIx64" taos_insert_telnet_lines failed to allocate memory", info->id);
|
||||||
|
tfree(info);
|
||||||
|
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||||
|
}
|
||||||
|
|
||||||
|
tscDebug("OTD:0x%"PRIx64" taos_insert_telnet_lines begin inserting %d lines, first line: %s", info->id, numLines, lines[0]);
|
||||||
|
code = tscParseTelnetLines(lines, numLines, lpPoints, NULL, info);
|
||||||
|
size_t numPoints = taosArrayGetSize(lpPoints);
|
||||||
|
|
||||||
|
if (code != 0) {
|
||||||
|
goto cleanup;
|
||||||
|
}
|
||||||
|
|
||||||
|
TAOS_SML_DATA_POINT* points = TARRAY_GET_START(lpPoints);
|
||||||
|
code = tscSmlInsert(taos, points, (int)numPoints, info);
|
||||||
|
if (code != 0) {
|
||||||
|
tscError("OTD:0x%"PRIx64" taos_insert_telnet_lines error: %s", info->id, tstrerror((code)));
|
||||||
|
}
|
||||||
|
|
||||||
|
cleanup:
|
||||||
|
tscDebug("OTD:0x%"PRIx64" taos_insert_telnet_lines finish inserting %d lines. code: %d", info->id, numLines, code);
|
||||||
|
points = TARRAY_GET_START(lpPoints);
|
||||||
|
numPoints = taosArrayGetSize(lpPoints);
|
||||||
|
for (int i=0; i<numPoints; ++i) {
|
||||||
|
destroySmlDataPoint(points+i);
|
||||||
|
}
|
||||||
|
|
||||||
|
taosArrayDestroy(lpPoints);
|
||||||
|
|
||||||
|
tfree(info);
|
||||||
|
return code;
|
||||||
|
}
|
||||||
|
|
||||||
|
int taos_telnet_insert(TAOS* taos, TAOS_SML_DATA_POINT* points, int numPoint) {
|
||||||
|
SSmlLinesInfo* info = tcalloc(1, sizeof(SSmlLinesInfo));
|
||||||
|
info->id = genUID();
|
||||||
|
int code = tscSmlInsert(taos, points, numPoint, info);
|
||||||
|
tfree(info);
|
||||||
|
return code;
|
||||||
|
}
|
|
@ -827,6 +827,16 @@ def taos_insert_lines(connection, lines):
|
||||||
if errno != 0:
|
if errno != 0:
|
||||||
raise LinesError("insert lines error", errno)
|
raise LinesError("insert lines error", errno)
|
||||||
|
|
||||||
|
def taos_insert_telnet_lines(connection, lines):
|
||||||
|
# type: (c_void_p, list[str] | tuple(str)) -> None
|
||||||
|
num_of_lines = len(lines)
|
||||||
|
lines = (c_char_p(line.encode("utf-8")) for line in lines)
|
||||||
|
lines_type = ctypes.c_char_p * num_of_lines
|
||||||
|
p_lines = lines_type(*lines)
|
||||||
|
errno = _libtaos.taos_insert_telnet_lines(connection, p_lines, num_of_lines)
|
||||||
|
if errno != 0:
|
||||||
|
raise LinesError("insert telnet lines error", errno)
|
||||||
|
|
||||||
|
|
||||||
class CTaosInterface(object):
|
class CTaosInterface(object):
|
||||||
def __init__(self, config=None):
|
def __init__(self, config=None):
|
||||||
|
|
|
@ -145,6 +145,15 @@ class TaosConnection(object):
|
||||||
"""
|
"""
|
||||||
return taos_insert_lines(self._conn, lines)
|
return taos_insert_lines(self._conn, lines)
|
||||||
|
|
||||||
|
def insert_telnet_lines(self, lines):
|
||||||
|
"""OpenTSDB telnet style API format support
|
||||||
|
|
||||||
|
## Example
|
||||||
|
cpu_load 1626056811855516532ns 2.0f32 id="tb1",host="host0",interface="eth0"
|
||||||
|
|
||||||
|
"""
|
||||||
|
return taos_insert_telnet_lines(self._conn, lines)
|
||||||
|
|
||||||
def cursor(self):
|
def cursor(self):
|
||||||
# type: () -> TaosCursor
|
# type: () -> TaosCursor
|
||||||
"""Return a new Cursor object using the connection."""
|
"""Return a new Cursor object using the connection."""
|
||||||
|
|
|
@ -172,6 +172,8 @@ DLL_EXPORT int taos_load_table_info(TAOS *taos, const char* tableNameList);
|
||||||
|
|
||||||
DLL_EXPORT int taos_insert_lines(TAOS* taos, char* lines[], int numLines);
|
DLL_EXPORT int taos_insert_lines(TAOS* taos, char* lines[], int numLines);
|
||||||
|
|
||||||
|
DLL_EXPORT int taos_insert_telnet_lines(TAOS* taos, char* lines[], int numLines);
|
||||||
|
|
||||||
#ifdef __cplusplus
|
#ifdef __cplusplus
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
|
@ -106,6 +106,7 @@ int32_t* taosGetErrno();
|
||||||
#define TSDB_CODE_TSC_DUP_COL_NAMES TAOS_DEF_ERROR_CODE(0, 0x021D) //"duplicated column names")
|
#define TSDB_CODE_TSC_DUP_COL_NAMES TAOS_DEF_ERROR_CODE(0, 0x021D) //"duplicated column names")
|
||||||
#define TSDB_CODE_TSC_INVALID_TAG_LENGTH TAOS_DEF_ERROR_CODE(0, 0x021E) //"Invalid tag length")
|
#define TSDB_CODE_TSC_INVALID_TAG_LENGTH TAOS_DEF_ERROR_CODE(0, 0x021E) //"Invalid tag length")
|
||||||
#define TSDB_CODE_TSC_INVALID_COLUMN_LENGTH TAOS_DEF_ERROR_CODE(0, 0x021F) //"Invalid column length")
|
#define TSDB_CODE_TSC_INVALID_COLUMN_LENGTH TAOS_DEF_ERROR_CODE(0, 0x021F) //"Invalid column length")
|
||||||
|
#define TSDB_CODE_TSC_DUP_TAG_NAMES TAOS_DEF_ERROR_CODE(0, 0x0220) //"duplicated tag names")
|
||||||
|
|
||||||
// mnode
|
// mnode
|
||||||
#define TSDB_CODE_MND_MSG_NOT_PROCESSED TAOS_DEF_ERROR_CODE(0, 0x0300) //"Message not processed")
|
#define TSDB_CODE_MND_MSG_NOT_PROCESSED TAOS_DEF_ERROR_CODE(0, 0x0300) //"Message not processed")
|
||||||
|
|
|
@ -147,6 +147,7 @@ typedef struct HttpContext {
|
||||||
int32_t state;
|
int32_t state;
|
||||||
uint8_t reqType;
|
uint8_t reqType;
|
||||||
uint8_t parsed;
|
uint8_t parsed;
|
||||||
|
uint8_t error;
|
||||||
char ipstr[22];
|
char ipstr[22];
|
||||||
char user[TSDB_USER_LEN]; // parsed from auth token or login message
|
char user[TSDB_USER_LEN]; // parsed from auth token or login message
|
||||||
char pass[HTTP_PASSWORD_LEN];
|
char pass[HTTP_PASSWORD_LEN];
|
||||||
|
|
|
@ -188,11 +188,12 @@ void httpCloseContextByApp(HttpContext *pContext) {
|
||||||
pContext->parsed = false;
|
pContext->parsed = false;
|
||||||
bool keepAlive = true;
|
bool keepAlive = true;
|
||||||
|
|
||||||
if (parser && parser->httpVersion == HTTP_VERSION_10 && parser->keepAlive != HTTP_KEEPALIVE_ENABLE) {
|
if (pContext->error == true) {
|
||||||
|
keepAlive = false;
|
||||||
|
} else if (parser && parser->httpVersion == HTTP_VERSION_10 && parser->keepAlive != HTTP_KEEPALIVE_ENABLE) {
|
||||||
keepAlive = false;
|
keepAlive = false;
|
||||||
} else if (parser && parser->httpVersion != HTTP_VERSION_10 && parser->keepAlive == HTTP_KEEPALIVE_DISABLE) {
|
} else if (parser && parser->httpVersion != HTTP_VERSION_10 && parser->keepAlive == HTTP_KEEPALIVE_DISABLE) {
|
||||||
keepAlive = false;
|
keepAlive = false;
|
||||||
} else {
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if (keepAlive) {
|
if (keepAlive) {
|
||||||
|
|
|
@ -663,7 +663,7 @@ static int32_t httpParserOnTarget(HttpParser *parser, HTTP_PARSER_STATE state, c
|
||||||
HttpContext *pContext = parser->pContext;
|
HttpContext *pContext = parser->pContext;
|
||||||
int32_t ok = 0;
|
int32_t ok = 0;
|
||||||
do {
|
do {
|
||||||
if (!isspace(c) && c != '\r' && c != '\n') {
|
if (!isspace(c)) {
|
||||||
if (httpAppendString(&parser->str, &c, 1)) {
|
if (httpAppendString(&parser->str, &c, 1)) {
|
||||||
httpError("context:%p, fd:%d, parser state:%d, char:[%c]%02x, oom", pContext, pContext->fd, state, c, c);
|
httpError("context:%p, fd:%d, parser state:%d, char:[%c]%02x, oom", pContext, pContext->fd, state, c, c);
|
||||||
ok = -1;
|
ok = -1;
|
||||||
|
@ -1157,6 +1157,10 @@ static int32_t httpParseChar(HttpParser *parser, const char c, int32_t *again) {
|
||||||
httpOnError(parser, HTTP_CODE_INTERNAL_SERVER_ERROR, TSDB_CODE_HTTP_PARSE_ERROR_STATE);
|
httpOnError(parser, HTTP_CODE_INTERNAL_SERVER_ERROR, TSDB_CODE_HTTP_PARSE_ERROR_STATE);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (ok != 0) {
|
||||||
|
pContext->error = true;
|
||||||
|
}
|
||||||
|
|
||||||
return ok;
|
return ok;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -645,7 +645,7 @@ void doCleanupDataCache(SCacheObj *pCacheObj) {
|
||||||
|
|
||||||
// todo memory leak if there are object with refcount greater than 0 in hash table?
|
// todo memory leak if there are object with refcount greater than 0 in hash table?
|
||||||
taosHashCleanup(pCacheObj->pHashTable);
|
taosHashCleanup(pCacheObj->pHashTable);
|
||||||
taosTrashcanEmpty(pCacheObj, false);
|
taosTrashcanEmpty(pCacheObj, true);
|
||||||
|
|
||||||
__cache_lock_destroy(pCacheObj);
|
__cache_lock_destroy(pCacheObj);
|
||||||
|
|
||||||
|
|
|
@ -112,9 +112,10 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TSC_EXCEED_SQL_LIMIT, "SQL statement too lon
|
||||||
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_FILE_EMPTY, "File is empty")
|
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_FILE_EMPTY, "File is empty")
|
||||||
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_LINE_SYNTAX_ERROR, "Syntax error in Line")
|
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_LINE_SYNTAX_ERROR, "Syntax error in Line")
|
||||||
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_NO_META_CACHED, "No table meta cached")
|
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_NO_META_CACHED, "No table meta cached")
|
||||||
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_DUP_COL_NAMES, "duplicated column names")
|
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_DUP_COL_NAMES, "duplicated column names")
|
||||||
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_TAG_LENGTH, "Invalid tag length")
|
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_TAG_LENGTH, "Invalid tag length")
|
||||||
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_COLUMN_LENGTH, "Invalid column length")
|
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_COLUMN_LENGTH, "Invalid column length")
|
||||||
|
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_DUP_TAG_NAMES, "duplicated tag names")
|
||||||
|
|
||||||
// mnode
|
// mnode
|
||||||
TAOS_DEFINE_ERROR(TSDB_CODE_MND_MSG_NOT_PROCESSED, "Message not processed")
|
TAOS_DEFINE_ERROR(TSDB_CODE_MND_MSG_NOT_PROCESSED, "Message not processed")
|
||||||
|
|
|
@ -0,0 +1,24 @@
|
||||||
|
FROM tdengine/tdengine-beta:latest
|
||||||
|
|
||||||
|
ENV DEBIAN_FRONTEND=noninteractive
|
||||||
|
ARG MIRROR=archive.ubuntu.com
|
||||||
|
RUN sed -Ei 's/\w+.ubuntu.com/'${MIRROR}'/' /etc/apt/sources.list && apt update && apt install mono-devel -y
|
||||||
|
RUN apt-get install wget -y \
|
||||||
|
&& wget https://packages.microsoft.com/config/ubuntu/18.04/packages-microsoft-prod.deb -O packages-microsoft-prod.deb \
|
||||||
|
&& dpkg -i packages-microsoft-prod.deb \
|
||||||
|
&& rm packages-microsoft-prod.deb \
|
||||||
|
&& apt-get update && apt-get install -y dotnet-sdk-5.0
|
||||||
|
COPY ./*.cs *.csproj /tmp/
|
||||||
|
WORKDIR /tmp/
|
||||||
|
RUN dotnet build -c Release && cp bin/Release/net5.0/taosdemo bin/Release/net5.0/taosdemo.* /usr/local/bin/ && rm -rf /tmp/*
|
||||||
|
|
||||||
|
FROM tdengine/tdengine-beta:latest
|
||||||
|
|
||||||
|
ENV DEBIAN_FRONTEND=noninteractive
|
||||||
|
RUN apt-get update && apt-get install wget -y \
|
||||||
|
&& wget https://packages.microsoft.com/config/ubuntu/18.04/packages-microsoft-prod.deb -O packages-microsoft-prod.deb \
|
||||||
|
&& dpkg -i packages-microsoft-prod.deb \
|
||||||
|
&& rm packages-microsoft-prod.deb \
|
||||||
|
&& apt-get update && apt-get install -y dotnet-runtime-5.0
|
||||||
|
COPY --from=0 /usr/local/bin/taosdemo* /usr/local/bin/
|
||||||
|
CMD ["/usr/local/bin/taosdemo"]
|
|
@ -1,13 +1,41 @@
|
||||||
|
# C# Taosdemo
|
||||||
|
|
||||||
|
## For Mono
|
||||||
|
|
||||||
install build environment
|
install build environment
|
||||||
===
|
|
||||||
|
```sh
|
||||||
yum/apt install mono-complete
|
yum/apt install mono-complete
|
||||||
|
```
|
||||||
|
|
||||||
build C# version taosdemo
|
build C# version taosdemo.
|
||||||
===
|
|
||||||
|
```sh
|
||||||
mcs -out:taosdemo *.cs
|
mcs -out:taosdemo *.cs
|
||||||
|
./taosdemo --help
|
||||||
|
```
|
||||||
|
|
||||||
run C# version taosdemo
|
## For DotNet
|
||||||
===
|
|
||||||
|
install dotnet environment.
|
||||||
|
|
||||||
|
```sh
|
||||||
|
wget https://packages.microsoft.com/config/ubuntu/18.04/packages-microsoft-prod.deb -O packages-microsoft-prod.deb \
|
||||||
|
&& dpkg -i packages-microsoft-prod.deb \
|
||||||
|
&& rm packages-microsoft-prod.deb \
|
||||||
|
&& apt-get update && apt-get install -y dotnet-sdk-5.0
|
||||||
|
```
|
||||||
|
|
||||||
|
Build DotNet version taosdemo.
|
||||||
|
|
||||||
|
```sh
|
||||||
|
dotnet build -c Release
|
||||||
|
./bin/Release/net5.0/taosdemo --help
|
||||||
|
```
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
```
|
||||||
Usage: mono taosdemo.exe [OPTION...]
|
Usage: mono taosdemo.exe [OPTION...]
|
||||||
|
|
||||||
--help Show usage.
|
--help Show usage.
|
||||||
|
@ -34,3 +62,4 @@ Usage: mono taosdemo.exe [OPTION...]
|
||||||
-v Print verbose output
|
-v Print verbose output
|
||||||
-g Print debug output
|
-g Print debug output
|
||||||
-y Skip read key for continous test, default is not skip
|
-y Skip read key for continous test, default is not skip
|
||||||
|
```
|
||||||
|
|
|
@ -0,0 +1,9 @@
|
||||||
|
<Project Sdk="Microsoft.NET.Sdk">
|
||||||
|
|
||||||
|
<PropertyGroup>
|
||||||
|
<OutputType>Exe</OutputType>
|
||||||
|
<TargetFramework>net5.0</TargetFramework>
|
||||||
|
<GenerateAssemblyInfo>false</GenerateAssemblyInfo>
|
||||||
|
</PropertyGroup>
|
||||||
|
|
||||||
|
</Project>
|
|
@ -1,12 +1,15 @@
|
||||||
// sample code to verify all TDengine API
|
// sample code to verify all TDengine API
|
||||||
// to compile: gcc -o apitest apitest.c -ltaos
|
// to compile: gcc -o apitest apitest.c -ltaos
|
||||||
|
|
||||||
|
#include "taoserror.h"
|
||||||
|
|
||||||
#include <stdio.h>
|
#include <stdio.h>
|
||||||
#include <stdlib.h>
|
#include <stdlib.h>
|
||||||
#include <string.h>
|
#include <string.h>
|
||||||
#include <taos.h>
|
#include <taos.h>
|
||||||
#include <unistd.h>
|
#include <unistd.h>
|
||||||
|
|
||||||
|
|
||||||
static void prepare_data(TAOS* taos) {
|
static void prepare_data(TAOS* taos) {
|
||||||
TAOS_RES *result;
|
TAOS_RES *result;
|
||||||
result = taos_query(taos, "drop database if exists test;");
|
result = taos_query(taos, "drop database if exists test;");
|
||||||
|
@ -1014,6 +1017,186 @@ int32_t verify_schema_less(TAOS* taos) {
|
||||||
return (code);
|
return (code);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void verify_telnet_insert(TAOS* taos) {
|
||||||
|
TAOS_RES *result;
|
||||||
|
|
||||||
|
result = taos_query(taos, "drop database if exists test;");
|
||||||
|
taos_free_result(result);
|
||||||
|
usleep(100000);
|
||||||
|
result = taos_query(taos, "create database db precision 'ms';");
|
||||||
|
taos_free_result(result);
|
||||||
|
usleep(100000);
|
||||||
|
|
||||||
|
(void)taos_select_db(taos, "db");
|
||||||
|
int32_t code = 0;
|
||||||
|
|
||||||
|
/* metric */
|
||||||
|
char* lines0[] = {
|
||||||
|
"stb0_0 1626006833639000000ns 4i8 host=\"host0\",interface=\"eth0\"",
|
||||||
|
"stb0_1 1626006833639000000ns 4i8 host=\"host0\",interface=\"eth0\"",
|
||||||
|
"stb0_2 1626006833639000000ns 4i8 host=\"host0\",interface=\"eth0\"",
|
||||||
|
};
|
||||||
|
code = taos_insert_telnet_lines(taos, lines0, 3);
|
||||||
|
if (code) {
|
||||||
|
printf("code: %d, %s.\n", code, tstrerror(code));
|
||||||
|
}
|
||||||
|
|
||||||
|
/* timestamp */
|
||||||
|
char* lines1[] = {
|
||||||
|
"stb1 1626006833s 1i8 host=\"host0\"",
|
||||||
|
"stb1 1626006833639000000ns 2i8 host=\"host0\"",
|
||||||
|
"stb1 1626006833640000us 3i8 host=\"host0\"",
|
||||||
|
"stb1 1626006833641123 4i8 host=\"host0\"",
|
||||||
|
"stb1 1626006833651ms 5i8 host=\"host0\"",
|
||||||
|
"stb1 0 6i8 host=\"host0\"",
|
||||||
|
};
|
||||||
|
code = taos_insert_telnet_lines(taos, lines1, 6);
|
||||||
|
if (code) {
|
||||||
|
printf("code: %d, %s.\n", code, tstrerror(code));
|
||||||
|
}
|
||||||
|
|
||||||
|
/* metric value */
|
||||||
|
//tinyin
|
||||||
|
char* lines2_0[] = {
|
||||||
|
"stb2_0 1626006833651ms -127i8 host=\"host0\"",
|
||||||
|
"stb2_0 1626006833652ms 127i8 host=\"host0\""
|
||||||
|
};
|
||||||
|
code = taos_insert_telnet_lines(taos, lines2_0, 2);
|
||||||
|
if (code) {
|
||||||
|
printf("code: %d, %s.\n", code, tstrerror(code));
|
||||||
|
}
|
||||||
|
|
||||||
|
//smallint
|
||||||
|
char* lines2_1[] = {
|
||||||
|
"stb2_1 1626006833651ms -32767i16 host=\"host0\"",
|
||||||
|
"stb2_1 1626006833652ms 32767i16 host=\"host0\""
|
||||||
|
};
|
||||||
|
code = taos_insert_telnet_lines(taos, lines2_1, 2);
|
||||||
|
if (code) {
|
||||||
|
printf("code: %d, %s.\n", code, tstrerror(code));
|
||||||
|
}
|
||||||
|
|
||||||
|
//int
|
||||||
|
char* lines2_2[] = {
|
||||||
|
"stb2_2 1626006833651ms -2147483647i32 host=\"host0\"",
|
||||||
|
"stb2_2 1626006833652ms 2147483647i32 host=\"host0\""
|
||||||
|
};
|
||||||
|
code = taos_insert_telnet_lines(taos, lines2_2, 2);
|
||||||
|
if (code) {
|
||||||
|
printf("code: %d, %s.\n", code, tstrerror(code));
|
||||||
|
}
|
||||||
|
|
||||||
|
//bigint
|
||||||
|
char* lines2_3[] = {
|
||||||
|
"stb2_3 1626006833651ms -9223372036854775807i64 host=\"host0\"",
|
||||||
|
"stb2_3 1626006833652ms 9223372036854775807i64 host=\"host0\""
|
||||||
|
};
|
||||||
|
code = taos_insert_telnet_lines(taos, lines2_3, 2);
|
||||||
|
if (code) {
|
||||||
|
printf("code: %d, %s.\n", code, tstrerror(code));
|
||||||
|
}
|
||||||
|
|
||||||
|
//float
|
||||||
|
char* lines2_4[] = {
|
||||||
|
"stb2_4 1626006833610ms 3f32 host=\"host0\"",
|
||||||
|
"stb2_4 1626006833620ms -3f32 host=\"host0\"",
|
||||||
|
"stb2_4 1626006833630ms 3.4f32 host=\"host0\"",
|
||||||
|
"stb2_4 1626006833640ms -3.4f32 host=\"host0\"",
|
||||||
|
"stb2_4 1626006833650ms 3.4E10f32 host=\"host0\"",
|
||||||
|
"stb2_4 1626006833660ms -3.4e10f32 host=\"host0\"",
|
||||||
|
"stb2_4 1626006833670ms 3.4E+2f32 host=\"host0\"",
|
||||||
|
"stb2_4 1626006833680ms -3.4e-2f32 host=\"host0\"",
|
||||||
|
"stb2_4 1626006833690ms 3.15 host=\"host0\"",
|
||||||
|
"stb2_4 1626006833700ms 3.4E38f32 host=\"host0\"",
|
||||||
|
"stb2_4 1626006833710ms -3.4E38f32 host=\"host0\""
|
||||||
|
};
|
||||||
|
code = taos_insert_telnet_lines(taos, lines2_4, 11);
|
||||||
|
if (code) {
|
||||||
|
printf("code: %d, %s.\n", code, tstrerror(code));
|
||||||
|
}
|
||||||
|
|
||||||
|
//double
|
||||||
|
char* lines2_5[] = {
|
||||||
|
"stb2_5 1626006833610ms 3f64 host=\"host0\"",
|
||||||
|
"stb2_5 1626006833620ms -3f64 host=\"host0\"",
|
||||||
|
"stb2_5 1626006833630ms 3.4f64 host=\"host0\"",
|
||||||
|
"stb2_5 1626006833640ms -3.4f64 host=\"host0\"",
|
||||||
|
"stb2_5 1626006833650ms 3.4E10f64 host=\"host0\"",
|
||||||
|
"stb2_5 1626006833660ms -3.4e10f64 host=\"host0\"",
|
||||||
|
"stb2_5 1626006833670ms 3.4E+2f64 host=\"host0\"",
|
||||||
|
"stb2_5 1626006833680ms -3.4e-2f64 host=\"host0\"",
|
||||||
|
"stb2_5 1626006833690ms 1.7E308f64 host=\"host0\"",
|
||||||
|
"stb2_5 1626006833700ms -1.7E308f64 host=\"host0\""
|
||||||
|
};
|
||||||
|
code = taos_insert_telnet_lines(taos, lines2_5, 10);
|
||||||
|
if (code) {
|
||||||
|
printf("code: %d, %s.\n", code, tstrerror(code));
|
||||||
|
}
|
||||||
|
|
||||||
|
//bool
|
||||||
|
char* lines2_6[] = {
|
||||||
|
"stb2_6 1626006833610ms t host=\"host0\"",
|
||||||
|
"stb2_6 1626006833620ms T host=\"host0\"",
|
||||||
|
"stb2_6 1626006833630ms true host=\"host0\"",
|
||||||
|
"stb2_6 1626006833640ms True host=\"host0\"",
|
||||||
|
"stb2_6 1626006833650ms TRUE host=\"host0\"",
|
||||||
|
"stb2_6 1626006833660ms f host=\"host0\"",
|
||||||
|
"stb2_6 1626006833670ms F host=\"host0\"",
|
||||||
|
"stb2_6 1626006833680ms false host=\"host0\"",
|
||||||
|
"stb2_6 1626006833690ms False host=\"host0\"",
|
||||||
|
"stb2_6 1626006833700ms FALSE host=\"host0\""
|
||||||
|
};
|
||||||
|
code = taos_insert_telnet_lines(taos, lines2_6, 10);
|
||||||
|
if (code) {
|
||||||
|
printf("code: %d, %s.\n", code, tstrerror(code));
|
||||||
|
}
|
||||||
|
|
||||||
|
//binary
|
||||||
|
char* lines2_7[] = {
|
||||||
|
"stb2_7 1626006833610ms \"binary_val.!@#$%^&*\" host=\"host0\"",
|
||||||
|
"stb2_7 1626006833620ms \"binary_val.:;,./?|+-=\" host=\"host0\"",
|
||||||
|
"stb2_7 1626006833630ms \"binary_val.()[]{}<>\" host=\"host0\""
|
||||||
|
};
|
||||||
|
code = taos_insert_telnet_lines(taos, lines2_7, 3);
|
||||||
|
if (code) {
|
||||||
|
printf("code: %d, %s.\n", code, tstrerror(code));
|
||||||
|
}
|
||||||
|
|
||||||
|
//nchar
|
||||||
|
char* lines2_8[] = {
|
||||||
|
"stb2_8 1626006833610ms L\"nchar_val数值一\" host=\"host0\"",
|
||||||
|
"stb2_8 1626006833620ms L\"nchar_val数值二\" host=\"host0\"",
|
||||||
|
};
|
||||||
|
code = taos_insert_telnet_lines(taos, lines2_8, 2);
|
||||||
|
if (code) {
|
||||||
|
printf("code: %d, %s.\n", code, tstrerror(code));
|
||||||
|
}
|
||||||
|
|
||||||
|
/* tags */
|
||||||
|
//tag value types
|
||||||
|
char* lines3_0[] = {
|
||||||
|
"stb3_0 1626006833610ms 1 t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=3.4E38f32,t6=1.7E308f64,t7=true,t8=\"binary_val_1\",t9=L\"标签值1\"",
|
||||||
|
"stb3_0 1626006833610ms 2 t1=-127i8,t2=-32767i16,t3=-2147483647i32,t4=-9223372036854775807i64,t5=-3.4E38f32,t6=-1.7E308f64,t7=false,t8=\"binary_val_2\",t9=L\"标签值2\""
|
||||||
|
};
|
||||||
|
code = taos_insert_telnet_lines(taos, lines3_0, 2);
|
||||||
|
if (code) {
|
||||||
|
printf("code: %d, %s.\n", code, tstrerror(code));
|
||||||
|
}
|
||||||
|
|
||||||
|
//tag ID as child table name
|
||||||
|
char* lines3_1[] = {
|
||||||
|
"stb3_1 1626006833610ms 1 id=\"child_table1\",host=\"host1\"",
|
||||||
|
"stb3_1 1626006833610ms 2 host=\"host2\",iD=\"child_table2\"",
|
||||||
|
"stb3_1 1626006833610ms 3 ID=\"child_table3\",host=\"host3\""
|
||||||
|
};
|
||||||
|
code = taos_insert_telnet_lines(taos, lines3_1, 3);
|
||||||
|
if (code) {
|
||||||
|
printf("code: %d, %s.\n", code, tstrerror(code));
|
||||||
|
}
|
||||||
|
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
int main(int argc, char *argv[]) {
|
int main(int argc, char *argv[]) {
|
||||||
const char* host = "127.0.0.1";
|
const char* host = "127.0.0.1";
|
||||||
const char* user = "root";
|
const char* user = "root";
|
||||||
|
@ -1034,6 +1217,8 @@ int main(int argc, char *argv[]) {
|
||||||
printf("************ verify schema-less *************\n");
|
printf("************ verify schema-less *************\n");
|
||||||
verify_schema_less(taos);
|
verify_schema_less(taos);
|
||||||
|
|
||||||
|
printf("************ verify telnet-insert *************\n");
|
||||||
|
verify_telnet_insert(taos);
|
||||||
|
|
||||||
printf("************ verify query *************\n");
|
printf("************ verify query *************\n");
|
||||||
verify_query(taos);
|
verify_query(taos);
|
||||||
|
@ -1051,7 +1236,7 @@ int main(int argc, char *argv[]) {
|
||||||
verify_prepare2(taos);
|
verify_prepare2(taos);
|
||||||
printf("************ verify prepare3 *************\n");
|
printf("************ verify prepare3 *************\n");
|
||||||
verify_prepare3(taos);
|
verify_prepare3(taos);
|
||||||
|
|
||||||
printf("************ verify stream *************\n");
|
printf("************ verify stream *************\n");
|
||||||
verify_stream(taos);
|
verify_stream(taos);
|
||||||
printf("done\n");
|
printf("done\n");
|
||||||
|
|
|
@ -15,6 +15,7 @@ import sys
|
||||||
from util.log import *
|
from util.log import *
|
||||||
from util.cases import *
|
from util.cases import *
|
||||||
from util.sql import *
|
from util.sql import *
|
||||||
|
from math import floor
|
||||||
|
|
||||||
|
|
||||||
class TDTestCase:
|
class TDTestCase:
|
||||||
|
@ -27,23 +28,22 @@ class TDTestCase:
|
||||||
|
|
||||||
sql = "select server_version()"
|
sql = "select server_version()"
|
||||||
ret = tdSql.query(sql)
|
ret = tdSql.query(sql)
|
||||||
version = tdSql.getData(0, 0)[0:3]
|
version = floor(float(tdSql.getData(0, 0)[0:3]))
|
||||||
expectedVersion_dev = "2.0"
|
expectedVersion = 2
|
||||||
expectedVersion_master = "2.1"
|
|
||||||
if(version == expectedVersion_dev or version == expectedVersion_master):
|
if(version == expectedVersion):
|
||||||
tdLog.info("sql:%s, row:%d col:%d data:%s == expect" % (sql, 0, 0, version))
|
tdLog.info("sql:%s, row:%d col:%d data:%d == expect" % (sql, 0, 0, version))
|
||||||
else:
|
else:
|
||||||
tdLog.exit("sql:%s, row:%d col:%d data:%s != expect:%s or %s " % (sql, 0, 0, version, expectedVersion_dev, expectedVersion_master))
|
tdLog.exit("sql:%s, row:%d col:%d data:%d != expect:%d " % (sql, 0, 0, version, expectedVersion))
|
||||||
|
|
||||||
sql = "select client_version()"
|
sql = "select client_version()"
|
||||||
ret = tdSql.query(sql)
|
ret = tdSql.query(sql)
|
||||||
version = tdSql.getData(0, 0)[0:3]
|
version = floor(float(tdSql.getData(0, 0)[0:3]))
|
||||||
expectedVersion_dev = "2.0"
|
expectedVersion = 2
|
||||||
expectedVersion_master = "2.1"
|
if(version == expectedVersion):
|
||||||
if(version == expectedVersion_dev or version == expectedVersion_master):
|
tdLog.info("sql:%s, row:%d col:%d data:%d == expect" % (sql, 0, 0, version))
|
||||||
tdLog.info("sql:%s, row:%d col:%d data:%s == expect" % (sql, 0, 0, version))
|
|
||||||
else:
|
else:
|
||||||
tdLog.exit("sql:%s, row:%d col:%d data:%s != expect:%s or %s " % (sql, 0, 0, version, expectedVersion_dev, expectedVersion_master))
|
tdLog.exit("sql:%s, row:%d col:%d data:%d != expect:%d " % (sql, 0, 0, version, expectedVersion))
|
||||||
|
|
||||||
|
|
||||||
def stop(self):
|
def stop(self):
|
||||||
|
|
|
@ -0,0 +1,313 @@
|
||||||
|
###################################################################
|
||||||
|
# Copyright (c) 2021 by TAOS Technologies, Inc.
|
||||||
|
# All rights reserved.
|
||||||
|
#
|
||||||
|
# This file is proprietary and confidential to TAOS Technologies.
|
||||||
|
# No part of this file may be reproduced, stored, transmitted,
|
||||||
|
# disclosed or used in any form or by any means other than as
|
||||||
|
# expressly provided by the written permission from Jianhui Tao
|
||||||
|
#
|
||||||
|
###################################################################
|
||||||
|
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
import sys
|
||||||
|
from util.log import *
|
||||||
|
from util.cases import *
|
||||||
|
from util.sql import *
|
||||||
|
|
||||||
|
|
||||||
|
class TDTestCase:
|
||||||
|
def init(self, conn, logSql):
|
||||||
|
tdLog.debug("start to execute %s" % __file__)
|
||||||
|
tdSql.init(conn.cursor(), logSql)
|
||||||
|
self._conn = conn
|
||||||
|
|
||||||
|
def run(self):
|
||||||
|
print("running {}".format(__file__))
|
||||||
|
tdSql.execute("drop database if exists test")
|
||||||
|
tdSql.execute("create database if not exists test precision 'us'")
|
||||||
|
tdSql.execute('use test')
|
||||||
|
|
||||||
|
|
||||||
|
### metric ###
|
||||||
|
print("============= step1 : test metric ================")
|
||||||
|
lines0 = [
|
||||||
|
"stb0_0 1626006833639000000ns 4i8 host=\"host0\",interface=\"eth0\"",
|
||||||
|
"stb0_1 1626006833639000000ns 4i8 host=\"host0\",interface=\"eth0\"",
|
||||||
|
"stb0_2 1626006833639000000ns 4i8 host=\"host0\",interface=\"eth0\"",
|
||||||
|
]
|
||||||
|
|
||||||
|
code = self._conn.insert_telnet_lines(lines0)
|
||||||
|
print("insert_telnet_lines result {}".format(code))
|
||||||
|
|
||||||
|
tdSql.query("show stables")
|
||||||
|
tdSql.checkRows(3)
|
||||||
|
|
||||||
|
tdSql.query("describe stb0_0")
|
||||||
|
tdSql.checkRows(4)
|
||||||
|
|
||||||
|
tdSql.query("describe stb0_1")
|
||||||
|
tdSql.checkRows(4)
|
||||||
|
|
||||||
|
tdSql.query("describe stb0_2")
|
||||||
|
tdSql.checkRows(4)
|
||||||
|
|
||||||
|
### timestamp ###
|
||||||
|
print("============= step2 : test timestamp ================")
|
||||||
|
lines1 = [
|
||||||
|
"stb1 1626006833s 1i8 host=\"host0\"",
|
||||||
|
"stb1 1626006833639000000ns 2i8 host=\"host0\"",
|
||||||
|
"stb1 1626006833640000us 3i8 host=\"host0\"",
|
||||||
|
"stb1 1626006833641123 4i8 host=\"host0\"",
|
||||||
|
"stb1 1626006833651ms 5i8 host=\"host0\"",
|
||||||
|
"stb1 0 6i8 host=\"host0\"",
|
||||||
|
]
|
||||||
|
|
||||||
|
code = self._conn.insert_telnet_lines(lines1)
|
||||||
|
print("insert_telnet_lines result {}".format(code))
|
||||||
|
|
||||||
|
tdSql.query("select * from stb1")
|
||||||
|
tdSql.checkRows(6)
|
||||||
|
|
||||||
|
### metric value ###
|
||||||
|
print("============= step3 : test metric value ================")
|
||||||
|
|
||||||
|
#tinyint
|
||||||
|
lines2_0 = [
|
||||||
|
"stb2_0 1626006833651ms -127i8 host=\"host0\"",
|
||||||
|
"stb2_0 1626006833652ms 127i8 host=\"host0\""
|
||||||
|
]
|
||||||
|
code = self._conn.insert_telnet_lines(lines2_0)
|
||||||
|
print("insert_telnet_lines result {}".format(code))
|
||||||
|
|
||||||
|
tdSql.query("select * from stb2_0")
|
||||||
|
tdSql.checkRows(2)
|
||||||
|
|
||||||
|
tdSql.query("describe stb2_0")
|
||||||
|
tdSql.checkRows(3)
|
||||||
|
tdSql.checkData(1, 1, "TINYINT")
|
||||||
|
|
||||||
|
#smallint
|
||||||
|
lines2_1 = [
|
||||||
|
"stb2_1 1626006833651ms -32767i16 host=\"host0\"",
|
||||||
|
"stb2_1 1626006833652ms 32767i16 host=\"host0\""
|
||||||
|
]
|
||||||
|
code = self._conn.insert_telnet_lines(lines2_1)
|
||||||
|
print("insert_telnet_lines result {}".format(code))
|
||||||
|
|
||||||
|
tdSql.query("select * from stb2_1")
|
||||||
|
tdSql.checkRows(2)
|
||||||
|
|
||||||
|
tdSql.query("describe stb2_1")
|
||||||
|
tdSql.checkRows(3)
|
||||||
|
tdSql.checkData(1, 1, "SMALLINT")
|
||||||
|
|
||||||
|
#int
|
||||||
|
lines2_2 = [
|
||||||
|
"stb2_2 1626006833651ms -2147483647i32 host=\"host0\"",
|
||||||
|
"stb2_2 1626006833652ms 2147483647i32 host=\"host0\""
|
||||||
|
]
|
||||||
|
|
||||||
|
code = self._conn.insert_telnet_lines(lines2_2)
|
||||||
|
print("insert_telnet_lines result {}".format(code))
|
||||||
|
|
||||||
|
tdSql.query("select * from stb2_2")
|
||||||
|
tdSql.checkRows(2)
|
||||||
|
|
||||||
|
tdSql.query("describe stb2_2")
|
||||||
|
tdSql.checkRows(3)
|
||||||
|
tdSql.checkData(1, 1, "INT")
|
||||||
|
|
||||||
|
#bigint
|
||||||
|
lines2_3 = [
|
||||||
|
"stb2_3 1626006833651ms -9223372036854775807i64 host=\"host0\"",
|
||||||
|
"stb2_3 1626006833652ms 9223372036854775807i64 host=\"host0\""
|
||||||
|
]
|
||||||
|
|
||||||
|
code = self._conn.insert_telnet_lines(lines2_3)
|
||||||
|
print("insert_telnet_lines result {}".format(code))
|
||||||
|
|
||||||
|
tdSql.query("select * from stb2_3")
|
||||||
|
tdSql.checkRows(2)
|
||||||
|
|
||||||
|
tdSql.query("describe stb2_3")
|
||||||
|
tdSql.checkRows(3)
|
||||||
|
tdSql.checkData(1, 1, "BIGINT")
|
||||||
|
|
||||||
|
#float
|
||||||
|
lines2_4 = [
|
||||||
|
"stb2_4 1626006833610ms 3f32 host=\"host0\"",
|
||||||
|
"stb2_4 1626006833620ms -3f32 host=\"host0\"",
|
||||||
|
"stb2_4 1626006833630ms 3.4f32 host=\"host0\"",
|
||||||
|
"stb2_4 1626006833640ms -3.4f32 host=\"host0\"",
|
||||||
|
"stb2_4 1626006833650ms 3.4E10f32 host=\"host0\"",
|
||||||
|
"stb2_4 1626006833660ms -3.4e10f32 host=\"host0\"",
|
||||||
|
"stb2_4 1626006833670ms 3.4E+2f32 host=\"host0\"",
|
||||||
|
"stb2_4 1626006833680ms -3.4e-2f32 host=\"host0\"",
|
||||||
|
"stb2_4 1626006833690ms 3.15 host=\"host0\"",
|
||||||
|
"stb2_4 1626006833700ms 3.4E38f32 host=\"host0\"",
|
||||||
|
"stb2_4 1626006833710ms -3.4E38f32 host=\"host0\""
|
||||||
|
]
|
||||||
|
|
||||||
|
code = self._conn.insert_telnet_lines(lines2_4)
|
||||||
|
print("insert_telnet_lines result {}".format(code))
|
||||||
|
|
||||||
|
tdSql.query("select * from stb2_4")
|
||||||
|
tdSql.checkRows(11)
|
||||||
|
|
||||||
|
tdSql.query("describe stb2_4")
|
||||||
|
tdSql.checkRows(3)
|
||||||
|
tdSql.checkData(1, 1, "FLOAT")
|
||||||
|
|
||||||
|
#double
|
||||||
|
lines2_5 = [
|
||||||
|
"stb2_5 1626006833610ms 3f64 host=\"host0\"",
|
||||||
|
"stb2_5 1626006833620ms -3f64 host=\"host0\"",
|
||||||
|
"stb2_5 1626006833630ms 3.4f64 host=\"host0\"",
|
||||||
|
"stb2_5 1626006833640ms -3.4f64 host=\"host0\"",
|
||||||
|
"stb2_5 1626006833650ms 3.4E10f64 host=\"host0\"",
|
||||||
|
"stb2_5 1626006833660ms -3.4e10f64 host=\"host0\"",
|
||||||
|
"stb2_5 1626006833670ms 3.4E+2f64 host=\"host0\"",
|
||||||
|
"stb2_5 1626006833680ms -3.4e-2f64 host=\"host0\"",
|
||||||
|
"stb2_5 1626006833690ms 1.7E308f64 host=\"host0\"",
|
||||||
|
"stb2_5 1626006833700ms -1.7E308f64 host=\"host0\""
|
||||||
|
]
|
||||||
|
|
||||||
|
code = self._conn.insert_telnet_lines(lines2_5)
|
||||||
|
print("insert_telnet_lines result {}".format(code))
|
||||||
|
|
||||||
|
tdSql.query("select * from stb2_5")
|
||||||
|
tdSql.checkRows(10)
|
||||||
|
|
||||||
|
tdSql.query("describe stb2_5")
|
||||||
|
tdSql.checkRows(3)
|
||||||
|
tdSql.checkData(1, 1, "DOUBLE")
|
||||||
|
|
||||||
|
#bool
|
||||||
|
lines2_6 = [
|
||||||
|
"stb2_6 1626006833610ms t host=\"host0\"",
|
||||||
|
"stb2_6 1626006833620ms T host=\"host0\"",
|
||||||
|
"stb2_6 1626006833630ms true host=\"host0\"",
|
||||||
|
"stb2_6 1626006833640ms True host=\"host0\"",
|
||||||
|
"stb2_6 1626006833650ms TRUE host=\"host0\"",
|
||||||
|
"stb2_6 1626006833660ms f host=\"host0\"",
|
||||||
|
"stb2_6 1626006833670ms F host=\"host0\"",
|
||||||
|
"stb2_6 1626006833680ms false host=\"host0\"",
|
||||||
|
"stb2_6 1626006833690ms False host=\"host0\"",
|
||||||
|
"stb2_6 1626006833700ms FALSE host=\"host0\""
|
||||||
|
]
|
||||||
|
|
||||||
|
code = self._conn.insert_telnet_lines(lines2_6)
|
||||||
|
print("insert_telnet_lines result {}".format(code))
|
||||||
|
|
||||||
|
tdSql.query("select * from stb2_6")
|
||||||
|
tdSql.checkRows(10)
|
||||||
|
|
||||||
|
tdSql.query("describe stb2_6")
|
||||||
|
tdSql.checkRows(3)
|
||||||
|
tdSql.checkData(1, 1, "BOOL")
|
||||||
|
|
||||||
|
#binary
|
||||||
|
lines2_7 = [
|
||||||
|
"stb2_7 1626006833610ms \"binary_val.!@#$%^&*\" host=\"host0\"",
|
||||||
|
"stb2_7 1626006833620ms \"binary_val.:;,./?|+-=\" host=\"host0\"",
|
||||||
|
"stb2_7 1626006833630ms \"binary_val.()[]{}<>\" host=\"host0\""
|
||||||
|
]
|
||||||
|
|
||||||
|
code = self._conn.insert_telnet_lines(lines2_7)
|
||||||
|
print("insert_telnet_lines result {}".format(code))
|
||||||
|
|
||||||
|
tdSql.query("select * from stb2_7")
|
||||||
|
tdSql.checkRows(3)
|
||||||
|
|
||||||
|
tdSql.query("describe stb2_7")
|
||||||
|
tdSql.checkRows(3)
|
||||||
|
tdSql.checkData(1, 1, "BINARY")
|
||||||
|
|
||||||
|
#nchar
|
||||||
|
lines2_8 = [
|
||||||
|
"stb2_8 1626006833610ms L\"nchar_val数值一\" host=\"host0\"",
|
||||||
|
"stb2_8 1626006833620ms L\"nchar_val数值二\" host=\"host0\""
|
||||||
|
]
|
||||||
|
|
||||||
|
code = self._conn.insert_telnet_lines(lines2_8)
|
||||||
|
print("insert_telnet_lines result {}".format(code))
|
||||||
|
|
||||||
|
tdSql.query("select * from stb2_8")
|
||||||
|
tdSql.checkRows(2)
|
||||||
|
|
||||||
|
tdSql.query("describe stb2_8")
|
||||||
|
tdSql.checkRows(3)
|
||||||
|
tdSql.checkData(1, 1, "NCHAR")
|
||||||
|
|
||||||
|
### tags ###
|
||||||
|
print("============= step3 : test tags ================")
|
||||||
|
#tag value types
|
||||||
|
lines3_0 = [
|
||||||
|
"stb3_0 1626006833610ms 1 t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=3.4E38f32,t6=1.7E308f64,t7=true,t8=\"binary_val_1\",t9=L\"标签值1\"",
|
||||||
|
"stb3_0 1626006833610ms 2 t1=-127i8,t2=-32767i16,t3=-2147483647i32,t4=-9223372036854775807i64,t5=-3.4E38f32,t6=-1.7E308f64,t7=false,t8=\"binary_val_2\",t9=L\"标签值2\""
|
||||||
|
]
|
||||||
|
|
||||||
|
code = self._conn.insert_telnet_lines(lines3_0)
|
||||||
|
print("insert_telnet_lines result {}".format(code))
|
||||||
|
|
||||||
|
tdSql.query("select * from stb3_0")
|
||||||
|
tdSql.checkRows(2)
|
||||||
|
|
||||||
|
tdSql.query("describe stb3_0")
|
||||||
|
tdSql.checkRows(11)
|
||||||
|
|
||||||
|
tdSql.checkData(2, 1, "TINYINT")
|
||||||
|
tdSql.checkData(2, 3, "TAG")
|
||||||
|
|
||||||
|
tdSql.checkData(3, 1, "SMALLINT")
|
||||||
|
tdSql.checkData(3, 3, "TAG")
|
||||||
|
|
||||||
|
tdSql.checkData(4, 1, "INT")
|
||||||
|
tdSql.checkData(4, 3, "TAG")
|
||||||
|
|
||||||
|
tdSql.checkData(5, 1, "BIGINT")
|
||||||
|
tdSql.checkData(5, 3, "TAG")
|
||||||
|
|
||||||
|
tdSql.checkData(6, 1, "FLOAT")
|
||||||
|
tdSql.checkData(6, 3, "TAG")
|
||||||
|
|
||||||
|
tdSql.checkData(7, 1, "DOUBLE")
|
||||||
|
tdSql.checkData(7, 3, "TAG")
|
||||||
|
|
||||||
|
tdSql.checkData(8, 1, "BOOL")
|
||||||
|
tdSql.checkData(8, 3, "TAG")
|
||||||
|
|
||||||
|
tdSql.checkData(9, 1, "BINARY")
|
||||||
|
tdSql.checkData(9, 3, "TAG")
|
||||||
|
|
||||||
|
tdSql.checkData(10, 1, "NCHAR")
|
||||||
|
tdSql.checkData(10, 3, "TAG")
|
||||||
|
|
||||||
|
|
||||||
|
#tag ID as child table name
|
||||||
|
lines3_1 = [
|
||||||
|
"stb3_1 1626006833610ms 1 id=\"child_table1\",host=\"host1\"",
|
||||||
|
"stb3_1 1626006833610ms 2 host=\"host2\",iD=\"child_table2\"",
|
||||||
|
"stb3_1 1626006833610ms 3 ID=\"child_table3\",host=\"host3\""
|
||||||
|
]
|
||||||
|
|
||||||
|
code = self._conn.insert_telnet_lines(lines3_1)
|
||||||
|
print("insert_telnet_lines result {}".format(code))
|
||||||
|
|
||||||
|
tdSql.query("select * from stb3_1")
|
||||||
|
tdSql.checkRows(3)
|
||||||
|
|
||||||
|
tdSql.query("show tables like \"child%\"")
|
||||||
|
tdSql.checkRows(3)
|
||||||
|
|
||||||
|
tdSql.checkData(0, 0, "child_table1")
|
||||||
|
|
||||||
|
def stop(self):
|
||||||
|
tdSql.close()
|
||||||
|
tdLog.success("%s successfully executed" % __file__)
|
||||||
|
|
||||||
|
|
||||||
|
tdCases.addWindows(__file__, TDTestCase())
|
||||||
|
tdCases.addLinux(__file__, TDTestCase())
|
File diff suppressed because it is too large
Load Diff
|
@ -13,6 +13,8 @@
|
||||||
|
|
||||||
import sys
|
import sys
|
||||||
import taos
|
import taos
|
||||||
|
import string
|
||||||
|
import random
|
||||||
from util.log import *
|
from util.log import *
|
||||||
from util.cases import *
|
from util.cases import *
|
||||||
from util.sql import *
|
from util.sql import *
|
||||||
|
@ -23,6 +25,11 @@ class TDTestCase:
|
||||||
tdLog.debug("start to execute %s" % __file__)
|
tdLog.debug("start to execute %s" % __file__)
|
||||||
tdSql.init(conn.cursor())
|
tdSql.init(conn.cursor())
|
||||||
|
|
||||||
|
def get_random_string(self, length):
|
||||||
|
letters = string.ascii_lowercase
|
||||||
|
result_str = ''.join(random.choice(letters) for i in range(length))
|
||||||
|
return result_str
|
||||||
|
|
||||||
def run(self):
|
def run(self):
|
||||||
tdSql.prepare()
|
tdSql.prepare()
|
||||||
|
|
||||||
|
@ -186,6 +193,20 @@ class TDTestCase:
|
||||||
tdSql.query("select t1.ts from t0,t1 where t0.ts = t1.ts")
|
tdSql.query("select t1.ts from t0,t1 where t0.ts = t1.ts")
|
||||||
tdSql.checkData(0,0,'2018-10-03 14:38:05.000000')
|
tdSql.checkData(0,0,'2018-10-03 14:38:05.000000')
|
||||||
|
|
||||||
|
#TD-6425 join result more than 1MB
|
||||||
|
tdSql.execute("create database test_join")
|
||||||
|
tdSql.execute("use test_join")
|
||||||
|
|
||||||
|
ts = 1538548685000
|
||||||
|
tdSql.execute("create table stb(ts timestamp, c1 nchar(200)) tags(id int, loc binary(20))")
|
||||||
|
for i in range(2):
|
||||||
|
tdSql.execute("create table tb%d using stb tags(1, 'city%d')" % (i, i))
|
||||||
|
for j in range(1000):
|
||||||
|
tdSql.execute("insert into tb%d values(%d, '%s')" % (i, ts + j, self.get_random_string(200)))
|
||||||
|
|
||||||
|
tdSql.query("select tb0.c1, tb1.c1 from tb0, tb1 where tb0.ts = tb1.ts")
|
||||||
|
tdSql.checkRows(1000)
|
||||||
|
|
||||||
def stop(self):
|
def stop(self):
|
||||||
tdSql.close()
|
tdSql.close()
|
||||||
tdLog.success("%s successfully executed" % __file__)
|
tdLog.success("%s successfully executed" % __file__)
|
||||||
|
|
Loading…
Reference in New Issue