Merge branch 'develop' into feature/TD-2581

This commit is contained in:
dapan1121 2021-06-18 16:55:33 +08:00
commit a8d3e9559d
194 changed files with 11766 additions and 6473 deletions

View File

@ -32,7 +32,7 @@ ELSEIF (TD_WINDOWS)
#INSTALL(TARGETS taos RUNTIME DESTINATION driver)
#INSTALL(TARGETS shell RUNTIME DESTINATION .)
IF (TD_MVN_INSTALLED)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.30.jar DESTINATION connector/jdbc)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.31.jar DESTINATION connector/jdbc)
ENDIF ()
ELSEIF (TD_DARWIN)
SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh")

View File

@ -15,7 +15,7 @@ TDengine is an efficient platform to store, query, and analyze time-series big data
* [Command Line Tool TAOS](/getting-started#console): an easy way to access TDengine
* [Lightning-Speed Experience](/getting-started#demo): run the demo program to quickly experience efficient data insertion and querying
* [List of Supported Platforms](/getting-started#platforms): the platforms supported by TDengine server and client
* [Kubenetes Deployment](https://taosdata.github.io/TDengine-Operator/zh/index.html): a detailed guide for deploying TDengine in a Kubenetes environment
* [Kubernetes Deployment](https://taosdata.github.io/TDengine-Operator/zh/index.html): a detailed guide for deploying TDengine in a Kubernetes environment
## [Overall Architecture](/architecture)
@ -81,7 +81,7 @@ TDengine is an efficient platform to store, query, and analyze time-series big data
## [Connections with Other Tools](/connections)
* [Grafana](/connections#grafana): retrieve and visualize the data stored in TDengine
* [Matlab](/connections#matlab): access the data stored in TDengine by configuring a JDBC data source in Matlab
* [MATLAB](/connections#matlab): access the data stored in TDengine by configuring a JDBC data source in MATLAB
* [R](/connections#r): access the data stored in TDengine by configuring a JDBC data source in R
* [IDEA Database](https://www.taosdata.com/blog/2020/08/27/1767.html): use TDengine visually through the IDEA database management tool

View File

@ -9,8 +9,8 @@ One of the modules of TDengine is its time-series database. But beyond that, to reduce R&D
* __Performance improvement of over 10x__: an innovative data storage structure is defined, with which a single core can process at least 20,000 requests per second, insert millions of data points, and read out more than 10 million data points, over ten times faster than existing general-purpose databases.
* __Hardware or cloud service costs cut to 1/5__: thanks to its superior performance, the computing resources required are less than 1/5 of those of general big data solutions; through columnar storage and advanced compression algorithms, the storage space used is less than 1/10 of that of general databases.
* __Full-stack time-series data processing engine__: database, message queue, cache, stream computing, and other functions are integrated; applications no longer need to integrate Kafka/Redis/HBase/Spark/HDFS and similar software, which greatly reduces the complexity and cost of application development and maintenance.
* __Powerful analysis functions__: data from ten years ago or one second ago can be queried by simply specifying a time range. Data can be aggregated over the time axis or across multiple devices. Ad-hoc queries can be made at any time via Shell, Python, R, or Matlab.
* __Seamless connection with third-party tools__: integrates with Telegraf, Grafana, EMQ, HiveMQ, Prometheus, Matlab, R, and more without a single line of code. OPC, Hadoop, Spark, and others will be supported later, and BI tools will also connect seamlessly.
* __Powerful analysis functions__: data from ten years ago or one second ago can be queried by simply specifying a time range. Data can be aggregated over the time axis or across multiple devices. Ad-hoc queries can be made at any time via Shell, Python, R, or MATLAB.
* __Seamless connection with third-party tools__: integrates with Telegraf, Grafana, EMQ, HiveMQ, Prometheus, MATLAB, R, and more without a single line of code. OPC, Hadoop, Spark, and others will be supported later, and BI tools will also connect seamlessly.
* __Zero operation cost, zero learning cost__: installing a cluster is simple and quick, with real-time backup built in and no need to split databases or tables. The query language is similar to standard SQL, with RESTful and Python/Java/C/C++/C#/Go/Node.js support; it is so similar to MySQL that the learning cost is zero.
With TDengine, the total cost of ownership of a typical IoT, Internet of Vehicles, or Industrial Internet big data platform can be greatly reduced. It should be pointed out, however, that because TDengine makes full use of the characteristics of IoT time-series data, it cannot be used to process general-purpose data from web crawlers, microblogs, WeChat, e-commerce, ERP, CRM, and similar sources.

View File

@ -56,7 +56,7 @@ TDengine provides a rich set of application development interfaces, including C/C++, Java,
*taos.tar.gz*: application driver installation package
*driver*: the TDengine application driver
*connector*: connectors for various programming languages (go/grafanaplugin/nodejs/python/JDBC)
*examples*: sample programs for various programming languages (c/C#/go/JDBC/matlab/python/R)
*examples*: sample programs for various programming languages (c/C#/go/JDBC/MATLAB/python/R)
Run install_client.sh to install.

View File

@ -75,17 +75,17 @@ sudo cp -rf /usr/local/taos/connector/grafanaplugin /var/lib/grafana/plugins/tde
![img](page://images/connections/import_dashboard2.jpg)
## <a class="anchor" id="matlab"></a>Matlab
## <a class="anchor" id="matlab"></a>MATLAB
MatLab can connect directly to TDengine via the JDBC Driver provided in the installation package and fetch data into the local workspace.
MATLAB can connect directly to TDengine via the JDBC Driver provided in the installation package and fetch data into the local workspace.
### Adapting the JDBC Interface for MatLab
### Adapting the JDBC Interface for MATLAB
Adapting MatLab involves the following steps, using MatLab 2017a on Windows 10 as an example:
Adapting MATLAB involves the following steps, using MATLAB 2017a on Windows 10 as an example:
- Copy the driver JDBCDriver-1.0.0-dist.jar from the TDengine installation package to ${matlab_root}\MATLAB\R2017a\java\jar\toolbox
- Copy the taos.lib file from the TDengine installation package to ${matlab_ root _dir}\MATLAB\R2017a\lib\win64
- Add the newly added driver jar to MatLab's classpath by adding the following line to the file ${matlab_ root _dir}\MATLAB\R2017a\toolbox\local\classpath.txt:
- Add the newly added driver jar to MATLAB's classpath by adding the following line to the file ${matlab_ root _dir}\MATLAB\R2017a\toolbox\local\classpath.txt:
```
$matlabroot/java/jar/toolbox/JDBCDriver-1.0.0-dist.jar
@ -96,9 +96,9 @@ $matlabroot/java/jar/toolbox/JDBCDriver-1.0.0-dist.jar
C:\Windows\System32
```
### Connecting to TDengine from MatLab to Fetch Data
### Connecting to TDengine from MATLAB to Fetch Data
After completing the above configuration, open MatLab:
After completing the above configuration, open MATLAB:
- Create a connection:

View File

@ -116,20 +116,22 @@ taosd -C
**Note:** For ports, TDengine will use 13 consecutive TCP and UDP port numbers starting from serverPort; please make sure they are open in the firewall. With the default configuration, the 13 ports from 6030 to 6042 must therefore be opened, for both TCP and UDP. For details on ports, see [TDengine 2.0 Port Description](https://www.taosdata.com/cn/documentation/faq#port).
Data in different application scenarios often has different characteristics, such as retention days, number of replicas, collection frequency, record size, number of collection points, compression, and so on. To achieve the best storage efficiency, TDengine provides the following storage-related system configuration parameters:
Data in different application scenarios often has different characteristics, such as retention days, number of replicas, collection frequency, record size, number of collection points, compression, and so on. To achieve the best storage efficiency, TDengine provides the following storage-related system configuration parameters (they can be used either as parameters of the create database statement, or set in the taos.cfg configuration file to define the defaults used when creating new databases):
- days: the time span of the data stored in one data file, in days. Default: 10.
- keep: the number of days data is retained in the database, in days. Default: 3650. (Can be changed via alter database)
- minRows: the minimum number of records in a file block. Default: 100.
- maxRows: the maximum number of records in a file block. Default: 4096.
- comp: file compression flag. 0: off; 1: one-stage compression; 2: two-stage compression. Default: 2. (Can be changed via alter database)
- walLevel: WAL level. 1: write the WAL but do not execute fsync; 2: write the WAL and execute fsync. Default: 1.
- fsync: the period for executing fsync when wal is set to 2. Setting it to 0 means fsync is executed immediately on every write. In milliseconds. Default: 3000.
- cache: the size of a memory block, in megabytes (MB). Default: 16.
- days: the time span of the data stored in one data file, in days. Default: 10.
- keep: the number of days data is retained in the database, in days. Default: 3650. (Can be changed via alter database)
- minRows: the minimum number of records in a file block. Default: 100.
- maxRows: the maximum number of records in a file block. Default: 4096.
- comp: file compression flag. 0: off; 1: one-stage compression; 2: two-stage compression. Default: 2. (Can be changed via alter database)
- wal: WAL level. 1: write the WAL but do not execute fsync; 2: write the WAL and execute fsync. Default: 1. (In taos.cfg the parameter name must be written as walLevel; can be changed via alter database)
- fsync: the period for executing fsync when wal is set to 2. Setting it to 0 means fsync is executed immediately on every write. In milliseconds. Default: 3000. (Can be changed via alter database)
- cache: the size of a memory block, in megabytes (MB). Default: 16.
- blocks: the number of cache-sized memory blocks in each VNODE (TSDB). The memory used by one VNODE is therefore roughly cache * blocks. In blocks. Default: 4. (Can be changed via alter database)
- replica: the number of replicas; value range 1-3. Default: 1. (Can be changed via alter database)
- precision: timestamp precision flag; ms means milliseconds, us means microseconds. Default: ms.
- cacheLast: whether to cache the most recent data of sub-tables in memory. 0: off; 1: cache the most recent row of each sub-table; 2: cache the most recent non-NULL value of each column of each sub-table; 3: cache both the most recent row and columns. Default: 0. (Can be changed via alter database) (Supported since version 2.0.11)
- replica: the number of replicas. Value range: 1-3. Default: 1. (Can be changed via alter database)
- quorum: the number of confirmations required for a write in a multi-replica environment. Value range: 1, 2. Default: 1. (Can be changed via alter database)
- precision: timestamp precision flag. ms means milliseconds, us means microseconds. Default: ms. (Before versions 2.1.2.0 and 2.0.20.7, this parameter was not supported in the taos.cfg file.)
- cacheLast: whether to cache the most recent data of sub-tables in memory. 0: off; 1: cache the most recent row of each sub-table; 2: cache the most recent non-NULL value of each column of each sub-table; 3: cache both the most recent row and columns. Default: 0. (Can be changed via alter database) (Since version 2.1.2.0 this parameter supports the value range [0, 3]; before that, only [0, 1] was allowed, and versions before 2.0.11.0 did not support this parameter in SQL statements at all. Before versions 2.1.2.0 and 2.0.20.7, this parameter was not supported in the taos.cfg file.)
- update: whether updates are allowed. 0: not allowed; 1: allowed. Default: 0. (Can be changed via alter database)
For one application scenario, data with several different characteristics may coexist. The best design is to put tables with the same data characteristics in one database. An application may therefore have multiple databases, and each database can be configured with different storage parameters, so that the system always performs optimally. When creating a database, an application can specify the above storage parameters; if specified, they override the corresponding system configuration parameters. For example, consider the following SQL:
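A minimal sketch of such a statement, with illustrative parameter values:

```mysql
CREATE DATABASE demo DAYS 10 CACHE 32 BLOCKS 8 REPLICA 3 UPDATE 1;
```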
@ -142,7 +144,6 @@ taosd -C
When a new dnode is added to a TDengine cluster, several cluster-related parameters must be identical to the configuration of the existing cluster, otherwise it cannot successfully join. The parameters that are checked are as follows:
- numOfMnodes: the number of management nodes in the system. Default: 3.
- balance: whether load balancing is enabled. 0: no; 1: yes. Default: 1.
- mnodeEqualVnodeNum: the number of vnodes one mnode is considered equivalent to in resource consumption. Default: 4.
- offlineThreshold: the offline threshold of a dnode; exceeding this time causes the dnode to be removed from the cluster. In seconds. Default: 86400*10 (i.e. 10 days).
- statusInterval: the interval at which a dnode reports its status to the mnode. In seconds. Default: 1.
@ -150,6 +151,10 @@ When a new dnode is added to a TDengine cluster, several cluster-related parameters
- maxVgroupsPerDb: the maximum number of vgroups that can be used in each database.
- arbitrator: the end point of the arbitrator in the system; empty by default.
- timezone, locale, charset: see the client configuration. (In versions 2.0.20.0 and above, new nodes joining the cluster are no longer required to have identical locale and charset values)
- balance: whether load balancing is enabled. 0: no; 1: yes. Default: 1.
- flowctrl: whether non-blocking flow control is enabled. 0: no; 1: yes. Default: 1.
- slaveQuery: whether slave vnodes participate in queries. 0: no; 1: yes. Default: 1.
- adjustMaster: whether vnode master load balancing is enabled. 0: no; 1: yes. Default: 1.
For the convenience of debugging, the log configuration of each dnode can be temporarily adjusted via SQL statements (the changes are lost after a system restart):
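As a sketch, such an adjustment might look like the following, assuming dnode ID 1 and the debugFlag log parameter:

```mysql
ALTER DNODE 1 debugFlag 135;
```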
@ -444,7 +449,7 @@ All executable files of TDengine are stored by default in the _/usr/local/taos/bin_ directory
- Database name: must not contain "." or special characters, and must not exceed 32 characters
- Table name: must not contain "." or special characters, and, together with its database name, must not exceed 192 characters
- Table column name: must not contain special characters, and must not exceed 64 characters
- Database names, table names, and column names must not begin with a digit
- Database names, table names, and column names must not begin with a digit; the set of legal characters is "English letters, digits, and underscore"
- Number of table columns: must not exceed 1024
- Maximum record length: including the 8-byte timestamp, must not exceed 16KB (each BINARY/NCHAR column additionally occupies 2 bytes of storage)
- Default maximum string length of a single SQL statement: 65480 bytes

View File

@ -126,9 +126,25 @@ TDengine's default timestamp precision is milliseconds, but this can be changed by passing an argument at CREATE DATABASE time
```mysql
ALTER DATABASE db_name CACHELAST 0;
```
The CACHELAST parameter controls whether the last_row of data sub-tables is cached in memory. The default value is 0; the value range is [0, 1], where 0 means disabled and 1 means enabled. (Supported since version 2.0.11.0. Since version 2.1.1.0, changes to this parameter take effect without restarting the server.)
The CACHELAST parameter controls whether the most recent data of sub-tables is cached in memory. The default value is 0; the value range is [0, 1, 2, 3], where 0 means no caching, 1 means caching the most recent row of each sub-table, 2 means caching the most recent non-NULL value of each column of each sub-table, and 3 means caching both the most recent row and columns. (Parameter values [0, 1] are supported since version 2.0.11.0; values [0, 1, 2, 3] since version 2.1.2.0.)
Note: caching the most recent row significantly improves the performance of the LAST_ROW function; caching the most recent non-NULL value of each column significantly improves the performance of the LAST function when no special clause (WHERE, ORDER BY, GROUP BY, INTERVAL) is involved. Both are sketched below.
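A sketch of queries that benefit from the two cache modes, assuming a hypothetical sub-table d1001:

```mysql
SELECT LAST_ROW(*) FROM d1001;   -- faster with CACHELAST 1 or 3
SELECT LAST(voltage) FROM d1001; -- faster with CACHELAST 2 or 3
```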
**Tips**: After changing any of the above parameters, you can use show databases to check whether the change succeeded.
```mysql
ALTER DATABASE db_name WAL 1;
```
The WAL parameter controls how the WAL log is flushed to disk. The default value is 1; the value range is [1, 2]. 1 means the WAL is written but fsync is not executed; 2 means the WAL is written and fsync is executed.
```mysql
ALTER DATABASE db_name FSYNC 3000;
```
The FSYNC parameter controls the period for executing fsync. The default value is 3000, in milliseconds; the value range is [0, 180000]. Setting it to 0 means fsync is executed immediately on every write. This setting mainly tunes the system behavior when WAL is set to 2.
```mysql
ALTER DATABASE db_name UPDATE 0;
```
The UPDATE parameter controls whether data updates are allowed. The default value is 0; the value range is [0, 1]. 0 means that data written later with an identical timestamp is simply discarded; 1 means that data written later overwrites existing data with the same timestamp.
**Tips**: After changing any of the above parameters, you can use show databases to check whether the change succeeded. In addition, since version 2.1.1.0, changes to these parameters take effect without restarting the server.
- **Show all databases in the system**
@ -681,9 +697,10 @@ Query OK, 1 row(s) in set (0.001091s)
| % | match with any char sequences | **`binary`** **`nchar`** |
| _ | match with a single char | **`binary`** **`nchar`** |
1. To apply range filters to multiple fields at the same time, the keyword AND must be used to connect the query conditions; filter conditions on different columns connected by OR are not yet supported.
2. For filtering on a single field: a time filter condition may only be set once per statement, but for other (regular) columns or tag columns, the `OR` keyword can be used to combine filter conditions. For example: ((value > 20 AND value < 30) OR (value < 12)).
3. Since version 2.0.17, filter conditions support the BETWEEN AND syntax; for example, `WHERE col2 BETWEEN 1.5 AND 3.25` expresses the query condition "1.5 ≤ col2 ≤ 3.25".
1. The <> operator can also be written as != . Note that this operator cannot be applied to the timestamp field, i.e. the first column of a table.
2. To apply range filters to multiple fields at the same time, the keyword AND must be used to connect the query conditions; filter conditions on different columns connected by OR are not yet supported.
3. For filtering on a single field: a time filter condition may only be set once per statement, but for other (regular) columns or tag columns, the `OR` keyword can be used to combine filter conditions. For example: ((value > 20 AND value < 30) OR (value < 12)).
4. Since version 2.0.17, filter conditions support the BETWEEN AND syntax; for example, `WHERE col2 BETWEEN 1.5 AND 3.25` expresses the query condition "1.5 ≤ col2 ≤ 3.25" (see the combined sketch after this list).
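A sketch combining these filter forms, assuming a hypothetical table sensor_data with columns ts, value, and col2:

```mysql
SELECT * FROM sensor_data
  WHERE ts > '2021-06-01 00:00:00'
    AND ((value > 20 AND value < 30) OR (value < 12))
    AND col2 BETWEEN 1.5 AND 3.25;
```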
<!--
<a class="anchor" id="having"></a>
@ -1178,9 +1195,9 @@ TDengine supports aggregation queries over data. The supported aggregation and selection functions
Applicable fields: cannot be applied to fields of timestamp, binary, nchar, or bool type.
Applicable to: **tables**.
Applicable to: **tables, (super tables)**.
Note: the number of output rows is the total number of rows in the range minus one; no result is output for the first row.
Note: the number of output rows is the total number of rows in the range minus one; no result is output for the first row. Since version 2.1.3.0, the DIFF function can be used on a super table when GROUP BY splits the data into separate timelines, i.e. GROUP BY tbname (a sketch follows).
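A sketch of such a super-table usage, assuming a hypothetical STable meters with a current column:

```mysql
SELECT DIFF(current) FROM meters GROUP BY tbname;
```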
Example:
```mysql

View File

@ -0,0 +1,142 @@
# TDengine Documentation
TDengine is a highly efficient platform to store, query, and analyze time-series data. It is specially designed and optimized for IoT, Internet of Vehicles, Industrial IoT, IT Infrastructure and Application Monitoring, etc. It works like a relational database, such as MySQL, but you are strongly encouraged to read through the following documentation before you experience it, especially the Data Model and Data Modeling sections. In addition to this document, you should also download and read our technology white paper. For the older TDengine version 1.6 documentation, please click here.
## [TDengine Introduction](/evaluation)
* [TDengine Introduction and Features](/evaluation#intro)
* [TDengine Use Scenarios](/evaluation#scenes)
* [TDengine Performance Metrics and Verification](/evaluation#)
## [Getting Started](/getting-started)
* [Quick Install](/getting-started#install): install via source code / package / Docker within seconds
- [Easy to Launch](/getting-started#start): start / stop TDengine with systemctl
- [Command-line](/getting-started#console) : an easy way to access TDengine server
- [Experience Lightning Speed](/getting-started#demo): running a demo, inserting/querying data to experience faster speed
- [List of Supported Platforms](/getting-started#platforms): a list of platforms supported by TDengine server and client
- [Deploy to Kubernetes](https://taosdata.github.io/TDengine-Operator/en/index.html): a detailed guide for TDengine deployment in a Kubernetes environment
## [Overall Architecture](/architecture)
- [Data Model](/architecture#model): relational database model, but one table for one device with static tags
- [Cluster and Primary Logical Unit](/architecture#cluster): Take advantage of NoSQL, support scale-out and high-reliability
- [Storage Model and Data Partitioning/Sharding](/architecture#sharding): tag data will be separated from time-series data, segmented by vnode and time
- [Data Writing and Replication Process](/architecture#replication): records received are written to WAL, cached, and acknowledged back to the client, while supporting multi-replicas
- [Caching and Persistence](/architecture#persistence): latest records are cached in memory, but are written in columnar format with an ultra-high compression ratio
- [Data Query](/architecture#query): support various functions, time-axis aggregation, interpolation, and multi-table aggregation
## [Data Modeling](/model)
- [Create a Database](/model#create-db): create a database for all data collection points with similar features
- [Create a Super Table (STable)](/model#create-stable): create a STable for all data collection points with the same type
- [Create a Table](/model#create-table): use STable as the template, to create a table for each data collecting point
## [TAOS SQL](/taos-sql)
- [Data Types](/taos-sql#data-type): support timestamp, int, float, nchar, bool, and other types
- [Database Management](/taos-sql#management): add, drop, check databases
- [Table Management](/taos-sql#table): add, drop, check, alter tables
- [STable Management](/taos-sql#super-table): add, drop, check, alter STables
- [Tag Management](/taos-sql#tags): add, drop, alter tags
- [Inserting Records](/taos-sql#insert): support writing single/multiple records per table and multiple records across tables, as well as writing historical data
- [Data Query](/taos-sql#select): support time segment, value filtering, sorting, manual paging of query results, etc
- [SQL Function](/taos-sql#functions): support various aggregation functions, selection functions, and calculation functions, such as avg, min, diff, etc
- [Time Dimensions Aggregation](/taos-sql#aggregation): aggregate and reduce the dimension after cutting table data by time segment
- [Boundary Restrictions](/taos-sql#limitation): restrictions for the library, table, SQL, and others
- [Error Code](/taos-sql/error-code): TDengine 2.0 error codes and corresponding decimal codes
## [Efficient Data Ingestion](/insert)
- [SQL Ingestion](/insert#sql): write one or multiple records into one or multiple tables via SQL insert command
- [Prometheus Ingestion](/insert#prometheus): Configure Prometheus to write data directly without any code
- [Telegraf Ingestion](/insert#telegraf): Configure Telegraf to write collected data directly without any code
- [EMQ X Broker](/insert#emq): Configure EMQ X to write MQTT data directly without any code
- [HiveMQ Broker](/insert#hivemq): Configure HiveMQ to write MQTT data directly without any code
## [Efficient Data Querying](/queries)
- [Main Query Features](/queries#queries): support various standard functions, setting filter conditions, and querying per time segment
- [Multi-table Aggregation Query](/queries#aggregation): use STable and set tag filter conditions to perform efficient aggregation queries
- [Downsampling to Query Value](/queries#sampling): aggregate data in successive time windows, support interpolation
## [Advanced Features](/advanced-features)
- [Continuous Query](/advanced-features#continuous-query): Based on sliding windows, the data stream is automatically queried and calculated at regular intervals
- [Data Publisher/Subscriber](/advanced-features#subscribe): subscribe to the newly arrived data like a typical messaging system
- [Cache](/advanced-features#cache): the newly arrived data of each device/table will always be cached
- [Alarm Monitoring](/advanced-features#alert): automatically monitor out-of-threshold data, and actively push it based on configured rules
## [Connector](/connector)
- [C/C++ Connector](/connector#c-cpp): primary method to connect to TDengine server through libtaos client library
- [Java Connector (JDBC)]: driver for connecting to the server from Java applications using the JDBC API
- [Python Connector](/connector#python): driver for connecting to TDengine server from Python applications
- [RESTful Connector](/connector#restful): a simple way to interact with TDengine via HTTP
- [Go Connector](/connector#go): driver for connecting to TDengine server from Go applications
- [Node.js Connector](/connector#nodejs): driver for connecting to TDengine server from Node.js applications
- [C# Connector](/connector#csharp): driver for connecting to TDengine server from C# applications
- [Windows Client](https://www.taosdata.com/blog/2019/07/26/514.html): compile your own Windows client, which is required by various connectors on the Windows environment
## [Connections with Other Tools](/connections)
- [Grafana](/connections#grafana): query the data saved in TDengine and provide visualization
- [MATLAB](/connections#matlab): access data stored in TDengine server via JDBC configured within MATLAB
- [R](/connections#r): access data stored in TDengine server via JDBC configured within R
- [IDEA Database](https://www.taosdata.com/blog/2020/08/27/1767.html): use TDengine visually through IDEA Database Management Tool
## [Installation and Management of TDengine Cluster](/cluster)
- [Preparation](/cluster#prepare): important considerations before deploying TDengine for production usage
- [Create Your First Node](/cluster#node-one): simple to follow the quick setup
- [Create Subsequent Nodes](/cluster#node-other): configure taos.cfg for new nodes to add more to the existing cluster
- [Node Management](/cluster#management): add, delete, and check nodes in the cluster
- [High-availability of Vnode](/cluster#high-availability): implement high-availability of Vnode through multi-replicas
- [Mnode Management](/cluster#mnode): automatic system creation without any manual intervention
- [Load Balancing](/cluster#load-balancing): automatically performed once the number of nodes or load changes
- [Offline Node Processing](/cluster#offline): any node that stays offline for more than a certain period will be removed from the cluster
- [Arbitrator](/cluster#arbitrator): used in the case of an even number of replicas to prevent split-brain
## [TDengine Operation and Maintenance](/administrator)
- [Capacity Planning](/administrator#planning): Estimating hardware resources based on scenarios
- [Fault Tolerance and Disaster Recovery](/administrator#tolerance): set the correct WAL and number of data replicas
- [System Configuration](/administrator#config): port, cache size, file block size, and other system configurations
- [User Management](/administrator#user): add/delete TDengine users, modify user password
- [Import Data](/administrator#import): import data into TDengine from either script or CSV file
- [Export Data](/administrator#export): export data either from TDengine shell or from the taosdump tool
- [System Monitor](/administrator#status): monitor the system connections, queries, streaming calculation, logs, and events
- [File Directory Structure](/administrator#directories): directories where TDengine data files and configuration files are located
- [Parameter Restrictions and Reserved Keywords](/administrator#keywords): TDengine's list of parameter restrictions and reserved keywords
## TDengine Technical Design
- [System Module]: taosd functions and modules partitioning
- [Data Replication]: support real-time synchronous/asynchronous replication, to ensure high-availability of the system
- [Technical Blog](https://www.taosdata.com/cn/blog/?categories=3): More technical analysis and architecture design articles
## Common Tools
- [TDengine sample import tools](https://www.taosdata.com/blog/2020/01/18/1166.html)
- [TDengine performance comparison test tools](https://www.taosdata.com/blog/2020/01/18/1166.html)
- [Use TDengine visually through IDEA Database Management Tool](https://www.taosdata.com/blog/2020/08/27/1767.html)
## Performance: TDengine vs Others
- [Performance: TDengine vs InfluxDB with InfluxDB's open-source performance testing tool](https://www.taosdata.com/blog/2020/01/13/1105.html)
- [Performance: TDengine vs OpenTSDB](https://www.taosdata.com/blog/2019/08/21/621.html)
- [Performance: TDengine vs Cassandra](https://www.taosdata.com/blog/2019/08/14/573.html)
- [Performance: TDengine vs InfluxDB](https://www.taosdata.com/blog/2019/07/19/419.html)
- [Performance Test Reports of TDengine vs InfluxDB/OpenTSDB/Cassandra/MySQL/ClickHouse](https://www.taosdata.com/downloads/TDengine_Testing_Report_cn.pdf)
## More on IoT Big Data
- [Characteristics of IoT and Industry Internet Big Data](https://www.taosdata.com/blog/2019/07/09/characteristics-of-iot-big-data/)
- [Features and Functions of IoT Big Data platforms](https://www.taosdata.com/blog/2019/07/29/542.html)
- [Why don't General Big Data Platforms Fit IoT Scenarios?](https://www.taosdata.com/blog/2019/07/09/why-does-the-general-big-data-platform-not-fit-iot-data-processing/)
- [Why TDengine is the best choice for IoT, Internet of Vehicles, and Industry Internet Big Data platforms?](https://www.taosdata.com/blog/2019/07/09/why-tdengine-is-the-best-choice-for-iot-big-data-processing/)
## FAQ
- [FAQ: Common questions and answers](/faq)

View File

@ -0,0 +1,65 @@
# TDengine Introduction
## <a class="anchor" id="intro"></a> About TDengine
TDengine is an innovative Big Data processing product launched by Taos Data in the face of the fast-growing Internet of Things (IoT) Big Data market and technical challenges. It does not rely on any third-party software, nor does it optimize or package any open-source database or stream computing product. Instead, it is a product independently developed after absorbing the advantages of many traditional relational databases, NoSQL databases, stream computing engines, message queues, and other software. TDengine has its own unique Big Data processing advantages in time-series space.
One of the modules of TDengine is the time-series database. However, in addition to this, to reduce the complexity of research and development and the difficulty of system operation, TDengine also provides functions such as caching, message queuing, subscription, stream computing, etc. TDengine provides a full-stack technical solution for the processing of IoT and Industrial Internet Big Data. It is an efficient and easy-to-use IoT Big Data platform. Compared with typical Big Data platforms such as Hadoop, TDengine has the following distinct characteristics:
- **Performance improvement over 10 times**: An innovative data storage structure is defined, with which each single core can process at least 20,000 requests per second, insert millions of data points, and read more than 10 million data points, more than 10 times faster than existing general databases.
- **Reduce the cost of hardware or cloud services to 1/5**: Due to its ultra-high performance, TDengine's computing resource consumption is less than 1/5 of other common Big Data solutions; through columnar storage and advanced compression algorithms, its storage consumption is less than 1/10 of other general databases.
- **Full-stack time-series data processing engine**: Integrate database, message queue, cache, stream computing, and other functions, and the applications do not need to integrate with software such as Kafka/Redis/HBase/Spark/HDFS, thus greatly reducing the complexity cost of application development and maintenance.
- **Powerful analysis functions**: Data from ten years ago or one second ago can all be queried based on a specified time range. Data can be aggregated on a timeline or across multiple devices. Ad-hoc queries can be made at any time through Shell, Python, R, and MATLAB.
- **Seamless connection with third-party tools**: Integration with Telegraf, Grafana, EMQ, HiveMQ, Prometheus, MATLAB, R, etc. without even one single line of code. OPC, Hadoop, Spark, etc. will be supported in the future, and more BI tools will be seamlessly connected.
- **Zero operation cost & zero learning cost**: Installing clusters is simple and quick, with real-time backup built-in, and no need to split libraries or tables. Similar to standard SQL, TDengine supports RESTful and Python/Java/C/C++/C#/Go/Node.js interfaces, and is similar to MySQL with zero learning cost.
With TDengine, the total cost of ownership of typical IoT, Internet of Vehicles, and Industrial Internet Big Data platforms can be greatly reduced. However, it should be pointed out that due to making full use of the characteristics of IoT time-series data, TDengine cannot be used to process general data from web crawlers, microblogs, WeChat, e-commerce, ERP, CRM, and other sources.
![TDengine Technology Ecosystem](page://images/eco_system.png)
<center>Figure 1. TDengine Technology Ecosystem</center>
## <a class="anchor" id="scenes"></a>Overall Scenarios of TDengine
As an IoT Big Data platform, the typical application scenarios of TDengine are mainly presented in the IoT category, with users having a certain amount of data. The following sections of this document are mainly aimed at IoT-relevant systems. Other systems, such as CRM, ERP, etc., are beyond the scope of this article.
### Characteristics and Requirements of Data Sources
From the perspective of data sources, designers can analyze the applicability of TDengine in target application systems as follows.
| **Data Source Characteristics and Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
| -------------------------------------------------------- | ------------------ | ----------------------- | ------------------- | :----------------------------------------------------------- |
| A huge amount of total data | | | √ | TDengine provides excellent scale-out functions in terms of capacity, and has a storage structure matching high compression ratio to achieve the best storage efficiency in the industry. |
| Data input velocity is occasionally or continuously huge | | | √ | TDengine's performance is much higher than other similar products. It can continuously process a large amount of input data in the same hardware environment, and provide a performance evaluation tool that can easily run in the user environment. |
| A huge amount of data sources | | | √ | TDengine is designed to include optimizations specifically for a huge amount of data sources, such as data writing and querying, which is especially suitable for efficiently processing massive (tens of millions or more) data sources. |
### System Architecture Requirements
| **System Architecture Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ |
| Require a simple and reliable system architecture | | | √ | TDengine's system architecture is very simple and reliable, with its own message queue, cache, stream computing, monitoring and other functions, and no need to integrate any additional third-party products. |
| Require fault-tolerance and high-reliability | | | √ | TDengine has cluster functions to automatically provide high-reliability functions such as fault tolerance and disaster recovery. |
| Standardization specifications | | | √ | TDengine uses standard SQL language to provide main functions and follow standardization specifications. |
### System Function Requirements
| **System Function Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ |
| Require complete data processing algorithms built-in | | √ | | TDengine implements various general data processing algorithms, but has not properly handled all requirements of different industries, so special types of processing shall be handled at the application level. |
| Require a huge amount of crosstab queries | | √ | | This type of processing should be handled more by relational database systems, or TDengine and relational database systems should fit together to implement system functions. |
### System Performance Requirements
| **System Performance Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ |
| Require larger total processing capacity | | | √ | TDengine's cluster functions can easily improve processing capacity via multi-server cooperation. |
| Require high-speed data processing | | | √ | TDengine's storage and data processing are optimized for IoT and are generally several times faster than other similar products. |
| Require fast processing of fine-grained data | | | √ | TDengine has achieved the same level of performance as relational and NoSQL data processing systems. |
### System Maintenance Requirements
| **System Maintenance Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ |
| Require system with high-reliability | | | √ | TDengine has a very robust and reliable system architecture to implement simple and convenient daily operation with streamlined experiences for operators, thus human errors and accidents are eliminated to the greatest extent. |
| Require controllable operation learning cost | | | √ | As above. |
| Require abundant talent supply | √ | | | As a new-generation product, it is still difficult to find talent with TDengine experience in the market. However, the learning cost is low. As the vendor, we also provide extensive operation training and counselling services. |

View File

@ -0,0 +1,223 @@
# Quick Start
## <a class="anchor" id="install"></a>Quick Install
TDengine software consists of 3 parts: server, client, and alarm module. At the moment, the TDengine server only runs on Linux (support for Windows, macOS, and more operating systems will come soon), but the client can run on either Windows or Linux. The TDengine client can be installed and run on Windows or Linux. Applications based on any OS can connect to the server taosd via the RESTful interface. As for CPUs, TDengine supports X64/ARM64/MIPS64/Alpha64, as well as ARM32 and RISC-V; more CPU architectures will be supported soon. You can set up and install the TDengine server either from the [source code](https://www.taosdata.com/en/getting-started/#Install-from-Source) or the [packages](https://www.taosdata.com/en/getting-started/#Install-from-Package).
### <a class="anchor" id="source-install"></a>Install from Source
Please visit our [TDengine github page](https://github.com/taosdata/TDengine) for instructions on installation from the source code.
### Install from Docker Container
Please visit our [TDengine Official Docker Image: Distribution, Downloading, and Usage](https://www.taosdata.com/blog/2020/05/13/1509.html).
### <a class="anchor" id="package-install"></a>Install from Package
It's extremely easy to install TDengine: it takes only a few seconds from download to successful installation. The server installation package includes clients and connectors. We provide 3 installation packages, which you can choose from according to actual needs:
Click [here](https://www.taosdata.com/cn/getting-started/#%E9%80%9A%E8%BF%87%E5%AE%89%E8%A3%85%E5%8C%85%E5%AE%89%E8%A3%85) to download the install package.
For more about the installation process, please refer to [TDengine Installation Packages: Install and Uninstall](https://www.taosdata.com/blog/2019/08/09/566.html) and the [Video Tutorials](https://www.taosdata.com/blog/2020/11/11/1941.html).
## <a class="anchor" id="start"></a>Quick Launch
After installation, you can start the TDengine service by the `systemctl` command.
```bash
$ systemctl start taosd
```
Then check if the service is working now.
```bash
$ systemctl status taosd
```
If the service is running successfully, you can play around through TDengine shell `taos`.
**Note:**
- The `systemctl` command needs the **root** privilege. Use **sudo** if you are not the **root** user.
- To get better product feedback and improve our solution, TDengine will collect basic usage information; you can modify the configuration parameter **telemetryReporting** in the system configuration file taos.cfg and set it to 0 to turn it off.
- TDengine uses FQDN (usually the hostname) as the node ID. In order to ensure normal operation, you need to set the hostname for the server running taosd, and configure a DNS service or hosts file for the machine running the client application, to ensure the FQDN can be resolved.
- TDengine supports installation on Linux systems with [systemd](https://en.wikipedia.org/wiki/Systemd) as the process service manager, and uses the `which systemctl` command to detect whether `systemd` packages exist in the system:
```bash
$ which systemctl
```
If `systemd` is not supported in the system, TDengine service can also be launched via `/usr/local/taos/bin/taosd` manually.
## <a class="anchor" id="console"></a>TDengine Shell Command Line
To launch TDengine shell, the command line interface, in a Linux terminal, type:
```bash
$ taos
```
The welcome message is printed if the shell connects to TDengine server successfully, otherwise, an error message will be printed (refer to our [FAQ](https://www.taosdata.com/en/faq) page for troubleshooting the connection error). The TDengine shell prompt is:
```cmd
taos>
```
In the TDengine shell, you can create databases, create tables and insert/query data with SQL. Each query command ends with a semicolon. It works like MySQL, for example:
```mysql
create database demo;
use demo;
create table t (ts timestamp, speed int);
insert into t values ('2019-07-15 00:00:00', 10);
insert into t values ('2019-07-15 01:00:00', 20);
select * from t;
ts | speed |
===================================
19-07-15 00:00:00.000| 10|
19-07-15 01:00:00.000| 20|
Query OK, 2 row(s) in set (0.001700s)
```
Besides the SQL commands, the system administrator can check system status, add or delete accounts, and manage the servers.
### Shell Command Line Parameters
You can configure command parameters to change how TDengine shell executes. Some frequently used options are listed below:
- -c, --config-dir: set the configuration directory. It is */etc/taos* by default.
- -h, --host: set the IP address of the server it will connect to. Default is localhost.
- -s, --commands: set the command to run without entering the shell.
- -u, --user: user name to connect to the server. Default is root.
- -p, --password: password. Default is 'taosdata'.
- -?, --help: get a full list of supported options.
Examples:
```bash
$ taos -h 192.168.0.1 -s "use db; show tables;"
```
### Run SQL Command Scripts
Inside the TDengine shell, you can run SQL scripts in a file with the `source` command.
```mysql
taos> source <filename>;
```
### Shell Tips
- Use up/down arrow key to check the command history
- To change the default password, use "alter user" command
- Use ctrl+c to interrupt any queries
- To clean the schema of local cached tables, execute command `RESET QUERY CACHE`
## <a class="anchor" id="demo"></a>Experience TDengine's Lightning Speed
After starting the TDengine server, you can execute the command `taosdemo` in the Linux terminal.
```bash
$ taosdemo
```
Using this command, a STable named `meters` will be created in the database `test`. There are 10,000 tables under this STable, named from `t0` to `t9999`. Each table has 100,000 rows of records, each row with columns `f1`, `f2` and `f3`. The timestamps range from "2017-07-14 10:40:00 000" to "2017-07-14 10:41:39 999". Each table also has tags `areaid` and `loc`: `areaid` is set from 1 to 10, and `loc` is set to "beijing" or "shanghai".
It takes about 10 minutes to execute this command. Once finished, 1 billion rows of records will be inserted.
In the TDengine client, enter SQL query commands and experience our lightning query speed.
- query total rows of records
```mysql
taos> select count(*) from test.meters;
```
- query average, max and min of the total 1 billion records
```mysql
taos> select avg(f1), max(f2), min(f3) from test.meters;
```
- query the number of records where loc="beijing":
```mysql
taos> select count(*) from test.meters where loc="beijing";
```
- query the average, max and min of total records where areaid=10
```mysql
taos> select avg(f1), max(f2), min(f3) from test.meters where areaid=10;
```
- query the average, max, min from table t10 when aggregating over every 10s:
```mysql
taos> select avg(f1), max(f2), min(f3) from test.t10 interval(10s);
```
**Note**: you can run the command `taosdemo` with many options, like the number of tables, rows of records, and so on. To learn more about these options, execute `taosdemo --help` and try it out with different options.
## Client and Alarm Module
If your client and server are running on different machines, please install the client separately. Linux and Windows packages are provided:
- TDengine-client-2.0.10.0-Linux-x64.tar.gz(3.0M)
- TDengine-client-2.0.10.0-Windows-x64.exe(2.8M)
- TDengine-client-2.0.10.0-Windows-x86.exe(2.8M)
The Linux package of the Alarm Module is as follows (please refer to [How to Use Alarm Module](https://github.com/taosdata/TDengine/blob/master/alert/README_cn.md)):
- TDengine-alert-2.0.10.0-Linux-x64.tar.gz (8.1M)
## <a class="anchor" id="platforms"></a>List of Supported Platforms
List of platforms supported by TDengine server
| | **CentOS 6/7/8** | **Ubuntu 16/18/20** | **Other Linux** | UnionTech UOS | NeoKylin | LINX V60/V80 |
| ------------------ | ---------------- | ------------------- | --------------- | ------------- | -------- | ------------ |
| X64 | ● | ● | | ○ | ● | ● |
| Raspberry ARM32 | | ● | ● | | | |
| Loongson MIPS64 | | | ● | | | |
| Kunpeng ARM64 | | ○ | ○ | | ● | |
| SWCPU Alpha64 | | | ○ | ● | | |
| FT ARM64 | | ○Ubuntu Kylin | | | | |
| Hygon X64 | ● | ● | ● | ○ | ● | ● |
| Rockchip ARM64/32 | | | ○ | | | |
| Allwinner ARM64/32 | | | ○ | | | |
| Actions ARM64/32 | | | ○ | | | |
| TI ARM32 | | | ○ | | | |
Note: ● has been verified by official tests; ○ has been verified by unofficial tests.
List of platforms supported by TDengine client and connectors
At the moment, TDengine connectors can support a wide range of platforms, including hardware platforms such as X64/X86/ARM64/ARM32/MIPS/Alpha, and development environments such as Linux/Win64/Win32.
The comparison matrix is as follows:
| **CPU** | **X64 64bit** | | | **X86 32bit** | **ARM64** | **ARM32** | **MIPS Godson** | **Alpha Shenwei** | **X64 TimecomTech** |
| ----------- | ------------- | --------- | --------- | ------------- | --------- | --------- | --------------- | ----------------- | ------------------- |
| **OS** | **Linux** | **Win64** | **Win32** | **Win32** | **Linux** | **Linux** | **Linux** | **Linux** | **Linux** |
| **C/C++** | ● | ● | ● | ○ | ● | ● | ● | ● | ● |
| **JDBC** | ● | ● | ● | ○ | ● | ● | ● | ● | ● |
| **Python** | ● | ● | ● | ○ | ● | ● | ● | -- | ● |
| **Go** | ● | ● | ● | ○ | ● | ● | ○ | -- | -- |
| **NodeJs** | ● | ● | ○ | ○ | ● | ● | ○ | -- | -- |
| **C#** | ○ | ● | ● | ○ | ○ | ○ | ○ | -- | -- |
| **RESTful** | ● | ● | ● | ● | ● | ● | ● | ● | ● |
Note: ● has been verified by official tests; ○ has been verified by unofficial tests.
Please visit [Connectors](https://www.taosdata.com/en/documentation/connector) section for more detailed information.

View File

@ -0,0 +1,436 @@
# Data Model and Architecture
## <a class="anchor" id="model"></a> Data Model
### A Typical IoT Scenario
In typical IoT, Internet of Vehicles, and Operation Monitoring scenarios, there are often many different types of data collecting devices that collect one or more different physical metrics. For collection devices of the same type, there are often many specific devices distributed in the field. Big Data processing systems aim to collect all kinds of data, then compute and analyze them. For the same kind of devices, the data collected are very regular. Taking smart meters as an example, assuming that each smart meter collects three metrics of current, voltage, and phase, the collected data are similar to the following table:
<figure><table>
<thead><tr>
<th style="text-align:center;">Device ID</th>
<th style="text-align:center;">Time Stamp</th>
<th style="text-align:center;" colspan="3">Collected Metrics</th>
<th style="text-align:center;" colspan="2">Tags</th>
</tr>
<tr>
<th style="text-align:center;">Device ID</th>
<th style="text-align:center;">Time Stamp</th>
<th style="text-align:center;">current</th>
<th style="text-align:center;">voltage</th>
<th style="text-align:center;">phase</th>
<th style="text-align:center;">location</th>
<th style="text-align:center;">groupId</th>
</tr>
</thead>
<tbody>
<tr>
<td style="text-align:center;">d1001</td>
<td style="text-align:center;">1538548685000</td>
<td style="text-align:center;">10.3</td>
<td style="text-align:center;">219</td>
<td style="text-align:center;">0.31</td>
<td style="text-align:center;">Beijing.Chaoyang</td>
<td style="text-align:center;">2</td>
</tr>
<tr>
<td style="text-align:center;">d1002</td>
<td style="text-align:center;">1538548684000</td>
<td style="text-align:center;">10.2</td>
<td style="text-align:center;">220</td>
<td style="text-align:center;">0.23</td>
<td style="text-align:center;">Beijing.Chaoyang</td>
<td style="text-align:center;">3</td>
</tr>
<tr>
<td style="text-align:center;">d1003</td>
<td style="text-align:center;">1538548686500</td>
<td style="text-align:center;">11.5</td>
<td style="text-align:center;">221</td>
<td style="text-align:center;">0.35</td>
<td style="text-align:center;">Beijing.Haidian</td>
<td style="text-align:center;">3</td>
</tr>
<tr>
<td style="text-align:center;">d1004</td>
<td style="text-align:center;">1538548685500</td>
<td style="text-align:center;">13.4</td>
<td style="text-align:center;">223</td>
<td style="text-align:center;">0.29</td>
<td style="text-align:center;">Beijing.Haidian</td>
<td style="text-align:center;">2</td>
</tr>
<tr>
<td style="text-align:center;">d1001</td>
<td style="text-align:center;">1538548695000</td>
<td style="text-align:center;">12.6</td>
<td style="text-align:center;">218</td>
<td style="text-align:center;">0.33</td>
<td style="text-align:center;">Beijing.Chaoyang</td>
<td style="text-align:center;">2</td>
</tr>
<tr>
<td style="text-align:center;">d1004</td>
<td style="text-align:center;">1538548696600</td>
<td style="text-align:center;">11.8</td>
<td style="text-align:center;">221</td>
<td style="text-align:center;">0.28</td>
<td style="text-align:center;">Beijing.Haidian</td>
<td style="text-align:center;">2</td>
</tr>
<tr>
<td style="text-align:center;">d1002</td>
<td style="text-align:center;">1538548696650</td>
<td style="text-align:center;">10.3</td>
<td style="text-align:center;">218</td>
<td style="text-align:center;">0.25</td>
<td style="text-align:center;">Beijing.Chaoyang</td>
<td style="text-align:center;">3</td>
</tr>
<tr>
<td style="text-align:center;">d1001</td>
<td style="text-align:center;">1538548696800</td>
<td style="text-align:center;">12.3</td>
<td style="text-align:center;">221</td>
<td style="text-align:center;">0.31</td>
<td style="text-align:center;">Beijing.Chaoyang</td>
<td style="text-align:center;">2</td>
</tr>
</tbody>
</table></figure>
<center> Table 1: Smart meter example data </center>
Each data record contains the device ID, timestamp, collected metrics (current, voltage, phase as above), and static tags (location and groupId in Table 1) associated with the devices. Each device generates a data record on a pre-defined timer or when triggered by an external event. It is a sequence of data points, like a stream.
### Data Characteristics
As data points are a series over time, the data points generated by IoT, Internet of Vehicles, and Operation Monitoring share some strong common characteristics:
1. Metrics are always structured data;
2. There are rarely delete/update operations on collected data;
3. There is no need for transactions as in traditional databases;
4. The ratio of reads is low, while that of writes is high, compared with typical Internet applications;
5. Data flow is uniform and can be predicted according to the number of devices and the collection frequency;
6. Users pay attention to the trend of data, not a specific value at a specific time;
7. There is always a data retention policy;
8. Data queries are always executed within a given time range and a subset of devices;
9. In addition to storage and query operations, various statistical and real-time computing operations are also required;
10. Data volume is huge; one system may generate over 10 billion data points in a day.
By utilizing the above characteristics, TDengine designs the storage and computing engine in a special and optimized way for time-series data, resulting in massive improvements in system efficiency.
### Relational Database Model
Since time-series data is most likely to be structured data, TDengine adopts the traditional relational database model to process them with a shallow learning curve. You need to create a database, create tables with schema definitions, then insert data points and execute queries to explore the data. Standard SQL is used, instead of NoSQL's key-value storage.
### One Table for One Collection Point
To utilize this time-series and other data features, TDengine requires the user to create a table for each collection point to store collected time-series data. For example, if there are over 10 million smart meters, 10 million tables shall be created. For the table above, 4 tables shall be created for devices d1001, d1002, d1003, and d1004 to store the data collected. This design has several advantages:
1. It guarantees that all data from a collection point can be saved in a continuous memory/hard disk space, block by block. If queries are applied to only one point over a time range, this design reduces the random read latency significantly, thus increasing read and query speed by orders of magnitude.
2. Since the data generation process of each collection device is completely independent, each device has its unique data source, and writes can be carried out in a lock-free manner to greatly improve the speed.
3. Write latency can be significantly reduced too, as the data points generated by the same device arrive in time order, so a new data point is simply appended to a block.
If the data of multiple devices were written into one table in the traditional way, then due to uncontrollable network delays, the order in which data from different devices arrive at the server cannot be guaranteed, write operations must be protected by locks, and the data of one device cannot be guaranteed to be stored together continuously. **The method of one table for each data collection point ensures, to the greatest extent, the optimal performance of insertion and query for a single data collection point.**
TDengine suggests using the collection point ID as the table name (like d1001 in the above table). Each point may collect one or more metrics (like the current, voltage, and phase above). Each metric has a column in the table. The data type for a column can be int, float, string, and so on. In addition, the first column in the table must be a timestamp. TDengine uses the timestamp as the index, and won't build an index on any metrics stored. All data are stored in columns.
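As an illustration only, such a per-device table for the smart meter example might be created like this (the exact column types are assumptions, not mandated by TDengine):

```mysql
CREATE TABLE d1001 (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT);
```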
### STable: A Collection of Data Points in the Same Type
The method of one table for each point brings a greatly increased number of tables, which are difficult to manage. Moreover, applications often need to perform aggregation between collection points, so aggregation operations become complicated. To support aggregation over multiple tables efficiently, the [STable (Super Table)](https://www.taosdata.com/en/documentation/super-table) concept is introduced by TDengine.
STable is an abstract collection for a type of data point. A STable contains a set of points (tables) that have the same schema or data structure, but with different static attributes (tags). To describe a STable (a combination of data collection points of a specific type), in addition to defining the table structure of the collected metrics, it is also necessary to define the schema of its tag. The data type of tags can be int, float, string, and there can be multiple tags, which can be added, deleted, or modified afterward. If the whole system has N different types of data collection points, N STables need to be established.
In the design of TDengine, **a table is used to represent a specific data collection point, and STable is used to represent a set of data collection points of the same type**. When creating a table for a specific data collection point, the user uses the definition of STable as a template and specifies the tag value of the specific collection point (table). Compared with the traditional relational database, the table (a data collection point) has static tags, and these tags can be added, deleted, and modified afterward. **A STable contains multiple tables with the same time-series data schema but different tag values.**
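A sketch of how the smart meter STable and one member table might be defined, with the schema and tag values taken from Table 1 for illustration:

```mysql
-- one STable for all smart meters: metric columns plus a tag schema
CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT)
  TAGS (location BINARY(64), groupId INT);

-- one table per collection point, created from the STable template
CREATE TABLE d1001 USING meters TAGS ("Beijing.Chaoyang", 2);
```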
When aggregating multiple data collection points of the same type, TDengine first finds the tables that satisfy the tag filters from the STable, and then scans the time-series data of those tables to perform the aggregation. This greatly reduces the data sets to be scanned, and thus greatly improves the performance of aggregation calculation.
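For instance, a hedged sketch of such an aggregation over the illustrative schema above, averaging current per minute for one location:

```mysql
SELECT AVG(current) FROM meters
  WHERE location = 'Beijing.Chaoyang' INTERVAL(1m);
```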
## <a class="anchor" id="cluster"></a> Cluster and Primary Logic Unit
The design of TDengine is based on the assumption that one single hardware or software system is unreliable and that no single computer can provide sufficient computing and storage resources to process massive data. Therefore, TDengine has been designed according to a distributed and high-reliability architecture since Day One of R&D, which supports scale-out, so that hardware failure or software failure of any single or multiple servers will not affect the availability and reliability of the system. At the same time, through node virtualization and automatic load-balancing technology, TDengine can make the most efficient use of computing and storage resources in heterogeneous clusters to reduce hardware investment.
### Primary Logic Unit
The logical structure diagram of the TDengine distributed architecture is as follows:
![TDengine architecture diagram](page://images/architecture/structure.png)
<center> Figure 1: TDengine architecture diagram </center>
A complete TDengine system runs on one or more physical nodes. Logically, it includes data nodes (dnode), the TDengine application driver (taosc), and applications (app). There are one or more data nodes in the system, which form a cluster. The application interacts with the TDengine cluster through taosc's API. The following is a brief introduction to each logical unit.
**Physical node (pnode)**: A pnode is a computer that runs independently and has its own computing, storage and network capabilities. It can be a physical machine, virtual machine or Docker container installed with OS. The physical node is identified by its configured FQDN (Fully Qualified Domain Name). TDengine relies entirely on FQDN for network communication. If you don't know about FQDN, please read the blog post "[All about FQDN of TDengine](https://www.taosdata.com/blog/2020/09/11/1824.html)".
**Data node (dnode):** A dnode is a running instance of the TDengine server-side execution code taosd on a physical node. A working system must have at least one data node. A dnode contains zero to multiple logical virtual nodes (VNODE), zero or at most one logical management node (mnode). The unique identification of a dnode in the system is determined by the instance's End Point (EP). EP is a combination of FQDN (Fully Qualified Domain Name) of the physical node where the dnode is located and the network port number (Port) configured by the system. By configuring different ports, a physical node (a physical machine, virtual machine or container) can run multiple instances or have multiple data nodes.
**Virtual node (vnode)**: In order to better support data sharding, load balancing and prevent data from overheating or skewing, data nodes are virtualized into multiple virtual nodes (vnode, V2, V3, V4, etc. in the figure). Each vnode is a relatively independent work unit, which is the basic unit of time-series data storage, and has independent running threads, memory space and persistent storage path. A vnode contains a certain number of tables (data collection points). When a new table is created, the system checks whether a new vnode needs to be created. The number of vnodes that can be created on a data node depends on the hardware capacities of the physical node where the data node is located. A vnode belongs to only one DB, but a DB can have multiple vnodes. In addition to the stored time-series data, a vnode also stores the schema and tag values of the included tables. A virtual node is uniquely identified in the system by the EP of the data node and the VGroup ID to which it belongs, and is created and managed by the management node.
**Management node (mnode)**: A virtual logical unit responsible for monitoring and maintaining the running status of all data nodes and load balancing among nodes (M in figure). At the same time, the management node is also responsible for the storage and management of metadata (including users, databases, tables, static tags, etc.), so it is also called Meta Node. Multiple (up to 5) mnodes can be configured in a TDengine cluster, and they are automatically constructed into a virtual management node group (M0, M1, M2 in the figure). The master/slave mechanism is used to manage between mnodes, and the data synchronization is carried out in a strong consistent way. Any data update operation can only be done on the master. The creation of mnode cluster is completed automatically by the system without manual intervention. There is at most one mnode on each dnode, which is uniquely identified by the EP of the data node to which it belongs. Each dnode automatically obtains the EP of the dnode where all mnodes in the whole cluster are located through internal messaging interaction.
**Virtual node group (VGroup)**: Vnodes on different data nodes can form a virtual node group to ensure the high reliability of the system. The virtual node group is managed in a master/slave structure. Write operations can only be performed on the master vnode, and the system synchronizes data to the slave vnodes via replication, thus ensuring that each single piece of data is copied to multiple physical nodes. The number of virtual nodes in a vgroup equals the number of data replicas. If the number of replicas of a DB is N, the system must have at least N data nodes. The number of replicas can be specified by the parameter replica when creating a DB, and the default is 1. Using the multi-replica feature of TDengine, the same high data reliability can be achieved without expensive storage devices such as disk arrays. Virtual node groups are created and managed by the management node, which assigns each a system-unique ID, the VGroup ID. If two virtual nodes have the same VGroup ID, it means that they belong to the same group and their data is backed up to each other. The number of virtual nodes in a virtual node group can be dynamically changed, allowing as few as one, that is, no data replication. The VGroup ID never changes. Even if a virtual node group is deleted, its ID will not be reused.
**TAOSC**: TAOSC is the driver provided by TDengine to applications. It is responsible for the interaction between applications and the cluster, and provides the native C/C++ interface, which is embedded in the JDBC, C#, Python, Go, and Node.js connection libraries. Applications interact with the whole cluster through taosc instead of connecting directly to data nodes in the cluster. This module is responsible for obtaining and caching metadata; forwarding requests for insertion, query, etc. to the correct data node; and, when returning results to the application, performing the final level of aggregation, sorting, filtering, and other operations. For the JDBC, C/C++, C#, Python, Go, and Node.js interfaces, this module runs on the physical node where the application is located. At the same time, in order to support the fully distributed RESTful interface, taosc has a running instance on each dnode of the TDengine cluster.
### Node Communication
**Communication mode**: The communication among each data node of TDengine system, and among application driver and each data node is carried out through TCP/UDP. Considering an IoT scenario, the data writing packets are generally not large, so TDengine uses UDP in addition to TCP for transmission, because UDP is more efficient and is not limited by the number of connections. TDengine implements its own timeout, retransmission, confirmation and other mechanisms to ensure reliable transmission of UDP. For packets with a data volume of less than 15K, UDP is adopted for transmission, and TCP is automatically adopted for transmission of packets with a data volume of more than 15K or query operations. At the same time, TDengine will automatically compress/decompress the data, digital sign/authenticate the data according to the configuration and data packet. For data replication among data nodes, only TCP is used for data transmission.
**FQDN configuration**: A data node has one or more FQDNs, which can be specified with the parameter "fqdn" in the system configuration file taos.cfg. If not specified, the system automatically uses the computer's hostname as its FQDN. The parameter fqdn can also be set directly to the node's IP address, but this is not recommended: IP addresses may change, and once an address changes the cluster stops working properly. The EP (End Point) of a data node consists of FQDN + Port. When FQDNs are used, make sure DNS resolution works, or configure the hosts file on the cluster nodes and on the hosts where applications run.
**Port configuration**: The external port of a data node is determined by the system configuration parameter serverPort, and the port for internal cluster communication is serverPort+5. Data replication among data nodes occupies one more TCP port, serverPort+10. To support multithreading and efficient UDP processing, each internal and external UDP connection occupies 5 consecutive ports. The total port range of a data node is therefore serverPort to serverPort+10, 11 TCP/UDP ports in total. Make sure the firewall keeps these ports open. Each data node can be configured with a different serverPort.
**Cluster external connection**: A TDengine cluster can accommodate a single data node, multiple data nodes, or even thousands of them. An application only needs to initiate a connection to any data node in the cluster; the network parameter required is the End Point (FQDN plus configured port number) of one data node. When starting the CLI application taos, the FQDN of the data node can be specified with the option -h, and the configured port number with -p. If the port is not specified, the system configuration parameter serverPort is used.
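As a minimal sketch (the host name below is a placeholder, and the port is left to default to that node's serverPort), connecting to a cluster from the CLI might look like:

```bash
# connect to any data node of the cluster; h1.taosdata.com is an example FQDN
taos -h h1.taosdata.com
```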
**Inter-cluster communication**: Data nodes connect with each other over TCP/UDP. When a data node starts, it obtains the EPs of the dnodes where the mnodes are located, then establishes connections with the mnodes to exchange information. The mnode EPs are obtained in three steps: 1) check whether the mnodeEpList file exists; if it does not exist or cannot be opened, go to step 2; 2) read the configuration parameters firstEp and secondEp from taos.cfg (the nodes they point to may be ordinary nodes without an mnode, in which case the connection is redirected to an mnode); if these two parameters are not configured in taos.cfg or are invalid, go to step 3; 3) set the node's own EP as the mnode EP and run independently. After obtaining the mnode EP list, the data node initiates a connection and joins the working cluster once the connection succeeds. If a connection attempt fails, it tries the next entry in the mnode EP list; if every entry fails, it sleeps for a few seconds before trying again.
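A hedged sketch of the relevant taos.cfg entries (all FQDNs and the port are placeholder values):

```
# taos.cfg on a node joining an existing cluster
fqdn        h3.taosdata.com
serverPort  6030
firstEp     h1.taosdata.com:6030
secondEp    h2.taosdata.com:6030
```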
**The choice of mnode**: TDengine logically has a management node, but there is no separate executable; the server side has only one executable, taosd. So which data node becomes a management node? This is determined automatically by the system without manual intervention. The rule is as follows: when a data node starts, it checks its own End Point against the obtained mnode EP List. If its EP is in the list, the node starts the mnode module and becomes an mnode; otherwise, the mnode module is not started. During system operation, an mnode may migrate to a new dnode due to load balancing, downtime or other reasons; this is completely transparent and requires no manual intervention. Such decisions are made by the mnode itself according to resource usage and configuration parameters.
**Add new data nodes:** Once the system has one data node, it is already a working system. Adding a new node to the cluster takes two steps. Step 1: connect to an existing working data node with the TDengine CLI and register the new data node's End Point with the command "create dnode". Step 2: in the new data node's configuration file taos.cfg, set firstEp and secondEp to the EPs of any two data nodes in the existing cluster. Please refer to the user tutorial for detailed steps. In this way, the cluster is expanded node by node.
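For illustration (the FQDN and port are placeholders), the registration command of step 1 might look like:

```mysql
-- run in the TDengine CLI connected to an existing node
CREATE DNODE "h3.taosdata.com:6030";
```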
**Redirection**: Whether it is a dnode or taosc, it first needs to connect to an mnode. But mnodes are created and maintained automatically by the system, so the user does not know which dnode is running an mnode. TDengine only requires a connection to any working dnode in the system. Since every running dnode maintains the current mnode EP List, when a dnode receives a connection request from a newly started dnode or from taosc and is not an mnode itself, it replies with the mnode EP List. After receiving the list, taosc or the newly started dnode retries the connection against an mnode. When the mnode EP List changes, each data node quickly obtains the latest list through inter-node messaging and notifies taosc.
### A Typical Messaging Process
To explain the relationship between vnode, mnode, taosc and applications, and their respective roles, the following analyzes a typical data-writing process.
![ typical process of TDengine](page://images/architecture/message.png)
<center> Picture 2 typical process of TDengine </center>
1. Application initiates a request to insert data through JDBC, ODBC, or other APIs.
2. taosc checks its cache for the table's metadata. If the metadata is cached, go straight to Step 4; if not, taosc sends a metadata request to the mnode.
3. The mnode returns the table's metadata to taosc. The metadata contains the table's schema and the vgroup information the table belongs to (the vnode IDs and the End Points of the corresponding dnodes; if the number of replicas is N, there are N groups of End Points). If taosc does not receive a response from the mnode for a long time and there are multiple mnodes, taosc sends the request to the next mnode.
4. Taosc initiates an insert request to master vnode.
5. After inserting the data, the vnode replies to taosc, indicating that the insertion succeeded. If taosc gets no response from the vnode for a long time, it judges the node offline; in that case, if the target database has multiple replicas, taosc issues the insert request to the next vnode in the vgroup.
6. Taosc notifies APP that writing is successful.
For Steps 2 and 3, when taosc starts, it does not know the End Point of any mnode, so it directly sends the request to the configured externally serving End Point of the cluster. If the dnode receiving the request does not run an mnode, it returns the mnode EP list in its reply, and taosc re-issues the metadata request to the EP of an mnode in that list.
For Steps 4 and 5, without cached information taosc cannot tell which vnode in the virtual node group is the master, so it assumes the first vnode is and sends the request to it. If that vnode is not the master, its reply identifies the actual master, and taosc resends the request there. Once the reply of a successful insertion is received, taosc caches the master's information.
The above describes the insert process; query and computation follow exactly the same flow. taosc encapsulates and hides all these complicated processes; applications neither perceive nor need to handle them.
Thanks to taosc's caching mechanism, the mnode is accessed only the first time a table is operated on, so the mnode does not become a system bottleneck. However, because schemas and vgroups may change (e.g. due to load balancing), taosc interacts with the mnode periodically to refresh the cache automatically.
## <a class="anchor" id="sharding"></a> Storage Model and Data Partitioning/Sharding
### Storage Model
The data stored by TDengine includes the collected time-series data, the metadata of databases and tables, tag data, etc. These data are divided into three parts:
- Time-series data: stored in vnodes, composed of data, head and last files. The amount of data is large, and the query volume depends on the application scenario. Out-of-order writing is allowed, but deletion is not supported for the time being, and updates are allowed only when the update parameter is set to 1. With the one-table-per-collection-point model, the data of a given time period is stored contiguously, and writing to a single table is a simple append operation; multiple records can be read at once. This ensures optimal insert and query performance for a single data collection point.
- Tag data: stored in the meta files of vnodes, supporting the four standard operations create, read, update and delete. The amount of data is not large: with N tables there are N records, so all tag data can be held in memory. Tag filtering queries can be very frequent, and TDengine supports multi-core, multi-threaded concurrent querying; with sufficient computing resources, filtering results even over millions of tables return within milliseconds.
- Metadata: stored in the mnode, including system nodes, users, DBs, table schemas and other information, supporting the four standard operations create, read, update and delete. The amount of data is not large and can be held in memory, and thanks to client caching the query volume is not large either. Although TDengine uses centralized storage management for metadata, it does not become a performance bottleneck.
Compared with the typical NoSQL storage model, TDengine stores tag data and time-series data completely separately, which has two major advantages:
- It greatly reduces the redundancy of tag data storage: typical NoSQL or time-series databases use K-V storage, where the key includes a timestamp, device ID and various tags. Every record carries this duplicated tag content, wasting storage space. Moreover, if the application needs to add, modify or delete tags on historical data, it must traverse and rewrite the data, which is an extremely expensive operation.
- It enables extremely efficient aggregation queries across multiple tables: the system first finds the tables that pass the tag filters, then locates the corresponding data blocks of those tables. This greatly reduces the data set to be scanned and improves query efficiency. Moreover, tag data is managed and maintained in a fully in-memory structure, so tag queries over tens of millions of tables return within milliseconds.
### Data Sharding
For large-scale data management, scale-out generally requires a partitioning (sharding) strategy. TDengine shards data via vnodes and partitions time-series data by time range, with one data file per time range.
VNode (Virtual Data Node) is responsible for writing, querying and computing the collected time-series data. To facilitate load balancing, data recovery and heterogeneous environments, TDengine splits a data node into multiple vnodes according to its computing and storage resources. The management of these vnodes is done automatically by TDengine and is completely transparent to the application.
For a single data collection point, regardless of the amount of data, a vnode (or a vnode group, if the number of replicas is greater than 1) has sufficient computing and storage resources to handle it (if one 16-byte record is generated per second, the raw data produced in a year is less than 0.5 GB). Therefore, TDengine stores all the data of a table (one data collection point) in a single vnode rather than spreading it across multiple dnodes. Moreover, one vnode can store data of multiple data collection points (tables), up to one million tables per vnode. By design, all tables in a vnode belong to the same DB. On a data node, unless specially configured, the number of vnodes owned by a DB will not exceed the number of CPU cores.
When a DB is created, the system does not allocate resources immediately. When a table is created, the system checks whether an allocated vnode has free table space; if so, the table is created in that vnode immediately. If not, the system creates a new vnode on a dnode chosen according to the current workload, and then creates the table there. If a DB has multiple replicas, the system creates a whole vgroup (virtual node group) rather than a single vnode. TDengine imposes no hard limit on the number of vnodes; it is bounded only by the computing and storage resources of the physical nodes.
The metadata of each table (including schema, tags, etc.) is also stored in the vnode rather than centrally in the mnode. This effectively shards the metadata, which facilitates efficient, parallel tag-filtering operations.
### Data Partitioning
In addition to sharding via vnodes, TDengine partitions time-series data by time range. Each data file contains time-series data for one time range only, whose length is determined by the DB configuration parameter "days". Partitioning by time range also makes the data retention policy efficient to implement: data files older than the configured number of days (system configuration parameter "keep") are deleted automatically. Moreover, different time ranges can be placed on different paths and storage media, which facilitates hot/cold management of big data and enables tiered storage.
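As an illustrative sketch (the database name and values are examples, not requirements), both parameters are set at database creation:

```mysql
-- one data file per 10 days; data older than 3650 days is purged automatically
CREATE DATABASE power DAYS 10 KEEP 3650;
```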
In general, **TDengine splits big data by vnode and time as two dimensions**, which is convenient for parallel and efficient management with scale-out.
### Load Balancing
Each dnode regularly reports its status (hard disk space, memory size, CPU, network, number of virtual nodes, etc.) to the mnode (virtual management node), so the mnode knows the status of the entire cluster. Based on this overall state, when the mnode finds an overloaded dnode, it migrates one or more vnodes to other dnodes. During the process, external services keep running, and data insertion, query and computation are unaffected.
If the mnode has not received a dnode's status for a period of time, the dnode is judged offline. If the offline state lasts longer than a threshold (determined by the configuration parameter offlineThreshold), the dnode is forcibly removed from the cluster by the mnode. If the vnodes on that dnode belong to vgroups with more than one replica, the system automatically creates new replicas on other dnodes to restore the replica count. If the dnode also ran an mnode and the mnode replica count is greater than one, the system likewise creates a new mnode on another dnode.
When new data nodes are added to the cluster, bringing new computing and storage resources, the system automatically starts the load balancing process.
Load balancing requires no manual intervention and no application restart; it is completely transparent to applications. **Note: load balancing is controlled by the parameter "balance", which turns automatic load balancing on or off.**
## <a class="anchor" id="replication"></a> Data Writing and Replication Process
If a database has N replicas, a virtual node group has N virtual nodes, of which exactly one is the master and the rest are slaves. When an application writes a new record, only the master vnode can accept the write request. If a slave vnode receives the request, the system notifies taosc to redirect it.
### Master vnode Writing Process
A master vnode follows the writing process below:
![TDengine Master Writing Process](page://images/architecture/write_master.png)
<center> Picture 3 TDengine Master Writing Process </center>
1. The master vnode receives the application's data insertion request, verifies it, and moves to the next step;
2. If the system configuration parameter "walLevel" is greater than 0, the vnode writes the original request packet into the database log file WAL. If walLevel is set to 2 and fsync to 0, TDengine flushes the WAL to disk immediately, so that even if the system crashes, all data can be recovered from the log file;
3. If there are multiple replicas, the vnode forwards the data packet to the slave vnodes in the same virtual node group; the forwarded packet carries a version number along with the data;
4. The data is written into memory and the record is added to the skip list;
5. The master vnode returns a confirmation message to the application, indicating a successful write.
6. If any of Steps 2, 3 or 4 fails, the error is returned directly to the application.
### Slave vnode Writing Process
For a slave vnode, the writing process is as follows:
![TDengine Slave Writing Process](page://images/architecture/write_slave.png)
<center> Picture 4 TDengine Slave Writing Process </center>
1. The slave vnode receives a data insertion request forwarded by the master vnode;
2. If the system configuration parameter "walLevel" is greater than 0, the vnode writes the original request packet into the database log file WAL. If walLevel is set to 2 and fsync to 0, TDengine flushes the WAL to disk immediately, so that even if the system crashes, all data can be recovered from the log file;
3. The data is written into memory and the record is added to the skip list.
Compared with the master vnode, a slave vnode skips the forwarding and reply-confirmation steps, i.e. two steps fewer; the memory write and the WAL are exactly the same.
### Remote Disaster Recovery and IDC Migration
As the master and slave processes above show, TDengine uses asynchronous replication for data synchronization. This greatly improves write performance and makes the impact of network latency insignificant. By configuring an IDC and rack number for each physical node, it can be ensured that the virtual nodes of a virtual node group reside on physical nodes in different IDCs and different racks, thus implementing remote disaster recovery without additional tools.
On the other hand, TDengine supports dynamically changing the number of replicas. When the replica count increases, the newly added virtual nodes immediately enter the data synchronization process; once synchronization completes, they can provide services. Throughout synchronization, the master and the already synchronized virtual nodes keep serving. With this feature, TDengine supports IDC migration without service interruption: simply add the new physical nodes to the existing cluster, and remove the old physical nodes after data synchronization completes.
However, asynchronous replication has a tiny time window in which written data can be lost. The specific scenario is as follows:
1. The master vnode completes its 5-step operation, confirms the successful write to the application, and then goes down;
2. The slave vnode receives the forwarded write request but fails before completing the log write of Step 2;
3. The slave vnode becomes the new master, thus losing one record.
In theory, asynchronous replication can never guarantee zero data loss. However, the window is extremely small: it requires the master and a slave to fail almost simultaneously, right after the write has been confirmed to the application.
Note: Remote disaster recovery and no-downtime IDC migration are only supported by Enterprise Edition. **Hint: This function is not available yet**
### Master/slave Selection
Each vnode maintains a version number, which is persisted together with the in-memory data. Every data update operation, whether on time-series data or metadata, increments this version number by one.
When a vnode starts, its role (master or slave) is undetermined and its data is unsynchronized. It establishes TCP connections with the other vnodes in its virtual node group and exchanges status, including version numbers and roles. Through this exchange, the system carries out master selection, following these rules:
1. If there is only one replica, it is always the master;
2. When all replicas are online, the one with the latest version is the master;
3. When over half of the virtual nodes are online and one of them is in slave state, it automatically becomes the master;
4. For rules 2 and 3, if multiple virtual nodes qualify, the first one in the virtual node group list is selected as the master.
See [TDengine 2.0 Data Replication Module Design](https://www.taosdata.com/cn/documentation/architecture/replica/) for more information on the data replication process.
### Synchronous Replication
For scenarios with higher data consistency requirements, asynchronous replication is not suitable, because there is a small probability of data loss. Therefore, TDengine provides a synchronous replication mechanism. When creating a database, in addition to the number of replicas, the user specifies a parameter "quorum". If quorum is greater than one, every time the master forwards a write to the replicas, it waits for quorum-1 confirmation replies from slaves before informing the application that the write succeeded. If the quorum-1 confirmations do not arrive within a certain period, the master vnode returns an error to the application.
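A hedged sketch (the database name and values are examples): replication and quorum are both set at database creation:

```mysql
-- three replicas; the master waits for one slave confirmation (quorum-1 = 1)
-- before acknowledging each write to the application
CREATE DATABASE power REPLICA 3 QUORUM 2;
```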
With synchronous replication, system performance decreases and latency increases. Because metadata requires strong consistency, data synchronization among mnodes defaults to synchronous replication.
Note: synchronous replication between vnodes is only supported in Enterprise Edition
## <a class="anchor" id="persistence"></a> Caching and Persistence
### Caching
TDengine adopts a time-driven, First-In-First-Out (FIFO) cache management strategy, also known as a write-driven cache management mechanism. This differs from the read-driven caching model (Least-Recently-Used, LRU): TDengine puts the most recently written data directly into the system buffer, and when the buffer reaches a threshold, the earliest data is flushed to disk in batches. Generally speaking, users of IoT data care most about the newly generated data, i.e. the current state. TDengine takes full advantage of this by keeping the most recently arrived (current-state) data in the buffer.
TDengine provides millisecond-level data retrieval to users through its query functions. Keeping recently arrived data in the buffer allows the system to answer queries for the latest record, or latest batch of records, very quickly, improving overall query responsiveness. In this sense, **with appropriate configuration parameters TDengine can serve as a data cache without deploying Redis or another caching system**, which simplifies the system architecture and reduces operating costs. Note, however, that after TDengine restarts, the buffer is empty: previously cached data has been flushed to disk and, unlike in a dedicated key-value cache system, is not reloaded into the buffer.
Each vnode has its own independent memory, composed of multiple fixed-size memory blocks, and different vnodes are completely isolated. When writing, much like appending to a log, data is added to memory sequentially, while each vnode maintains its own skip list for fast lookup. When more than one third of the memory blocks are in use, flushing to disk begins, and subsequent writes go into a new memory block. In this way, one third of a vnode's memory blocks always hold the latest data, achieving both caching and fast search. The number of memory blocks per vnode is set by the configuration parameter "blocks", and the block size by the parameter "cache".
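For illustration (the name and values below are examples only; defaults vary by version), both parameters can be set when creating a database:

```mysql
-- 6 memory blocks of 16 MB each per vnode; roughly a third of them
-- always hold the most recently written data
CREATE DATABASE demo CACHE 16 BLOCKS 6;
```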
### Persistent Storage
TDengine writes the data in the buffer to hard disk in a data-driven manner: when the cached data in a vnode reaches a certain volume, TDengine starts a disk-writing thread to flush the cached data to persistent storage, so that subsequent writes are not blocked. TDengine opens a new database log file when the flush starts and deletes the old log file after a successful flush, avoiding unlimited log growth.
To exploit the characteristics of time-series data, TDengine splits the data persisted by a vnode into multiple files, each covering a fixed number of days determined by the system configuration parameter "days". Thus, given the start and end dates of a query, the system can immediately locate the data files to open, without any index, which greatly speeds up reads.
For collected data, there is generally a retention period, which is determined by the system configuration parameter “keep”. Data files exceeding this set number of days will be automatically deleted by the system to free up storage space.
Given the "days" and "keep" parameters, the total number of data files in a vnode is keep/days. The total should be neither too large nor too small; 10 to 100 is appropriate, and "days" can be chosen accordingly. In the current version, "keep" can be modified later, but "days" cannot be changed once set.
In each data file, the data of a table is stored in blocks; a table can have one or more data blocks per file. Within a block, data is stored by column in contiguous storage space, which greatly improves read speed. The block size is governed by the system parameter "maxRows" (maximum number of records per block), which defaults to 4096. This value should be neither too large nor too small: too large, and locating specific data takes longer; too small, and the data-block index becomes too large while compression efficiency drops, slowing reads.
Each data file (with a .data postfix) has a corresponding index file (with a .head postfix). The index file holds summary information for each table's data blocks, recording each block's offset in the data file, the start and end times of the data, and so on, so the system can quickly locate the data. Each data file also has a corresponding last file (with a .last postfix), designed to prevent fragmentation of data blocks when flushing. If the number of records flushed for a table does not reach the system configuration parameter "minRows" (minimum records per block), they are stored in the last file first; at the next flush, the newly written records are merged with the records in the last file and then written into a data file.
When data is written to disk, whether to compress it depends on the system configuration parameter "comp". TDengine provides three options: no compression, one-stage compression, and two-stage compression, corresponding to comp values of 0, 1 and 2. One-stage compression is applied according to the data type, using algorithms such as delta-delta coding, simple-8B, zig-zag coding and LZ4. Two-stage compression applies a general-purpose compression algorithm on top of one-stage compression, yielding a higher compression ratio.
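A hedged sketch tying these parameters together (the database name and values are examples):

```mysql
-- blocks hold 100 to 4096 records; two-stage compression is applied on flush
CREATE DATABASE demo MINROWS 100 MAXROWS 4096 COMP 2;
```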
### Tiered Storage
By default, TDengine saves all data in /var/lib/taos directory, and the data files of each vnode are saved in a different directory under this directory. In order to expand the storage space, minimize the bottleneck of file reading and improve the data throughput rate, TDengine can configure the system parameter “dataDir” to allow multiple mounted hard disks to be used by system at the same time. In addition, TDengine also provides the function of tiered data storage, i.e. storage on different storage media according to the time stamps of data files. For example, the latest data is stored on SSD, the data for more than one week is stored on local hard disk, and the data for more than four weeks is stored on network storage device, thus reducing the storage cost and ensuring efficient data access. The movement of data on different storage media is automatically done by the system and completely transparent to applications. Tiered storage of data is also configured through the system parameter “dataDir”.
dataDir format is as follows:
```
dataDir data_path [tier_level]
```
Here data_path is the folder path of the mount point, and tier_level is the storage tier of the media; the higher the tier, the older the data files it holds. Multiple hard disks can be mounted at the same tier, and data files on one tier are distributed across all disks within it. TDengine supports up to 3 tiers, so tier_level can be 0, 1 or 2. Exactly one dataDir entry must omit tier_level; it is called the special mount disk (path). It defaults to tier 0 and contains special file links that must not be removed, otherwise the written data will be irrecoverably damaged.
Suppose a physical node has six mountable hard disks /mnt/disk1, /mnt/disk2, …, /mnt/disk6, where disk1 and disk2 are to be tier 0 storage media, disk3 and disk4 tier 1, and disk5 and disk6 tier 2. disk1 is the special mount disk. This can be configured in /etc/taos/taos.cfg as follows:
```
dataDir /mnt/disk1/taos
dataDir /mnt/disk2/taos 0
dataDir /mnt/disk3/taos 1
dataDir /mnt/disk4/taos 1
dataDir /mnt/disk5/taos 2
dataDir /mnt/disk6/taos 2
```
A mounted disk can also be a non-local network disk, as long as the system can access it.
Note: Tiered Storage is only supported in Enterprise Edition
## <a class="anchor" id="query"></a>Data Query
TDengine provides a variety of query processing functions for tables and STables. In addition to common aggregation queries, TDengine also provides window queries and statistical aggregation functions for time-series data. The query processing of TDengine needs the collaboration of client, vnode and mnode.
### Single Table Query
SQL statements are parsed and validated on the client side: parsing produces an Abstract Syntax Tree (AST), which is then checked for validity. The client then requests the metadata (table metadata) of the table referenced in the query from the management node (mnode).
Based on the End Point information in the metadata, the query request is serialized and sent to the data node (dnode) hosting the table. On receiving the query, the dnode identifies the target virtual node (vnode) and forwards the message to that vnode's query execution queue. The vnode's query execution thread establishes the basic query execution environment, immediately acknowledges the query request, and starts executing the query.
When the client fetches the query result, the worker thread in the dnode's query execution queue waits for the vnode's execution thread to finish before returning the result to the client.
### Aggregation by Time Axis, Downsampling, Interpolation
What distinguishes time-series data from ordinary data is that every record has a timestamp, so aggregating data along the time axis is an important capability that sets time-series databases apart from common databases. In this respect, it resembles the window queries of stream computing engines.
TDengine introduces the keyword "interval", which splits the time axis into fixed-length time windows and aggregates the data within each window as needed. For example:
```mysql
select count(*) from d1001 interval(1h);
```
This statement counts, for each 1-hour window, the number of records collected by device d1001.
In application scenarios where query results must be produced continuously, if data is missing in a time window, the result for that window would also be missing. TDengine provides an interpolation strategy for time-axis aggregation results through the keyword "fill". For example:
```mysql
select count(*) from d1001 interval(1h) fill(prev);
```
This statement counts the number of records per hour for device d1001; if an hour has no data, the result of the previous hour is returned. TDengine provides forward filling (prev), linear interpolation (linear), NULL filling (NULL), and specific-value filling (value).
### Multi-table Aggregation Query
TDengine creates a separate table for each data collection point, but in practice it is often necessary to aggregate data from different collection points. To do this efficiently, TDengine introduces the STable concept. A STable represents one type of data collection point: it is a set of tables whose schemas are identical, but each table carries its own static tags. A table can have multiple tags, which can be added, deleted and modified at any time. By specifying tag filters, applications can run aggregations or statistics over all tables, or a subset of the tables, under a STable, which greatly simplifies application development. The process is shown in the following figure:
![Diagram of multi-table aggregation query](page://images/architecture/multi_tables.png)
<center> Picture 5 Diagram of multi-table aggregation query </center>
1. The application sends a query condition to the system;
2. taosc sends the STable name to the meta node (management node);
3. The management node sends the list of vnodes owned by the STable back to taosc;
4. taosc sends the computing request, together with the tag filters, to the data nodes hosting these vnodes;
5. Each vnode first finds, in memory, the set of its tables that satisfy the tag filters, then scans the stored time-series data, completes the corresponding aggregation, and returns the result to taosc;
6. taosc aggregates the results returned by the data nodes and sends the final result back to the application.
Since TDengine stores tag data and time-series data separately in the vnode, filtering tag data in memory first determines the set of tables that need to participate in the aggregation, which greatly reduces the volume of data scanned and speeds up aggregation. Because the data is distributed across multiple vnodes/dnodes, the aggregation runs concurrently in multiple vnodes, further improving speed. Aggregation functions and most operations on ordinary tables also apply to STables, with exactly the same syntax; please see TAOS SQL for details.
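As a hedged sketch, assuming the meters STable defined in the Data Modeling section below (with tags location and groupId), a multi-table aggregation might look like:

```mysql
-- average voltage per location over the last 24 hours;
-- the tag filter (groupId = 2) is evaluated in memory before any
-- time-series data is scanned
SELECT AVG(voltage) FROM meters
  WHERE groupId = 2 AND ts >= now - 24h
  GROUP BY location;
```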
### Precomputation
To effectively improve query performance, and based on the fact that IoT data is immutable, TDengine records statistical information about the data stored in each block (maximum, minimum, sum) in the block header; we call this a precomputing unit. If a query involves all the data of an entire block, the precomputed results are used directly, and the block contents need not be read at all. Since the precomputed data is much smaller than the blocks stored on disk, using it greatly reduces read I/O pressure and accelerates queries whose bottleneck is disk I/O. The precomputation mechanism is similar to PostgreSQL's BRIN (Block Range Index).
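For illustration (the table and time range are examples), a query like the following can be served largely from precomputing units for the blocks that fall entirely inside the time range:

```mysql
-- max, min and sum are available in each block header, so full blocks
-- inside the range are answered without reading their contents
SELECT MAX(current), MIN(current), SUM(current) FROM d1001
  WHERE ts >= '2018-10-01 00:00:00' AND ts < '2018-10-08 00:00:00';
```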

View File
@@ -0,0 +1,74 @@
# Data Modeling
TDengine adopts a relational data model, so we need to create databases and tables. For a given application scenario, it is necessary to design the database, the STables and the ordinary tables. This section does not discuss detailed syntax rules, only the concepts.
Please watch the [video tutorial](https://www.taosdata.com/blog/2020/11/11/1945.html) for data modeling.
## <a class="anchor" id="create-db"></a> Create a Database
Different types of data collection points often have different data characteristics: collection frequency, retention period, number of replicas, data block size, whether updates are allowed, and so on. To ensure TDengine works efficiently in all scenarios, it is suggested to place tables with different data characteristics in different databases, since each database can be configured with its own storage strategy. When creating a database, in addition to the standard SQL options, the application can specify parameters such as retention duration, number of replicas, number of memory blocks, time precision, maximum and minimum records per file block, whether data is compressed, and the number of days covered by one data file. For example:
```mysql
CREATE DATABASE power KEEP 365 DAYS 10 BLOCKS 4 UPDATE 1;
```
The statement above creates a database named "power". Its data is kept for 365 days (data older than that is deleted automatically), one data file is created every 10 days, the number of memory blocks is 4, and updating existing records is allowed (UPDATE 1). For detailed syntax and parameters, please refer to [Data Management section of TAOS SQL](https://www.taosdata.com/en/documentation/taos-sql#management).
After the database is created, use the SQL command USE to switch to it:
```mysql
USE power;
```
This makes "power" the default database for the current connection; otherwise, before operating on a table, you would need to prefix it with its database name, as in "database_name.table_name".
**Note:**
- Any table or STable belongs to a database. Before creating a table, a database must be created first.
- Tables in two different databases cannot be JOINed.
## <a class="anchor" id="create-stable"></a> Create a STable
An IoT system often has many types of devices, such as smart meters, transformers, buses and switches for a power grid. To facilitate aggregation across multiple tables, TDengine requires a STable to be created for each type of data collection point. Taking the smart meter in Table 1 as an example, the following SQL command creates a STable:
```mysql
CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);
```
**Note:** The STABLE keyword in this instruction needs to be written as TABLE in versions before 2.0.15.
Just as when creating an ordinary table, you provide the table name (meters in the example) and the table schema, i.e. the definition of the data columns. The first column must be a timestamp (ts in the example); the other columns are the collected physical metrics (current, voltage, phase in the example), whose data types can be int, float, string, etc. In addition, you provide the tag schema (location, groupId in the example); tag data types can also be int, float, string, and so on. Static attributes of a collection point, such as its geographic location, device model, device group ID or administrator ID, are often good tags. The tag schema can be added to, deleted from and modified afterwards. Please refer to the [STable Management section of TAOS SQL](https://www.taosdata.com/cn/documentation/taos-sql#super-table) for specific definitions and details.
Each type of data collection point needs its own STable, so an IoT system often has multiple STables. For a power grid, we need STables for smart meters, transformers, buses, switches, and so on. A device may also have multiple data collection points (for example, on a wind turbine some collection points capture parameters such as current and voltage, while others capture environmental parameters such as temperature, humidity and wind direction); in this case, one STable is created per collection-point type. All physical metrics contained in one STable must be collected at the same time (with a consistent timestamp).
A STable allows up to 1024 columns. If the number of physical metrics collected at a collection point exceeds 1024, multiple STables need to be built to process them. A system can have multiple DBs, and a DB can have one or more STables.
## <a class="anchor" id="create-table"></a> Create a Table
TDengine builds an independent table for each data collection point. As in a standard relational database, a table has a name and a schema; in addition, it carries one or more tag values. When creating a table, you use the STable as a template and specify the tag values. Taking the smart meter in Table 1 as an example, the following SQL command creates the table:
```mysql
CREATE TABLE d1001 USING meters TAGS ("Beijing.Chaoyang", 2);
```
Here d1001 is the table name and meters is the STable name, followed by the tag value "Beijing.Chaoyang" for tag location and the tag value 2 for tag groupId. Although the tag values are specified when the table is created, they can be modified afterwards. Please refer to the [Table Management section of TAOS SQL](https://www.taosdata.com/en/documentation/taos-sql#table) for details.
**Note:** At present, TDengine does not technically prevent using a STable of one database (dbA) as a template to create a sub-table in another database (dbB). This usage will be prohibited in the future, and it is not recommended.
TDengine suggests using the globally unique ID of the data collection point as the table name (such as the device serial number). If there is no unique ID, multiple IDs can be combined into one unique ID. It is not recommended to use the unique ID as a tag value.
**Automatic table creation**: In some special scenarios, it is unknown at write time whether the table for a data collection point exists. In this case, the table can be created with the automatic table creation syntax while writing; if the table already exists, no new table is created. For example:
```mysql
INSERT INTO d1001 USING meters TAGS ("Beijing.Chaoyang", 2) VALUES (now, 10.2, 219, 0.32);
```
The SQL statement above inserts the record (now, 10.2, 219, 0.32) into table d1001. If table d1001 does not exist yet, it is created automatically using the STable meters as a template, with the tag values "Beijing.Chaoyang" and 2 set at the same time.
For detailed syntax of automatic table building, please refer to the "[Automatic Table Creation When Inserting Records](https://www.taosdata.com/en/documentation/taos-sql#auto_create_table)" section.
## Multi-column Model vs Single-column Model
TDengine supports a multi-column model: as long as the physical metrics are collected simultaneously by a data collection point (with a consistent timestamp), they can be placed in one STable as different columns. There is also an extreme alternative, the single-column model, in which a separate table is created per metric, i.e. one STable per metric type. For the smart meter, for instance, three STables would be created, one each for current, voltage and phase.
TDengine recommends the multi-column model whenever possible, because of its higher insertion and storage efficiency. However, in some scenarios the set of collected metrics changes frequently; with the multi-column model, the STable definition would need frequent modification, complicating the application. In such cases, the single-column model is recommended.
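A hedged sketch contrasting the two models for the smart-meter example (the single-column STable names are illustrative only):

```mysql
-- multi-column model: one STable, all metrics sampled together
CREATE STABLE meters (ts timestamp, current float, voltage int, phase float)
  TAGS (location binary(64), groupId int);

-- single-column model: one STable per metric (only current shown here;
-- meters_voltage and meters_phase would follow the same pattern)
CREATE STABLE meters_current (ts timestamp, value float)
  TAGS (location binary(64), groupId int);
```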

View File
@@ -0,0 +1,282 @@
# Efficient Data Writing
TDengine supports multiple ways of writing data, including SQL, Prometheus, Telegraf, EMQ MQTT Broker, HiveMQ Broker and CSV files; Kafka, OPC and other interfaces will be provided in the future. Data can be inserted one record at a time or in batches, for a single data collection point or for many. TDengine supports multi-threaded insertion, out-of-order insertion, and insertion of historical data.
## <a class="anchor" id="sql"></a> SQL Writing
Applications insert data by executing SQL insert statements through the C/C++, JDBC, Go or Python connectors, and users can enter SQL insert statements manually through the TAOS Shell. For example, the following statement writes one record into table d1001:
```mysql
INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31);
```
TDengine supports writing multiple records at a time. For example, the following command writes two records to table d1001:
```mysql
INSERT INTO d1001 VALUES (1538548684000, 10.2, 220, 0.23) (1538548696650, 10.3, 218, 0.25);
```
TDengine also supports writing data to multiple tables at a time. For example, the following command writes two records to d1001 and one record to d1002:
```mysql
INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6, 218, 0.33) d1002 VALUES (1538548696800, 12.3, 221, 0.31);
```
For the SQL INSERT grammar, please refer to [Taos SQL insert](https://www.taosdata.com/en/documentation/taos-sql#insert).
**Tips:**
- To improve writing efficiency, write in batches: the more records in one batch, the higher the insertion efficiency. However, a record cannot exceed 16 KB, and the total length of one SQL statement cannot exceed 64 KB (configurable via the parameter maxSQLLength, up to 1 MB).
- TDengine supports multi-threaded parallel writing. To further improve write throughput, a client may need more than 20 threads writing in parallel. Beyond a certain threshold, however, adding threads no longer helps and may even hurt, because overly frequent thread switching brings extra overhead.
- For the same table, if a newly inserted record's timestamp already exists and the database was not created with UPDATE 1, the new record is discarded by default; that is, timestamps must be unique within a table. If an application generates records automatically, it is quite likely to produce identical timestamps, so the number of records successfully inserted may be smaller than the number the application tried to insert. If the database was created with UPDATE 1, inserting a record with an existing timestamp overwrites the original record.
- The timestamp of written data must be no older than the current time minus the duration of the configuration parameter keep: if keep is 3650 days, data older than 3650 days cannot be written. Nor may the timestamp exceed the current time plus the configuration parameter days: if days is 2, data more than 2 days ahead of the current time cannot be written.
## <a class="anchor" id="prometheus"></a> Direct Writing of Prometheus
As a graduated project of the Cloud Native Computing Foundation, [Prometheus](https://www.prometheus.io/) is widely used for performance monitoring and Kubernetes monitoring. TDengine provides a simple tool, [Bailongma](https://github.com/taosdata/Bailongma), which, with only simple configuration in Prometheus and no code, writes the data collected by Prometheus directly into TDengine and automatically creates databases and related tables according to rules. The blog post [Use Docker Container to Quickly Build a Devops Monitoring Demo](https://www.taosdata.com/blog/2020/02/03/1189.html) gives an example of using Bailongma to write Prometheus and Telegraf data into TDengine.
### Compile blm_prometheus From Source
Users need to download the [Bailongma](https://github.com/taosdata/Bailongma) source code from GitHub, then compile it into an executable with the Go compiler. Before compiling, complete the following preparations:
- A server running Linux OS
- Go 1.10 or higher installed
- An appropriate TDengine version. Because Bailongma uses TDengine's client dynamic library, the TDengine version installed must match the server side; for example, if the server runs TDengine 2.0.0, install 2.0.0 on the Linux server where Bailongma runs (which can be the same server as TDengine, or a different one)
The Bailongma project has a folder, blm_prometheus, holding the Prometheus writing API. The compilation process is as follows:
```bash
cd blm_prometheus
go build
```
If everything goes well, an executable of blm_prometheus will be generated in the corresponding directory.
### Install Prometheus
Download and install Prometheus following the instructions on its official website. [Download Address](https://prometheus.io/download/)
### Configure Prometheus
Read the Prometheus [configuration documentation](https://prometheus.io/docs/prometheus/latest/configuration/configuration/) and add the following configuration to the remote_write section of the Prometheus configuration file:
- url: The URL provided by bailongma API service, refer to the blm_prometheus startup example section below
After Prometheus is launched, you can check whether the data is written successfully by querying through the taos client.
### Launch blm_prometheus
blm_prometheus provides the following options, configurable at launch:
```sh
--tdengine-name
If TDengine is installed on a server with a domain name, TDengine can also be reached by configuring that domain name. In a K8S environment, it can be set to the service name under which TDengine runs
--batch-size
blm_prometheus assembles the received prometheus data into a TDengine writing request. This parameter controls the number of data pieces carried in a writing request sent to TDengine at a time.
--dbname
Set a name for the database created in TDengine, blm_prometheus will automatically create a database named dbname in TDengine, and the default value is prometheus.
--dbuser
Set the user name to access TDengine; the default value is 'root'
--dbpassword
Set the password to access TDengine; the default value is 'taosdata'
--port
The port number blm_prometheus used to serve prometheus.
```
### Example
Launch an API service for blm_prometheus with the following command:
```bash
./blm_prometheus -port 8088
```
Assuming that the IP address of the server where blm_prometheus is located is "10.1.2.3", add the URL to the Prometheus configuration file as:
```yaml
remote_write:
  - url: "http://10.1.2.3:8088/receive"
```
### Query Written Data of Prometheus
Prometheus generates data in the following format:
```json
{
Timestamp: 1576466279341,
Value: 37.000000,
apiserver_request_latencies_bucket {
component="apiserver",
instance="192.168.99.116:8443",
job="kubernetes-apiservers",
le="125000",
resource="persistentvolumes",
scope="cluster",
verb="LIST",
version="v1"
}
}
```
Here apiserver_request_latencies_bucket is the name of the time series collected by Prometheus, and the tags of the time series are inside the braces. blm_prometheus automatically creates a STable in TDengine named after the time series, converts the tags in the braces into TDengine tag values, and uses Timestamp as the timestamp and Value as the value of the time series. You can therefore check in the TDengine client whether this data was written successfully with the following statements:
```mysql
use prometheus;
select * from apiserver_request_latencies_bucket;
```
## <a class="anchor" id="telegraf"></a> Direct Writing of Telegraf
[Telegraf](https://www.influxdata.com/time-series-platform/telegraf/) is a popular open-source tool for collecting IT operations data. TDengine provides a simple tool, [Bailongma](https://github.com/taosdata/Bailongma), which, with only simple configuration in Telegraf and no code, writes the data collected by Telegraf directly into TDengine and automatically creates databases and related tables according to rules. The blog post [Use Docker Container to Quickly Build a Devops Monitoring Demo](https://www.taosdata.com/blog/2020/02/03/1189.html) gives an example of using Bailongma to write Prometheus and Telegraf data into TDengine.
### Compile blm_telegraf From Source Code
Users need to download the [Bailongma](https://github.com/taosdata/Bailongma) source code from GitHub, then compile it into an executable with the Go compiler. Before compiling, complete the following preparations:
- A server running Linux OS
- Go 1.10 or higher installed
- An appropriate TDengine version. Because Bailongma uses TDengine's client dynamic library, the TDengine version installed must match the server side; for example, if the server runs TDengine 2.0.0, install 2.0.0 on the Linux server where Bailongma runs (which can be the same server as TDengine, or a different one)
The Bailongma project has a folder, blm_telegraf, holding the Telegraf writing API. The compilation process is as follows:
```bash
cd blm_telegraf
go build
```
If everything goes well, an executable of blm_telegraf will be generated in the corresponding directory.
### Install Telegraf
At the moment, TDengine supports Telegraf 1.7.4 and above. Users can download the installation package for their operating system from Telegraf's website: https://portal.influxdata.com/downloads
### Configure Telegraf
Modify the TDengine-related configurations in the Telegraf configuration file /etc/telegraf/telegraf.conf.
In the output plugins section, add the [[outputs.http]] configuration:
- url: The URL provided by bailongma API service, please refer to the example section below
- data_format: "json"
- json_timestamp_units: "1ms"
In the agent section:
- hostname: The machine name used to distinguish different collection devices; it must be unique
- metric_batch_size: 100, the maximum number of records Telegraf writes per batch; increasing it reduces the request frequency of Telegraf
For how to use Telegraf to collect data and more about Telegraf, please refer to the official Telegraf [documentation](https://docs.influxdata.com/telegraf/v1.11/).
### Launch blm_telegraf
blm_telegraf provides the following options, configurable at launch:
```sh
--host
The IP address of the TDengine server; the default is null
--batch-size
blm_telegraf assembles the received Telegraf data into a TDengine writing request. This parameter controls the number of data records carried in one writing request sent to TDengine
--dbname
Set a name for the database created in TDengine; blm_telegraf automatically creates a database named dbname in TDengine, and the default value is telegraf
--dbuser
Set the user name to access TDengine; the default value is 'root'
--dbpassword
Set the password to access TDengine; the default value is 'taosdata'
--port
The port number blm_telegraf used to serve Telegraf.
```
### Example
Launch an API service for blm_telegraf with the following command
```bash
./blm_telegraf -host 127.0.0.1 -port 8089
```
Assuming that the IP address of the server where blm_telegraf is located is "10.1.2.3", add the URL to the Telegraf configuration file as:
```yaml
url = "http://10.1.2.3:8089/telegraf"
```
### Query Written Data of Telegraf
Telegraf generates data in the following format:
```json
{
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 89.7897897897898,
"usage_iowait": 0,
"usage_irq": 0,
"usage_nice": 0,
"usage_softirq": 0,
"usage_steal": 0,
"usage_system": 5.405405405405405,
"usage_user": 4.804804804804805
},
"name": "cpu",
"tags": {
"cpu": "cpu2",
"host": "bogon"
},
"timestamp": 1576464360
}
```
Where the name field is the name of the time-series data collected by Telegraf, and the tag field is its tag. blm_telegraf automatically creates a STable in TDengine named after the time-series data, converts the tag field into TDengine tag values, uses timestamp as the timestamp, and uses the fields values as the values of the time-series data. Therefore, in the TDengine client, you can check whether this data was successfully written through the following statements.
```mysql
use telegraf;
select * from cpu;
```
## <a class="anchor" id="emq"></a> Direct Writing of EMQ Broker
MQTT is a popular data transmission protocol in the IoT. TDengine can easily access the data received by an MQTT broker and write it into TDengine.
[EMQ](https://github.com/emqx/emqx) is an open source MQTT broker software; with no coding required, a simple configuration of "rules" in the EMQ Dashboard is enough to write MQTT data directly into TDengine. EMQ X supports saving data to TDengine by sending it to a web service, and the Enterprise Edition also provides a native TDengine driver for direct data storage. Please refer to the [EMQ official documents](https://docs.emqx.io/broker/latest/cn/rule/rule-example.html#%E4%BF%9D%E5%AD%98%E6%95%B0%E6%8D%AE%E5%88%B0-tdengine) for more details.
## <a class="anchor" id="hivemq"></a> Direct Writing of HiveMQ Broker
[HiveMQ](https://www.hivemq.com/) is an MQTT broker that provides Free, Personal, and Enterprise Edition versions. It is mainly used for enterprises and emerging machine-to-machine (M2M) communication and internal transmission, meeting requirements for scalability, easy management, and security. HiveMQ provides an open source plug-in development kit; you can store data to TDengine via the HiveMQ extension-TDengine. Refer to the [HiveMQ extension-TDengine documentation](https://github.com/huskar-t/hivemq-tdengine-extension/blob/b62a26ecc164a310104df57691691b237e091c89/README.md) for more details.

View File

@ -0,0 +1,99 @@
# Efficient Data Querying
## <a class="anchor" id="queries"></a> Main Query Features
TDengine uses SQL as the query language. Applications can send SQL statements through the C/C++, Java, Go, and Python connectors, and users can manually execute ad-hoc SQL queries through the Command Line Interface (CLI) tool TAOS Shell provided by TDengine. TDengine supports the following query functions:
- Single-column and multi-column data query
- Multiple filters for tags and numeric values: >, <, =, <>, like, etc.
- Group by, Order by, Limit/Offset of aggregation results
- Four arithmetic operations (+, -, *, /) on numeric columns and aggregation results
- Time stamp aligned join query (implicit join) operations
- Multiple aggregation/calculation functions: count, max, min, avg, sum, twa, stddev, leastsquares, top, bottom, first, last, percentile, apercentile, last_row, spread, diff, etc
For example, in TAOS Shell, the records with voltage > 215 are queried from table d1001, sorted in descending order by timestamp, and only two records are output.
```mysql
taos> select * from d1001 where voltage > 215 order by ts desc limit 2;
ts | current | voltage | phase |
======================================================================================
2018-10-03 14:38:16.800 | 12.30000 | 221 | 0.31000 |
2018-10-03 14:38:15.000 | 12.60000 | 218 | 0.33000 |
Query OK, 2 row(s) in set (0.001100s)
```
In order to meet the needs of an IoT scenario, TDengine supports several special functions, such as twa (time weighted average), spread (difference between maximum and minimum), last_row (last record), etc. More functions related to IoT scenarios will be added. TDengine also supports continuous queries.
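For instance, a sketch combining two of these IoT-oriented functions on the table d1001 from the example above (the one-hour time range is an assumption):
```mysql
taos> SELECT TWA(current), SPREAD(voltage) FROM d1001 WHERE ts >= now - 1h AND ts <= now;
```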
For specific query syntax, please see the [Data Query section of TAOS SQL](https://www.taosdata.com/cn/documentation/taos-sql#select).
## <a class="anchor" id="aggregation"></a> Multi-table Aggregation Query
In an IoT scenario, there are often multiple data collection points of the same type. TDengine uses the concept of STable to describe a certain type of data collection point, and an ordinary table to describe a specific data collection point. At the same time, TDengine uses tags to describe the static attributes of data collection points; a given data collection point has specific tag values. By specifying tag filters, TDengine provides an efficient method to aggregate and query the sub-tables of a STable (the data collection points of a certain type). Aggregation functions and most operations on ordinary tables are applicable to STables, and the syntax is exactly the same.
**Example 1**: In TAOS Shell, look up the average voltages collected by all smart meters in Beijing and group them by location
```mysql
taos> SELECT AVG(voltage) FROM meters GROUP BY location;
avg(voltage) | location |
=============================================================
222.000000000 | Beijing.Haidian |
219.200000000 | Beijing.Chaoyang |
Query OK, 2 row(s) in set (0.002136s)
```
**Example 2**: In TAOS Shell, find the number of records and the maximum current of all smart meters with groupId 2 in the past 24 hours
```mysql
taos> SELECT count(*), max(current) FROM meters where groupId = 2 and ts > now - 24h;
count(*) | max(current) |
==================================
5 | 13.4 |
Query OK, 1 row(s) in set (0.002136s)
```
TDengine only allows aggregation queries among tables belonging to the same STable; aggregation queries between different STables are not supported. The Data Query section of TAOS SQL indicates, for each query operation, whether STables are supported.
## <a class="anchor" id="sampling"></a> Down Sampling Query, Interpolation
In an IoT scenario, it is often necessary to aggregate the collected data by time intervals through down sampling. TDengine provides a simple keyword, interval, which makes query operations by time window extremely simple. For example, the current values collected by smart meter d1001 are summed every 10 seconds.
```mysql
taos> SELECT sum(current) FROM d1001 INTERVAL(10s);
ts | sum(current) |
======================================================
2018-10-03 14:38:00.000 | 10.300000191 |
2018-10-03 14:38:10.000 | 24.900000572 |
Query OK, 2 row(s) in set (0.000883s)
```
The down sampling operation is also applicable to STables, such as summing the current values collected by all smart meters in Beijing every second.
```mysql
taos> SELECT SUM(current) FROM meters where location like "Beijing%" INTERVAL(1s);
ts | sum(current) |
======================================================
2018-10-03 14:38:04.000 | 10.199999809 |
2018-10-03 14:38:05.000 | 32.900000572 |
2018-10-03 14:38:06.000 | 11.500000000 |
2018-10-03 14:38:15.000 | 12.600000381 |
2018-10-03 14:38:16.000 | 36.000000000 |
Query OK, 5 row(s) in set (0.001538s)
```
The down sampling operation also supports time offset, such as summing the current values collected by all smart meters every second, but with each time window starting at 500 milliseconds past the whole second.
```mysql
taos> SELECT SUM(current) FROM meters INTERVAL(1s, 500a);
ts | sum(current) |
======================================================
2018-10-03 14:38:04.500 | 11.189999809 |
2018-10-03 14:38:05.500 | 31.900000572 |
2018-10-03 14:38:06.500 | 11.600000000 |
2018-10-03 14:38:15.500 | 12.300000381 |
2018-10-03 14:38:16.500 | 35.000000000 |
Query OK, 5 row(s) in set (0.001521s)
```
In an IoT scenario, it is difficult to synchronize the timestamps of data collected at each point, but many analysis algorithms (such as FFT) need to align data strictly at equal time intervals. In many systems, users have to write their own programs to handle this, but the down sampling operation of TDengine solves it easily. If there is no collected data in an interval, TDengine also provides an interpolation calculation function.
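As a sketch of the interpolation syntax, the following fills every empty 1-second window with the value of the previous window via FILL(PREV); the time range is an assumed example:
```mysql
taos> SELECT AVG(voltage) FROM meters WHERE ts >= '2018-10-03 14:38:00.000' AND ts <= '2018-10-03 14:38:20.000' INTERVAL(1s) FILL(PREV);
```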
For details of syntax rules, please refer to the [Time-dimension Aggregation section of TAOS SQL](https://www.taosdata.com/en/documentation/taos-sql#aggregation).

View File

@ -0,0 +1,360 @@
# Advanced Features
## <a class="anchor" id="continuous-query"></a> Continuous Query
Continuous Query is a query executed by TDengine periodically with a sliding window, it is a simplified stream computing driven by timers. Continuous query can be applied to a table or a STable automatically and periodically, and the result set can be passed to the application directly via call back function, or written into a new table in TDengine. The query is always executed on a specified time window (window size is specified by parameter interval), and this window slides forward while time flows (the sliding period is specified by parameter sliding).
Continuous query of TDengine adopts time-driven mode, which can be defined directly by TAOS SQL without additional operation. Using continuous query, results can be generated conveniently and quickly according to the time window, thus down sampling the original collected data. After the user defines a continuous query through TAOS SQL, TDengine automatically pulls up the query at the end of the last complete time period and pushes the calculated results to the user or writes them back to TDengine.
The continuous query provided by TDengine differs from the time window calculation in ordinary stream computing in the following ways:
- Unlike stream computing, which feeds back calculated results in real time, continuous query only starts calculating after the time window closes. For example, if the time period is 1 day, the results of that day will only be generated after 23:59:59.
- If a history record is written to the time interval that has been calculated, the continuous query will not recalculate and will not push the results to the user again. For the mode of writing back to TDengine, the existing calculated results will not be updated.
- Using the mode of continuous query pushing results, the server does not cache the client's calculation status, nor does it provide Exactly-Once semantic guarantee. If the user's application side crashed, the continuous query pulled up again would only recalculate the latest complete time window from the time pulled up again. If writeback mode is used, TDengine can ensure the validity and continuity of data writeback.
### How to use continuous query
The following takes the smart meter scenario as an example to introduce the specific use of continuous query. Suppose we create a STable and sub-tables through the following SQL statements:
```sql
create table meters (ts timestamp, current float, voltage int, phase float) tags (location binary(64), groupId int);
create table D1001 using meters tags ("Beijing.Chaoyang", 2);
create table D1002 using meters tags ("Beijing.Haidian", 2);
...
```
We already know that the average voltage of these meters can be counted with one minute as the time window and 30 seconds as the forward increment through the following SQL statement.
```sql
select avg(voltage) from meters interval(1m) sliding(30s);
```
Every time this statement is executed, all data will be recalculated. If you need to execute every 30 seconds to incrementally calculate the data of the latest minute, you can improve the above statement as following, using a different `startTime` each time and executing it regularly:
```sql
select avg(voltage) from meters where ts > {startTime} interval(1m) sliding(30s);
```
There is no problem with this, but TDengine provides a simpler method, just add `create table {tableName} as` before the initial query statement, for example:
```sql
create table avg_vol as select avg(voltage) from meters interval(1m) sliding(30s);
```
A new table named `avg_vol` will be automatically created, and then every 30 seconds, TDengine will incrementally execute the SQL statement after `as` and write the query result into this table. The user program only needs to query the data from `avg_vol`. For example:
```mysql
taos> select * from avg_vol;
ts | avg_voltage_ |
===================================================
2020-07-29 13:37:30.000 | 222.0000000 |
2020-07-29 13:38:00.000 | 221.3500000 |
2020-07-29 13:38:30.000 | 220.1700000 |
2020-07-29 13:39:00.000 | 223.0800000 |
```
It should be noted that the minimum value of the query time window is 10 milliseconds, and there is no upper limit of the time window range.
In addition, TDengine allows users to specify the starting and ending times of a continuous query. If the start time is not given, the continuous query will start from the time window where the first original data is located; if no end time is given, the continuous query will run permanently; if an end time is specified, the continuous query stops running after the system time reaches it. For example, a continuous query created with the following SQL will run for one hour and then automatically stop.
```mysql
create table avg_vol as select avg(voltage) from meters where ts > now and ts <= now + 1h interval(1m) sliding(30s);
```
It should be noted that `now` in the above example refers to the time when the continuous query is created, not the time when the query is executed; otherwise, the query could not stop automatically. In addition, to minimize the problems caused by delayed writing of original data, there is a certain delay in the calculation of continuous queries in TDengine. That is, after a time window has passed, TDengine will not immediately calculate the data of that window, so it takes a while (usually no more than 1 minute) for the result to appear.
### Manage the Continuous Query
Users can view all continuous queries running in the system through the `show streams` command in the console, and can kill a continuous query through the `kill stream` command. Subsequent versions will provide finer-grained and more convenient continuous query management commands.
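A minimal sketch of these management commands (the stream id here is hypothetical; use the id listed by show streams):
```mysql
taos> show streams;
taos> kill stream 1:2;
```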
## <a class="anchor" id="subscribe"></a> Publisher/Subscriber
Based on the natural time-series characteristics of data, the data insert of TDengine is logically consistent with the data publish (pub) of a messaging system: each insert can be regarded as a new record with a timestamp arriving in the system. At the same time, TDengine stores data in strictly monotonically increasing time order. Essentially, every table in TDengine can be regarded as a standard message queue.
TDengine supports embedded lightweight message subscription and publishing services. Using the API provided by the system, users can subscribe to one or more tables in the database with ordinary query statements. The maintenance of subscription logic and operation status is done by the client: the client regularly polls the server for new records, and the results are fed back to the client when new records arrive.
The status of the subscription and publishing services of TDengine is maintained by the client, not by the TDengine server. Therefore, if the application restarts, it is up to the application to decide from which point in time to obtain the latest data.
In TDengine, there are three main APIs relevant to subscription:
```c
taos_subscribe
taos_consume
taos_unsubscribe
```
Please refer to the [C/C++ Connector](https://www.taosdata.com/cn/documentation/connector/) for the documentation of these APIs. The following still uses the smart meter scenario as an example to introduce their specific usage (please refer to the previous section "Continuous Query" for the structure of the STable and sub-tables). The complete sample code can be found [here](https://github.com/taosdata/TDengine/blob/master/tests/examples/c/subscribe.c).
If we want to be notified and take action when the current of a smart meter exceeds a certain limit (e.g. 10A), there are two methods: one is to query each sub-table separately, record the timestamp of the last record after each query, and then only query all data after that timestamp:
```sql
select * from D1001 where ts > {last_timestamp1} and current > 10;
select * from D1002 where ts > {last_timestamp2} and current > 10;
...
```
This is indeed feasible, but as the number of meters increases, the number of queries will also increase, and the performance of both the client and the server will be affected, until the system cannot afford it.
Another method is to query the STable. In this way, no matter how many meters there are, only one query is required:
```sql
select * from meters where ts > {last_timestamp} and current > 10;
```
However, how to choose `last_timestamp` becomes a new problem. On the one hand, the time of data generation (the data timestamp) and the time of data storage are generally not the same, and sometimes the deviation is very large; on the other hand, the time when the data of different meters arrives at TDengine also varies. Therefore, if we use the timestamp of the data from the slowest meter as `last_timestamp` in the query, we may repeatedly read the data of other meters; if we use the timestamp of the fastest meter, the data of other meters may be missed.
The subscription function of TDengine provides a thorough solution to the above problem.
First, use `taos_subscribe` to create a subscription:
```c
TAOS_SUB* tsub = NULL;
if (async) {
  // create an asynchronous subscription; the callback function will be called every 1s
  tsub = taos_subscribe(taos, restart, topic, sql, subscribe_callback, &blockFetch, 1000);
} else {
  // create a synchronous subscription; 'taos_consume' needs to be called manually
  tsub = taos_subscribe(taos, restart, topic, sql, NULL, NULL, 0);
}
```
Subscriptions in TDengine can be either synchronous or asynchronous, and the above code will decide which method to use based on the value of parameter `async` obtained from the command line. Here, synchronous means that the user program calls `taos_consume` directly to pull data, while asynchronous means that the API calls `taos_consume` in another internal thread, and then gives the pulled data to the callback function `subscribe_callback` for processing.
Parameter `taos` is an established database connection and has no special requirements in synchronous mode. However, in asynchronous mode, make sure it is not used by other threads, otherwise unpredictable errors may occur, because the callback function is called in an internal thread of the API, and some APIs of TDengine are not thread-safe.
Parameter `sql` is a query statement in which you can specify filters using where clause. In our example, if you only want to subscribe to data when the current exceeds 10A, you can write as follows:
```sql
select * from meters where current > 10;
```
Note that the starting time is not specified here, so the data of all meters will be read. If you only want to start subscribing from the data of one day ago and do not need earlier historical data, you can add a time condition:
```sql
select * from meters where ts > now - 1d and current > 10;
```
The `topic` of the subscription is actually its name. Because the subscription function is implemented in the client API, it does not need to be globally unique, but it must be unique on a single client machine.
If a subscription named `topic` does not exist, the parameter `restart` is meaningless. However, if the user program exits after creating this subscription, when it starts again and reuses this `topic`, `restart` decides whether to read data from scratch or from the previous position. In this example, if `restart` is **true** (non-zero), the user program will definitely read all the data; if this subscription existed before, some data has already been read, and `restart` is **false** (zero), the user program will not read the previously read data.
The last parameter of `taos_subscribe` is the polling period in milliseconds. In synchronous mode, if the interval between the two calls to `taos_consume` is less than this time, `taos_consume` will block until the interval exceeds this time. In asynchronous mode, this time is the minimum time interval between two calls to the callback function.
The penultimate parameter of `taos_subscribe` is used by the user program to pass additional parameters to the callback function, which is passed to the callback function as it is without any processing by the subscription API. This parameter is meaningless in sync mode.
Once created, the subscription can consume data. In synchronous mode, the consuming code is the `else` branch in the sample below:
```c
if (async) {
  getchar();
} else while(1) {
  TAOS_RES* res = taos_consume(tsub);
  if (res == NULL) {
    printf("failed to consume data.");
    break;
  } else {
    print_result(res, blockFetch);
    getchar();
  }
}
```
Here is a **while** loop: every time the user presses the Enter key, `taos_consume` is called, and its return value is a query result set exactly the same as that of `taos_use_result`. In the example, the code that uses this result set is the function `print_result`:
```c
void print_result(TAOS_RES* res, int blockFetch) {
  TAOS_ROW row = NULL;
  int num_fields = taos_num_fields(res);
  TAOS_FIELD* fields = taos_fetch_fields(res);
  int nRows = 0;
  if (blockFetch) {
    nRows = taos_fetch_block(res, &row);
    for (int i = 0; i < nRows; i++) {
      char temp[256];
      taos_print_row(temp, row + i, fields, num_fields);
      puts(temp);
    }
  } else {
    while ((row = taos_fetch_row(res))) {
      char temp[256];
      taos_print_row(temp, row, fields, num_fields);
      puts(temp);
      nRows++;
    }
  }
  printf("%d rows consumed.\n", nRows);
}
```
Here, `taos_print_row` is used to process the subscribed data; in our example, it prints out all matching records. In asynchronous mode, consuming the subscribed data is simpler:
```c
void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) {
  print_result(res, *(int*)param);
}
```
To end a data subscription, you need to call `taos_unsubscribe`:
```c
taos_unsubscribe(tsub, keep);
```
Its second parameter decides whether to keep the subscription progress information on the client. If this parameter is **false** (zero), the subscription can only restart from scratch no matter what `restart` is set to the next time `taos_subscribe` is called. Progress information is saved in the directory {DataDir}/subscribe/; each subscription has a file with the same name as its `topic`, and deleting that file will also make the corresponding subscription start from scratch when it is created next time.
After introducing the code, let's take a look at the actual running effect. Assume that:
- Sample code has been downloaded locally
- TDengine has been installed on the same machine
- All the databases, STables and sub-tables required by the example have been created
You can compile and start the sample program by executing the following command in the directory where the sample code is located:
```shell
$ make
$ ./subscribe -sql='select * from meters where current > 10;'
```
After the sample program starts, open another terminal window, start the TDengine shell, and insert a record with a current of 12A into **D1001**:
```shell
$ taos
> use test;
> insert into D1001 values(now, 12, 220, 1);
```
At this time, because the current exceeds 10A, you should see that the sample program outputs it to the screen. You can continue to insert some data to observe the output of the sample program.
### Use data subscription in Java
The subscription function also provides a Java development interface, as described in the [Java Connector](https://www.taosdata.com/cn/documentation/connector/). It should be noted that the Java interface does not provide an asynchronous subscription mode at present, but user programs can achieve the same feature by creating a TimerTask, as in the sketch below.
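A minimal sketch of that TimerTask approach, assuming a `TSDBSubscribe` object named `subscribe` created as in the example below (java.util.Timer/TimerTask and java.sql.SQLException imports are assumed):
```java
Timer timer = new Timer();
timer.schedule(new TimerTask() {
    @Override
    public void run() {
        try {
            TSDBResultSet resultSet = subscribe.consume(); // poll for newly arrived records
            if (resultSet == null) {
                return;
            }
            // process resultSet here ...
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }
}, 0, 1000); // run immediately, then once per second
```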
The following is an example to introduce its specific use. The function it completes is basically the same as the C language example described earlier, and it is also to subscribe to all records with current exceeding 10A in the database.
#### Prepare data
```sql
# Create power Database
taos> create database power;
# Switch to the database
taos> use power;
# Create a STable
taos> create table meters(ts timestamp, current float, voltage int, phase int) tags(location binary(64), groupId int);
# Create tables
taos> create table d1001 using meters tags ("Beijing.Chaoyang", 2);
taos> create table d1002 using meters tags ("Beijing.Haidian", 2);
# Insert test data
taos> insert into d1001 values("2020-08-15 12:00:00.000", 12, 220, 1),("2020-08-15 12:10:00.000", 12.3, 220, 2),("2020-08-15 12:20:00.000", 12.2, 220, 1);
taos> insert into d1002 values("2020-08-15 12:00:00.000", 9.9, 220, 1),("2020-08-15 12:10:00.000", 10.3, 220, 1),("2020-08-15 12:20:00.000", 11.2, 220, 1);
# Query all records with current over 10A from STable meters
taos> select * from meters where current > 10;
ts | current | voltage | phase | location | groupid |
===========================================================================================================
2020-08-15 12:10:00.000 | 10.30000 | 220 | 1 | Beijing.Haidian | 2 |
2020-08-15 12:20:00.000 | 11.20000 | 220 | 1 | Beijing.Haidian | 2 |
2020-08-15 12:00:00.000 | 12.00000 | 220 | 1 | Beijing.Chaoyang | 2 |
2020-08-15 12:10:00.000 | 12.30000 | 220 | 2 | Beijing.Chaoyang | 2 |
2020-08-15 12:20:00.000 | 12.20000 | 220 | 1 | Beijing.Chaoyang | 2 |
Query OK, 5 row(s) in set (0.004896s)
```
#### Example
```java
public class SubscribeDemo {
private static final String topic = "topic-meter-current-bg-10";
private static final String sql = "select * from meters where current > 10";
public static void main(String[] args) {
Connection connection = null;
TSDBSubscribe subscribe = null;
try {
Class.forName("com.taosdata.jdbc.TSDBDriver");
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
String jdbcUrl = "jdbc:TAOS://127.0.0.1:6030/power?user=root&password=taosdata";
connection = DriverManager.getConnection(jdbcUrl, properties);
subscribe = ((TSDBConnection) connection).subscribe(topic, sql, true); // Create a subscription
int count = 0;
while (count < 10) {
                    TimeUnit.SECONDS.sleep(1); // Wait 1 second to avoid calling consume too frequently and causing pressure on the server
                    TSDBResultSet resultSet = subscribe.consume(); // Consume data
if (resultSet == null) {
continue;
}
ResultSetMetaData metaData = resultSet.getMetaData();
while (resultSet.next()) {
int columnCount = metaData.getColumnCount();
for (int i = 1; i <= columnCount; i++) {
System.out.print(metaData.getColumnLabel(i) + ": " + resultSet.getString(i) + "\t");
}
System.out.println();
count++;
}
}
} catch (Exception e) {
e.printStackTrace();
} finally {
try {
if (null != subscribe)
subscribe.close(true); // Close the subscription
if (connection != null)
connection.close();
} catch (SQLException throwables) {
throwables.printStackTrace();
}
}
}
}
```
Run the sample program. First, it consumes all the historical data that meets the query conditions:
```shell
# java -jar subscribe.jar
ts: 1597464000000 current: 12.0 voltage: 220 phase: 1 location: Beijing.Chaoyang groupid : 2
ts: 1597464600000 current: 12.3 voltage: 220 phase: 2 location: Beijing.Chaoyang groupid : 2
ts: 1597465200000 current: 12.2 voltage: 220 phase: 1 location: Beijing.Chaoyang groupid : 2
ts: 1597464600000 current: 10.3 voltage: 220 phase: 1 location: Beijing.Haidian groupid : 2
ts: 1597465200000 current: 11.2 voltage: 220 phase: 1 location: Beijing.Haidian groupid : 2
```
Then, add a record to the table via the taos client:
```sql
# taos
taos> use power;
taos> insert into d1001 values("2020-08-15 12:40:00.000", 12.4, 220, 1);
```
Because the current of this data is greater than 10A, the sample program will consume it:
```shell
ts: 1597466400000 current: 12.4 voltage: 220 phase: 1 location: Beijing.Chaoyang groupid: 2
```
## <a class="anchor" id="cache"></a> Cache
TDengine adopts a time-driven cache management strategy (First-In-First-Out, FIFO), also known as a write-driven cache management mechanism. This strategy differs from the read-driven data cache mode (Least-Recently-Used, LRU): it directly saves the most recently written data in the system buffer, and when the buffer reaches a threshold, the oldest data is written to disk in batches. Generally speaking, for IoT data, users are most concerned about the most recently generated data, that is, the current status. TDengine takes full advantage of this by keeping the most recently arrived (current status) data in the buffer.
TDengine provides millisecond-level data retrieval to users through its query functions. Keeping recently arrived data directly in the buffer allows faster responses to queries over the latest record or batch of records, and faster database query responses overall. In this way, TDengine can serve as a data cache by setting appropriate configuration parameters, without deploying an additional caching system, which effectively simplifies the system architecture and reduces operating costs. It should be noted that after TDengine is restarted, the system buffer is emptied, the previously cached data is written to disk in batches, and TDengine will not reload previously cached data into the buffer as some dedicated key-value cache systems do.
TDengine allocates a fixed size of memory space as a buffer, which can be configured according to application requirements and hardware resources. By properly setting the buffer space, TDengine can provide extremely high-performance write and query support. Each virtual node in TDengine is allocated a separate cache pool when it is created. Each virtual node manages its own cache pool, and different virtual nodes do not share the pool. All tables belonging to each virtual node share the cache pool owned by itself.
TDengine manages the memory pool in blocks, and data is stored in row form within them. The memory pool of a vnode is allocated in blocks when the vnode is created, and each memory block is managed according to the First-In-First-Out strategy. The size of a block is determined by the system configuration parameter cache, and the number of memory blocks in each vnode by the configuration parameter blocks. So for a vnode, the total memory size is cache * blocks. To be efficient, a cache block should be able to store at least dozens of records of each table.
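For example, with cache set to 16 MB and blocks set to 6 (hypothetical values), each vnode would be allocated 16 MB × 6 = 96 MB of write buffer, shared by all tables belonging to that vnode.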
You can quickly obtain the last record of a table or a STable through the function last_row, which is very convenient to show the real-time status or collected values of each device on a large screen. For example:
```mysql
select last_row(voltage) from meters where location='Beijing.Chaoyang';
```
This SQL statement will obtain the last recorded voltage value of all smart meters located in Chaoyang District, Beijing.
## <a class="anchor" id="alert"></a> Alert
In TDengine application scenarios, alarm monitoring is a common requirement. Conceptually, it requires the program to filter out data that meets certain conditions from the data of the latest period, calculate a result from these data according to a defined formula, and, when the result meets certain conditions and lasts for a certain period of time, notify the user in some form.
In order to meet the needs of users for alarm monitoring, TDengine provides this function in the form of an independent module. For its installation and use, please refer to the blog [How to Use TDengine for Alarm Monitoring](https://www.taosdata.com/blog/2020/04/14/1438.html).

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,157 @@
# Connections with Other Tools
## <a class="anchor" id="grafana"></a> Grafana
TDengine can be quickly integrated with [Grafana](https://www.grafana.com/), an open source data visualization system, to build a data monitoring and alarming system. The whole process requires no code. The contents of TDengine data tables can be visually shown on a dashboard.
### Install Grafana
TDengine currently supports Grafana 5.2.4 and above. You can download and install the package from Grafana website according to the current operating system. The download address is as follows:
https://grafana.com/grafana/download.
### Configure Grafana
TDengine Grafana plugin is in the /usr/local/taos/connector/grafanaplugin directory.
Taking CentOS 7.2 as an example, just copy the grafanaplugin directory to the /var/lib/grafana/plugins directory and restart Grafana.
```bash
sudo cp -rf /usr/local/taos/connector/grafanaplugin /var/lib/grafana/plugins/tdengine
```
### Use Grafana
#### Configure data source
You can log in to the Grafana server (username/password: admin/admin) through localhost:3000, and add data sources through `Configuration -> Data Sources` on the left panel, as shown in the following figure:
![img](page://images/connections/add_datasource1.jpg)
Click `Add data source` to enter the Add Data Source page, type TDengine in the search box, and select it, as shown in the following figure:
![img](page://images/connections/add_datasource2.jpg)
Enter the data source configuration page and modify the corresponding configuration according to the default prompt:
![img](page://images/connections/add_datasource3.jpg)
- Host: IP address of any server in TDengine cluster and port number of TDengine RESTful interface (6041), default [http://localhost:6041](http://localhost:6041/)
- User: TDengine username.
- Password: TDengine user password.
Click `Save & Test` to test. Success will be prompted as follows:
![img](page://images/connections/add_datasource4.jpg)
#### Create Dashboard
Go back to the home to create Dashboard, and click `Add Query` to enter the panel query page:
![img](page://images/connections/create_dashboard1.jpg)
As shown in the figure above, select the TDengine data source in Query, and enter the corresponding SQL in the query box below. Details are as follows:
- INPUT SQL: Enter the statement to query (the result set of the SQL statement should be two columns and multiple rows), for example: `select avg(mem_system) from log.dn where ts >= $from and ts < $to interval($interval)` , where `from`, `to` and `interval` are built-in variables of the TDengine plug-in, representing the query range and time interval obtained from the Grafana plug-in panel. In addition to built-in variables, it is also supported to use custom template variables.
- ALIAS BY: You can set alias for the current queries.
- GENERATE SQL: Clicking this button will automatically replace the corresponding variable and generate the final statement to execute.
Following the default prompt, query the average system memory usage at the specified interval on the server where TDengine is currently deployed, as follows:
![img](page://images/connections/create_dashboard2.jpg)
> Please refer to Grafana [documents](https://grafana.com/docs/) for how to use Grafana to create the corresponding monitoring interface and for more about Grafana usage.
#### Import Dashboard
An importable dashboard, `tdengine-grafana.json`, is provided under the Grafana plug-in directory /usr/local/taos/connector/grafana/tdengine/dashboard/.
Click the `Import` button on the left panel and upload the `tdengine-grafana.json` file:
![img](page://images/connections/import_dashboard1.jpg)
You can see the following after the dashboard is imported.
![img](page://images/connections/import_dashboard2.jpg)
## <a class="anchor" id="matlab"></a> MATLAB
MATLAB can connect directly to TDengine via the JDBC driver provided in the installation package and fetch data into the local workspace.
### JDBC Interface Adaptation of MATLAB
Several steps are required to adapt MATLAB to TDengine. The following takes adapting MATLAB R2017a on Windows 10 as an example:
- Copy the file JDBCDriver-1.0.0-dist.jar in the TDengine package to the directory ${matlab_root}\MATLAB\R2017a\java\jar\toolbox
- Copy the file taos.lib in the TDengine package to ${matlab_root_dir}\MATLAB\R2017a\lib\win64
- Add the .jar package just copied to the MATLAB classpath by appending the line below to the end of the file ${matlab_root_dir}\MATLAB\R2017a\toolbox\local\classpath.txt
- ```
$matlabroot/java/jar/toolbox/JDBCDriver-1.0.0-dist.jar
```
- Create a file called javalibrarypath.txt in the directory ${user_home}\AppData\Roaming\MathWorks\MATLAB\R2017a, and add the taos.dll path in the file. For example, if the file taos.dll is in the directory C:\Windows\System32, then add the following line in javalibrarypath.txt:
- ```
C:\Windows\System32
```
### Connect to TDengine in MATLAB to get data
After the above is configured successfully, open MATLAB.
- Create a connection:
```matlab
conn = database('db', 'root', 'taosdata', 'com.taosdata.jdbc.TSDBDriver', 'jdbc:TSDB://127.0.0.1:0/')
```
* Make a query:
```matlab
sql0 = ['select * from tb'];
data = select(conn, sql0);
```
* Insert a record:
```matlab
sql1 = ['insert into tb values (now, 1)'];
exec(conn, sql1)
```
For more detailed examples, please refer to the examples\Matlab\TDEngineDemo.m file in the package.
## <a class="anchor" id="r"></a> R
The R language supports connecting to the TDengine database through the JDBC interface. First, install the JDBC package for R. Launch the R language environment, and then execute the following command to install the JDBC support library for R:
```R
install.packages('RJDBC', repos='http://cran.us.r-project.org')
```
After installation, load the RJDBC package by executing the `library('RJDBC')` command.
Then load the TDengine JDBC driver:
```R
drv<-JDBC("com.taosdata.jdbc.TSDBDriver","JDBCDriver-2.0.0-dist.jar", identifier.quote="\"")
```
If it succeeds, no error message will be displayed. Then use the following command to try a database connection:
```R
conn<-dbConnect(drv,"jdbc:TSDB://192.168.0.1:0/?user=root&password=taosdata","root","taosdata")
```
Please replace the IP address in the command above with the correct one. If no error message is shown, the connection is established successfully; otherwise, adjust the connection command according to the error prompt. TDengine supports the following functions in the *RJDBC* package:
- `dbWriteTable(conn, "test", iris, overwrite=FALSE, append=TRUE)`: write the data of data frame iris to the table test in the TDengine server. Parameter overwrite must be FALSE and append must be TRUE, and the schema of the data frame iris must be the same as that of the table test.
- `dbGetQuery(conn, "select count(*) from test")`: run a query command
- `dbSendUpdate(conn, sql)`: execute any non-query SQL statement, for example `dbSendUpdate(conn, "use db")` to switch databases, or `dbSendUpdate(conn, "insert into t1 values (now, 99)")` to write data.
- `dbReadTable(conn, "test")`: read all the data in table test
- `dbDisconnect(conn)`: close a connection
- `dbRemoveTable(conn, "test")`: remove table test
The functions below are not supported currently:
- `dbExistsTable(conn, "test")`: check whether the table test exists
- `dbListTables(conn)`: list all tables in the connection
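Putting a few of the supported calls together, a minimal round trip might look like the sketch below; it assumes the conn created above and a table t1 that already exists in database db:
```R
dbSendUpdate(conn, "use db")                            # switch to the target database
dbSendUpdate(conn, "insert into t1 values (now, 99)")   # write one record
print(dbGetQuery(conn, "select count(*) from t1"))      # run a query
dbDisconnect(conn)                                      # close the connection
```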

View File

@ -0,0 +1,235 @@
# TDengine Cluster Management
Multiple TDengine servers, that is, multiple running instances of taosd, can form a cluster to ensure the highly reliable operation of TDengine and provide scale-out features. To understand cluster management in TDengine 2.0, it is necessary to understand the basic concepts of clustering. Please refer to the chapter "Overall Architecture of TDengine 2.0". And before installing the cluster, please follow the chapter ["Getting started"](https://www.taosdata.com/en/documentation/getting-started/) to install and experience the single node function.
Each data node of the cluster is uniquely identified by End Point, which is composed of FQDN (Fully Qualified Domain Name) plus Port, such as [h1.taosdata.com](http://h1.taosdata.com/):6030. The general FQDN is the hostname of the server, which can be obtained through the Linux command `hostname -f` (how to configure FQDN, please refer to: [All about FQDN of TDengine](https://www.taosdata.com/blog/2020/09/11/1824.html)). Port is the external service port number of this data node. The default is 6030, but it can be modified by configuring the parameter serverPort in taos.cfg. A physical node may be configured with multiple hostnames, and TDengine will automatically get the first one, but it can also be specified through the configuration parameter fqdn in taos.cfg. If you are accustomed to direct IP address access, you can set the parameter fqdn to the IP address of this node.
The cluster management of TDengine is extremely simple. Except for manual intervention in adding and deleting nodes, all other tasks are completed automatically, thus minimizing the workload of operation. This chapter describes the operations of cluster management in detail.
Please refer to the [video tutorial](https://www.taosdata.com/blog/2020/11/11/1961.html) for cluster building.
## <a class="anchor" id="prepare"></a> Preparation
**Step 0:** Plan FQDN of all physical nodes in the cluster, and add the planned FQDN to /etc/hostname of each physical node respectively; modify the /etc/hosts of each physical node, and add the corresponding IP and FQDN of all cluster physical nodes. [If DNS is deployed, contact your network administrator to configure it on DNS]
**Step 1:** If any physical node has previous test data, or has had version 1.x or another version of TDengine installed, please delete it first and drop all data. For specific steps, please refer to the blog "[Installation and Uninstallation of Various Packages of TDengine](https://www.taosdata.com/blog/2019/08/09/566.html)"
**Note 1:** Because the information of FQDN will be written into a file, if FQDN has not been configured or has changed before, and TDengine has already been started, be sure to clean up the previous data (`rm -rf /var/lib/taos/*`) on the premise that the data is useless or has been backed up;
**Note 2:** The client also needs to be configured to ensure that it can correctly parse the FQDN configuration of each node, whether through DNS service or Host file.
**Step 2:** It is recommended to close the firewall on all physical nodes, or at least to ensure that TCP and UDP ports 6030-6042 are open. It is **strongly recommended** to close the firewall first and configure the ports after the cluster is built;
**Step 3:** Install TDengine on all physical nodes; the version must be consistent, **but do not start taosd**. During installation, when prompted whether to join an existing TDengine cluster, press Enter directly on the first physical node to create a new cluster, and on each subsequent physical node enter the FQDN:port (default 6030) of any online physical node already in the cluster;
**Step 4:** Check the network settings of all data nodes and the physical nodes where the application is located:
1. Execute command `hostname -f` on each physical node, and check and confirm that the hostnames of all nodes are different (the node where the application driver is located does not need to do this check).
2. Execute `ping host` on each physical node, where host is the hostname of another physical node, to check whether the other physical nodes can be reached; if not, check the network settings, the /etc/hosts file (the default path on Windows is C:\Windows\system32\drivers\etc\hosts), or the DNS configuration. If ping fails, the cluster cannot be built. (A quick sketch of these checks follows this list.)
3. From the physical node where the application runs, ping the data node where taosd runs. If the ping fails, the application cannot connect to taosd. Please check the DNS settings or hosts file of the physical node where the application is located;
4. The End Point of each data node is the output hostname plus the port number, for example, [h1.taosdata.com](http://h1.taosdata.com/): 6030
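A quick sketch of the checks above on each physical node (h2.taosdata.com stands for the FQDN of every other node):
```bash
hostname -f            # the output must differ across all physical nodes
ping h2.taosdata.com   # must succeed from every node to every other node
```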
**Step 5:** Modify the TDengine configuration file (the file/etc/taos/taos.cfg for all nodes needs to be modified). Assume that the first data node End Point to be started is [h1.taosdata.com](http://h1.taosdata.com/): 6030, and its parameters related to cluster configuration are as follows:
```
// firstEp is the first data node connected after each data node's first launch
firstEp h1.taosdata.com:6030
// Must configure it as the FQDN of this data node. If this machine has only one hostname, you can comment out this configuration
fqdn h1.taosdata.com
// Configure the port number of this data node, the default is 6030
serverPort 6030
// For application scenarios, please refer to the section “Use of Arbitrator”
arbitrator ha.taosdata.com:6042
```
The parameters that must be modified are firstEp and fqdn. firstEp must be configured to the same value on every data node, **but fqdn must be configured to the value of the data node where it is located**. Other parameters need not be modified unless you have a clear reason.
**A data node (dnode) added to the cluster must have exactly the same values as the existing nodes for the 11 cluster-related parameters in the following table, otherwise it cannot successfully join the cluster.**
| **#** | **Configuration Parameter Name** | **Description** |
| ----- | -------------------------------- | ------------------------------------------------------------ |
| 1 | numOfMnodes | Number of management nodes in system |
| 2     | mnodeEqualVnodeNum               | Number of vnodes that one mnode is equivalent to in resource consumption |
| 3 | offlineThreshold | Offline threshold of dnode to judge if the dnode is offline |
| 4 | statusInterval | The interval for dnode to report its status to mnode |
| 5 | arbitrator | The end point of the arbitrator in system |
| 6 | timezone | Time zone |
| 7 | locale | Location information and coding format of system |
| 8 | charset | Character set encoding |
| 9 | balance | Whether to start load balancing |
| 10 | maxTablesPerVnode | The maximum number of tables that can be created in each vnode |
| 11 | maxVgroupsPerDb | The maximum number of vgroups that can be used per DB |
## <a class="anchor" id="node-one"></a> Launch the First Data Node
Follow the instructions in "[Getting started](https://www.taosdata.com/en/documentation/getting-started/)" to launch the first data node, such as [h1.taosdata.com](http://h1.taosdata.com/), then execute taos to start the TAOS shell, and run the command "show dnodes" from the shell, as follows:
```
Welcome to the TDengine shell from Linux, Client Version:2.0.0.0
Copyright (c) 2017 by TAOS Data, Inc. All rights reserved.
taos> show dnodes;
id | end_point | vnodes | cores | status | role | create_time |
=====================================================================================
1 | h1.taos.com:6030 | 0 | 2 | ready | any | 2020-07-31 03:49:29.202 |
Query OK, 1 row(s) in set (0.006385s)
taos>
```
From the above output, you can see that the End Point of the newly launched data node is h1.taos.com:6030, which is the firstEp of the new cluster.
## <a class="anchor" id="node-other"></a>Launch Subsequent Data Nodes
To add subsequent data nodes to the existing cluster, follow these steps:
1. Start taosd at each physical node according to the chapter "[Getting started](https://www.taosdata.com/en/documentation/getting-started/)";
2. On the first data node, use CLI program taos to log in to TDengine system and execute the command:
```
CREATE DNODE "h2.taos.com:6030";
```
This adds the End Point of the new data node (learned in Step 4 of the preparation) to the cluster's EP list. **"fqdn:port" needs to be enclosed in double quotation marks**, otherwise an error will occur. Note that the example "[h2.taos.com](http://h2.taos.com/):6030" should be replaced with the End Point of this new data node.
3. Then execute the command:
```
SHOW DNODES;
```
to check whether the new node joined successfully. If the added data node is offline, check:
   - whether the taosd of this data node is working properly; if it is not, find out the cause first;
   - the first few lines of the data node's log file taosdlog.0 (usually in the /var/log/taos directory) to see whether the fqdn and port number output in the log match the End Point just added; if not, add the correct End Point.
According to the above steps, new data nodes can be continuously added to the cluster.
**Tips**:
- Any data node that has joined the cluster online can be used as the firstEP of the subsequent node to be joined.
- firstEp is only effective when the data node joins the cluster for the first time. After joining the cluster, the data node will save the latest End Point list of mnode and no longer rely on this parameter.
- Two data nodes that are not configured with the firstEp parameter will each run independently after startup. In this case, it is impossible to add one data node to the other to form a cluster. **You cannot merge two independent clusters into a new cluster**.
## <a class="anchor" id="management"></a> Data Node Management
The above has already introduced how to build clusters from scratch. After the cluster is formed, new data nodes can be added at any time for expansion, or data nodes can be deleted, and the current status of the cluster can be checked.
### Add data nodes
Execute CLI program taos, log in to the system using root account, and execute:
```
CREATE DNODE "fqdn:port";
```
This adds the End Point of the new data node to the cluster's EP list. **"fqdn:port" needs to be enclosed in double quotation marks**, otherwise an error will occur. The fqdn and port of a data node's external service can be configured through the configuration file taos.cfg and are automatically obtained by default. (It is strongly recommended not to configure the FQDN by automatic acquisition, which may cause the generated End Point of the data node to differ from what is expected.)
### Delete data nodes
Execute the CLI program taos, log in to the TDengine system using the root account, and execute:
```
DROP DNODE "fqdn:port";
```
Where fqdn is the FQDN of the deleted node, and port is the port number of its external server.
<font color=green>**【Note】**</font>
- Once a data node is dropped, it cannot rejoin the cluster. This node needs to be redeployed (emptying the data folder). The cluster migrates the data from the dnode before it completes the drop dnode operation.
- Note that dropping a dnode and stopping the taosd process are two different concepts. Don't be confused: the data migration operation must be performed before deleting a dnode, thus the deleted dnode must remain online. The taosd process cannot be stopped until the delete operation is completed.
- After a data node is dropped, other nodes will perceive the deletion of this dnodeID, and no node in the cluster will accept requests from this dnodeID.
- dnodeID is automatically assigned by the cluster and cannot be specified manually. It is incremented at the time of generation and does not repeat.
### View data nodes
Execute the CLI program taos, log in to the TDengine system using the root account, and execute:
```
SHOW DNODES;
```
This lists all dnodes, each dnode's fqdn:port, status (ready, offline, etc.), number of vnodes, and number of unused vnodes. You can use this command to check after adding or deleting a data node.
### View virtual node group
In order to make full use of multi-core technology and provide scalability, data needs to be processed in partitions. Therefore, TDengine will split the data of a DB into multiple parts and store them in multiple vnodes. These vnodes may be distributed in multiple data node dnodes, thus realizing scale-out. A vnode belongs to only one DB, but a DB can have multiple vnodes. vnode is allocated automatically by mnode according to the current system resources without any manual intervention.
Execute the CLI program taos, log in to the TDengine system using the root account, and execute:
```
SHOW VGROUPS;
```
## <a class="anchor" id="high-availability"></a> High-availability of vnode
TDengine provides high-availability of system through a multi-replica mechanism, including high-availability of vnode and mnode.
The number of replicas of vnode is associated with DB. There can be multiple DBs in a cluster. Each DB can be configured with different replicas according to operational requirements. When creating a database, specify the number of replicas with parameter replica (the default is 1). If the number of replicas is 1, the reliability of the system cannot be guaranteed. As long as the node where the data is located goes down, the service cannot be provided. The number of nodes in the cluster must be greater than or equal to the number of replicas, otherwise the error "more dnodes are needed" will be returned when creating a table. For example, the following command will create a database demo with 3 replicas:
```
CREATE DATABASE demo replica 3;
```
The data in a DB will be partitioned and split into multiple vnode groups. The number of vnodes in a vnode group equals the number of replicas of the DB, and the data of each vnode in the same vnode group is completely consistent. To ensure high availability, the vnodes in a vnode group must be distributed on different dnodes (in actual deployment, on different physical machines). As long as more than half of the vnodes in a vgroup are working, the vgroup can serve normally.
A data node may hold data from multiple DBs, so when a dnode goes offline, multiple DBs may be affected. If half or more of the vnodes in a vnode group are not working, the vnode group cannot serve externally and cannot insert or read data, which will affect reading and writing of some tables in the DB to which it belongs.
Because of the introduction of vnodes, it is impossible to simply conclude that "if more than half of the dnodes in the cluster are working, the cluster should work." But for simple cases, it is easy to judge. For example, with 3 replicas and only 3 dnodes, the whole cluster can still work normally if one node is down, but it cannot work normally if two data nodes are down.
## <a class="anchor" id="mnode"></a> High-availability of mnode
TDengine cluster is managed by mnode (a module of taosd, management node). In order to ensure the high-availability of mnode, multiple mnode replicas can be configured. The number of replicas is determined by system configuration parameter numOfMnodes, and the effective range is 1-3. In order to ensure the strong consistency of metadata, mnode replicas are duplicated synchronously.
A cluster has multiple data node dnodes, but a dnode runs at most one mnode instance. In the case of multiple dnodes, which dnode can be used as an mnode? This is automatically specified by the system according to the resource situation on the whole. User can execute the following command in the console of TDengine through the CLI program taos:
```
SHOW MNODES;
```
This shows the mnode list, including the End Point and role (master, slave, unsynced, or offline) of the dnode where each mnode runs. When the first data node in the cluster starts, it must run an mnode instance, otherwise it cannot work properly, because a system must have at least one mnode. If numOfMnodes is configured to 2, an mnode instance will also run on the second dnode when it starts.
To ensure the high-availability of mnode service, numOfMnodes must be set to 2 or greater. Because the metadata saved by mnode must be strongly consistent, if numOfMnodes is greater than 2, the duplication parameter quorum is automatically set to 2, that is to say, at least two replicas must be guaranteed to write the data successfully before notifying the client application of successful writing.
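A sketch of the corresponding taos.cfg entry on each dnode (the value 2 is an example for a two-mnode setup):
```
numOfMnodes 2
```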
**Note:** A TDengine highly-available system, whether vnode or mnode, must be configured with multiple replicas.
## <a class="anchor" id="load-balancing"></a> Load Balancing
There are three situations in which load balancing will be triggered, and no manual intervention is required.
- When a new data node is added to the cluster, the system will automatically trigger load balancing, and the data on some nodes will be automatically migrated to the new data node without any manual intervention.
- When a data node is removed from the cluster, the system will automatically migrate the data on the data node to other data nodes without any manual intervention.
- If a data node is overloaded (too much data), the system will automatically perform load balancing and migrate some vnodes of that data node to other nodes.
When the above three situations occur, the system will start a load computing of each data node to decide how to migrate.
**[Tip] Load balancing is controlled by parameter balance, which determines whether to start automatic load balancing.**
## <a class="anchor" id="offline"></a> Offline Processing of Data Nodes
If a data node goes offline, the TDengine cluster will automatically detect it. There are two cases:
- If the data node is offline for more than a certain period of time (the configuration parameter offlineThreshold in taos.cfg controls the duration), the system will automatically remove the data node from the cluster, generate a system alarm and trigger the load balancing process. If the removed data node comes online again, it cannot rejoin the cluster automatically; the system administrator needs to add it to the cluster again.
- If the data node comes online again within the duration of offlineThreshold, the system will automatically start the data recovery process. After the data is fully recovered, the node will start to work normally.
**Note:** If every data node belonging to a virtual node group (including the mnode group) is in the offline or unsynced state, a master can only be elected after all data nodes in the virtual node group come online and can exchange status information; only then can the virtual node group serve externally. For example, suppose the whole cluster has 3 data nodes with 3 replicas. If all 3 data nodes go down and then only 2 restart, the group will not work; it can serve externally again only after all 3 data nodes have restarted successfully.
## <a class="anchor" id="arbitrator"></a> How to Use Arbitrator
If the number of replicas is even, a master cannot be elected in a vnode group when half of its vnodes are not working. Similarly, when half of the mnodes are not working, the master mnode cannot be elected because of the "split brain" problem. To solve this problem, TDengine introduces the concept of an Arbitrator. The Arbitrator simulates a working vnode or mnode, but it is only responsible for networking and does not handle any data insertion or access. As long as more than half of the vnodes or mnodes, including the Arbitrator, are working, the vnode group or mnode group can provide data insertion or query services normally. For example, in the case of 2 replicas, if one node A is offline but the other node B is online and can connect to the Arbitrator, then node B can work normally.
In short, in the current version, TDengine recommends configuring an Arbitrator in double-replica environments to improve availability.
The name of the Arbitrator executable is tarbitrator. It has almost no requirements for system resources; it only needs a network connection, so any Linux server can run it. The following briefly describes the installation and configuration steps:
1. Click [Package Download](https://www.taosdata.com/cn/all-downloads/), and in the TDengine Arbitrator Linux section, select the appropriate version to download and install.
2. The command line parameter -p of this application can specify the port number of its external service, and the default is 6042.
3. Modify the configuration file of each taosd instance by setting the parameter arbitrator in taos.cfg to the End Point of the tarbitrator. (If this parameter is configured, when the number of replicas is even, the system will automatically connect to the configured Arbitrator. If the number of replicas is odd, the system will not establish a connection even if the Arbitrator is configured.)
4. The Arbitrator configured in the configuration file will appear in the output of the `SHOW DNODES` command; the value of its role column will be "arb".
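For example, the relevant taos.cfg line on every dnode might look like the sketch below (the host name is illustrative; 6042 is the default tarbitrator port mentioned above):
```
# End Point of the tarbitrator instance (hypothetical host name)
arbitrator arb-host:6042
```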

View File

@@ -0,0 +1,496 @@
# TDengine Operation and Maintenance
## <a class="anchor" id="planning"></a> Capacity Planing
When using TDengine to build an IoT big data platform, computing and storage resources need to be planned according to the business scenario. The following discusses the memory, CPU and hard disk space required for the system to run.
### Memory requirements
Each DB can create a fixed number of vgroups, which defaults to the number of CPU cores and can be configured with maxVgroupsPerDb; each replica in a vgroup is a vnode; each vnode takes up a fixed amount of memory (the size is related to the database's configuration parameters blocks and cache); each table takes up memory proportional to the total length of its tags; in addition, the system has some fixed memory overhead. Therefore, the memory required for each DB can be calculated by the following formula:
```
Database Memory Size = maxVgroupsPerDb * (blocks * cache + 10MB) + numOfTables * (tagSizePerTable + 0.5KB)
```
Example: assuming a 4-core machine, cache at the default size of 16 MB, blocks at the default value of 6, 100,000 tables, and a total tag length of 256 bytes, the total memory requirement is: 4 * (16 * 6 + 10) + 100,000 * (0.25 + 0.5) / 1000 = 499 MB.
A production system often stores data in different DBs according to the characteristics of the data. All of these DBs must be considered when planning.
If there is plenty of memory, the configuration of blocks can be increased so that more data is kept in memory and query speed improves.
### CPU requirements
CPU requirements depend on the following two aspects:
- **Data insertion** A single TDengine core can handle at least 10,000 insertion requests per second. Each insertion request can carry multiple records, and inserting one record at a time consumes almost the same computing resources as inserting 10 records at once. Therefore, the more records per insert, the higher the insertion efficiency. With more than 200 records per insert request, a single core can insert 1 million records per second. However, the faster the insertion speed, the higher the requirement on front-end data collection, because records need to be cached and then inserted in batches.
- **Query requirements** TDengine provides efficient queries, but the queries in each scenario vary greatly, as does the query frequency, making it difficult to give objective figures. Users need to write some query statements for their own scenarios to determine the required resources.
Therefore, only the CPU needed for data insertion can be estimated; the computing resources consumed by queries cannot be estimated as precisely. In actual operation, it is not recommended to let CPU utilization exceed 50%; beyond that, new nodes need to be added to bring in more computing resources.
### Storage requirements
Compared with general databases, TDengine has an ultra-high compression ratio. In most scenarios, the compression ratio of TDengine is at least 5:1, and in some scenarios it may exceed 10:1, depending on the actual data characteristics. The raw data size before compression can be calculated as follows:
```
Raw DataSize = numOfTables * rowSizePerTable * rowsPerTable
```
Example: for 10 million smart meters, each collecting data every 15 minutes with 128 bytes collected each time, the raw data volume in one year is: 10000000 * 128 * 24 * 60 / 15 * 365 = 44.851 TB. At a 5:1 compression ratio, TDengine consumes approximately 44.851 / 5 = 8.97 TB.
Users can set the maximum retention time of data on disk through the parameter `keep`. To further reduce storage cost, TDengine also provides tiered storage: the coldest data can be stored on the cheapest storage media. Application access does not need to be adjusted, only the read speed is lower.
To improve speed, multiple hard disks can be configured so that data can be written or read concurrently. It should be noted that TDengine provides high data reliability through multiple replicas, so it is no longer necessary to use expensive disk arrays.
### Number of physical or virtual machines
According to the above estimation of memory, CPU and storage, we can work out how many cores, how much memory and how much storage space the whole system needs. If the number of data replicas is not 1, the total demand needs to be multiplied by the number of replicas.
Because TDengine scales out very well, it is easy to decide how many physical or virtual machines to purchase according to the total demand and the resources of a single physical/virtual machine.
**Calculate CPU, memory and storage immediately, see:** [**Resource Estimation**](https://www.taosdata.com/config/config.html)
## Fault Tolerance and Disaster Recovery
### Fault tolerance
TDengine supports the WAL (Write-Ahead Log) mechanism to provide fault tolerance and ensure high availability of data.
When TDengine receives the application's request packet, it first writes the original request packet into the database log file, and deletes the corresponding WAL only after the data has been successfully written. This ensures that TDengine can recover data from the log file when the service is restarted after a power failure or other incident, avoiding data loss.
There are two system configuration parameters involved:
- walLevel: WAL level, 0: do not write wal; 1: write wal, but do not execute fsync; 2: write wal and execute fsync.
- fsync: the cycle in which fsync is executed when walLevel is set to 2. Setting to 0 means that fsync is executed immediately whenever there is a write.
To guarantee 100% data safety, set walLevel to 2 and fsync to 0. This reduces write speed. However, if the number of threads writing data on the application side reaches a certain level (more than 50), write performance is still good, only about 30% lower than with fsync set to 3000 milliseconds.
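The corresponding taos.cfg fragment for maximum durability would be, as a sketch built from the two parameters above:
```
# write WAL and execute fsync on every write
walLevel 2
fsync 0
```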
### Disaster recovery
The TDengine cluster provides high availability of the system and implements disaster recovery through a multi-replica mechanism.
A TDengine cluster is managed by the mnode. To ensure high reliability of the mnode, multiple mnode replicas can be configured. The number of replicas is determined by the system configuration parameter numOfMnodes; to support high reliability, it needs to be set greater than 1. To guarantee strong consistency of metadata, the mnode replicas duplicate data synchronously.
The number of replicas of time-series data in TDengine cluster is associated with databases. There can be multiple databases in a cluster, and each database can be configured with different replicas. When creating a database, specify the number of replicas through parameter replica. In order to support high reliability, it is necessary to set the number of replicas greater than 1.
The number of nodes in a TDengine cluster must be greater than or equal to the number of replicas; otherwise an error will be reported when creating tables.
When the nodes in TDengine cluster are deployed on different physical machines and multiple replicas are set, the high reliability of the system is implemented without using other software or tools. TDengine Enterprise Edition can also deploy replicas in different server rooms, thus realizing remote disaster recovery.
## <a class="anchor" id="config"></a> Server-side Configuration
The background service of the TDengine system is provided by taosd, and its configuration parameters can be modified in the configuration file taos.cfg to meet the requirements of different scenarios. The default location of the configuration file is the /etc/taos directory; a different directory can be specified with the -c parameter on the taosd command line. For example, `taosd -c /home/user` specifies that the configuration file is located in the /home/user directory.
You can also use `-C` to show the current server configuration parameters:
```
taosd -C
```
Only some important configuration parameters are listed below. For more parameters, please refer to the instructions in the configuration file. Please refer to the previous chapters for a detailed introduction to each parameter; the defaults are usable and generally do not need to be changed. **Note: after the configuration is modified, the *taosd* service needs to be restarted to take effect.**
- firstEp: End Point of the first dnode in the cluster that taosd actively connects to at startup; the default value is localhost:6030.
- fqdn: FQDN of the data node, which defaults to the first hostname configured by the operating system. If you are accustomed to IP address access, you can set it to the IP address of the node.
- serverPort: the port number of the external service after taosd started, the default value is 6030.
- httpPort: the port number used by the RESTful service; all HTTP requests (TCP) for queries/writes are sent to this port. The default value is 6041.
- dataDir: the data file directory, to which all data files will be written. Default: /var/lib/taos.
- logDir: the log file directory, to which the running log files of the client and server will be written. Default: /var/log/taos.
- arbitrator: the end point of the arbiter in the system; the default value is null.
- role: optional role for the dnode. 0: any, it can be used as an mnode and vnodes can be allocated on it; 1: mgmt, it can only be an mnode and no vnodes can be allocated on it; 2: dnode, it cannot be an mnode and only vnodes can be allocated on it.
- debugFlag: log level switch. 131 (output error and warning logs), 135 (output error, warning, and debug logs), 143 (output error, warning, debug, and trace logs). Default value: 131 or 135 (different modules have different default values).
- numOfLogLines: the maximum number of lines allowed for a single log file. Default: 10,000,000 lines.
- logKeepDays: the maximum retention time of the log file. When it is greater than 0, the log file will be renamed to taosdlog.xxx, where xxx is the timestamp of the last modification of the log file in seconds. Default: 0 days.
- maxSQLLength: the maximum length allowed for a single SQL statement. Default: 65380 bytes.
- telemetryReporting: whether TDengine is allowed to collect and report basic usage information. 0 means not allowed, and 1 means allowed. Default: 1.
- stream: whether continuous query (a stream computing function) is enabled, 0 means not allowed, 1 means allowed. Default: 1.
- queryBufferSize: the amount of memory reserved for all concurrent queries. As a rule of thumb, multiply the maximum number of concurrent queries expected in practice by the number of tables involved, then by 170. The unit is MB (in versions before 2.0.15, the unit of this parameter is bytes).
- ratioOfQueryCores: sets the maximum number of query threads. The minimum value 0 means there is only one query thread; the maximum value 2 means the maximum number of query threads is twice the number of CPU cores. The default is 1, meaning the maximum number of query threads equals the number of CPU cores. The value can be a decimal; for example, 0.5 means at most half as many query threads as CPU cores.
**Note:** for ports, TDengine will use 13 continuous TCP and UDP port numbers from serverPort, so be sure to open them in the firewall. Therefore, if it is the default configuration, a total of 13 ports from 6030 to 6042 need to be opened, and the same for both TCP and UDP.
Data in different application scenarios often have different data characteristics, such as retention days, number of replicas, collection frequency, record size, number of collection points, compression, etc. In order to obtain the best efficiency in storage, TDengine provides the following storage-related system configuration parameters:
- days: the time span for a data file to store data, in days, the default value is 10.
- keep: the number of days to keep data in the database, in days, default value: 3650.
- minRows: the minimum number of records in a file block. Default: 100.
- maxRows: the maximum number of records in a file block. Default: 4096.
- comp: file compression flag bit, 0: off; 1: one-stage compression; 2: two-stage compression. Default: 2.
- walLevel: WAL level. 1: write wal, but do not execute fsync; 2: write wal and execute fsync. Default: 1.
- fsync: the period during which fsync is executed when wal is set to 2. Setting to 0 means that fsync is executed immediately whenever a write happens, in milliseconds, and the default value is 3000.
- cache: the size of the memory block in megabytes (MB), default: 16.
- blocks: how many cache-sized memory blocks each vnode (TSDB) has. The memory used by a vnode is therefore roughly (cache * blocks). Default: 4.
- replica: the number of replicas; value range: 1-3. Default: 1.
- precision: timestamp precision, ms for milliseconds and us for microseconds. Default: ms.
- cacheLast: whether the last_row of sub-tables is cached in memory. 0: off; 1: on. Default: 0. (This parameter is supported as of version 2.0.11.)
In an application scenario, data with multiple different characteristics may coexist. The best design is to put tables with the same data characteristics in one database. Such an application has multiple databases, each of which can be configured with different storage parameters, thus ensuring optimal system performance. TDengine allows the application to specify the above storage parameters when creating a database; if specified, these parameters override the corresponding system configuration parameters. For example, consider the following SQL:
```
create database demo days 10 cache 32 blocks 8 replica 3 update 1;
```
The SQL creates a database named demo. Each data file stores 10 days of data, each memory block is 32 MB, each vnode occupies 8 memory blocks, the number of replicas is 3, updates are allowed, and the other parameters follow the system configuration.
When adding a new dnode to the TDengine cluster, some parameters related to the cluster must be the same as the configuration of the existing cluster, otherwise it cannot be successfully added to the cluster. The parameters that will be verified are as follows:
- numOfMnodes: the number of management nodes in the system. Default: 3.
- balance: whether to enable load balancing. 0: No, 1: Yes. Default: 1.
- mnodeEqualVnodeNum: the number of vnodes an mnode is counted as when allocating resources. Default: 4.
- offlineThreshold: the threshold for a dnode being offline, beyond which the dnode will be removed from the cluster. The unit is seconds, and the default value is 86400*10 (10 days).
- statusInterval: the length of time dnode reports status to mnode. The unit is seconds, and the default value is 1.
- maxTablesPerVnode: the maximum number of tables that can be created in each vnode. Default: 1000000.
- maxVgroupsPerDb: the maximum number of vgroups that can be used in each database.
- arbitrator: the end point of the arbiter in system, which is empty by default.
- See Client Configuration for the configuration of timezone, locale and charset.
For the convenience of debugging, the log configuration of each dnode can be temporarily adjusted through SQL statements; these adjustments become invalid after the system restarts:
```mysql
ALTER DNODE <dnode_id> <config>
```
- dnode_id: available from the output of the SQL statement "SHOW DNODES"
- config: the log parameter to be adjusted, taking one of the following values:
  - resetlog: truncate the old log file and create a new one
  - debugFlag <131 | 135 | 143>: set debugFlag to 131, 135 or 143
For example:
```
alter dnode 1 debugFlag 135;
```
## <a class="anchor" id="client"></a> Client Configuration
The foreground interactive client of the TDengine system is taos, plus the application driver; it shares the same configuration file, taos.cfg, with taosd. When running taos, use the parameter -c to specify the configuration file directory, such as `taos -c /home/cfg`, which means using the parameters in the taos.cfg configuration file under the /home/cfg/ directory. The default directory is /etc/taos. For more information on how to use taos, see the help information `taos --help`. This section mainly describes the parameters used by the taos client application in the configuration file taos.cfg.
**Versions after 2.0.10.0 support the following command line parameters to display the current client configuration:**
```bash
taos -C or taos --dump-config
```
Client configuration parameters:
- firstEp: End Point of the first taosd instance in the cluster that taos actively connects to at startup; the default value is localhost:6030.
- secondEp: when taos starts, if it cannot connect to firstEp, it will try to connect to secondEp.
- locale
Default value: obtained dynamically from the system. If the automatic acquisition fails, user needs to set it in the configuration file or through API
TDengine provides a special field type, nchar, for storing non-ASCII wide characters such as Chinese, Japanese and Korean. Data written to an nchar field is uniformly encoded in UCS4-LE format and sent to the server. Note that the correctness of the encoding is guaranteed by the client. Therefore, to use nchar fields to store non-ASCII characters such as Chinese, Japanese or Korean, the encoding format of the client must be set correctly.
The characters entered by the client are in the current default encoding of the operating system, mostly UTF-8 on Linux systems; some Chinese system encodings may be GB18030 or GBK. The default encoding in a Docker environment is POSIX. In Chinese versions of Windows, the encoding is CP936. The client must ensure that the character set it uses is set correctly, that is, to the encoding currently used by the operating system on which the client runs, so that the data in nchar fields is correctly converted to UCS4-LE.
The naming rule of locale on Linux is: <language>_<region>.<charset>, for example zh_CN.UTF-8, where zh stands for Chinese, CN stands for the mainland region, and UTF-8 is the character set. The character set tells the client how to parse local strings correctly. Linux and Mac OSX systems can set the system character encoding via locale. Because the locale used by Windows does not follow the POSIX locale format, a separate configuration parameter, charset, is needed to specify the character encoding on Windows. charset can also be used to specify the character encoding on Linux systems.
- charset
Default value: obtained dynamically from the system. If the automatic acquisition fails, user needs to set it in the configuration file or through API
If charset is not set in the configuration file, then on Linux, when taos starts up, it automatically reads the current locale of the system and parses and extracts the charset encoding format from it. If reading the locale fails, it attempts to read the charset configuration; if reading charset also fails, the startup process is aborted.
On Linux, the locale includes the character encoding, so after setting the locale correctly there is no need to set charset separately. For example:
```
locale zh_CN.UTF-8
```
On Windows systems, the current system encoding cannot be obtained from locale. If string encoding information cannot be read from the configuration file, taos defaults to CP936. It is equivalent to adding the following to the configuration file:
```
charset CP936
```
If you need to adjust the character encoding, check the encoding used by the current operating system and set it correctly in the configuration file.
On Linux systems, if the user sets both locale and charset, and they are inconsistent, the value set later overrides the value set earlier.
```
locale zh_CN.UTF-8
charset GBK
```
The valid value for charset is GBK.
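Whereas with the opposite order:
```
charset GBK
locale zh_CN.UTF-8
```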
And the valid value for charset is UTF-8.
The configuration parameters of log are exactly the same as those of server.
- timezone
Default value: get the current time zone option dynamically from the system
The time zone in which the client runs. To handle writing and querying data across multiple time zones, TDengine uses Unix timestamps to record and store time. The nature of Unix timestamps guarantees that a generated timestamp is the same at any moment in any time zone. Note that the Unix timestamp is converted and recorded on the client side, so to ensure that other forms of time on the client are converted into the correct Unix timestamp, the correct time zone must be set.
On Linux, the client automatically reads the time zone set by the system. Users can also set the time zone in the configuration file in several ways. For example:
```
timezone UTC-8
timezone GMT-8
timezone Asia/Shanghai
```
All of the above are valid ways to set the time zone to UTC+8 (the East Eight Zone).
The time zone setting affects the interpretation of non-Unix-timestamp content (timestamp strings, parsing of the keyword now) in queries and written SQL statements. For example:
```sql
SELECT count(*) FROM table_name WHERE TS<'2019-04-11 12:01:08';
```
In the East Eight Zone (UTC+8), the SQL statement is equivalent to
```sql
SELECT count(*) FROM table_name WHERE TS<1554955268000;
```
In the UTC time zone, the SQL statement is equivalent to
```sql
SELECT count(*) FROM table_name WHERE TS<1554984068000;
```
To avoid the uncertainty of string time formats, Unix timestamps can also be used directly. In addition, timestamp strings with time zone information can be used in SQL statements, such as RFC3339-format timestamp strings, 2013-04-12T15:52:01.123+08:00, or ISO-8601-format timestamp strings, 2013-04-12T15:52:01.123+0800. The conversion of these two strings into Unix timestamps is not affected by the time zone of the system.
When starting taos, you can also specify an End Point for the taosd instance on the command line; otherwise it is read from taos.cfg.
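For instance, a query using such a time-zone-qualified string (with the same hypothetical table as the examples above) resolves to the same Unix timestamp regardless of the client's time zone:
```sql
SELECT count(*) FROM table_name WHERE TS < '2013-04-12T15:52:01.123+08:00';
```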
- maxBinaryDisplayWidth
The upper limit of the display width of binary and nchar fields in the shell; contents beyond this width are hidden. Default: 30. You can modify this option dynamically in the shell with the command `set max_binary_display_width nn`.
## <a class="anchor" id="user"></a>User Management
System administrators can add and delete users in CLI, and also modify passwords. The SQL syntax in the CLI is as follows:
```sql
CREATE USER <user_name> PASS <'password'>;
```
Create a user, specifying the user name and password. The password needs to be enclosed in single quotation marks (ASCII single quotes).
```sql
DROP USER <user_name>;
```
Delete a user, root only.
```sql
ALTER USER <user_name> PASS <'password'>;
```
Modify the user password. To avoid it being converted to lowercase, the password needs to be enclosed in single quotation marks (ASCII single quotes).
```sql
ALTER USER <user_name> PRIVILEGE <write|read>;
```
Modify the user privilege to write or read, without single quotation marks.
Note: there are three privilege levels in the system: super/write/read, but currently it is not allowed to grant super privilege to users through the ALTER instruction.
```mysql
SHOW USERS;
```
Show all users.
**Note:** in SQL syntax, < > indicates the part that requires user input, but do not enter < > itself.
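Putting the statements above together, a typical session might look like this (the user name and passwords are illustrative):
```sql
CREATE USER test_user PASS 'test123456';
ALTER USER test_user PRIVILEGE read;
ALTER USER test_user PASS 'n3w_passw0rd';
DROP USER test_user;
```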
## <a class="anchor" id="import"></a> Import Data
TDengine provides a variety of convenient data import functions, including import from a script file, import from a data file, and import via the taosdump tool.
**Import by script file**
The TDengine shell supports the `source <filename>` command, which runs SQL statements from a file in batch. Users can write SQL commands such as creating databases, creating tables and writing data into the same file, one command per line. By running the source command in the shell, the SQL statements in the file are executed in sequence. SQL statements beginning with '#' are treated as comments and automatically ignored by the shell.
**Import by data file**
TDengine also supports importing data from a CSV file into an existing table in the shell. Each CSV file belongs to only one table, and the data format in the CSV file must match the structure of the table to import. The syntax is as follows:
```mysql
insert into tb1 file 'path/data.csv';
```
Note: if there is descriptive information in the first line of the CSV file, please delete it manually before importing
For example, there is now a sub-table d1001 whose table structure is as follows:
```mysql
taos> DESCRIBE d1001
Field | Type | Length | Note |
=================================================================================
ts | TIMESTAMP | 8 | |
current | FLOAT | 4 | |
voltage | INT | 4 | |
phase | FLOAT | 4 | |
location | BINARY | 64 | TAG |
groupid | INT | 4 | TAG |
```
And the format of the data.csv to import is as follows:
```csv
'2018-10-04 06:38:05.000',10.30000,219,0.31000
'2018-10-05 06:38:15.000',12.60000,218,0.33000
'2018-10-06 06:38:16.800',13.30000,221,0.32000
'2018-10-07 06:38:05.000',13.30000,219,0.33000
'2018-10-08 06:38:05.000',14.30000,219,0.34000
'2018-10-09 06:38:05.000',15.30000,219,0.35000
'2018-10-10 06:38:05.000',16.30000,219,0.31000
'2018-10-11 06:38:05.000',17.30000,219,0.32000
'2018-10-12 06:38:05.000',18.30000,219,0.31000
```
Then we can use the following command to import:
```mysql
taos> insert into d1001 file '~/data.csv';
Query OK, 9 row(s) affected (0.004763s)
```
**Import via taosdump tool**
TDengine provides a convenient database import and export tool, taosdump. Users can import data exported by taosdump from one system into other systems. Please refer to the blog: [User Guide of TDengine DUMP Tool](https://www.taosdata.com/blog/2020/03/09/1334.html).
## <a class="anchor" id="export"></a> Export Data
To facilitate data export, TDengine provides two export methods, namely, export by table and export by taosdump.
**Export CSV file by table**
If a user needs to export data from a table or an STable, the following can be run in the shell:
```mysql
select * from <tb_name> >> data.csv;
```
In this way, the data in table tb_name will be exported to the file data.csv in CSV format.
**Export data by taosdump**
TDengine provides a convenient database export tool, taosdump. Users can choose to export all databases, a database or a table in a database, all data or data for a time period, or even just the definition of a table as needed. Please refer to the blog: [User Guide of TDengine DUMP Tool](https://www.taosdata.com/blog/2020/03/09/1334.html)
## <a class="anchor" id="status"></a> System Connection and Task Query Management
The system administrator can query the system's connections, ongoing queries and stream computing tasks from the CLI, and can close connections and stop ongoing queries and stream computing. The SQL syntax in the CLI is as follows:
```mysql
SHOW CONNECTIONS;
```
Shows the connections to the database; one column displays ip:port, the IP address and port number of the connection.
```mysql
KILL CONNECTION <connection-id>;
```
Forces the database connection to close, where connection-id is the number in the first column displayed by SHOW CONNECTIONS.
```mysql
SHOW QUERIES;
```
Shows the ongoing data queries; the first column displays two numbers separated by a colon, query-id, consisting of the connection-id of the application connection that initiated the query and the query number on that connection.
```mysql
KILL QUERY <query-id>;
```
Forces the data query to close, where query-id is the connection-id:query-no string displayed by SHOW QUERIES, such as "105:2"; copy and paste it.
```mysql
SHOW STREAMS;
```
Shows the stream computing tasks; the first column displays two numbers separated by a colon, stream-id, consisting of the connection-id of the application connection that started the stream and the number of the stream started on that connection.
```mysql
KILL STREAM <stream-id>;
```
Forces the stream computing task to close, where stream-id is the connection-id:stream-no string displayed by SHOW STREAMS, such as 103:2; copy and paste it.
## System Monitoring
After TDengine starts, it automatically creates a monitoring database, log, and regularly writes the server's CPU, memory, hard disk space, bandwidth, number of requests, disk read/write speed, slow queries and other information into it. TDengine also logs important system operations (such as logging in, creating or deleting databases, etc.) and various error alarms into the log database. The system administrator can view this database directly from the CLI or view the monitoring information through a GUI on the web.
The collection of these monitoring metrics is enabled by default; you can modify the option enableMonitor in the configuration file to turn it off or on.
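For example, to turn monitoring off, the taos.cfg line would be (a sketch using the option named above):
```
# disable collection of monitoring metrics (1 to enable)
enableMonitor 0
```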
## <a class="anchor" id="directories"></a> File Directory Structure
After installing TDengine, the following directories or files are generated in the operating system by default:
| **Directory/File** | **Description** |
| ------------------------- | ------------------------------------------------------------ |
| /usr/local/taos/bin | TDengine's executable directory. The executables are linked to the /usr/bin directory via soft links. |
| /usr/local/taos/connector | TDengine's various connector directories. |
| /usr/local/taos/driver | TDengine's dynamic link library directory, linked to the /usr/lib directory via soft links. |
| /usr/local/taos/examples | TDengine's application example directory for various languages. |
| /usr/local/taos/include | TDengine's C-interface header files for external use. |
| /etc/taos/taos.cfg | TDengine's default configuration file. |
| /var/lib/taos | TDengine's default data file directory; the location can be modified via the configuration file. |
| /var/log/taos | TDengine's default log file directory; the location can be modified via the configuration file. |
**Executables**
All executables of TDengine are stored in the directory /usr/local/taos/bin by default. Including:
- *taosd*: TDengine server-side executable
- *taos*: TDengine Shell executable
- *taosdump*: A data import/export tool
- *remove.sh*: script to uninstall TDengine. Please execute it carefully. It is linked to the rmtaos command in the /usr/bin directory, and will remove the TDengine installation directory /usr/local/taos, but keep /etc/taos, /var/lib/taos and /var/log/taos.
You can configure different data directories and log directories by modifying system configuration file taos.cfg.
## <a class="anchor" id="keywords"></a> TDengine Parameter Limits and Reserved Keywords
- Database name: cannot contain "." and other special characters, and cannot exceed 32 characters
- Table name: cannot contain "." and other special characters, and cannot exceed 192 characters together with the database name to which it belongs
- Table column name: cannot contain special characters, and cannot exceed 64 characters
- Database name, table name, column name cannot begin with a number
- Number of columns in table: cannot exceed 1024 columns
- Maximum length of a record: including the 8-byte timestamp, no more than 16 KB (each BINARY/NCHAR column takes an additional 2 bytes of storage)
- Default maximum string length for a single SQL statement: 65480 bytes
- Number of database replicas: no more than 3
- User name: no more than 23 bytes
- User password: no more than 15 bytes
- Number of Tags: no more than 128
- Total length of tags: cannot exceed 16 KB
- Number of records: limited by storage space only
- Number of tables: limited only by the number of nodes
- Number of databases: limited only by the number of nodes
- Number of virtual nodes on a single database: cannot exceed 64
At the moment, TDengine has nearly 200 internal reserved keywords, which cannot be used as database name, table name, STable name, data column name or tag column name regardless of case. The list of these keywords is as follows:
| **List of Keywords** | | | | |
| -------------------- | ----------- | ------------ | ---------- | --------- |
| ABLOCKS | CONNECTIONS | GT | MNODES | SLIDING |
| ABORT | COPY | ID | MODULES | SLIMIT |
| ACCOUNT | COUNT | IF | NCHAR | SMALLINT |
| ACCOUNTS | CREATE | IGNORE | NE | SPREAD |
| ADD | CTIME | IMMEDIATE | NONE | STABLE |
| AFTER | DATABASE | IMPORT | NOT | STABLES |
| ALL | DATABASES | IN | NOTNULL | STAR |
| ALTER | DAYS | INITIALLY | NOW | STATEMENT |
| AND | DEFERRED | INSERT | OF | STDDEV |
| AS | DELIMITERS | INSTEAD | OFFSET | STREAM |
| ASC | DESC | INTEGER | OR | STREAMS |
| ATTACH | DESCRIBE | INTERVAL | ORDER | STRING |
| AVG | DETACH | INTO | PASS | SUM |
| BEFORE | DIFF | IP | PERCENTILE | TABLE |
| BEGIN | DISTINCT | IS | PLUS | TABLES |
| BETWEEN | DIVIDE | ISNULL | PRAGMA | TAG |
| BIGINT | DNODE | JOIN | PREV | TAGS |
| BINARY | DNODES | KEEP | PRIVILEGE | TBLOCKS |
| BITAND | DOT | KEY | QUERIES | TBNAME |
| BITNOT | DOUBLE | KILL | QUERY | TIMES |
| BITOR | DROP | LAST | RAISE | TIMESTAMP |
| BOOL | EACH | LE | REM | TINYINT |
| BOTTOM | END | LEASTSQUARES | REPLACE | TOP |
| BY | EQ | LIKE | REPLICA | TRIGGER |
| CACHE | EXISTS | LIMIT | RESET | UMINUS |
| CASCADE | EXPLAIN | LINEAR | RESTRICT | UPLUS |
| CHANGE | FAIL | LOCAL | ROW | USE |
| CLOG | FILL | LP | ROWS | USER |
| CLUSTER | FIRST | LSHIFT | RP | USERS |
| COLON | FLOAT | LT | RSHIFT | USING |
| COLUMN | FOR | MATCH | SCORES | VALUES |
| COMMA | FROM | MAX | SELECT | VARIABLE |
| COMP | GE | METRIC | SEMI | VGROUPS |
| CONCAT | GLOB | METRICS | SET | VIEW |
| CONFIGS | GRANTS | MIN | SHOW | WAVG |
| CONFLICT | GROUP | MINUS | SLASH | WHERE |
| CONNECTION | | | | |

File diff suppressed because it is too large

View File

@@ -0,0 +1,161 @@
# FAQ
Tutorials & FAQ
## 0. How to report an issue?
If the contents of this FAQ cannot help you and you need the technical support and assistance of the TDengine team, please package the contents of the following two directories:
1. /var/log/taos (if the default path has not been modified)
2. /etc/taos
Provide the necessary description of the problem, including the TDengine version used, the platform environment, the operation that triggered the problem, the symptoms and the approximate time of occurrence, and submit an issue on [GitHub](https://github.com/taosdata/TDengine).
To ensure that there is enough debug information, if the problem can be reproduced, please modify the /etc/taos/taos.cfg file, add a line "debugFlag 135" at the end (without the quotation marks), restart taosd, reproduce the problem, and then submit the issue. You can also temporarily set the log level of taosd with the following SQL statement:
```
alter dnode <dnode_id> debugFlag 135;
```
However, when the system is running normally, please set debugFlag to 131; otherwise a large amount of log information will be generated and system efficiency will be reduced.
## 1. What should I pay attention to when upgrading TDengine from older versions to 2.0 and above? ☆☆☆
Version 2.0 is a complete refactoring of the previous version, and the configuration and data files are incompatible. Be sure to do the following before upgrading:
1. Delete the configuration file: execute `sudo rm -rf /etc/taos/taos.cfg`
2. Delete the log files: execute `sudo rm -rf /var/log/taos/`
3. After making sure the data is no longer needed, delete the data files: execute `sudo rm -rf /var/lib/taos/`
4. Install the latest stable version of TDengine
5. If you need to migrate data or the data files are corrupted, please contact the official technical support team of TAOS Data for assistance
## 2. When encountering the error "Unable to establish connection" on Windows, what can I do?
See the [technical blog](https://www.taosdata.com/blog/2019/12/03/jdbcdriver%E6%89%BE%E4%B8%8D%E5%88%B0%E5%8A%A8%E6%80%81%E9%93%BE%E6%8E%A5%E5%BA%93/) for this issue.
## 3. Why do I get "more dnodes are needed" when creating a table?
See the [technical blog](https://www.taosdata.com/blog/2019/12/03/%E5%88%9B%E5%BB%BA%E6%95%B0%E6%8D%AE%E8%A1%A8%E6%97%B6%E6%8F%90%E7%A4%BAmore-dnodes-are-needed/) for this issue.
## 4. How do I generate a core file when TDengine crashes?
See the [technical blog](https://www.taosdata.com/blog/2019/12/06/tdengine-crash%E6%97%B6%E7%94%9F%E6%88%90core%E6%96%87%E4%BB%B6%E7%9A%84%E6%96%B9%E6%B3%95/) for this issue.
## 5. What should I do if I encounter an error "Unable to establish connection"?
When the client encounters a connection failure, please check the following:
1. Check the network environment:
   - Cloud server: check whether the security group of the cloud server opens access to TCP/UDP ports 6030-6042
   - Local virtual machine: check whether the network can be pinged, and try to avoid using localhost as the hostname
   - Corporate server: if you are in a NAT network environment, be sure to check whether the server can return messages to the client
2. Make sure that the client and server version numbers are exactly the same, and the open source Community Edition and Enterprise Edition cannot be mixed.
3. On the server, execute systemctl status taosd to check the running status of *taosd*. If not running, start *taosd*.
4. Verify that the correct server FQDN (Fully Qualified Domain Name, obtainable by executing `hostname -f` on the server) is specified when the client connects. FQDN configuration reference: "[All about FQDN of TDengine](https://www.taosdata.com/blog/2020/09/11/1824.html)".
5. Ping the server FQDN. If there is no response, please check your network, DNS settings, or the system hosts file of the computer where the client is located.
6. Check the firewall settings (on Ubuntu use `ufw status`, on CentOS use `firewall-cmd --list-ports`) to confirm that TCP/UDP ports 6030-6042 are open.
7. For JDBC (ODBC, Python, Go and other interfaces are similar) connections on Linux, make sure that libtaos.so is in the directory /usr/local/taos/driver, and /usr/local/taos/driver is in the system library function search path LD_LIBRARY_PATH.
8. For JDBC, ODBC, Python, Go, etc. connections on Windows, make sure that C:\TDengine\driver\taos.dll is in your system library search directory (it is recommended to place taos.dll in the directory C:\Windows\System32)
9. If the connection issue still exists:
   - On Linux systems, use the command line tool nc to check whether a TCP or UDP connection on a specified port is unobstructed: check a UDP port with `nc -vuz {hostIP} {port}`; check a server-side TCP port by listening with `nc -l {port}`, then check from the client side with `nc {hostIP} {port}`
   - On Windows systems, use the PowerShell command `Test-NetConnection -ComputerName {fqdn} -Port {port}` to check whether the server-side port is accessible
10. You can also use the built-in network connectivity detection function of taos program to verify whether the specified port connection between the server and the client is unobstructed (including TCP and UDP): [TDengine's Built-in Network Detection Tool Use Guide](https://www.taosdata.com/blog/2020/09/08/1816.html).
## 6. What to do if I encounter the error "Unexpected generic error in RPC" or "TDengine error: Unable to resolve FQDN"?
This error occurs because the client or data node cannot parse the FQDN (Fully Qualified Domain Name). For TAOS shell or client applications, check the following:
1. Please verify whether the FQDN of the connected server is correct. FQDN configuration reference: "[All about FQDN of TDengine](https://www.taosdata.com/blog/2020/09/11/1824.html)".
2. If the network is configured with a DNS server, check that it is working properly.
3. If the network does not have a DNS server configured, check the hosts file of the machine where the client is located to see if the FQDN is configured and has the correct IP address.
4. If the network configuration is OK, from the machine where the client is located, you need to be able to ping the connected FQDN, otherwise the client cannot connect to the server
## 7. Although the syntax is correct, why do I still get the "Invalid SQL" error?
If you have confirmed that the syntax is correct, for versions older than 2.0, please check whether the SQL statement length exceeds 64 KB. If it does, this error is also returned.
## 8. Are “validation queries” supported?
TDengine does not yet have a dedicated set of validation queries. However, it is recommended to use the database "log" monitored by the system.
## 9. Can I delete or update a record?
TDengine does not support deletion at present, and may support it in the future according to user requirements.
Starting from 2.0.8.0, TDengine supports updating written data. Using the update function requires the UPDATE 1 parameter when creating the database; then the INSERT INTO command can be used to update written data with the same timestamp. The UPDATE parameter cannot be modified by the ALTER DATABASE command. In a database created without UPDATE 1, writing data with an existing timestamp will not modify the previous data, and no error will be reported.
It should also be noted that when UPDATE is 0, data sent later with the same timestamp is discarded directly, but no error is reported, and it is still counted in the affected rows (so the return information of the INSERT instruction cannot be used for timestamp duplicate checking). The main reason for this design is that TDengine treats written data as a stream: regardless of timestamp conflicts, TDengine assumes that the original device that generates the data really generated it. The UPDATE parameter only controls how such stream data is persisted: when UPDATE is 0, the data written first overrides the data written later; when UPDATE is 1, the data written later overrides the data written first. Which relationship to choose depends on whether the data generated first or later is expected in subsequent use and statistics.
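A minimal sketch of the difference (the database and table names are illustrative):
```mysql
CREATE DATABASE demo_upd UPDATE 1;
USE demo_upd;
CREATE TABLE t1 (ts TIMESTAMP, val INT);
INSERT INTO t1 VALUES ('2021-06-01 00:00:00.000', 1);
-- With UPDATE 1, this second write to the same timestamp overwrites val with 2;
-- with UPDATE 0 it would be silently discarded (yet still counted in affected rows).
INSERT INTO t1 VALUES ('2021-06-01 00:00:00.000', 2);
```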
## 10. How to create a table with more than 1024 columns?
With version 2.0 and above, 1024 columns are supported by default; for older versions, TDengine allowed at most 250 columns per table. If the limit is exceeded, it is recommended to logically split the wide table into several smaller ones according to the data characteristics.
## 11. What is the most effective way to write data?
Insert in batches. Each write statement can insert multiple records into one or multiple tables at the same time.
## 12. How to solve the problem that Chinese characters in nchar data inserted on Windows systems are parsed into garbled code?
If there are Chinese characters in nchar data on Windows, first confirm that the region of the system is set to China (this can be set in the Control Panel); the taos client in cmd should then work normally. If you are developing a Java application in an IDE such as Eclipse or IntelliJ, confirm that the file encoding in the IDE is set to GBK (the default encoding type of Java), and then initialize the client configuration when creating the Connection. The specific statements are as follows:
```JAVA
import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;
import com.taosdata.jdbc.TSDBDriver;
Class.forName("com.taosdata.jdbc.TSDBDriver");
Properties properties = new Properties();
properties.setProperty(TSDBDriver.LOCALE_KEY, "UTF-8");
Connection connection = DriverManager.getConnection(url, properties);
```
## 13. JDBC error: the executed SQL is not a DML or a DDL?
Please update to the latest JDBC driver.
```xml
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.27</version>
</dependency>
```
## 14. taos connect failed, reason: invalid timestamp.
A common reason is that the server time and client time are not synchronized, which can be fixed by synchronizing with a time server (use the ntpdate command on Linux; select automatic synchronization in the Windows time settings).
## 15. Incomplete display of table name
Due to the limited display width of the taos shell in the terminal, a relatively long table name may not be displayed completely. If operations are carried out with the displayed incomplete table name, a "Table does not exist" error will occur. The workaround is to modify the setting maxBinaryDisplayWidth in the taos.cfg file, or directly enter the command `set max_binary_display_width 100`. Alternatively, use the `\G` parameter at the end of the command to adjust how results are displayed.
## 16. How to migrate data?
TDengine uniquely identifies a machine according to hostname. When moving data files from machine A to machine B, pay attention to the following three points:
- For versions 2.0.0.0 to 2.0.6.x, reconfigure machine B's hostname to machine A's.
- For 2.0.7.0 and later versions, go to /var/lib/taos/dnode, correct the FQDN corresponding to the dnodeId in dnodeEps.json, and restart. Make sure this file is identical on all machines.
- The storage structures of versions 1.x and 2.x are incompatible; it is necessary to use migration tools or your own application to export and import the data.
## 17. How to temporarily adjust the log level in command line program taos?
For the convenience of debugging, since version 2.0.16 the command line program taos has two new log-related commands:
```mysql
ALTER LOCAL flag_name flag_value;
```
This modifies the log level of a specific module in the current command line program (it is only valid for the current program; if taos is restarted, it needs to be set again):
- The value of flag_name can be: debugFlag, cDebugFlag, tmrDebugFlag, uDebugFlag, rpcDebugFlag
- The value of flag_value can be: 131 (output error and alarm logs), 135 (output error, alarm, and debug logs), 143 (output error, alarm, debug, and trace logs)
```mysql
ALTER LOCAL RESETLOG;
```
This wipes all client-generated log files on the machine.


View File

@@ -1,167 +0,0 @@
# Connect with other tools
## Telegraf
TDengine is easy to integrate with [Telegraf](https://www.influxdata.com/time-series-platform/telegraf/), an open-source server agent for collecting and sending metrics and events, without more development.
### Install Telegraf
At present, TDengine supports Telegraf versions newer than 1.7.4. Users can go to the [download link] and choose the proper package to install on their system.
### Configure Telegraf
Telegraf is configured by changing items in the configuration file */etc/telegraf/telegraf.conf*.
In the **output plugins** section, add the _[[outputs.http]]_ item:
- _url_: http://ip:6020/telegraf/udb, in which _ip_ is the IP address of any node in the TDengine cluster. Port 6020 is the RESTful API port used by TDengine. _udb_ is the name of the database to save the data, which needs to be created beforehand.
- _method_: "POST"
- _username_: username to login TDengine
- _password_: password to login TDengine
- _data_format_: "json"
- _json_timestamp_units_: "1ms"
In the **agent** section:
- hostname: used to distinguish different machines; must be unique.
- metric_batch_size: 30, the maximum number of records Telegraf is allowed to write per batch. The larger the value, the less frequently requests are sent. For TDengine, the value should be less than 50.
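Putting the items above together, the relevant fragment of telegraf.conf might look like this (a sketch; the hostname, IP address, database name and credentials are illustrative):
```
[agent]
  hostname = "gateway-01"
  metric_batch_size = 30

[[outputs.http]]
  url = "http://127.0.0.1:6020/telegraf/udb"
  method = "POST"
  username = "root"
  password = "taosdata"
  data_format = "json"
  json_timestamp_units = "1ms"
```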
Please refer to the [Telegraf docs](https://docs.influxdata.com/telegraf/v1.11/) for more information.
## Grafana
[Grafana] is an open-source system for time-series data display. It is easy to integrate TDengine and Grafana to build a monitor system. Data saved in TDengine can be fetched and shown on the Grafana dashboard.
### Install Grafana
For now, TDengine only supports Grafana versions newer than 5.2.4. Users can go to the [Grafana download page] to download the proper package.
### Configure Grafana
TDengine Grafana plugin is in the _/usr/local/taos/connector/grafana_ directory.
Taking CentOS 7.2 as an example, just copy the TDengine directory to the _/var/lib/grafana/plugins_ directory and restart Grafana.
### Use Grafana
Users can log in to the Grafana server (username/password: admin/admin) through localhost:3000 and configure TDengine as a data source. As shown in the picture below, TDengine appears as a data source option in the box:
![img](../assets/clip_image001.png)
When choosing TDengine as the data source, the Host in the HTTP configuration should be the IP address of any node of the TDengine cluster, and the port should be 6020. For example, when TDengine and Grafana are on the same machine, it should be configured as http://localhost:6020.
Besides, users should also set the username and password used to log in to TDengine. Then click the _Save&Test_ button to save.
![img](../assets/clip_image001-2474914.png)
Then, TDengine as a data source should show in the Grafana data source list.
![img](../assets/clip_image001-2474939.png)
Then, users can create Dashboards in Grafana using TDengine as the data source:
![img](../assets/clip_image001-2474961.png)
Click the _Add Query_ button to add a query and input the SQL command you want to run in the _INPUT SQL_ text box. The SQL command should expect a two-column, multi-row result, such as _SELECT count(*) FROM sys.cpu WHERE ts>=from and ts<to interval(interval)_, in which _from_, _to_ and _interval_ are TDengine built-in variables representing the query time range and time interval.
The _ALIAS BY_ field sets the query alias. Click _GENERATE SQL_ to send the command to TDengine:
![img](../assets/clip_image001-2474987.png)
Please refer to the [Grafana official document] for more information about Grafana.
## MATLAB
MATLAB can connect to and retrieve data from TDengine via the TDengine JDBC Driver.
### MATLAB and TDengine JDBC adaptation
Several steps are required to adapt MATLAB to TDengine. Taking adapting MATLAB2017a on Windows10 as an example:
1. Copy the file _JDBCDriver-1.0.0-dist.jar_ in TDengine package to the directory _${matlab_root}\MATLAB\R2017a\java\jar\toolbox_
2. Copy the file _taos.lib_ in TDengine package to _${matlab_ root _dir}\MATLAB\R2017a\lib\win64_
3. Add the .jar package just copied to the MATLAB classpath. Append the line below at the end of the file _${matlab_ root _dir}\MATLAB\R2017a\toolbox\local\classpath.txt_
`$matlabroot/java/jar/toolbox/JDBCDriver-1.0.0-dist.jar`
4. Create a file called _javalibrarypath.txt_ in the directory _${user_home}\AppData\Roaming\MathWorks\MATLAB\R2017a\_, and add the _taos.dll_ path in the file. For example, if the file _taos.dll_ is in the directory _C:\Windows\System32_, then add the following line in the file *javalibrarypath.txt*:
`C:\Windows\System32`
### TDengine operations in MATLAB
After the configuration is completed, open MATLAB:
- Build a connection:
`conn = database('db', 'root', 'taosdata', 'com.taosdata.jdbc.TSDBDriver', 'jdbc:TSDB://127.0.0.1:0/')`
- Query
`sql0 = ['select * from tb']`
`data = select(conn, sql0);`
- Insert a record:
`sql1 = ['insert into tb values (now, 1)']`
`exec(conn, sql1)`
Please refer to the file _examples\Matlab\TDengineDemo.m_ for more information.
## R
Users can use the R language to access the TDengine server with the JDBC interface. First, install the JDBC package in R:
```R
install.packages('RJDBC', repos='http://cran.us.r-project.org')
```
Then use the _library_ function to load the package:
```R
library('RJDBC')
```
Then load the TDengine JDBC driver:
```R
drv<-JDBC("com.taosdata.jdbc.TSDBDriver","JDBCDriver-1.0.0-dist.jar", identifier.quote="\"")
```
If it succeeds, no error message will be displayed. Then use the following command to try a database connection:
```R
conn<-dbConnect(drv,"jdbc:TSDB://192.168.0.1:0/?user=root&password=taosdata","root","taosdata")
```
Please replace the IP address in the command above with the correct one. If no error message is shown, the connection is established successfully. TDengine supports the following functions in the _RJDBC_ package:
- _dbWriteTable(conn, "test", iris, overwrite=FALSE, append=TRUE)_: write the data of a data frame _iris_ to the table _test_ in the TDengine server. The parameter _overwrite_ must be _FALSE_ and _append_ must be _TRUE_, and the schema of the data frame _iris_ should be the same as that of the table _test_.
- _dbGetQuery(conn, "select count(*) from test")_: run a query command
- _dbSendUpdate(conn, "use db")_: run any non-query command.
- _dbReadTable(conn, "test")_: read all the data in table _test_
- _dbDisconnect(conn)_: close a connection
- _dbRemoveTable(conn, "test")_: remove table _test_
The following functions are currently **not supported**:
- _dbExistsTable(conn, "test")_: check whether table _test_ exists
- _dbListTables(conn)_: list all tables in the connection
[Telegraf]: www.taosdata.com
[download link]: https://portal.influxdata.com/downloads
[Telegraf document]: www.taosdata.com
[Grafana]: https://grafana.com
[Grafana download page]: https://grafana.com/grafana/download
[Grafana official document]: https://grafana.com/docs/

View File

@ -1,896 +0,0 @@
# TDengine connectors
TDengine provides many connectors for development, including C/C++, Java, Python, RESTful, Go, Node.js, etc.
NOTE: All APIs which require a SQL string as a parameter, including but not limited to `taos_query`, `taos_query_a`, `taos_subscribe` in the C/C++ Connector and their counterparts in other connectors, can ONLY process one SQL statement at a time. If more than one SQL statement is provided, their behavior is undefined.
## C/C++ API
C/C++ APIs are similar to the MySQL APIs. Applications should include the TDengine header file _taos.h_ to use C/C++ APIs by adding the following line in the code:
```C
#include <taos.h>
```
Make sure the TDengine library _libtaos.so_ is installed and use the _-ltaos_ option to link the library when compiling. In most cases, if the return value of an API is an integer, it returns _0_ for success and other values as an error code for failure; if the return value is a pointer, then _NULL_ indicates failure.
### Fundamental API
Fundamental APIs prepare the runtime environment for other APIs, for example, creating a database connection.
- `void taos_init()`
Initialize the runtime environment for the TDengine client. Calling this API explicitly is not necessary since it is called in _taos_connect_ by default.
- `void taos_cleanup()`
Clean up the runtime environment. The client should call this API before exiting.
- `int taos_options(TSDB_OPTION option, const void * arg, ...)`
Set client options. The parameter _option_ supports values of _TSDB_OPTION_CONFIGDIR_ (configuration directory), _TSDB_OPTION_SHELL_ACTIVITY_TIMER_, _TSDB_OPTION_LOCALE_ (client locale) and _TSDB_OPTION_TIMEZONE_ (client timezone).
- `char* taos_get_client_info()`
Retrieve version information of client.
- `TAOS *taos_connect(const char *ip, const char *user, const char *pass, const char *db, int port)`
Open a connection to a TDengine server. The parameters are:
* ip: IP address of the server
* user: username
* pass: password
* db: database to use, **NULL** for no database to use after connection. Otherwise, the database should exist before connection or a connection error is reported.
* port: port number to connect
The handle returned by this API should be kept for future use.
- `char *taos_get_server_info(TAOS *taos)`
Retrieve version information of server.
- `int taos_select_db(TAOS *taos, const char *db)`
Set default database to `db`.
- `void taos_close(TAOS *taos)`
Close a connection to a TDengine server using the handle returned by _taos_connect_.
### C/C++ sync API
Sync APIs are those APIs waiting for responses from the server after sending a request. TDengine has the following sync APIs:
- `TAOS_RES* taos_query(TAOS *taos, const char *sql)`
The API used to run a SQL command. The command can be DQL, DML or DDL. The parameter _taos_ is the handle returned by _taos_connect_. A _NULL_ return value means failure.
- `int taos_result_precision(TAOS_RES *res)`
Get the timestamp precision of the result set; return value _0_ means millisecond, _1_ means microsecond and _2_ means nanosecond.
- `TAOS_ROW taos_fetch_row(TAOS_RES *res)`
Fetch a row of return results through _res_.
- `int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows)`
Fetch multiple rows from the result set, return value is row count.
- `int taos_num_fields(TAOS_RES *res)` and `int taos_field_count(TAOS_RES* res)`
These two APIs are identical, both return the number of fields in the return result.
- `int* taos_fetch_lengths(TAOS_RES *res)`
Get the field lengths of the result set, return value is an array whose length is the field count.
- `int taos_affected_rows(TAOS_RES *res)`
Get affected row count of the executed statement.
- `TAOS_FIELD *taos_fetch_fields(TAOS_RES *res)`
Fetch the description of each field. The description includes the property of data type, field name, and bytes. The API should be used with _taos_num_fields_ to fetch a row of data. The structure of `TAOS_FIELD` is:
```c
typedef struct taosField {
char name[65]; // field name
uint8_t type; // data type
int16_t bytes; // length of the field in bytes
} TAOS_FIELD;
```
- `void taos_stop_query(TAOS_RES *res)`
Stop the execution of a query.
- `void taos_free_result(TAOS_RES *res)`
Free the resources used by a result set. Make sure to call this API after fetching results, or a memory leak will occur.
- `char *taos_errstr(TAOS_RES *res)`
Return the reason of the last API call failure. The return value is a string.
- `int taos_errno(TAOS_RES *res)`
Return the error code of the last API call failure. The return value is an integer.
**Note**: The connection to a TDengine server is not multi-thread safe, so a connection can only be used by one thread at a time.
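Putting the sync APIs together, below is a minimal sketch of a connect-query-fetch cycle. The host, credentials, database _db_ and table _tb_ are placeholder assumptions, and error handling is abbreviated; compile and link with _-ltaos_.
```c
#include <stdio.h>
#include <taos.h>

int main() {
  // taos_connect calls taos_init internally, so no explicit initialization is needed
  TAOS *taos = taos_connect("127.0.0.1", "root", "taosdata", "db", 0);
  if (taos == NULL) {
    printf("failed to connect to the server\n");
    return 1;
  }

  TAOS_RES *res = taos_query(taos, "select * from tb");
  if (res == NULL) {
    printf("query failed: %s\n", taos_errstr(res));  // reason of the last failure
    taos_close(taos);
    return 1;
  }

  int rows = 0;
  TAOS_ROW row;
  while ((row = taos_fetch_row(res)) != NULL) {
    rows++;  // each row is an array of taos_num_fields(res) column pointers
  }
  printf("fetched %d rows, %d columns\n", rows, taos_num_fields(res));

  taos_free_result(res);  // always free the result set to avoid memory leaks
  taos_close(taos);
  taos_cleanup();
  return 0;
}
```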
### C/C++ async API
In addition to sync APIs, TDengine also provides async APIs, which are more efficient. Async APIs return right away without waiting for a response from the server, allowing the application to continue with other tasks without blocking. Async APIs are especially useful in a poor network environment.
All async APIs require callback functions. The callback functions have the format:
```C
void fp(void *param, TAOS_RES * res, TYPE param3)
```
The first two parameters of the callback function are the same for all async APIs. The third parameter is different for different APIs. Generally, the first parameter is the handle provided to the API for action. The second parameter is a result handle.
- `void taos_query_a(TAOS *taos, const char *sql, void (*fp)(void *param, TAOS_RES *, int code), void *param);`
The async version of _taos_query_.
* taos: the handle returned by _taos_connect_.
* sql: the SQL command to run.
* fp: user defined callback function. The third parameter of the callback function _code_ is _0_ (for success) or a negative number (for failure, call taos_errstr to get the error as a string). Applications mainly handle the second parameter, the returned result set.
* param: user provided parameter which is required by the callback function.
- `void taos_fetch_rows_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, int numOfRows), void *param);`
The async API to fetch a batch of rows, which should only be used with a _taos_query_a_ call.
* res: result handle returned by _taos_query_a_.
* fp: the callback function. _param_ is a user-defined structure to pass to _fp_. The parameter _numOfRows_ is the number of result rows in the current fetch cycle. In the callback function, applications should call _taos_fetch_row_ to get records from the result handle. After getting a batch of results, applications should continue to call _taos_fetch_rows_a_ API to handle the next batch, until the _numOfRows_ is _0_ (for no more data to fetch) or _-1_ (for failure).
- `void taos_fetch_row_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), void *param);`
The async API to fetch a result row.
* res: result handle.
* fp: the callback function. _param_ is a user-defined structure to pass to _fp_. The third parameter of the callback function is a single result row, which is different from that of _taos_fetch_rows_a_ API. With this API, it is not necessary to call _taos_fetch_row_ to retrieve each result row, which is handier than _taos_fetch_rows_a_ but less efficient.
Applications may apply operations on multiple tables. However, **it is important to make sure the operations on the same table are serialized**. That means after sending an insert request in a table to the server, no operations on the table are allowed before a response is received.
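The callback chain of the async APIs can be hard to visualize from the prose alone. The sketch below is a hedged example of driving _taos_query_a_ and _taos_fetch_rows_a_ together; the SQL and the absence of real row handling are illustrative assumptions.
```c
#include <stdio.h>
#include <taos.h>

// Called once per fetched batch; keeps requesting batches until none remain.
static void fetch_cb(void *param, TAOS_RES *res, int numOfRows) {
  if (numOfRows <= 0) {  // 0: no more data to fetch; -1: failure
    taos_free_result(res);
    return;
  }
  for (int i = 0; i < numOfRows; i++) {
    TAOS_ROW row = taos_fetch_row(res);  // consume one row of the current batch
    (void)row;                           // a real application would read the columns here
  }
  taos_fetch_rows_a(res, fetch_cb, param);  // request the next batch
}

// Called when the query itself completes; code is 0 on success.
static void query_cb(void *param, TAOS_RES *res, int code) {
  if (code != 0) {
    fprintf(stderr, "query failed: %s\n", taos_errstr(res));
    return;
  }
  taos_fetch_rows_a(res, fetch_cb, param);  // start fetching result batches
}

// Usage, given an established connection `taos`:
//   taos_query_a(taos, "select * from tb", query_cb, NULL);
```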
### C/C++ parameter binding API
TDengine also provides parameter binding APIs, like MySQL. Only the question mark `?` can be used to represent a parameter in these APIs.
- `TAOS_STMT* taos_stmt_init(TAOS *taos)`
Create a TAOS_STMT to represent the prepared statement for other APIs.
- `int taos_stmt_prepare(TAOS_STMT *stmt, const char *sql, unsigned long length)`
Parse the SQL statement _sql_ and bind the result to _stmt_. If _length_ is larger than 0, its value is used to determine the length of _sql_; otherwise the API detects the actual length of _sql_ automatically.
- `int taos_stmt_bind_param(TAOS_STMT *stmt, TAOS_BIND *bind)`
Bind values to parameters. _bind_ points to an array; the element count and order of the array must match the parameters of the SQL statement. The usage of _TAOS_BIND_ is the same as _MYSQL_BIND_ in MySQL; its definition is as below:
```c
typedef struct TAOS_BIND {
int buffer_type;
void * buffer;
unsigned long buffer_length; // not used in TDengine
unsigned long *length;
int * is_null;
int is_unsigned; // not used in TDengine
int * error; // not used in TDengine
} TAOS_BIND;
```
- `int taos_stmt_add_batch(TAOS_STMT *stmt)`
Add the bound parameters to a batch; the client can call `taos_stmt_bind_param` again after calling this API. Note this API only supports _insert_ / _import_ statements; it returns an error in other cases.
- `int taos_stmt_execute(TAOS_STMT *stmt)`
Execute the prepared statement. This API can only be called once for a statement at present.
- `TAOS_RES* taos_stmt_use_result(TAOS_STMT *stmt)`
Acquire the result set of an executed statement. The usage of the result is the same as for `taos_use_result`; `taos_free_result` must be called after you are done with the result set to release resources.
- `int taos_stmt_close(TAOS_STMT *stmt)`
Close the statement, release all resources.
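As a sketch of the whole prepare-bind-execute flow, the example below inserts one row through the parameter binding APIs. The table name and schema (ts timestamp, value int) are assumptions for illustration, the _TSDB_DATA_TYPE_*_ constants are taken from _taos.h_, and return codes are left unchecked for brevity.
```c
#include <string.h>
#include <taos.h>

void insert_with_stmt(TAOS *taos, int64_t ts, int32_t value) {
  TAOS_STMT *stmt = taos_stmt_init(taos);
  taos_stmt_prepare(stmt, "insert into tb values(?, ?)", 0);  // length 0: auto-detect

  TAOS_BIND params[2];
  memset(params, 0, sizeof(params));
  int is_null = 0;

  params[0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
  params[0].buffer = &ts;
  params[0].is_null = &is_null;

  params[1].buffer_type = TSDB_DATA_TYPE_INT;
  params[1].buffer = &value;
  params[1].is_null = &is_null;

  taos_stmt_bind_param(stmt, params);  // one array element per '?' in the SQL
  taos_stmt_add_batch(stmt);           // only insert/import statements support batching
  taos_stmt_execute(stmt);
  taos_stmt_close(stmt);               // release all statement resources
}
```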
### C/C++ continuous query interface
TDengine provides APIs for continuous query driven by time, which run queries periodically in the background. There are only two APIs:
- `TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *param, TAOS_RES * res, TAOS_ROW row), int64_t stime, void *param, void (*callback)(void *));`
The API is used to create a continuous query.
* _taos_: the connection handle returned by _taos_connect_.
* _sqlstr_: the SQL string to run. Only query commands are allowed.
* _fp_: the callback function to run after a query. TDengine passes query result `row`, query state `res` and user provided parameter `param` to this function. In this callback, `taos_num_fields` and `taos_fetch_fields` could be used to fetch field information.
* _param_: a parameter passed to _fp_
* _stime_: the start time of the stream, in epoch milliseconds. If _0_ is given, the start time is set to the current time.
* _callback_: a callback function to run when the continuous query stops automatically.
The API is expected to return a handle for success. Otherwise, a NULL pointer is returned.
- `void taos_close_stream (TAOS_STREAM *tstr)`
Close the continuous query by the handle returned by _taos_open_stream_. Make sure to call this API when the continuous query is not needed anymore.
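A minimal sketch of the two continuous query APIs follows; the SQL, the 10-second window, and the immediate close are illustrative assumptions rather than a complete application.
```c
#include <stdio.h>
#include <taos.h>

// Invoked once for each result row the periodic query produces.
static void stream_cb(void *param, TAOS_RES *res, TAOS_ROW row) {
  printf("stream produced a row with %d columns\n", taos_num_fields(res));
}

void start_stream(TAOS *taos) {
  TAOS_STREAM *stream = taos_open_stream(
      taos, "select count(*) from tb interval(10s)", stream_cb,
      0 /* stime: 0 means start now */, NULL /* param */, NULL /* stop callback */);
  if (stream == NULL) {
    fprintf(stderr, "failed to open the stream\n");
    return;
  }
  // ... later, when the continuous query is no longer needed:
  taos_close_stream(stream);
}
```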
### C/C++ subscription API
For the time being, TDengine supports subscription on one or multiple tables. It is implemented through periodic pulling from a TDengine server.
* `TAOS_SUB *taos_subscribe(TAOS* taos, int restart, const char* topic, const char *sql, TAOS_SUBSCRIBE_CALLBACK fp, void *param, int interval)`
The API is used to start a subscription session; it returns the subscription object on success and `NULL` in case of failure. The parameters are:
* **taos**: The database connection, which must be established already.
* **restart**: `Zero` to continue a subscription if it already exists; any other value to start from the beginning.
* **topic**: The unique identifier of a subscription.
* **sql**: An SQL statement for data query; it can only be a `select` statement, can only query raw data, and can only query data in ascending order of the timestamp field.
* **fp**: A callback function to receive query result, only used in asynchronization mode and should be `NULL` in synchronization mode, please refer below for its prototype.
* **param**: User provided additional parameter for the callback function.
* **interval**: Pulling interval in milliseconds. Under asynchronization mode, the API calls the callback function `fp` at this interval; system performance will be impacted if this interval is too short. Under synchronization mode, if the duration between two calls to `taos_consume` is less than this interval, the second call blocks until the duration exceeds this interval.
* `typedef void (*TAOS_SUBSCRIBE_CALLBACK)(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code)`
Prototype of the callback function, the parameters are:
* tsub: The subscription object.
* res: The query result.
* param: User provided additional parameter when calling `taos_subscribe`.
* code: Error code in case of failures.
* `TAOS_RES *taos_consume(TAOS_SUB *tsub)`
The API used to get new data from a TDengine server. It should be put in a loop. The parameter `tsub` is the handle returned by `taos_subscribe`. This API should only be called in synchronization mode. If the duration between two calls to `taos_consume` is less than the pulling interval, the second call blocks until the duration exceeds the interval. The API returns the new rows if new data arrives, or an empty row set otherwise; if there's an error, it returns `NULL`.
* `void taos_unsubscribe(TAOS_SUB *tsub, int keepProgress)`
Stop a subscription session by the handle returned by `taos_subscribe`. If `keepProgress` is **not** zero, the subscription progress information is kept and can be reused in a later call to `taos_subscribe`; otherwise the information is removed.
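Below is a hedged sketch of a synchronization-mode subscription, in which _fp_ is `NULL` and `taos_consume` is polled in a loop. The topic name, SQL, 1000 ms interval, and fixed poll count are assumptions for illustration.
```c
#include <stdio.h>
#include <taos.h>

void poll_subscription(TAOS *taos) {
  // restart=1: start from the beginning instead of continuing an existing subscription
  TAOS_SUB *tsub = taos_subscribe(taos, 1, "demo-topic",
                                  "select * from tb", NULL, NULL, 1000);
  if (tsub == NULL) {
    fprintf(stderr, "failed to subscribe\n");
    return;
  }
  for (int i = 0; i < 10; i++) {         // poll a fixed number of times for the example
    TAOS_RES *res = taos_consume(tsub);  // blocks to honor the 1000 ms pulling interval
    if (res == NULL) break;              // NULL indicates an error
    TAOS_ROW row;
    while ((row = taos_fetch_row(res)) != NULL) {
      // newly arrived rows are handled here
    }
  }
  taos_unsubscribe(tsub, 0);  // 0: discard the subscription progress
}
```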
## Java Connector
For Java developers, TDengine provides `taos-jdbcdriver` according to the JDBC (3.0) API. Users can find and download it through the [Sonatype Repository][1].
Since the native language of TDengine is C, the necessary TDengine library should be checked before using the taos-jdbcdriver:
* libtaos.so (Linux)
After TDengine is installed successfully, the library `libtaos.so` will be automatically copied to the `/usr/lib/`, which is the system's default search path.
* taos.dll (Windows)
After TDengine client is installed, the library `taos.dll` will be automatically copied to the `C:/Windows/System32`, which is the system's default search path.
> Note: Please make sure that the [TDengine Windows client][14] has been installed if developing on Windows. Although the TDengine client is now installed by default together with the TDengine server, it can also be installed [alone][15].
Since TDengine is a time-series database, there are some differences in using the TDengine JDBC driver compared with traditional databases:
* TDengine does not allow deleting or modifying a single record, so the JDBC driver has no such methods.
* No support for transactions.
* No support for unions between tables.
* No support for nested queries: `There is at most one open ResultSet for each Connection. Thus, the TSDB JDBC Driver will close the current ResultSet if it is not closed and a new query begins.`
## Version list of TAOS-JDBCDriver and required TDengine and JDK
| taos-jdbcdriver | TDengine | JDK |
| --- | --- | --- |
| 2.0.2 | 2.0.0.x or higher | 1.8.x |
| 1.0.3 | 1.6.1.x or higher | 1.8.x |
| 1.0.2 | 1.6.1.x or higher | 1.8.x |
| 1.0.1 | 1.6.1.x or higher | 1.8.x |
## DataType in TDengine and Java
The datatypes in TDengine include timestamp, number, string and boolean, which are converted as follows in Java:
| TDengine | Java |
| --- | --- |
| TIMESTAMP | java.sql.Timestamp |
| INT | java.lang.Integer |
| BIGINT | java.lang.Long |
| FLOAT | java.lang.Float |
| DOUBLE | java.lang.Double |
| SMALLINT, TINYINT |java.lang.Short |
| BOOL | java.lang.Boolean |
| BINARY, NCHAR | java.lang.String |
## How to get TAOS-JDBC Driver
### maven repository
taos-jdbcdriver has been published to [Sonatype Repository][1]:
* [sonatype][8]
* [mvnrepository][9]
* [maven.aliyun][10]
Use the following pom.xml for Maven projects:
```xml
<dependencies>
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.2</version>
</dependency>
</dependencies>
```
### JAR file from the source code
After downloading the [TDengine][3] source code, execute `mvn clean package` in the directory `src/connector/jdbc` and then the corresponding jar file is generated.
## Usage
### Get the connection
```java
Class.forName("com.taosdata.jdbc.TSDBDriver");
String jdbcUrl = "jdbc:TAOS://127.0.0.1:6030/log?user=root&password=taosdata";
Connection conn = DriverManager.getConnection(jdbcUrl);
```
> `6030` is the default port and `log` is the default database for system monitor.
A normal JDBC URL looks as follows:
`jdbc:TAOS://{host_ip}:{port}/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]`
values in `{}` are required while values in `[]` are optional. Each option in the above URL denotes:
* user: user name for login; root by default.
* password: password for login; taosdata by default.
* charset: charset for the client; the system charset by default.
* cfgdir: configuration file directory for the client; _/etc/taos/_ on Linux and _C:/TDengine/cfg_ on Windows by default.
* locale: language for the client; the system locale by default.
* timezone: timezone for the client; the system timezone by default.
The options above can be configured in the following ways (ordered by priority):
1. JDBC URL
As explained above.
2. java.sql.DriverManager.getConnection(String jdbcUrl, Properties connProps)
```java
public Connection getConn() throws Exception{
Class.forName("com.taosdata.jdbc.TSDBDriver");
String jdbcUrl = "jdbc:TAOS://127.0.0.1:0/log?user=root&password=taosdata";
Properties connProps = new Properties();
connProps.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_CONFIG_DIR, "/etc/taos");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
Connection conn = DriverManager.getConnection(jdbcUrl, connProps);
return conn;
}
```
3. Configuration file (taos.cfg)
The default configuration file is _/var/lib/taos/taos.cfg_ on Linux and _C:\TDengine\cfg\taos.cfg_ on Windows
```properties
# client default username
# defaultUser root
# client default password
# defaultPass taosdata
# default system charset
# charset UTF-8
# system locale
# locale en_US.UTF-8
```
> For more options, refer to [client configuration][13]
### Create databases and tables
```java
Statement stmt = conn.createStatement();
// create database
stmt.executeUpdate("create database if not exists db");
// use database
stmt.executeUpdate("use db");
// create table
stmt.executeUpdate("create table if not exists tb (ts timestamp, temperature int, humidity float)");
```
> Note: if there is no `use db` statement, the database name must be added as a prefix, like _db.tb_, when operating on tables
### Insert data
```java
// insert data
int affectedRows = stmt.executeUpdate("insert into tb values(now, 23, 10.3) (now + 1s, 20, 9.3)");
System.out.println("insert " + affectedRows + " rows.");
```
> _now_ is the server time.
> _now+1s_ is 1 second later than current server time. The time unit includes: _a_(millisecond), _s_(second), _m_(minute), _h_(hour), _d_(day), _w_(week), _n_(month), _y_(year).
### Query database
```java
// query data
ResultSet resultSet = stmt.executeQuery("select * from tb");
Timestamp ts = null;
int temperature = 0;
float humidity = 0;
while(resultSet.next()){
ts = resultSet.getTimestamp(1);
temperature = resultSet.getInt(2);
humidity = resultSet.getFloat("humidity");
System.out.printf("%s, %d, %s\n", ts, temperature, humidity);
}
```
> Querying is consistent with relational databases. The subscript starts with 1 when retrieving returned results. It is recommended to use column names to retrieve results.
### Close all
```java
resultSet.close();
stmt.close();
conn.close();
```
> `please make sure the connection is closed to avoid errors like connection leakage`
## Using connection pool
**HikariCP**
* dependence in pom.xml
```xml
<dependency>
<groupId>com.zaxxer</groupId>
<artifactId>HikariCP</artifactId>
<version>3.4.1</version>
</dependency>
```
* Examples
```java
public static void main(String[] args) throws SQLException {
HikariConfig config = new HikariConfig();
config.setJdbcUrl("jdbc:TAOS://127.0.0.1:6030/log");
config.setUsername("root");
config.setPassword("taosdata");
config.setMinimumIdle(3); //minimum number of idle connection
config.setMaximumPoolSize(10); //maximum number of connection in the pool
config.setConnectionTimeout(10000); //maximum wait milliseconds for get connection from pool
config.setIdleTimeout(60000); // max idle time for recycle idle connection
config.setConnectionTestQuery("describe log.dn"); //validation query
config.setValidationTimeout(3000); //validation query timeout
HikariDataSource ds = new HikariDataSource(config); //create datasource
Connection connection = ds.getConnection(); // get connection
Statement statement = connection.createStatement(); // get statement
//query or insert
// ...
connection.close(); // put back to connection pool
}
```
> The close() method will not close the connection from HikariDataSource.getConnection(). Instead, the connection is put back to the connection pool.
> For more instructions, refer to the [User Guide][5]
**Druid**
* dependency in pom.xml
```xml
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>druid</artifactId>
<version>1.1.20</version>
</dependency>
```
* Examples
```java
public static void main(String[] args) throws Exception {
Properties properties = new Properties();
properties.put("driverClassName","com.taosdata.jdbc.TSDBDriver");
properties.put("url","jdbc:TAOS://127.0.0.1:6030/log");
properties.put("username","root");
properties.put("password","taosdata");
properties.put("maxActive","10"); //maximum number of connection in the pool
properties.put("initialSize","3");//initial number of connection
properties.put("maxWait","10000");//maximum wait milliseconds for get connection from pool
properties.put("minIdle","3");//minimum number of connection in the pool
properties.put("timeBetweenEvictionRunsMillis","3000");// the interval milliseconds to test connection
properties.put("minEvictableIdleTimeMillis","60000");//the minimum milliseconds to keep idle
properties.put("maxEvictableIdleTimeMillis","90000");//the maximum milliseconds to keep idle
properties.put("validationQuery","describe log.dn"); //validation query
properties.put("testWhileIdle","true"); // test connection while idle
properties.put("testOnBorrow","false"); // don't need while testWhileIdle is true
properties.put("testOnReturn","false"); // don't need while testWhileIdle is true
//create druid datasource
DataSource ds = DruidDataSourceFactory.createDataSource(properties);
Connection connection = ds.getConnection(); // get connection
Statement statement = connection.createStatement(); // get statement
//query or insert
// ...
connection.close(); // put back to connection pool
}
```
> For more instructions, refer to the [User Guide][6]
**Notice**
* TDengine `v1.6.4.1` provides a function `select server_status()` to check heartbeat. It is highly recommended to use this function for `Validation Query`.
As shown below, `1` will be returned if `select server_status()` is executed successfully.
```shell
taos> select server_status();
server_status()|
================
1 |
Query OK, 1 row(s) in set (0.000141s)
```
## Python Connector
### Install TDengine Python client
Users can find the Python client packages in our source code directory _src/connector/python_. There are two directories corresponding to the two Python versions; please choose the correct package to install. Users can use the _pip_ command to install:
```cmd
pip install src/connector/python/python2/
```
or
```cmd
pip install src/connector/python/python3/
```
If the _pip_ command is not installed on the system, users can either install pip or just copy the _taos_ directory in the Python client directory to the application directory to use it.
### Python client interfaces
To use the TDengine Python client, import the TDengine module first:
```python
import taos
```
Users can get module information from Python help interface or refer to our [python code example](). We list the main classes and methods below:
- _TDengineConnection_ class
Run `help(taos.TDengineConnection)` in python terminal for details.
- _TDengineCursor_ class
Run `help(taos.TDengineCursor)` in python terminal for details.
- connect method
Open a connection. Run `help(taos.connect)` in python terminal for details.
## RESTful Connector
TDengine also provides a RESTful API to support development on different platforms. Unlike other databases, the TDengine RESTful API applies operations to the database through SQL commands in the body of an HTTP POST request. All users need to provide is a URL.
For the time being, TDengine RESTful API uses a _\<TOKEN\>_ generated from username and password for identification. Safer identification methods will be provided in the future.
### HTTP URL encoding
To use TDengine RESTful API, the URL should have the following encoding format:
```
http://<ip>:<PORT>/rest/sql
```
- _ip_: IP address of any node in a TDengine cluster
- _PORT_: TDengine HTTP service port. It is 6020 by default.
For example, the URL _http://192.168.0.1:6020/rest/sql_ is used to send an HTTP request to a TDengine server with IP address 192.168.0.1.
It is required to add a token in an HTTP request header for identification.
```
Authorization: Basic <TOKEN>
```
The HTTP request body contains the SQL command to run. If the SQL command contains a table name, it should also provide the database name it belongs to in the form of `<db_name>.<tb_name>`. Otherwise, an error code is returned.
For example, use the _curl_ command to send an HTTP request:
```
curl -H 'Authorization: Basic <TOKEN>' -d '<SQL>' <ip>:<PORT>/rest/sql
```
or use
```
curl -u username:password -d '<SQL>' <ip>:<PORT>/rest/sql
```
where `TOKEN` is the Base64-encoded string of `{username}:{password}`, e.g. `root:taosdata` will be encoded as `cm9vdDp0YW9zZGF0YQ==`
### HTTP response
The HTTP response is in JSON format as below:
```
{
"status": "succ",
"head": ["column1","column2", …],
"data": [
["2017-12-12 23:44:25.730", 1],
["2017-12-12 22:44:25.728", 4]
],
"rows": 2
}
```
Specifically,
- _status_: the result of the operation, success or failure
- _head_: description of returned result columns
- _data_: the returned data array. If no data is returned, only an _affected_rows_ field is listed
- _rows_: the number of rows returned
### Example
- Use _curl_ command to query all the data in table _t1_ of database _demo_:
`curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.t1' 192.168.0.1:6020/rest/sql`
The return value is like:
```
{
"status": "succ",
"head": ["column1","column2","column3"],
"data": [
["2017-12-12 23:44:25.730", 1, 2.3],
["2017-12-12 22:44:25.728", 4, 5.6]
],
"rows": 2
}
```
- Use HTTP to create a database
`curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'create database demo' 192.168.0.1:6020/rest/sql`
The return value should be:
```
{
"status": "succ",
"head": ["affected_rows"],
"data": [[1]],
"rows": 1
}
```
## Go Connector
TDengine provides a Go client package `taosSql`, which implements the Go `database/sql/driver` interface. Users can access TDengine by importing the package in their program as shown below; for detailed usage please refer to `https://github.com/taosdata/driver-go/blob/develop/taosSql/driver_test.go`
```Go
import (
"database/sql"
_ "github.com/taosdata/driver-go/taosSql"
)
```
### API
* `sql.Open(DRIVER_NAME string, dataSourceName string) *DB`
Open a DB. Generally DRIVER_NAME is used as a constant with the default value `taosSql`, and dataSourceName is a combined string with the format `user:password@/tcp(host:port)/dbname`. If a user wants to access TDengine from multiple goroutines concurrently, the better way is to create a sql.Open object in each goroutine.
**Note**: When calling this API, only some initialization work is done; the validity check happens when executing `Query` or `Exec`, at which time the connection is created and the system checks whether `user, password, host, port` are valid. Additionally, most of the features are implemented in the taosSql dependency library `libtaos`; from this point of view, sql.Open is lightweight.
* `func (db *DB) Exec(query string, args ...interface{}) (Result, error)`
Execute non-query related SQL; the execution result is stored in a value of type Result.
* `func (db *DB) Query(query string, args ...interface{}) (*Rows, error)`
Execute query related SQL; the execution result is *Rows. For detailed usage, refer to the Go `database/sql/driver` interface.
## Node.js Connector
TDengine also provides a node.js connector package that is installable through [npm](https://www.npmjs.com/). The package is also in our source code at *src/connector/nodejs/*. The following instructions are also available [here](https://github.com/taosdata/tdengine/tree/master/src/connector/nodejs)
To get started, just type in the following to install the connector through [npm](https://www.npmjs.com/).
```cmd
npm install td-connector
```
It is highly suggested you use npm. If you don't have it installed, you can also just copy the nodejs folder from *src/connector/nodejs/* into your node project folder.
To interact with TDengine, we make use of the [node-gyp](https://github.com/nodejs/node-gyp) library. To install, you will need to install the following depending on platform (the following instructions are quoted from node-gyp)
### On Unix
- `python` (`v2.7` recommended, `v3.x.x` is **not** supported)
- `make`
- A proper C/C++ compiler toolchain, like [GCC](https://gcc.gnu.org)
### On macOS
- `python` (`v2.7` recommended, `v3.x.x` is **not** supported) (already installed on macOS)
- Xcode
- You also need to install the
```
Command Line Tools
```
via Xcode. You can find this under the menu
```
Xcode -> Preferences -> Locations
```
(or by running
```
xcode-select --install
```
in your Terminal)
- This step will install `gcc` and the related toolchain containing `make`
### On Windows
#### Option 1
Install all the required tools and configurations using Microsoft's [windows-build-tools](https://github.com/felixrieseberg/windows-build-tools) using `npm install --global --production windows-build-tools` from an elevated PowerShell or CMD.exe (run as Administrator).
#### Option 2
Install tools and configuration manually:
- Install Visual C++ Build Environment: [Visual Studio Build Tools](https://visualstudio.microsoft.com/thank-you-downloading-visual-studio/?sku=BuildTools) (using "Visual C++ build tools" workload) or [Visual Studio 2017 Community](https://visualstudio.microsoft.com/pl/thank-you-downloading-visual-studio/?sku=Community) (using the "Desktop development with C++" workload)
- Install [Python 2.7](https://www.python.org/downloads/) (`v3.x.x` is not supported), and run `npm config set python python2.7` (or see below for further instructions on specifying the proper Python version and path.)
- Launch cmd, `npm config set msvs_version 2017`
If the above steps didn't work for you, please visit [Microsoft's Node.js Guidelines for Windows](https://github.com/Microsoft/nodejs-guidelines/blob/master/windows-environment.md#compiling-native-addon-modules) for additional tips.
To target native ARM64 Node.js on Windows 10 on ARM, add the components "Visual C++ compilers and libraries for ARM64" and "Visual C++ ATL for ARM64".
### Usage
The following is a short summary of the basic usage of the connector, the full api and documentation can be found [here](http://docs.taosdata.com/node)
#### Connection
To use the connector, first require the library ```td-connector```. Running the function ```taos.connect``` with the connection options passed in as an object will return a TDengine connection object. The required connection option is ```host```, other options if not set, will be the default values as shown below.
A cursor also needs to be initialized in order to interact with TDengine from Node.js.
```javascript
const taos = require('td-connector');
var conn = taos.connect({host:"127.0.0.1", user:"root", password:"taosdata", config:"/etc/taos",port:0})
var cursor = conn.cursor(); // Initializing a new cursor
```
To close a connection, run
```javascript
conn.close();
```
#### Queries
We can now start executing simple queries through the ```cursor.query``` function, which returns a TaosQuery object.
```javascript
var query = cursor.query('show databases;')
```
We can get the results of the queries through the ```query.execute()``` function, which returns a promise that resolves with a TaosResult object, which contains the raw data and additional functionalities such as pretty printing the results.
```javascript
var promise = query.execute();
promise.then(function(result) {
result.pretty(); //logs the results to the console as if you were in the taos shell
});
```
You can also query by binding parameters to a query by filling in the question marks in a string as shown below. The query will automatically parse what was bound and convert it to the proper format for use with TDengine.
```javascript
var query = cursor.query('select * from meterinfo.meters where ts <= ? and areaid = ?;').bind(new Date(), 5);
query.execute().then(function(result) {
result.pretty();
})
```
The TaosQuery object can also be immediately executed upon creation by passing true as the second argument, returning a promise instead of a TaosQuery.
```javascript
var promise = cursor.query('select * from meterinfo.meters where v1 = 30;', true)
promise.then(function(result) {
result.pretty();
})
```
#### Async functionality
Async queries can be performed using the same functions such as `cursor.execute`, `cursor.query`, but now with `_a` appended to them.
Say you want to execute two async queries on two separate tables. Using `cursor.query_a`, you can do that and get a TaosQuery object, which upon executing with the `execute_a` function returns a promise that resolves with a TaosResult object.
```javascript
var promise1 = cursor.query_a('select count(*), avg(v1), avg(v2) from meter1;').execute_a()
var promise2 = cursor.query_a('select count(*), avg(v1), avg(v2) from meter2;').execute_a();
promise1.then(function(result) {
result.pretty();
})
promise2.then(function(result) {
result.pretty();
})
```
### Example
An example of using the NodeJS connector to create a table with weather data and create and execute queries can be found [here](https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example.js) (The preferred method for using the connector)
An example of using the NodeJS connector to achieve the same things but without all the object wrappers that wrap around the data returned to achieve higher functionality can be found [here](https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example-raw.js)
[1]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver
[2]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver
[3]: https://github.com/taosdata/TDengine
[4]: https://www.taosdata.com/blog/2019/12/03/jdbcdriver%e6%89%be%e4%b8%8d%e5%88%b0%e5%8a%a8%e6%80%81%e9%93%be%e6%8e%a5%e5%ba%93/
[5]: https://github.com/brettwooldridge/HikariCP
[6]: https://github.com/alibaba/druid
[7]: https://github.com/taosdata/TDengine/issues
[8]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver
[9]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver
[10]: https://maven.aliyun.com/mvn/search
[11]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate
[12]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo
[13]: https://www.taosdata.com/cn/documentation20/administrator/#%E5%AE%A2%E6%88%B7%E7%AB%AF%E9%85%8D%E7%BD%AE
[14]: https://www.taosdata.com/cn/documentation20/connector/#Windows
[15]: https://www.taosdata.com/cn/getting-started/#%E5%BF%AB%E9%80%9F%E4%B8%8A%E6%89%8B

View File

@ -1,35 +0,0 @@
# TaosData Contributor License Agreement
This TaosData Contributor License Agreement (CLA) applies to any contribution you make to any TaosData projects. If you are representing your employing organization to sign this agreement, please warrant that you have the authority to grant the agreement.
## Terms
**"TaosData"**, **"we"**, **"our"** and **"us"** means TaosData, inc.
**"You"** and **"your"** means you or the organization you are on behalf of to sign this agreement.
**"Contribution"** means any original work you, or the organization you represent submit to TaosData for any project in any manner.
## Copyright License
All rights of your Contribution submitted to TaosData in any manner are granted to TaosData and recipients of software distributed by TaosData. You waive any rights that may affect our ownership of the copyright and grant to us a perpetual, worldwide, transferable, non-exclusive, no-charge, royalty-free, irrevocable, and sublicensable license to use, reproduce, prepare derivative works of, publicly display, publicly perform, sublicense, and distribute Contributions and any derivative work created based on a Contribution.
## Patent License
With respect to any patents you own or that you can license without payment to any third party, you grant to us and to any recipient of software distributed by us, a perpetual, worldwide, transferable, non-exclusive, no-charge, royalty-free, irrevocable patent license to make, have made, use, sell, offer to sell, import, and otherwise transfer the Contribution in whole or in part, alone or included in any product under any patent you own, or license from a third party, that is necessarily infringed by the Contribution or by combination of the Contribution with any Work.
## Your Representations and Warranties
You represent and warrant that:
- the Contribution you submit is an original work that you can legally grant the rights set out in this agreement.
- the Contribution you submit and licenses you granted does not and will not, infringe the rights of any third party.
- you are not aware of any pending or threatened claims, suits, actions, or charges pertaining to the contributions. You also warrant to notify TaosData immediately if you become aware of any such actual or potential claims, suits, actions, allegations or charges.
## Support
You are not obligated to support your Contribution unless you volunteer to provide support. If you want, you can provide support for a fee.
**I agree and accept on behalf of myself and behalf of my organization:**

View File

@ -1,87 +0,0 @@
# Documentation
TDengine is a highly efficient platform to store, query, and analyze time-series data. It works like a relational database, but you are strongly suggested to read through the following documentation before you experience it.
## Getting Started
- Quick Start: download, install and experience TDengine in a few seconds
- TDengine Shell: command-line interface to access TDengine server
- Major Features: insert/query, aggregation, cache, pub/sub, continuous query
## Data Model and Architecture
- Data Model: relational database model, but one table for one device with static tags
- Architecture: Management Module, Data Module, Client Module
- Writing Process: records received are written to WAL, cache, then ack is sent back to client
- Data Storage: records are sharded in the time range, and stored column by column
## TAOS SQL
- Data Types: support timestamp, int, float, double, binary, nchar, bool, and other types
- Database Management: add, drop, check databases
- Table Management: add, drop, check, alter tables
- Inserting Records: insert one or more records into tables, historical records can be imported
- Data Query: query data with time range and filter conditions, support limit/offset
- SQL Functions: support aggregation, selector, transformation functions
- Downsampling: aggregate data in successive time windows, support interpolation
## STable: Super Table
- What is a Super Table: an innovative way to aggregate tables
- Create a STable: it is like creating a standard table, but with tags defined
- Create a Table via STable: use STable as the template, with tags specified
- Aggregate Tables via STable: group tables together by specifying the tags filter condition
- Create Table Automatically: create tables automatically with a STable as a template
- Management of STables: create/delete/alter super table just like standard tables
- Management of Tags: add/delete/alter tags on super tables or tables
## Advanced Features
- Continuous Query: query executed by TDengine periodically with a sliding window
- Publisher/Subscriber: subscribe to the newly arrived data like a typical messaging system
- Caching: the newly arrived data of each device/table will always be cached
## Connector
- C/C++ Connector: primary method to connect to the server through libtaos client library
- Java Connector: driver for connecting to the server from Java applications using the JDBC API
- Python Connector: driver for connecting to the server from Python applications
- RESTful Connector: a simple way to interact with TDengine via HTTP
- Go Connector: driver for connecting to the server from Go applications
- Node.js Connector: driver for connecting to the server from node applications
## Connections with Other Tools
- Telegraf: pass the collected DevOps metrics to TDengine
- Grafana: query the data saved in TDengine and visualize them
- MATLAB: access TDengine server from MATLAB via JDBC
- R: access TDengine server from R via JDBC
## Administrator
- Directory and Files: files and directories related with TDengine
- Configuration on Server: customize IP port, cache size, file block size and other settings
- Configuration on Client: customize locale, default user and others
- User Management: add/delete users, change passwords
- Import Data: import data into TDengine from either script or CSV file
- Export Data: export data either from TDengine shell or from tool taosdump
- Management of Connections, Streams, Queries: check or kill the connections, queries
- System Monitor: collect the system metric, and log important operations
## More on System Architecture
- Storage Design: column-based storage with optimization on time-series data
- Query Design: an efficient way to query time-series data
- Technical blogs to delve into the inside of TDengine
## More on IoT Big Data
- [Characteristics of IoT Big Data](https://www.taosdata.com/blog/2019/07/09/characteristics-of-iot-big-data/)
- [Why don't General Big Data Platforms Fit IoT Scenarios?](https://www.taosdata.com/blog/2019/07/09/why-does-the-general-big-data-platform-not-fit-iot-data-processing/)
- [Why TDengine is the Best Choice for IoT Big Data Processing?](https://www.taosdata.com/blog/2019/07/09/why-tdengine-is-the-best-choice-for-iot-big-data-processing/)
## Tutorials & FAQ
- <a href='https://www.taosdata.com/en/faq'>FAQ</a>: a list of frequently asked questions and answers
- <a href='https://www.taosdata.com/en/blog/?categories=4'>Use cases</a>: a few typical cases to explain how to use TDengine in IoT platform

View File

@ -1,151 +0,0 @@
# Getting Started
## Quick Start
At the moment, TDengine only runs on Linux. You can set up and install it either from the <a href='#Install-from-Source'>source code</a> or the <a href='#Install-from-Package'>packages</a>. It takes only a few seconds from download to run it successfully.
### Install from Source
Please visit our [github page](https://github.com/taosdata/TDengine) for instructions on installation from the source code.
### Install from Package
Three different packages are provided; please pick the one you prefer.
<ul id='packageList'>
<li><a id='tdengine-rpm' style='color:var(--b2)'>TDengine RPM package (1.5M)</a></li>
<li><a id='tdengine-deb' style='color:var(--b2)'>TDengine DEB package (1.7M)</a></li>
<li><a id='tdengine-tar' style='color:var(--b2)'>TDengine Tarball (3.0M)</a></li>
</ul>
For the time being, TDengine only supports installation on Linux systems using [`systemd`](https://en.wikipedia.org/wiki/Systemd) as the service manager. To check if your system has *systemd* package, use the _which systemctl_ command.
```cmd
which systemctl
```
If the `systemd` package is not found, please [install from source code](#Install-from-Source).
### Running TDengine
After installation, start the TDengine service by the `systemctl` command.
```cmd
systemctl start taosd
```
Then check if the server is working now.
```cmd
systemctl status taosd
```
If the service is running successfully, you can play around through the TDengine shell `taos`, the command-line interface tool located in the directory /usr/local/bin/taos
**Note: The _systemctl_ command needs the root privilege. Use _sudo_ if you are not the _root_ user.**
## TDengine Shell
To launch the TDengine shell, the command-line interface, in a Linux terminal, type:
```cmd
taos
```
The welcome message is printed if the shell connects to TDengine server successfully, otherwise, an error message will be printed (refer to our [FAQ](../faq) page for troubleshooting the connection error). The TDengine shell prompt is:
```cmd
taos>
```
In the TDengine shell, you can create databases, create tables and insert/query data with SQL. Each query command ends with a semicolon. It works like MySQL, for example:
```mysql
create database db;
use db;
create table t (ts timestamp, cdata int);
insert into t values ('2019-07-15 10:00:00', 10);
insert into t values ('2019-07-15 10:01:05', 20);
select * from t;
ts | cdata |
===================================
19-07-15 10:00:00.000| 10|
19-07-15 10:01:05.000| 20|
Query OK, 2 row(s) in set (0.001700s)
```
Besides the SQL commands, the system administrator can check system status, add or delete accounts, and manage the servers.
### Shell Command Line Parameters
You can run the `taos` command with command line options to fit your needs. Some frequently used options are listed below:
- -c, --config-dir: set the configuration directory. It is _/etc/taos_ by default
- -h, --host: set the IP address of the server it will connect to; default is localhost
- -s, --commands: set the command to run without entering the shell
- -u, --user: user name to connect to the server. Default is root
- -p, --password: password. Default is 'taosdata'
- -?, --help: get a full list of supported options
Examples:
```cmd
taos -h 192.168.0.1 -s "use db; show tables;"
```
### Run Batch Commands
Inside TDengine shell, you can run batch commands in a file with the *source* command.
```
taos> source <filename>;
```
### Tips
- Use up/down arrow key to check the command history
- To change the default password, use "`alter user`" command
- ctrl+c to interrupt any queries
- To clean the cached schema of tables or STables, execute command `RESET QUERY CACHE`
## Major Features
The core functionality of TDengine is the time-series database. To reduce the development and management complexity, and to improve the system efficiency further, TDengine also provides caching, pub/sub messaging system, and stream computing functionalities. It provides a full stack for IoT big data platform. The detailed features are listed below:
- SQL like query language used to insert or explore data
- C/C++, Java(JDBC), Python, Go, RESTful, and Node.JS interfaces for development
- Ad hoc queries/analysis via Python/R/MATLAB or TDengine shell
- Continuous queries to support sliding-window based stream computing
- Super table to aggregate multiple time-streams efficiently with flexibility
- Aggregation over a time window on one or multiple time-streams
- Built-in messaging system to support publisher/subscriber model
- Built-in cache for each time stream to make latest data available as fast as light speed
- Transparent handling of historical data and real-time data
- Integrating with Telegraf, Grafana and other tools seamlessly
- A set of tools or configuration to manage TDengine
For enterprise edition, TDengine provides more advanced features below:
- Linear scalability to deliver higher capacity/throughput
- High availability to guarantee the carrier-grade service
- Built-in replication between nodes which may span multiple geographical sites
- Multi-tier storage to make historical data management simpler and cost-effective
- Web-based management tools and other tools to make maintenance simpler
TDengine is specially designed and optimized for time-series data processing in IoT, connected cars, Industrial IoT, IT infrastructure and application monitoring, and other scenarios. Compared with other solutions, it is 10x faster on insert/query speed. With a single-core machine, over 20K requests can be processed, millions of data points can be ingested, and over 10 million data points can be retrieved in a second. Via column-based storage and tuned compression algorithms for different data types, less than 1/10 of the storage space is required.
## Explore More on TDengine
Please read through the whole <a href='../documentation'>documentation</a> to learn more about TDengine.

View File

@ -1,248 +0,0 @@
# TDengine Technical Design
## Storage Design
TDengine's data storage consists mainly of **metadata storage** and **storage of written data**. The following sections describe the storage structures of the various kinds of data in TDengine in detail.
### Metadata Storage
Metadata in TDengine includes information about databases, super tables, and so on. By default, metadata is stored under the _/var/lib/taos/mgmt/_ directory, whose layout is shown below:
```
/var/lib/taos/
+--mgmt/
+--db.db
+--meters.db
+--user.db
+--vgroups.db
```
Metadata is laid out sequentially in the files. Each record in a file represents one metadata object in TDengine (a database, a table, etc.). Metadata files are append-only; even deleting a piece of metadata merely appends a deletion record to the file.
### Storage of Written Data
Data written to TDengine is sharded on disk along the time dimension. Data of the tables in the same vnode within the same time range is stored in the same file group, such as the v0f1804* files below. This sharding scheme greatly simplifies queries along the time dimension and improves query speed. Under the default configuration, each file on disk stores 10 days of data; users can adjust this via the database configuration option _daysPerFile_. Data is stored in blocks within a file. Each data block contains data of only one table, sorted in ascending order by the timestamp primary key. Data inside a block is stored column by column, so that values of the same type are placed together, which greatly improves the compression ratio and saves storage space. TDengine applies different compression algorithms to different data types to achieve the best compression results; the algorithms used include Simple8B, delta-of-delta, RLE, and LZ4.
TDengine data files are stored under */var/lib/taos/data/* by default, while the */var/lib/taos/tsdb/* directory holds vnode information, information about the tables in each vnode, and links to the data files. The full directory layout is shown below:
```
/var/lib/taos/
+--tsdb/
| +--vnode0
| +--meterObj.v0
| +--db/
| +--v0f1804.head->/var/lib/taos/data/vnode0/v0f1804.head1
| +--v0f1804.data->/var/lib/taos/data/vnode0/v0f1804.data
| +--v0f1804.last->/var/lib/taos/data/vnode0/v0f1804.last1
| +--v0f1805.head->/var/lib/taos/data/vnode0/v0f1805.head1
| +--v0f1805.data->/var/lib/taos/data/vnode0/v0f1805.data
| +--v0f1805.last->/var/lib/taos/data/vnode0/v0f1805.last1
| :
+--data/
+--vnode0/
+--v0f1804.head1
+--v0f1804.data
+--v0f1804.last1
+--v0f1805.head1
+--v0f1805.data
+--v0f1805.last1
:
```
#### The meterObj File
Each vnode has exactly one _meterObj_ file. It stores the vnode's basic information (creation time, configuration, statistics, etc.) as well as information about the tables in the vnode. Its structure is as follows:
```
<file start>
[file header]
[offset and length of table record 1]
[offset and length of table record 2]
...
[offset and length of table record N]
[table record 1]
[table record 2]
...
[table record N]
[table record]
<file end>
```
The file header is 512 bytes and mainly holds the vnode's basic information. Each table record is the on-disk representation of one table belonging to the vnode.
#### The head File
The head file stores the index information for the data blocks in its corresponding data file. It is organized as follows:
```
<file start>
[file header]
[offset of table 1]
[offset of table 2]
...
[offset of table N]
[data index of table 1]
[data index of table 2]
...
[data index of table N]
<file end>
```
The offset list at the beginning of the file gives, for each table, the offset within the file at which that table's data index block starts. The data index of each table is stored contiguously in the head file, which allows TDengine to read all of a table's data block indexes into memory in one pass when reading a single table, greatly improving read speed. A table's data index block is organized as follows:
```
[index block info]
[index of data block 1]
[index of data block 2]
...
[index of data block N]
```
The index block info records descriptive information such as the number of data blocks. Each data block index corresponds to a single data block in the data file or the last file, and records the file the block is stored in, the offset of the block's start, the range of the timestamp primary key within the block, and so on. The data block indexes inside an index block are ordered by time range, which means the time range of the data block for index M is entirely later than that for index M-1. This pre-sorted layout allows TDengine to use binary search when querying by timestamp, greatly speeding up queries.
#### The data File
The data file stores the actual data blocks. It is append-only and is organized as follows:
```
<file start>
[file header]
[data block 1]
[data block 2]
...
[data block N]
<file end>
```
Each data block belongs to exactly one table in the vnode, and its data is ordered by the timestamp primary key. Data inside a block is organized by column so that values of the same type are placed together, which makes compression and reads easier. Each data block is organized as follows:
```
[column 1 info]
[column 2 info]
...
[column N info]
[column 1 data]
[column 2 data]
...
[column N data]
```
The column info includes the column's type, its compression algorithm, and the offset and length of the column data in the file. In addition, it contains pre-computed results for the column's data in the block, so that filter queries can use the pre-computed results to decide whether the data block needs to be read at all, greatly improving read speed.
#### The last File
To prevent data block fragmentation and improve query speed and compression ratio, TDengine introduces the last file. When the number of records in a data block about to be flushed to disk falls below a threshold, TDengine first writes the block to the last file for temporary storage. When new data needs to be flushed, the data in the last file is read back, combined with the new data into a new block, and written to the data file. The last file is organized similarly to the data file.
### Summary of TDengine Data Storage
Through its innovative architecture and storage design, TDengine makes highly efficient use of computing resources. On the one hand, its virtualization makes horizontal scaling and backup very easy. On the other hand, storing table data sorted by the timestamp primary key, together with its columnar layout, gives TDengine significant advantages in writing, querying, and compression.
## Query Processing
### Overview
TDengine provides a rich set of query processing capabilities for tables and super tables. In addition to regular aggregation queries, it also offers window queries and statistical aggregation for time-series data. Query processing in TDengine is carried out cooperatively by the client, the management node, and the data nodes. The query-related functions and modules of each component are as follows:
Client (Client App). The client consists of the TAOS SQL parser (SQL Parser), the query request executor (Query Executor), the second-stage aggregator (Result Merger), the continuous query manager (Continuous Query Manager), and other major functional modules. The SQL parser parses and validates SQL statements and converts them into an abstract syntax tree. The query executor converts the abstract syntax tree into query execution logic and, based on the query conditions of the SQL statement, turns it into a two-level query process: metadata queries against the management node and data queries against the data nodes. Since TAOS SQL currently does not provide complex nested queries or a pipelined query processing mechanism, there is no need for query plan optimization or logical-to-physical query plan conversion. The second-stage aggregator performs second-stage aggregation over the independent results returned by each data node to produce the final result. The continuous query manager manages continuous queries created by users, periodically launching query requests and, as needed, writing the results back into TDengine or returning them to the client application. In addition, the client is responsible for retrying failed queries, canceling query requests, maintaining connection heartbeats, and reporting query status to the management node.
Management node (Management Node). The management node holds the metadata of all the data in the cluster, provides clients with the metadata needed for queries, and splits query requests according to the load of the cluster. Since a super table contains the information of all tables created from it, the management node's query executor (Query Executor) handles queries against tags (TAG) and returns to the client the information of the tables that satisfy the tag query. In addition, the management node maintains the cluster's query status (Query Status Manager), which temporarily keeps in memory all queries currently being executed; when a client issues the *show queries* command, the information of the queries currently running in the system is returned to the client.
Data node (Data Node). The data nodes store all the data in the database and schedule and execute query processing through the query executor, the query processing scheduler, and the query task queue (Query Task Queue): query requests received from clients are placed into the processing queue, from which the query executor takes them for execution. The query optimizer (Query Optimizer) applies basic optimizations to a query, and the data node's query executor (Query Executor) scans the qualifying data units and returns the computed results. Data nodes also respond to management information and commands from the management node; for example, after a *kill query* command, the query task being executed must be stopped immediately.
<center> <img src="../assets/fig1.png"> </center>
<center>Figure 1. Query processing architecture (query-related components only)</center>
### Regular Query Processing
The client, the management node, and the data nodes cooperate to complete the full query processing flow of TDengine. We use a concrete SQL query as an example to explain it. The SQL statement queries the super table *FOO_SUPER_TABLE* for the total number of records, over the whole day of January 12, 2019, in tables whose tag TAG_LOC is 'beijing'. The SQL statement is as follows:
```sql
SELECT COUNT(*)
FROM FOO_SUPER_TABLE
WHERE TAG_LOC = 'beijing' AND TS >= '2019-01-12 00:00:00' AND TS < '2019-01-13 00:00:00'
```
首先客户端调用TAOS SQL解析器对SQL语句进行解析及合法性检查然后生成语法树并从中提取查询的对象 — 超级表 *FOO_SUPER_TABLE* 然后解析器向管理节点Management Node请求其相应的元数据信息并将过滤信息TAG_LOC='beijing')同时发送到管理节点。
管理节点接收元数据获取的请求,首先找到超级表 *FOO_SUPER_TABLE* 基础信息然后应用查询条件来过滤通过该超级表创建的全部表最后满足查询条件TAG_LOC='beijing'),即 *TAG_LOC* 标签列是 'beijing' 的的通过其查询执行器将满足查询要求的对象(表或超级表)的元数据信息返回给客户端。
客户端获得了 *FOO_SUPER_TABLE* 的元数据信息后查询执行器根据元数据中的数据分布分别向保存有相应数据的节点发起查询请求此时时间戳范围过滤条件TS >= '2019-01-12 00:00:00' AND TS < '2019-01-13 00:00:00')需要同时发送给全部的数据节点。
数据节点接收到发自客户端的查询,转化为内部结构并进行优化以后将其放入任务执行队列,等待查询执行器执行。当查询结果获得以后,将查询结果返回客户端。数据节点执行查询的过程均相互独立,完全只依赖于自身的数据和内容进行计算。
当所有查询涉及的数据节点返回结果后,客户端将每个数据节点查询的结果集再次进行聚合(针对本案例,即将所有结果再次进行累加),累加的结果即为最后的查询结果。第二阶段聚合并不是所有的查询都需要。例如,针对数据的列选取操作,实际上是不需要第二阶段聚合。
### REST查询处理
在 C/C++ 、Python接口、 JDBC 接口之外TDengine 还提供基于 HTTP 协议的 REST 接口。不同于使用应用客户端开发程序进行的开发。当用户使用 REST 接口的时候,所有的查询处理过程都是在服务器端来完成,用户的应用服务不会参与数据库的计算过程,查询处理完成后结果通过 HTTP的 JSON 格式返回给用户。
<center> <img src="../assets/fig2.png"> </center>
<center>图 2. REST查询架构</center>
When a user issues a query through the HTTP-based REST interface, the HTTP request first establishes a connection with the HTTP connector (Connector) on a data node, and the REST signature mechanism then uses a token to ensure the reliability of the request. After the data node's HTTP connector receives the request, it invokes the embedded client program to initiate query processing; the embedded client parses the SQL statement passed in through the HTTP connector, requests metadata from the management node as needed, then sends query requests to this node or to other nodes in the cluster, and aggregates the computed results as required. Once the HTTP connector has received the SQL request, the subsequent processing flow is exactly the same as querying through a client library. Finally, the query result is converted into a JSON string and returned to the client in the HTTP response.
Throughout the HTTP processing flow, the user application is no longer involved in query processing; it is only responsible for sending SQL requests over HTTP and receiving results in JSON format. Note also that every data node embeds an HTTP connector and a client program, so a request can be sent to any data node in the cluster, and that node can return the user's query result over HTTP.
### Technical Features
Because TDengine stores data and tags separately, the redundancy of tag storage is greatly reduced. Tag data are associated directly with each table and are managed and maintained in a fully in-memory structure, which supports fast query processing: tag queries over tens of millions of tables can return within milliseconds. Filtering on tags first effectively reduces the volume of data involved in the second stage of a query. To improve query performance, and exploiting the immutability of IoT data, TDengine records statistics such as the maximum, minimum, and sum of the data in every stored data block. If a query covers all the data of a block, the pre-computed results are used directly and the block's content is not read at all. Since the pre-computed part is far smaller than the actual data stored on disk, for queries bottlenecked by disk IO, using the pre-computed results greatly reduces read IO and speeds up query processing.
TDengine stores data by column. When a data block is loaded from disk for computation, only the columns referenced by the query are read, so the amount of data read is minimized. Moreover, thanks to the columnar layout, the data node scans data column block by column block, making full use of the CPU L2 cache and greatly accelerating data scanning. For some queries, TDengine does not wait for the complete result before responding: in a column selection query, for example, as soon as the first batch of results is obtained, the data node returns it directly to the client. Also, upon receiving a query request, the data node immediately returns a confirmation to the client, starts the query processing, and responds with the result once execution completes.
## TDengine Cluster Design
### 1. The Cluster and Its Major Logical Units
TDengine is designed on the assumption that hardware and software are unreliable and will fail, and that no single computer has enough capacity to process massive data. Therefore, from day one TDengine has been designed as a distributed, highly reliable architecture: fully decentralized and horizontally scalable, so that the failure of any one or several servers, or software errors, does not affect the service. Through node virtualization, aided by automatic load balancing, TDengine makes the best possible use of the computing and storage resources of a heterogeneous cluster. Moreover, as long as the number of data replicas is greater than one, neither hardware or software upgrades nor IDC migrations require stopping the cluster's service, which keeps the system running normally and greatly reduces the workload of system administrators and operators.
The illustration below shows eight physical nodes, each logically divided into multiple virtual nodes. The basic concepts of the system are introduced below.
![assets/nodes.png](../assets/nodes.png)
**Physical node (dnode)**: a physical server in the cluster or a virtual machine on a cloud platform. For security and communication efficiency, a physical node can be configured with two network interfaces, or two IP addresses: one for internal cluster communication, whose IP address is the **privateIp**, and one for communication with applications outside the cluster, whose IP address is the **publicIp**. On some cloud platforms (such as Alibaba Cloud), the external IP address is mapped, so the publicIp also has a corresponding internal address, the **internalIp** (different from the privateIp). For a physical node with only one IP address, the publicIp, privateIp, and internalIp are all the same address. One and only one taosd instance runs on a dnode.
**Virtual data node (vnode)**: an independently runnable basic logical unit on top of a physical node. Writing, storing, and querying time-series data all happen inside virtual nodes (V in the figure); the collected time-series data are stored on vnodes. A vnode contains a fixed number of tables. When a new table is created, the system checks whether a new vnode needs to be created. The number of vnodes that can be created on a physical node depends on its hardware resources. A vnode belongs to exactly one DB, but a DB can have multiple vnodes.
**Virtual data node group (vgroup)**: vnodes located on different physical nodes can form a virtual data node group (vnode group), such as V0 on dnode0, V1 on dnode1, and V2 on dnode6 in the figure above. Vnodes in the same vgroup are managed in a master/slave fashion: writes can only happen on the master, and the data are replicated to the slaves asynchronously, ensuring that a copy of the data exists on multiple physical nodes. If the master goes down, the other nodes detect this and elect a new master for the vgroup; the new master continues to handle data requests, keeping the system reliable. The number of vnodes in a vgroup is the number of data replicas. For a DB with N replicas, the system must have at least N physical nodes. The number of replicas is specified with the parameter replica when creating the DB, and defaults to 1. With TDengine, data safety relies on replication, so expensive storage devices such as disk arrays are no longer necessary.
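As a minimal sketch of fixing the replica count at database creation time (the database name demo is an assumption for illustration):
```mysql
-- create a database whose vgroups each keep 3 replicas;
-- the cluster must therefore contain at least 3 physical nodes
CREATE DATABASE demo REPLICA 3;
```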
**Virtual management node (mnode)**: monitors and maintains the running state of all nodes and performs load balancing among them (M in the figure). It also stores and manages the metadata (users, databases, tables, static tags, etc.), so it is also called the Meta Node. Multiple mnodes (at most 5) can be configured in a TDengine cluster, and they automatically form a management node cluster (M0, M1, M2 in the figure). The mnodes are managed in a master/slave fashion, and data are synchronized between them with strong consistency. The mnode cluster is created automatically by the system, without manual intervention. There is at most one mnode per dnode, and every dnode knows the IP addresses of all mnodes in the cluster.
**taosc**: a software module, the driver that TDengine provides to applications. It is embedded in the JDBC and ODBC drivers, and in the C connection library. Applications interact with the cluster through taosc rather than directly. This module obtains and caches metadata, forwards insert and query requests to the correct virtual node, and performs the final level of aggregation, sorting, and filtering before returning results to the application. For the JDBC, ODBC, and C/C++ interfaces, this module runs on the machine where the application resides and consumes very few resources. To support the fully distributed REST interface, an instance of taosc runs on every dnode in the TDengine cluster.
**Externally served address**: a TDengine cluster can accommodate a single physical node, several, or even thousands of them. An application only needs to connect to the publicIp of any physical node in the cluster. When starting the CLI application taos, the -h option takes exactly this publicIp.
**master/secondIp**: every dnode must be configured with a masterIp. After starting, the dnode sends a join-cluster request to the configured masterIp. The masterIp is the privateIp of any node in the already-created cluster; for the first node of a cluster, it is its own privateIp. To make the connection more robust, each dnode may also be configured with a secondIp, also the privateIp of any node in the existing cluster. If a node fails to connect to the masterIp, it tries the secondIp.
After a dnode starts, it obtains the list of mnode IP addresses in the cluster and periodically sends status information to the mnodes.
vnodes and mnodes are only logical divisions; they are just different threads within the executable taosd, requiring no separate software installation or special configuration. The smallest possible deployment is a single physical node on which vnode, mnode, and taosc all exist and run normally, although a single node cannot guarantee high reliability.
### 2. A Typical Operation Flow
To explain the relationship between vnode, mnode, taosc, and the application, and the role each plays, we walk through the typical operation of writing data.
![Picture1](../assets/Picture2.png)
1. The application initiates a request to insert data through JDBC, ODBC, or another API.
2. taosc checks its cache for the table's meta data. If present, go straight to step 4. If not, taosc sends a get meta-data request to an mnode.
3. The mnode returns the table's meta data to taosc. The meta data contains the table's schema and the table's vgroup information (the vnode IDs and the IP addresses of the dnodes they reside on; if the number of replicas is N, there are N vnodeID/IP pairs). If taosc receives no response from the mnode for a long time and multiple mnodes exist, taosc sends the request to the next mnode.
4. taosc sends the insert request to the master vnode.
5. After inserting the data, the vnode replies to taosc to confirm the insertion succeeded. If taosc receives no response from the vnode for a long time, taosc treats the node as offline; in that case, if the database being written to has multiple replicas, taosc sends the insert request to the next vnode in the vgroup.
6. taosc notifies the application that the write succeeded.
For steps 2 and 3, taosc does not know the mnode's IP address when it starts, so it sends the request directly to the cluster's configured externally served IP address. If the dnode receiving the request is not configured with an mnode, it replies with the list of mnode IP addresses (if there are multiple dnodes, there may be multiple mnode IPs), and taosc re-sends the get meta-data request to the new mnode address.
For steps 4 and 5, without cached information taosc cannot know which member of the vgroup is the master, so it assumes the first vnodeID/IP pair is the master and sends the request there. If the vnode receiving the request is not the master, it replies with who the master is, and taosc then sends the request to the suggested master vnode. Once the insert succeeds, taosc caches the master's information.
The flow above describes inserting data, but the flow for queries and computation is exactly the same. taosc encapsulates and hides all of this complexity, so the application does not have to deal with redirection, fetching meta data, or any other details; everything is transparent.
Thanks to taosc's caching, an mnode needs to be visited only the first time a table is operated on, so the mnode does not become a system bottleneck. However, because the schema may change and the vgroup may change (for example when load balancing occurs), taosc periodically refreshes its cache automatically.
### 3. Data Partitioning
vnodes (virtual data nodes) store the collected time-series data, and queries and computations are performed on them. To facilitate load balancing, data recovery, and heterogeneous environments, TDengine splits a physical node into multiple vnodes according to its computing and storage resources. The management of these vnodes is done automatically by TDengine and is completely transparent to applications.
For a single data collection point, regardless of its data volume, one vnode (or vnode group, if the number of replicas is greater than 1) has more than enough computing and storage resources to handle it (if one 16-byte record is generated per second, less than 0.5 GB of raw data is produced per year). TDengine therefore stores all the data of a table in one vnode and never spreads one collection point's data over two or more dnodes. A vnode can store the data of multiple tables; the number of tables a vnode can hold is set by the configuration parameter tables, which defaults to 2000. By design, all tables in a vnode belong to the same DB. Hence the number of vnodes (or vgroups) a database needs equals the number of tables in the database divided by tables.
When a DB is created, the system does not allocate resources immediately. When a table is created, the system checks whether an already-allocated vnode has a free slot; if so, the table is created in that vnode immediately. If not, the system creates a new vnode on some dnode, chosen based on the current load of the cluster, and then creates the table. If the DB has multiple replicas, the system creates not a single vnode but a vgroup (virtual data node group). The system places no limit on the number of vnodes; it is constrained only by the computing and storage resources of the physical nodes.
The setting of the parameter tables should take the specific scenario into account, and it can be customized per DB at creation time. It should be neither too large nor too small. Too small, in the extreme one vnode per collection point, produces too many data files; too large, and the advantages of virtualization are lost. Given the cluster's computing resources, the total number of vnodes in the system should be at least twice the number of CPU cores.
### 4. Load Balancing
Every dnode (physical node) periodically reports its status (disk space, memory size, CPU, network, number of virtual nodes, and so on) to the mnode (virtual management node), so the mnode knows the state of the entire cluster. Based on this global state, when the mnode finds a dnode overloaded, it moves one or more vnodes from that dnode to other dnodes. While a vnode is being moved, the service continues: inserts, queries, and computations are unaffected. After load balancing finishes, applications do not need to restart; they connect to the new vnode automatically.
If the mnode does not receive a status report from a dnode for some time, it considers the dnode offline. If a dnode stays offline longer than a threshold (set by the configuration parameter offlineThreshold), the mnode forcibly removes it from the cluster. For the vnodes on that dnode, if the number of replicas is greater than one, the system automatically creates new replicas on other dnodes to preserve the replica count.
**Note**: the cluster feature is currently limited to the enterprise edition.

View File

@ -1,176 +0,0 @@
# TDengine System Architecture
## Storage Design
TDengine data mainly include **metadata** and **data**, which we will introduce in the following sections.
### Metadata Storage
Metadata includes information about databases, tables, etc. Metadata files are saved in the _/var/lib/taos/mgmt/_ directory by default. The directory tree is as below:
```
/var/lib/taos/
+--mgmt/
+--db.db
+--meters.db
+--user.db
+--vgroups.db
```
A metadata structure (database, table, etc.) is saved as a record in a metadata file. All metadata files are append-only; even a drop operation just adds a deletion record at the end of the file.
### Data storage
Data in TDengine are sharded according to the time range. Data of tables in the same vnode in a certain time range are saved in the same filegroup, such as files v0f1804*. This sharding strategy can effectively improve data searching speed. By default, a group of files contains data for 10 days, which can be configured by *daysPerFile* in the configuration file or by the *DAYS* keyword in the *CREATE DATABASE* clause. Data in files are organized in blocks. A data block only contains one table's data. Records in the same data block are sorted according to the primary timestamp, which helps to improve the compression rate and save storage. The compression algorithms used in TDengine include simple8B, delta-of-delta, RLE, LZ4, etc.
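A hypothetical example of overriding the default per-filegroup time range at database creation (the database name test is an assumption for illustration):
```mysql
-- each file group will cover 7 days of data instead of the default 10
CREATE DATABASE test DAYS 7;
```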
By default, TDengine data are saved in the */var/lib/taos/data/* directory. The _/var/lib/taos/tsdb/_ directory contains vnode information and data file links.
```
/var/lib/taos/
+--tsdb/
| +--vnode0
| +--meterObj.v0
| +--db/
| +--v0f1804.head->/var/lib/taos/data/vnode0/v0f1804.head1
| +--v0f1804.data->/var/lib/taos/data/vnode0/v0f1804.data
| +--v0f1804.last->/var/lib/taos/data/vnode0/v0f1804.last1
| +--v0f1805.head->/var/lib/taos/data/vnode0/v0f1805.head1
| +--v0f1805.data->/var/lib/taos/data/vnode0/v0f1805.data
| +--v0f1805.last->/var/lib/taos/data/vnode0/v0f1805.last1
| :
+--data/
+--vnode0/
+--v0f1804.head1
+--v0f1804.data
+--v0f1804.last1
+--v0f1805.head1
+--v0f1805.data
+--v0f1805.last1
:
```
#### meterObj file
There is only one meterObj file in a vnode. Information about the vnode, such as its creation time, configuration, and statistics, is saved in this file. It has a structure like below:
```
<start_of_file>
[file_header]
[table_record1_offset&length]
[table_record2_offset&length]
...
[table_recordN_offset&length]
[table_record1]
[table_record2]
...
[table_recordN]
<end_of_file>
```
The file header takes 512 bytes, which mainly contains information about the vnode. Each table record is the representation of a table on disk.
#### head file
The _head_ files contain the index of data blocks in the _data_ file. The inner organization is as below:
```
<start_of_file>
[file_header]
[table1_offset]
[table2_offset]
...
[tableN_offset]
[table1_index_block]
[table2_index_block]
...
[tableN_index_block]
<end_of_file>
```
The table offset array in the _head_ file saves the offset of each table's index block. Indices on data blocks in the same table are saved contiguously, which also makes it efficient to load the data indices of a table. The data index block has a structure like:
```
[index_block_info]
[block1_index]
[block2_index]
...
[blockN_index]
```
The index block info part contains information about the index block, such as the number of block indices. Each block index corresponds to a real data block in the _data_ file or _last_ file. Information about the location of the real data block, the primary timestamp range of the data block, etc. is saved in the block index part. The block indices are sorted in ascending order of the primary timestamp, so algorithms such as binary search can be applied to efficiently locate blocks by time.
#### data file
The _data_ files store the real data block. They are append-only. The organization is as:
```
<start_of_file>
[file_header]
[block1]
[block2]
...
[blockN]
<end_of_file>
```
A data block in _data_ files only belongs to a table in the vnode and the records in a data block are sorted in ascending order according to the primary timestamp key. Data blocks are column-oriented. Data in the same column are stored contiguously, which improves reading speed and compression rate because of their similarity. A data block has the following organization:
```
[column1_info]
[column2_info]
...
[columnN_info]
[column1_data]
[column2_data]
...
[columnN_data]
```
The column info part includes information about column types, the column compression algorithm, the column data offset and length in the _data_ file, etc. Besides, pre-calculated results of the column data in the block are also kept in the column info part, which helps to improve reading speed by avoiding loading the data block when it is not necessary.
#### last file
To avoid storage fragmentation and to improve query speed and compression rate, TDengine introduces an extra file, the _last_ file. When the number of records in a data block is lower than a threshold, TDengine will flush the block to the _last_ file for temporary storage. When new data comes, the data in the _last_ file will be merged with the new data to form a larger data block, which is then written to the _data_ file. The organization of the _last_ file is similar to the _data_ file.
### Summary
The innovation in the architecture and storage design of TDengine improves resource usage. On the one hand, the virtualization makes it easy to distribute resources between different vnodes and supports future scaling. On the other hand, sorted and column-oriented storage gives TDengine a great advantage in writing, querying, and compression.
## Query Design
#### Introduction
TDengine provides a variety of query functions for both tables and super tables. In addition to regular aggregate queries, it also provides time window based queries and statistical aggregation for time-series data. TDengine's query processing requires the client app, management node, and data nodes to work together. The query-related functions and modules included in each component are as follows:
Client (Client App). The client development kit, embedded in a client application, consists of the TAOS SQL parser and query executor, the second-stage aggregator (Result Merger), the continuous query manager, and other major functional modules. The SQL parser is responsible for parsing and verifying the SQL statement and converting it into an abstract syntax tree. The query executor is responsible for transforming the abstract syntax tree into the query execution logic and creating the metadata query according to the query conditions of the SQL statement. Since TAOS SQL does not currently include complex nested queries or a pipeline query processing mechanism, there is no need for query plan optimization or physical query plan conversion. The second-stage aggregator is responsible for performing, at the client side, the aggregation of the independent results returned by the data nodes involved in the query, to generate the final results. The continuous query manager is dedicated to managing the continuous queries created by users, including issuing fixed-interval query requests and writing the results back to TDengine or returning them to the client application as needed. The client is also responsible for retrying after a query fails, canceling query requests, maintaining the connection heartbeat, and reporting the query status to the management node.
Management Node. The management node keeps the metadata of all the data of the entire cluster system, provides clients with the metadata required for queries, and divides query requests according to the load condition of the cluster. A super table contains information about all the tables created from it, so the query processor (Query Executor) of the management node is responsible for processing queries on the tags of tables and returns the table information satisfying the tag query. Besides, the management node maintains the query status of the cluster in the Query Status Manager component, in which the metadata of all currently executing queries are temporarily stored in an in-memory buffer. When the client issues the *show queries* command to the management node, information about the currently running queries is returned to the client.
Data Node. The data node, responsible for storing all data of the database, consists of the query executor, the query processing scheduler, the query task queue, and other related components. Once query requests from the client are received, they are put into the query task queue, waiting to be processed by the query executor. The query executor extracts the query request from the query task queue and invokes the query optimizer to perform basic optimization of the query execution plan. Then the query executor scans the qualified data blocks in both cache and disk to obtain the qualified data and returns the calculated results. Besides, the data node also needs to respond to management information and commands from the management node. For example, after a *kill query* command is received from the management node, the query task must be stopped immediately.
<center> <img src="../assets/fig1.png"> </center>
<center>Fig 1. System query processing architecture diagram (only query related components)</center>
#### Query Process Design
The client, the management node, and the data nodes cooperate to complete the entire query processing of TDengine. Let's take a concrete SQL query as an example to illustrate the whole query processing flow. The SQL statement queries the super table *FOO_SUPER_TABLE* to get the total number of records generated on January 12, 2019, from the tables whose TAG_LOC equals 'beijing'. The SQL statement is as follows:
```sql
SELECT COUNT(*)
FROM FOO_SUPER_TABLE
WHERE TAG_LOC = 'beijing' AND TS >= '2019-01-12 00:00:00' AND TS < '2019-01-13 00:00:00'
```
First, the client invokes the TAOS SQL parser to parse and validate the SQL statement, then generates the syntax tree and extracts the object of the query - the super table *FOO_SUPER_TABLE*; the parser then sends a request with the filtering information (TAG_LOC='beijing') to the management node to get the corresponding metadata about *FOO_SUPER_TABLE*.
Once the management node receives the request for metadata acquisition, it first finds the basic information of the super table *FOO_SUPER_TABLE*, and then applies the query condition (TAG_LOC='beijing') to filter all the related tables created from it. Finally, the query executor returns the metadata information that satisfies the query request to the client.
After the client obtains the metadata information of *FOO_SUPER_TABLE*, the query executor initiates query requests with the timestamp range filtering condition (TS >= '2019-01-12 00:00:00' AND TS < '2019-01-13 00:00:00') to all the nodes that hold the corresponding data, according to the information about data distribution in the metadata.
The data node receives the query sent from the client, converts it into an internal structure and puts it into the query task queue to be executed by query executor after optimizing the execution plan. When the query result is obtained, the query result is returned to the client. It should be noted that the data nodes perform the query process independently of each other, and rely solely on their data and content for processing.
When all data nodes involved in the query return results, the client aggregates the result sets from each data node. In this case, all results are accumulated to generate the final query result. The second stage of aggregation is not always required for all queries. For example, a column selection query does not require a second-stage aggregation at all.
#### REST Query Process
In addition to the C/C++, Python, and JDBC interfaces, TDengine also provides a REST interface based on the HTTP protocol, which is different from development using a client library. When the user uses the REST interface, all the query processing is completed on the server side, and the user's application is not involved in query processing anymore. After the query processing is completed, the result is returned to the client as a JSON string over HTTP.
<center> <img src="../assets/fig2.png"> </center>
<center>Fig. 2 REST query architecture</center>
When a client uses the HTTP-based REST query interface, the client first establishes a connection with the HTTP connector at a data node and then uses the token to ensure the reliability of the request through the REST signature mechanism. For the data node, after receiving the request, the HTTP connector invokes the embedded client program to initiate query processing; the embedded client parses the SQL statement from the HTTP connector and requests the management node to get metadata as needed. After that, the embedded client sends query requests to the same data node or other nodes in the cluster and aggregates the calculation results on demand. Once the HTTP connector has received the SQL request, the subsequent processing is completely consistent with query processing using the client development kit. Finally, the query result is converted into a JSON-format string and returned to the client via an HTTP response.
It should be noted that during the entire processing, the client application is no longer involved; it is only responsible for sending SQL requests through the HTTP protocol and receiving the results in JSON format. Besides, each data node is embedded with an HTTP connector and a client, so a client can send requests to any data node in the cluster; that data node initiates the query, transfers requests to other data nodes as needed, and returns the result to the client through the HTTP protocol.
#### Technology
Because TDengine stores data and tag values separately, the tag values are kept in the management node and directly associated with each table instead of with records, resulting in a great reduction of data storage. Therefore, the tag values can be managed by a fully in-memory structure. First, the filtering of the tag data can drastically reduce the data size involved in the second phase of the query. The query processing for the data is performed at the data node. TDengine takes advantage of the immutable characteristics of IoT data by recording the maximum, minimum, and other statistics of the data in each saved data block, to effectively improve the performance of query processing. If the query involves all the data of an entire data block, the pre-computed result is used directly, and the content of the data block is no longer needed. Since the disk space required to store the pre-computed result is much smaller than the size of the actual data, the pre-computed results can greatly reduce disk IO and speed up query processing.
TDengine employs column-oriented data storage techniques. When a data block is loaded from the disk for calculation, only the required columns are read according to the query condition, so the read overhead is minimized. The data of one column are stored in a contiguous memory block and can therefore make full use of the CPU L2 cache to greatly speed up data scanning. Besides, TDengine utilizes an eager response mechanism and returns partial results before the complete result is acquired. For example, in the case of a column select query, when the first batch of results is obtained, the data node immediately returns it directly to the client.

View File

@ -1,224 +0,0 @@
# STable: Multi-Table Aggregation with Super Tables
TDengine requires a separate table for each data collection point. This greatly improves insert and query performance for a single point, but it also sharply increases the number of tables in the system, making table maintenance and aggregation or statistics across tables harder for applications. To reduce development effort, TDengine introduces the concept of the super table, STable (Super Table).
## What is a Super Table
A STable is an abstraction over data collection points of the same type, a set of collection instances of the same kind; it contains multiple subtables with identical data structure. A STable defines, for its subtables, a table schema and a set of tags: the schema is the data columns of the records and their data types; tag names and types are defined by the STable, while the tag values record each subtable's static information and are used to group and filter subtables. A subtable is essentially an ordinary table, consisting of a timestamp primary key and several data columns, each row recording actual data; queries work exactly as on ordinary tables. The difference is that a subtable belongs to a super table and carries a set of tag values defined by the STable. One STable can be defined for each type of collection device. The data model defines the type of each data column (e.g. temperature, pressure, voltage, current, real-time GPS position), while the tag information belongs to the metadata (e.g. the collection device's serial number, model, location); tags are static and are the table's meta data. When creating a table (data collection point), the user specifies its STable (collection type) and may also specify the tag values, which can likewise be added or modified later.
TDengine extends standard SQL with the keyword tags to define a STable. The syntax is:
```mysql
CREATE TABLE <stable_name> (<field_name> TIMESTAMP, field_name1 field_type,…) TAGS(tag_name tag_type, …)
```
Here tag_name is a tag name and tag_type is the tag's data type. Tags can use any data type supported by TDengine except timestamp. There can be at most 6 tags, and their names must not collide with system keywords or with other column names. For example:
```mysql
create table thermometer (ts timestamp, degree float)
tags (location binary(20), type int)
```
The SQL above creates a STable named thermometer with the tags location and type.
When creating a table for a collection point, you specify the STable it belongs to along with its tag values. The syntax is:
```mysql
CREATE TABLE <tb_name> USING <stb_name> TAGS (tag_value1,...)
```
Continuing the thermometer example above, the statement that creates a data table for a single thermometer is:
```mysql
create table t1 using thermometer tags (beijing, 10)
```
The SQL above creates a table named t1 using thermometer as the template. The table's schema is exactly thermometer's schema, with the tag location set to beijing and the tag type set to 10.
A user can create an unlimited number of tables, with different tags, from one STable; in this sense a STable is a set of tables that share the same data model but carry different tags. As with ordinary tables, users can create, drop, and show super tables, and most queries applicable to ordinary tables also work on STables, including all kinds of aggregation and projection/selection functions. In addition, tag filter conditions can be set so that aggregate queries act only on a subset of the tables under the STable, greatly simplifying application development.
TDengine indexes a table's primary key (the timestamp) and, for the time being, provides no index on other collected quantities in the data model (such as temperature or pressure). Each data collection point produces many data records, but each point's tags are just one record, so tags carry no storage redundancy and their overall data volume is limited. TDengine stores tag data completely separately from the collected dynamic data and builds a high-performance in-memory index on the STable's tags, providing fast operations on tags in every respect. Users can perform create, retrieve, update, and delete (CRUD) operations on them as needed.
A STable belongs to a database; a STable belongs to exactly one database, but a database can have one or more STables, and a STable can have many subtables.
## STable Management
- Create a STable
```mysql
CREATE TABLE <stable_name> (<field_name> TIMESTAMP, field_name1 field_type,…) TAGS(tag_name tag_type, …)
```
The syntax is similar to creating a table, but the names and data types of the TAGS fields must be specified.
Notes:
1. The total length of the TAGS columns cannot exceed 512 bytes;
2. The data type of a TAGS column cannot be timestamp or nchar;
3. A TAGS column name cannot be the same as any other column name;
4. A TAGS column name cannot be a reserved keyword.
- Show all created STables
```mysql
show stables;
```
Shows all STables in the current database and their information, including name, creation time, number of columns, number of tags (TAG), and the number of tables created from each STable.
- Drop a STable
```mysql
DROP TABLE <stable_name>
```
Note: dropping a STable does not cascade-delete the tables created from it; on the contrary, a STable can only be dropped after all the tables created from it have been dropped.
- Show the tables that belong to a STable and satisfy the query conditions
```mysql
SELECT TBNAME,[TAG_NAME,…] FROM <stable_name> WHERE <tag_name> <[=|=<|>=|<>] values..> ([AND|OR] …)
```
Shows the tables that belong to a STable and satisfy the query conditions. Note: TBNAME is a keyword that displays the names of the subtables created from the STable; conditions on tags can be used in the query.
```mysql
SELECT COUNT(TBNAME) FROM <stable_name> WHERE <tag_name> <[=|=<|>=|<>] values..> ([AND|OR] …)
```
Counts the number of subtables that belong to a STable and satisfy the query conditions.
## Creating Subtables Automatically When Writing Data
In some special scenarios, the user does not know, when writing data, whether the table for a device already exists. In that case the auto-create syntax can be used: a nonexistent subtable is created automatically at write time using the schema defined by the super table, while an existing table is left untouched. Note that auto-creation can only create subtables, not super tables, so the super table must be defined beforehand. The auto-create syntax is very similar to insert/import; the only difference is that the statement carries the super table name and the tag values. The syntax is:
```mysql
INSERT INTO <tb_name> USING <stb_name> TAGS (<tag1_value>, ...) VALUES (field_value, ...) (field_value, ...) ...;
```
Inserts one or more records into table tb_name. If the table tb_name does not exist, it is created using the schema defined by the super table stb_name and the user-specified tag values (i.e. tag1_value, …), and the specified values are written into it. If tb_name already exists, the table creation step is skipped; the system does not check whether tb_name's tags match the specified tag values, i.e. the tags of an existing table are not updated.
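A minimal sketch of this, continuing the thermometer example used in this document (the subtable name therm9 and the values are assumptions for illustration):
```mysql
-- creates subtable therm9 from super table thermometer on first write;
-- if therm9 already exists, the TAGS clause is ignored and only the row is inserted
INSERT INTO therm9 USING thermometer TAGS ('shanghai', 2) VALUES (now, 21.5);
```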
```mysql
INSERT INTO <tb1_name> USING <stb1_name> TAGS (<tag1_value1>, ...) VALUES (<field1_value1>, ...) (<field1_value2>, ...) ... <tb_name2> USING <stb_name2> TAGS(<tag1_value2>, ...) VALUES (<field1_value1>, ...) ...;
```
Inserts one or more records into each of several tables (tb1_name, tb2_name, and so on), specifying for each the super table used for automatic creation.
## Managing the TAGs of a STable
Except for changing a tag's value, which is done on a subtable, all other tag operations (adding a tag, dropping a tag, and so on) can only be applied to the STable, not to individual subtables. After a tag is added to a STable, all tables built on that STable automatically gain the new tag; for numeric tags, the default value of the new tag is 0.
- Add a new tag
```mysql
ALTER TABLE <stable_name> ADD TAG <new_tag_name> <TYPE>
```
Adds a new tag to the STable and specifies its type. The total number of tags cannot exceed 6.
- Drop a tag
```mysql
ALTER TABLE <stable_name> DROP TAG <tag_name>
```
Drops a tag from the super table; after a tag is dropped from a super table, all its subtables automatically drop the tag as well.
Note: the first tag cannot be dropped; a STable must keep at least one tag.
- Change a tag's name
```mysql
ALTER TABLE <stable_name> CHANGE TAG <old_tag_name> <new_tag_name>
```
Changes a tag's name on the super table; after the change, all the subtables under the super table automatically update the tag name as well.
- Change a subtable's tag value
```mysql
ALTER TABLE <table_name> SET TAG <tag_name>=<new_tag_value>
```
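The statement above changes one subtable's tag value; a hypothetical example (the table name therm4 reuses a subtable defined later in this document, and the new value is an assumption):
```mysql
-- relabel a single thermometer after it is moved; only this subtable is
-- affected, the super table and other subtables keep their tag values
ALTER TABLE therm4 SET TAG location='chongqing';
```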
## Multi-Table Aggregation over a STable
Multi-table aggregate queries run over all subtables created from the STable; filtering on any of the tag values is supported, and the results can be grouped by tag values (fuzzy matching on binary-typed tags is not yet supported). The syntax is:
```mysql
SELECT function<field_name>,…
FROM <stable_name>
WHERE <tag_name> <[=|<=|>=|<>] values..> ([AND|OR] …)
INTERVAL (<interval> [, offset])
GROUP BY <tag_name>, <tag_name>
ORDER BY <tag_name> <asc|desc>
SLIMIT <group_limit>
SOFFSET <group_offset>
LIMIT <record_limit>
OFFSET <record_offset>
```
**Explanation**
For super table aggregation, TDengine currently supports the following aggregation/selection functions: sum, count, avg, first, last, min, max, top, and bottom, plus projection over all or some columns; usage is the same as for single-table queries. Other kinds of aggregation and arithmetic operations are not yet supported, and none of the functions or computations may currently be nested.
A query without GROUP BY aggregates over all the tables under the super table that satisfy the filter conditions; the output is ordered by timestamp in ascending order by default, and ORDER BY _c0 ASC|DESC selects ascending or descending timestamp order. An aggregate query with GROUP BY <tag_name> groups by tags and aggregates each group separately, outputting one aggregation result per group; the order between groups can be specified with ORDER BY <tag_name>, while within each group the time series is monotonically increasing.
Use SLIMIT/SOFFSET for pagination across groups, i.e. the maximum number of groups in the result set and the starting group. Use LIMIT/OFFSET for pagination within a group, i.e. the maximum number of records output per group and the starting record. A sketch combining these clauses follows.
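A hypothetical sketch on the thermometer STable defined below (the clause values are assumptions for illustration):
```mysql
-- average temperature per location, at most 2 location groups (SLIMIT),
-- and at most 1 aggregated row per group (LIMIT)
SELECT AVG(degree) FROM thermometer
GROUP BY location
SLIMIT 2 SOFFSET 0
LIMIT 1 OFFSET 0;
```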
## STable Usage Example
We use temperature sensors collecting time-series data to demonstrate STables. In this example, a table is created for each thermometer, named by the thermometer's ID; the time of a reading is recorded as ts and the collected value as degree. Tags record each collector's region and type, which makes later queries convenient. Since all thermometers collect the same quantities, we define the schema with a STable.
### Define the STable Schema and Create Subtables from It
The statement to create the STable is:
```mysql
CREATE TABLE thermometer (ts timestamp, degree double)
TAGS(location binary(20), type int)
```
Suppose there are four temperature collectors, spread over the three regions Beijing, Tianjin, and Shanghai, and three collector types; we can create a table for each collector as follows:
```mysql
CREATE TABLE therm1 USING thermometer TAGS (beijing, 1);
CREATE TABLE therm2 USING thermometer TAGS (beijing, 2);
CREATE TABLE therm3 USING thermometer TAGS (tianjin, 1);
CREATE TABLE therm4 USING thermometer TAGS (shanghai, 3);
```
Here therm1, therm2, therm3, and therm4 are four concrete subtables of the super table thermometer, i.e. ordinary tables. Take therm1 as an example: it holds the data of collector therm1, its schema is fully defined by thermometer, and its tags location="beijing" and type=1 mean that therm1 is in Beijing and is a type 1 thermometer.
### Write Data
Note that data cannot be written to the STable directly; writes go to each subtable. We write one record into each of the four tables therm1, therm2, therm3, and therm4:
```mysql
INSERT INTO therm1 VALUES (2018-01-01 00:00:00.000, 20);
INSERT INTO therm2 VALUES (2018-01-01 00:00:00.000, 21);
INSERT INTO therm3 VALUES (2018-01-01 00:00:00.000, 24);
INSERT INTO therm4 VALUES (2018-01-01 00:00:00.000, 23);
```
### Aggregate Query by Tags
Query the number of samples count(*), average temperature avg(degree), maximum temperature max(degree), and minimum temperature min(degree) of the temperature sensors located in beijing and tianjin, grouping the results by region (location) and sensor type (type).
```mysql
SELECT COUNT(*), AVG(degree), MAX(degree), MIN(degree)
FROM thermometer
WHERE location=beijing or location=tianjin
GROUP BY location, type
```
### Aggregate Query by Time Window
Query, for the temperature sensors located outside Beijing only, the number of samples count(*), average temperature avg(degree), maximum temperature max(degree), and minimum temperature min(degree) over the last 24 hours (24h), aggregating the samples in 10-minute windows and then grouping the results by region (location) and sensor type (type).
```mysql
SELECT COUNT(*), AVG(degree), MAX(degree), MIN(degree)
FROM thermometer
WHERE location<>beijing and ts>=now-1d
INTERVAL(10m)
GROUP BY location, type
```

View File

@ -1,195 +0,0 @@
# STable: Super Table
"One Table for One Device" design can improve the insert/query performance significantly for a single device. But it has a side effect, the aggregation of multiple tables becomes hard. To reduce the complexity and improve the efficiency, TDengine introduced a new concept: STable (Super Table).
## What is a Super Table
A STable is an abstraction and a template for a type of device. A STable contains a set of devices (tables) that have the same schema or data structure. Besides the shared schema, a STable has a set of tags, like the model, serial number and so on. Tags are used to record the static attributes of the devices and are used to group a set of devices (tables) for aggregation. Tags are metadata of a table and can be added, deleted or changed.
TDengine does not save tags as a part of the data points collected. Instead, tags are saved as metadata. Each table has a set of tags. To improve query performance, tags are all cached and indexed. One table can only belong to one STable, but one STable may contain many tables.
Like a table, you can create, show, delete and describe STables. Most query operations on tables can be applied to STable too, including the aggregation and selector functions. For queries on a STable, if no tags filter, the operations are applied to all the tables created via this STable. If there is a tag filter, the operations are applied only to a subset of the tables which satisfy the tag filter conditions. It will be very convenient to use tags to put devices into different groups for aggregation.
## Create a STable
Similar to creating a standard table, the syntax is:
```mysql
CREATE TABLE <stable_name> (<field_name> TIMESTAMP, field_name1 field_type,…) TAGS(tag_name tag_type, …)
```
New keyword "tags" is introduced, where tag_name is the tag name, and tag_type is the associated data type.
Note
1. The bytes of all tags together shall be less than 512
2. Tag's data type can not be time stamp or nchar
3. Tag name shall be different from the field name
4. Tag name shall not be the same as system keywords
5. Maximum number of tags is 6
For example:
```mysql
create table thermometer (ts timestamp, degree float)
tags (location binary(20), type int)
```
The above statement creates a STable thermometer with two tags, "location" and "type".
## Create a Table via STable
To create a table for a device, you can use a STable as its template and assign the tag values. The syntax is:
```mysql
CREATE TABLE <tb_name> USING <stb_name> TAGS (tag_value1,...)
```
You can create any number of tables via a STable, and each table may have different tag values. For example, the following creates five tables via the STable thermometer:
```mysql
create table t1 using thermometer tags (beijing, 10);
create table t2 using thermometer tags (beijing, 20);
create table t3 using thermometer tags (shanghai, 10);
create table t4 using thermometer tags (shanghai, 20);
create table t5 using thermometer tags (new york, 10);
```
## Aggregate Tables via STable
You can group a set of tables together by specifying the tags filter condition, then apply the aggregation operations. The result set can be grouped and ordered based on tag value. Syntax is
```mysql
SELECT function<field_name>,…
FROM <stable_name>
WHERE <tag_name> <[=|<=|>=|<>] values..> ([AND|OR] …)
INTERVAL (<time range>)
GROUP BY <tag_name>, <tag_name>
ORDER BY <tag_name> <asc|desc>
SLIMIT <group_limit>
SOFFSET <group_offset>
LIMIT <record_limit>
OFFSET <record_offset>
```
For the time being, STable supports only the following aggregation/selection functions: *sum, count, avg, first, last, min, max, top, bottom*, and the projection operations, with the same syntax as a standard table. Arithmetic operations are not supported, nor are embedded queries.
*INTERVAL* is used for the aggregation over a time range.
If *GROUP BY* is not used, the aggregation is applied to all the selected tables, and the result set is output in ascending order of the timestamp, but you can use "*ORDER BY _c0 ASC|DESC*" to specify the order you like.
If *GROUP BY <tag_name>* is used, the aggregation is applied to groups based on tags. Each group is aggregated independently. Result set is a group of aggregation results. The group order is decided by *ORDER BY <tag_name>*. Inside each group, the result set is in the ascending order of the time stamp.
*SLIMIT/SOFFSET* are used to limit the number of groups and starting group number.
*LIMIT/OFFSET* are used to limit the number of records in a group and the starting rows.
### Example 1:
Check the average, maximum, and minimum temperatures of Beijing and Shanghai, and group the result set by location and type. The SQL statement shall be:
```mysql
SELECT COUNT(*), AVG(degree), MAX(degree), MIN(degree)
FROM thermometer
WHERE location=beijing or location=shanghai
GROUP BY location, type
```
### Example 2:
List the number of records, average, maximum, and minimum temperature every 10 minutes for the past 24 hours for all the thermometers located in Beijing with type 10. The SQL statement shall be:
```mysql
SELECT COUNT(*), AVG(degree), MAX(degree), MIN(degree)
FROM thermometer
WHERE location=beijing and type=10 and ts>=now-1d
INTERVAL(10m)
```
## Create Table Automatically
Insert operation will fail if the table is not created yet. But for STable, TDengine can create the table automatically if the application provides the STable name, table name and tags' value when inserting data points. The syntax is:
```mysql
INSERT INTO <tb_name> USING <stb_name> TAGS (<tag1_value>, ...) VALUES (field_value, ...) (field_value, ...) ... <tb_name2> USING <stb_name2> TAGS(<tag1_value2>, ...) VALUES (<field1_value1>, ...) ...;
```
When inserting data points into table tb_name, the system will check if table tb_name is created or not. If it is already created, the data points will be inserted as usual. But if the table is not created yet, the system will create the table tb_name using the STable stb_name as the template, with the given tags. Multiple tables can be specified in the SQL statement.
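A minimal hypothetical example (the table name t9 and the values are assumptions, reusing the thermometer STable from earlier in this document):
```mysql
-- the first insert creates table t9 from STable thermometer automatically;
-- later inserts into t9 skip table creation and ignore the TAGS clause
INSERT INTO t9 USING thermometer TAGS ('beijing', 3) VALUES (now, 23.1);
```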
## Management of STables
After you create a STable, you can describe, delete, and change it. This section lists all the supported operations.
### Show STables in current DB
```mysql
show stables;
```
It lists all STables in the current DB, including the name, creation time, number of fields, number of tags, and number of tables created via this STable.
### Describe a STable
```mysql
DESCRIBE <stable_name>
```
It lists the STable's schema and tags
### Drop a STable
```mysql
DROP TABLE <stable_name>
```
To delete a STable, all the tables created via this STable shall be deleted first, otherwise, it will fail.
### List the Associated Tables of a STable
```mysql
SELECT TBNAME,[TAG_NAME,…] FROM <stable_name> WHERE <tag_name> <[=|=<|>=|<>] values..> ([AND|OR] …)
```
It will list all the tables which satisfy the tag filter conditions. The tables are all created from this specific STable. TBNAME is a newly introduced keyword; it is the table name associated with the STable.
```mysql
SELECT COUNT(TBNAME) FROM <stable_name> WHERE <tag_name> <[=|=<|>=|<>] values..> ([AND|OR] …)
```
The above SQL statement will list the number of tables in a STable, which satisfy the filter condition.
## Management of Tags
You can add, delete and change the tags for a STable, and you can change the tag value of a table. The SQL commands are listed below.
### Add a Tag
```mysql
ALTER TABLE <stable_name> ADD TAG <new_tag_name> <TYPE>
```
It adds a new tag to the STable with a data type. The maximum number of tags is 6.
### Drop a Tag
```mysql
ALTER TABLE <stable_name> DROP TAG <tag_name>
```
It drops a tag from a STable. The first tag could not be deleted, and there must be at least one tag.
### Change a Tag's Name
```mysql
ALTER TABLE <stable_name> CHANGE TAG <old_tag_name> <new_tag_name>
```
It changes the name of a tag from old to new.
### Change the Tag's Value
```mysql
ALTER TABLE <table_name> SET TAG <tag_name>=<new_tag_value>
```
It changes a table's tag value to a new one.

View File

@ -1,636 +0,0 @@
# TAOS SQL
TDengine provides a SQL-like query language to insert or query data. You can execute the SQL statements through the TDengine Shell, or through the C/C++, Java (JDBC), Python, RESTful, Go, and Node.js APIs, to interact with the `taosd` service.
Before reading through, please have a look at the conventions used for syntax descriptions here in this documentation.
* Squared brackets ("[]") indicate optional arguments or clauses
* Curly braces ("{}") indicate that one member from a set of choices in the braces must be chosen
* A single vertical line ("|") works as a separator for multiple optional arguments or clauses
* Dots ("…") mean the preceding element can be repeated
## Data Types
### Timestamp
The timestamp is the most important data type in TDengine. The first column of each table must be of **`TIMESTAMP`** type, but other columns can also be of **`TIMESTAMP`** type. The following rules apply to timestamps:
* String Format: `'YYYY-MM-DD HH:mm:ss.MS'`, which represents the year, month, day, hour, minute, second, and millisecond. For example, `'2017-08-12 18:52:58.128'` is a valid timestamp string. Note: a timestamp string must be quoted with either single quotes or double quotes.
* Epoch Time: a timestamp value can also be a long integer representing milliseconds since the epoch. For example, the values in the above example can be represented as an epoch `1502535178128` in milliseconds. Please note the epoch time doesn't need any quotes.
* Internal Function **`NOW`** : this is the current time of the server
* If timestamp is 0 when inserting a record, timestamp will be set to the current time of the server
* Arithmetic operations can be applied to timestamp. For example: `now-2h` represents a timestamp which is 2 hours ago from the current server time. Units include `a` (milliseconds), `s` (seconds), `m` (minutes), `h` (hours), `d` (days), `w` (weeks), `n` (months), `y` (years). **`NOW`** can be used in either insertions or queries.
The default time precision is millisecond; you can change it to microsecond by setting the parameter enableMicrosecond in the [system configuration](../administrator/#Configuration-on-Server). For epoch time, the long integer shall then be microseconds since the epoch. For the above string format, MS shall be six digits.
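A small sketch of the timestamp arithmetic described above, used in a query (the table tb1 reuses the example table defined later in this document):
```mysql
-- rows from the last 2 hours; 'now' is evaluated on the server
SELECT * FROM tb1 WHERE ts > now - 2h;
```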
### Data Types
The full list of data types is listed below. For string types of data, we will use ***M*** to indicate the maximum length of that type.
| | Data Type | Bytes | Note |
| ---- | :---------: | :-----: | ------------------------------------------------------------ |
| 1 | TINYINT | 1 | A nullable integer type with a range of [-127, 127] |
| 2 | SMALLINT | 2 | A nullable integer type with a range of [-32767, 32767] |
| 3 | INT | 4 | A nullable integer type with a range of [-2^31+1, 2^31-1] |
| 4 | BIGINT | 8 | A nullable integer type with a range of [-2^59, 2^59] |
| 5 | FLOAT | 4 | A standard nullable float type with 6-7 significant digits and a range of [-3.4E38, 3.4E38] |
| 6 | DOUBLE | 8 | A standard nullable double float type with 15-16 significant digits and a range of [-1.7E308, 1.7E308] |
| 7 | BOOL | 1 | A nullable boolean type, [**`true`**, **`false`**] |
| 8 | TIMESTAMP | 8 | A nullable timestamp type with the same usage as the primary column timestamp |
| 9 | BINARY(*M*) | *M* | A nullable string type of length *M*; an error will be thrown if the length is exceeded. The maximum length of *M* is 16374, but as the maximum row size is 16K bytes, the actual upper limit will generally be less than 16374. This type of string only supports ASCII-encoded characters. |
| 10 | NCHAR(*M*) | 4 * *M* | A nullable string type of length *M*; an error will be thrown if the length is exceeded. The **`NCHAR`** type supports Unicode-encoded characters. |
All the keywords in a SQL statement are case-insensitive, but string values are case-sensitive and must be quoted with a pair of `'` or `"`. To quote a `'` or a `"`, you can use the escape character `\`.
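For instance (a hypothetical value, assuming the backslash escape described above):
```mysql
-- a single quote inside a single-quoted string is escaped with a backslash
SELECT * FROM tb1 WHERE col3 = 'it\'s';
```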
## Database Management
- **Create a Database**
```mysql
CREATE DATABASE [IF NOT EXISTS] db_name [KEEP keep]
```
Option: `KEEP` is used for data retention policy. The data records will be removed once keep-days are passed. There are more parameters related to DB storage, please check [system configuration](../administrator/#Configuration-on-Server).
- **Use a Database**
```mysql
USE db_name
```
Use or switch the current database.
- **Drop a Database**
```mysql
DROP DATABASE [IF EXISTS] db_name
```
Remove a database; all the tables inside the DB will be removed too, so be careful.
- **List all Databases**
```mysql
SHOW DATABASES
```
## Table Management
- **Create a Table**
```mysql
CREATE TABLE [IF NOT EXISTS] tb_name (timestamp_field_name TIMESTAMP, field1_name data_type1 [, field2_name data_type2 ...])
```
Note:
1) The first column must be a `timestamp`, and the system will set it as the primary key.
2) The maximum number of columns is 1024, the minimum number of columns is 2.
3) The maximum length: a database name is 33 characters, a table name is 193, and a column name is 65.
4) The record size is limited to 16k bytes.
5) The total tag length is limited to 16k bytes, and the maximum number of tags is 128.
6) For `binary` or `nchar` data types, the length must be specified. For example, binary(20) means a binary data type with 20 bytes.
- **Drop a Table**
```mysql
DROP TABLE [IF EXISTS] tb_name
```
- **List all Tables**
```mysql
SHOW TABLES [LIKE tb_name_wildcard]
```
It shows all tables in the current DB.
Note: Wildcard characters can be used in the table name to filter tables.
Wildcard characters:
1) % means 0 to any number of characters.
2) _ (underscore) means exactly one character.
- **Print Table Schema**
```mysql
DESCRIBE tb_name
```
- **Add a Column**
```mysql
ALTER TABLE tb_name ADD COLUMN field_name data_type
```
- **Drop a Column**
```mysql
ALTER TABLE tb_name DROP COLUMN field_name
```
If the table is created via [Super Table](), the schema can only be changed via STable. But for tables not created from STable, you can change their schema directly.
**Tips**: You can apply an operation on a table not in the current DB by concatenating DB name with the character '.', then with the table name. For example, 'demo.tb1' means the operation is applied to table `tb1` in DB `demo` even though `demo` is not the currently selected DB.
## STable Management
- **Create a STable**
```mysql
CREATE TABLE [IF NOT EXISTS] stb_name (timestamp_field_name TIMESTAMP, field1_name data_type1 [, field2_name data_type2 ...]) TAGS (tag1_name tag_type1, tag2_name tag_type2 [, tag3_name tag_type3])
```
Creating a STable is the same as creating a table, but the tag names and types should be specified.
Note:
1) The total tag length should not exceed 512 bytes
2) The type of a tag cannot be timestamp
3) A tag name should not be the same as any other column name or tag name
4) A tag name should not be any TAOS keyword.
- **Drop a STable**
```mysql
DROP TABLE [IF EXISTS] tb_name
```
Deleting a STable will also delete all the tables created using the STable.
- **List all STables**
```mysql
SHOW STABLES [LIKE tb_name_wildcard]
```
It shows all STables in the current DB.
Note: Wildcard characters can be used in the table name to filter tables.
Wildcard characters:
1) % means 0 to any number of characters.
2) _ (underscore) means exactly one character.
- **Print STable Schema**
```mysql
DESCRIBE stb_name
```
- **Add a Column**
```mysql
ALTER TABLE stb_name ADD COLUMN field_name data_type
```
- **Drop a Column**
```mysql
ALTER TABLE stb_name DROP COLUMN field_name
```
If the table is created via [Super Table](), the schema can only be changed via STable. But for tables not created from STable, you can change their schema directly.
**Tips**: You can apply an operation on a table not in the current DB by concatenating DB name with the character '.', then with the table name. For example, 'demo.tb1' means the operation is applied to table `tb1` in DB `demo` even though `demo` is not the currently selected DB.
## STable Tag Management
- **Add a tag**
```mysql
ALTER TABLE stb_name ADD TAG new_tag_name tag_type
```
Add a new tag and specify its data type for a STable. The total number of tags should be no more than 128.
- **Drop a tag**
```mysql
ALTER TABLE stb_name DROP TAG tag_name
```
Deleting a tag from a STable also deletes the tag from all the tables created using the STable.
- **Modify tag name**
```mysql
ALTER TABLE stb_name CHANGE TAG old_tag_name new_tag_name
```
Modifying a tag name of a STable also modifies the tag name for all the tables created using the STable.
- **Change tag value**
```mysql
ALTER TABLE tb_name SET TAG tag_name=new_tag_value
```
**Note**: 'Add a tag', 'Drop a tag', and 'Modify tag name' are used for STables; 'Change tag value' is used for tables.
## Inserting Records
- **Insert a Record**
```mysql
INSERT INTO tb_name VALUES (field_value, ...);
```
Insert a data record into table tb_name
- **Insert a Record with Selected Columns**
```mysql
INSERT INTO tb_name (field1_name, ...) VALUES(field1_value, ...)
```
Insert a data record into table tb_name, with data in the selected columns. If a column is not selected, the system will put NULL there. The first column (timestamp) cannot be NULL; it must be provided.
- **Insert a Batch of Records**
```mysql
INSERT INTO tb_name VALUES (field1_value1, ...) (field1_value2, ...)...;
```
Insert multiple data records into the table
- **Insert a Batch of Records with Selected Columns**
```mysql
INSERT INTO tb_name (field1_name, ...) VALUES(field1_value1, ...) (field1_value2, ...)
```
- **Insert Records into Multiple Tables**
```mysql
INSERT INTO tb1_name VALUES (field1_value1, ...)(field1_value2, ...)...
tb2_name VALUES (field1_value1, ...)(field1_value2, ...)...;
```
Insert data records into table tb1_name and tb2_name
- **Insert Records into Multiple Tables with Selected Columns**
```mysql
INSERT INTO tb1_name (tb1_field1_name, ...) VALUES (field1_value1, ...) (field1_value1, ...)
tb2_name (tb2_field1_name, ...) VALUES(field1_value1, ...) (field1_value2, ...)
```
Note:
1. If the timestamp is 0, the timestamp will be set to the system time on the server.
2. The timestamp of the oldest record allowed to be inserted is the current server time minus the configured keep value (the number of days the data are retained), and the timestamp of the latest record allowed to be inserted is the current server time plus the configured days value (the time span in which a data file stores data, in days). Both keep and days can be specified when creating the database; the default values are 3650 days and 10 days, respectively.
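A hypothetical example of setting the keep and days limits described in note 2 at database creation (the database name demo is an assumption):
```mysql
-- records older than now - 3650 days or newer than now + 10 days are rejected
CREATE DATABASE demo KEEP 3650 DAYS 10;
```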
**IMPORT**: If you do want to insert a historical data record into a table, use the IMPORT command instead of INSERT. IMPORT has the same syntax as INSERT.
## Data Query
### Query Syntax:
```mysql
SELECT select_expr [, select_expr ...]
FROM {tb_name_list}
[WHERE where_condition]
[INTERVAL [interval_offset,] interval_val]
[FILL fill_val]
[SLIDING fill_val]
[GROUP BY col_list]
[ORDER BY col_list { DESC | ASC }]
[HAVING expr_list]
[SLIMIT limit_val [, SOFFSET offset_val]]
[LIMIT limit_val [, OFFSET offset_val]]
[>> export_file]
SELECT function_list FROM tb_name
[WHERE where_condition]
[LIMIT limit [, OFFSET offset]]
[>> export_file]
```
- To query a table, use `*` to select all data from a table; or a specified list of expressions `expr_list` of columns. The SQL expression can contain alias and arithmetic operations between numeric typed columns.
- For the `WHERE` conditions, use logical operations to filter the timestamp column and all numeric columns, and wildcards to filter the two string-typed columns.
- Sort the result set by the first column timestamp `_c0` (or directly use the timestamp column name) in either descending or ascending order (by default). "Order by" cannot be applied to other columns.
- Use `LIMIT` and `OFFSET` to control the number of rows returned and the starting position of the retrieved rows. LIMIT/OFFSET is applied after "order by" operations.
- Export the retrieved result set into a CSV file using `>>`. The target file's full path should be explicitly specified in the statement.
### Supported Operations of Data Filtering:
| Operation | Note | Applicable Data Types |
| --------- | ----------------------------- | ------------------------------------- |
| > | larger than | **`timestamp`** and all numeric types |
| < | smaller than | **`timestamp`** and all numeric types |
| >= | larger than or equal to | **`timestamp`** and all numeric types |
| <= | smaller than or equal to | **`timestamp`** and all numeric types |
| = | equal to | all types |
| <> | not equal to | all types |
| % | match with any char sequences | **`binary`** **`nchar`** |
| _ | match with a single char | **`binary`** **`nchar`** |
1. For two or more conditions, only AND is supported, OR is not supported yet.
2. For filtering, only a single range is supported. For example, `value>20 and value<30` is a valid condition, but `value<20 AND value<>5` is an invalid condition
### Some Examples
- For the examples below, table tb1 is created via the following statements
```mysql
CREATE TABLE tb1 (ts timestamp, col1 int, col2 float, col3 binary(50))
```
- Query all the records in tb1 in the last hour:
```mysql
SELECT * FROM tb1 WHERE ts >= NOW - 1h
```
- Query all the records in tb1 between 2018-06-01 08:00:00.000 and 2018-06-02 08:00:00.000, and filter out only the records whose col3 value ends with 'nny', and sort the records by their timestamp in a descending order:
```mysql
SELECT * FROM tb1 WHERE ts > '2018-06-01 08:00:00.000' AND ts <= '2018-06-02 08:00:00.000' AND col3 LIKE '%nny' ORDER BY ts DESC
```
- Query the sum of col1 and col2 as alias 'complex_metric', and filter on the timestamp and col2 values. Limit the number of returned rows to 10, and offset the result by 5.
```mysql
SELECT (col1 + col2) AS 'complex_metric' FROM tb1 WHERE ts > '2018-06-01 08:00:00.000' and col2 > 1.2 LIMIT 10 OFFSET 5
```
- Query the number of records in tb1 in the last 10 minutes, whose col2 value is larger than 3.14, and export the result to file `/home/testoutpu.csv`.
```mysql
SELECT COUNT(*) FROM tb1 WHERE ts >= NOW - 10m AND col2 > 3.14 >> /home/testoutpu.csv
```
## SQL Functions
### Aggregation Functions
TDengine supports aggregations over numerical values, they are listed below:
- **COUNT**
```mysql
SELECT COUNT([*|field_name]) FROM tb_name [WHERE clause]
```
Function: return the number of rows.
Return Data Type: `integer`.
Applicable Data Types: all.
Applied to: table/STable.
Note:
1) `*` can be used for all columns, as long as a column has non-NULL values, it will be counted.
2) If it is on a specific column, only rows with non-NULL values will be counted
- **AVG**
```mysql
SELECT AVG(field_name) FROM tb_name [WHERE clause]
```
Function: return the average value of a specific column.
Return Data Type: `double`.
Applicable Data Types: all types except `timestamp`, `binary`, `nchar`, `bool`.
Applied to: table/STable.
- **TWA**
```mysql
SELECT TWA(field_name) FROM tb_name WHERE clause
```
Function: return the time-weighted average value of a specific column
Return Data Type: `double`
Applicable Data Types: all types except `timestamp`, `binary`, `nchar`, `bool`
Applied to: table/STable
- **SUM**
```mysql
SELECT SUM(field_name) FROM tb_name [WHERE clause]
```
Function: return the sum of a specific column.
Return Data Type: `long integer` or `double`.
Applicable Data Types: all types except `timestamp`, `binary`, `nchar`, `bool`.
Applied to: table/STable.
- **STDDEV**
```mysql
SELECT STDDEV(field_name) FROM tb_name [WHERE clause]
```
Function: returns the standard deviation of a specific column.
Return Data Type: double.
Applicable Data Types: all types except `timestamp`, `binary`, `nchar`, `bool`.
Applied to: table.
- **LEASTSQUARES**
```mysql
SELECT LEASTSQUARES(field_name, start_val, step_val) FROM tb_name [WHERE clause]
```
Function: performs a linear fit to the primary timestamp and the specified column.
Return Data Type: a string of the coefficient and the intercept of the fitted line.
Applicable Data Types: all types except timestamp, binary, nchar, bool.
Applied to: table.
Note: The timestamp is taken as the independent variable while the specified column value is taken as the dependent variable.
### Selector Functions
- **MIN**
```mysql
SELECT MIN(field_name) FROM {tb_name | stb_name} [WHERE clause]
```
Function: return the minimum value of a specific column.
Return Data Type: the same data type.
Applicable Data Types: all types except timestamp, binary, nchar, bool.
Applied to: table/STable.
- **MAX**
```mysql
SELECT MAX(field_name) FROM { tb_name | stb_name } [WHERE clause]
```
Function: return the maximum value of a specific column.
Return Data Type: the same data type.
Applicable Data Types: all types except timestamp, binary, nchar, bool.
Applied to: table/STable.
- **FIRST**
```mysql
SELECT FIRST(field_name) FROM { tb_name | stb_name } [WHERE clause]
```
Function: return the first non-NULL value.
Return Data Type: the same data type.
Applicable Data Types: all types.
Applied to: table/STable.
Note: To return all columns, use first(*).
- **LAST**
```mysql
SELECT LAST(field_name) FROM { tb_name | stb_name } [WHERE clause]
```
Function: return the last non-NULL value.
Return Data Type: the same data type.
Applicable Data Types: all types.
Applied to: table/STable.
Note: To return all columns, use last(*).
- **TOP**
```mysql
SELECT TOP(field_name, K) FROM { tb_name | stb_name } [WHERE clause]
```
Function: return the `k` largest values.
Return Data Type: the same data type.
Applicable Data Types: all types except `timestamp`, `binary`, `nchar`, `bool`.
Applied to: table/STable.
Note:
1) Valid range of `k`: 1≤*k*≤100
2) The associated `timestamp` will be returned too.
- **BOTTOM**
```mysql
SELECT BOTTOM(field_name, K) FROM { tb_name | stb_name } [WHERE clause]
```
Function: return the `k` smallest values.
Return Data Type: the same data type.
Applicable Data Types: all types except `timestamp`, `binary`, `nchar`, `bool`.
Applied to: table/STable.
Note:
1) valid range of `k`: 1≤*k*≤100;
2) The associated `timestamp` will be returned too.
- **PERCENTILE**
```mysql
SELECT PERCENTILE(field_name, P) FROM { tb_name | stb_name } [WHERE clause]
```
Function: the value of the specified column below which `P` percent of the data points fall.
Return Data Type: double.
Applicable Data Types: all types except `timestamp`, `binary`, `nchar`, `bool`.
Applied to: table/STable.
Note: The range of `P` is `[0, 100]`. When `P=0`, `PERCENTILE` returns the same value as `MIN`; when `P=100`, `PERCENTILE` returns the same value as `MAX`.
- **APERCENTILE**
```mysql
SELECT APERCENTILE(field_name, P) FROM { tb_name | stb_name } [WHERE clause]
```
Function: the value of the specified column below which `P` percent of the data points fall; it returns an approximate percentile value.
Return Data Type: double.
Applicable Data Types: all types except `timestamp`, `binary`, `nchar`, `bool`.
Applied to: table/STable.
Note: The range of `P` is `[0, 100]`. When `P=0`, `APERCENTILE` returns the same value as `MIN`; when `P=100`, `APERCENTILE` returns the same value as `MAX`. `APERCENTILE` has much better performance than `PERCENTILE`.
- **LAST_ROW**
```mysql
SELECT LAST_ROW(field_name) FROM { tb_name | stb_name }
```
Function: return the last row.
Return Data Type: the same data type.
Applicable Data Types: all types.
Applied to: table/STable.
Note: unlike LAST, LAST_ROW returns the last row even if it has NULL values.
### Transformation Functions
- **DIFF**
```mysql
SELECT DIFF(field_name) FROM tb_name [WHERE clause]
```
Function: return the difference between successive values of the specified column.
Return Data Type: the same data type.
Applicable Data Types: all types except `timestamp`, `binary`, `nchar`, `bool`.
Applied to: table.
- **SPREAD**
```mysql
SELECT SPREAD(field_name) FROM { tb_name | stb_name } [WHERE clause]
```
Function: return the difference between the maximum and the minimum value.
Return Data Type: double.
Applicable Data Types: all types except `timestamp`, `binary`, `nchar`, `bool`.
Applied to: table/STable.
Note: spread gives the range of data variation in a table/supertable; it is equivalent to `MAX()` - `MIN()`
- **Arithmetic Operations**
```mysql
SELECT field_name [+|-|*|/|%][Value|field_name] FROM { tb_name | stb_name } [WHERE clause]
```
Function: arithmetic operations on the selected columns.
Return Data Type: double.
Applicable Data Types: all types except `timestamp`, `binary`, `nchar`, `bool`.
Applied to: table/STable.
Note: 1) brackets can be used to control operation priority; 2) if a column has a NULL value, the result is NULL.
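For example (a hypothetical query over the tb1 table defined earlier, illustrating bracket priority):
```mysql
-- brackets make the addition happen before the division
SELECT (col1 + col2) / 2 FROM tb1 WHERE col2 > 0;
```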
## Downsampling
Time-series data are usually sampled by sensors at a very high frequency, but more often we are only interested in the downsampled, aggregated data of each timeline. TDengine provides a convenient way to downsample the highly frequent data points as well as to fill missing data with a variety of interpolation choices.
```mysql
SELECT function_list FROM tb_name
[WHERE where_condition]
INTERVAL (interval)
[FILL ({NONE | VALUE | PREV | NULL | LINEAR})]
SELECT function_list FROM stb_name
[WHERE where_condition]
INTERVAL (interval)
[FILL ({ VALUE | PREV | NULL | LINEAR})]
[GROUP BY tags]
```
The downsampling time window is defined by `interval`, which is at least 10 milliseconds. The query returns a new series of downsampled data that has a series of fixed timestamps with an increment of `interval`.
For the time being, only the functions count, avg, sum, stddev, leastsquares, percentile, min, max, first, and last are supported. Functions that may return multiple rows are not supported.
You can also use `FILL` to interpolate the intervals that don't contain any data. `FILL` currently supports four different interpolation strategies, which are listed below:
| Interpolation | Usage |
| --------------------------------- | ------------------------------------------------------------ |
| `FILL(VALUE, val1 [, val2, ...])` | Interpolate with specified constants |
| `FILL(PREV)` | Interpolate with the value at the previous timestamp |
| `FILL(LINEAR)` | Linear interpolation with the non-null values at the previous timestamp and at the next timestamp |
| `FILL(NULL)` | Interpolate with **`NULL`** value |
A few downsampling examples:
- Find the number of data points, the maximum value of `col1` and minimum value of `col2` in table tb1 for every 10 minutes in the last 5 hours:
```mysql
SELECT COUNT(*), MAX(col1), MIN(col2) FROM tb1 WHERE ts > NOW - 5h INTERVAL (10m)
```
- Fill the above downsampling results using constant-value interpolation:
```mysql
SELECT COUNT(*), MAX(col1), MIN(col2) FROM tb1 WHERE ts > NOW - 5h INTERVAL(10m) FILL(VALUE, 0, 1, -1)
```
Note that the number of constant values in `FILL()` should be equal to or fewer than the number of functions in the `SELECT` clause. Extra fill constants will be ignored.
- Fill the above downsampling results using `PREV` interpolation:
```mysql
SELECT COUNT(*), MAX(col1), MIN(col2) FROM tb1 WHERE ts > NOW - 5h INTERVAL(10m) FILL(PREV)
```
This will interpolate missing data points with the value at the previous timestamp.
- Fill the above downsampling results using `NULL` interpolation:
```mysql
SELECT COUNT(*), MAX(col1), MIN(col2) FROM tb1 WHERE ts > NOW - 5h INTERVAL(10m) FILL(NULL)
```
This fills the interpolated data points with **`NULL`**.
Notes:
1. `FILL` can generate a huge number of interpolated data points if the interval is small and the queried time range is large. So always remember to specify a time range when using interpolation. For each query with interpolation, the result set cannot exceed 10,000,000 records.
2. The result set will always be sorted by time in ascending order.
3. If the query object is a supertable, then all the functions will be applied to all the tables that qualify the `WHERE` conditions. If the `GROUP BY` clause is also applied, the result set will be sorted in ascending order of time within each single group; otherwise, the result set will be sorted in ascending order of time as a whole. An example is sketched below.
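As a sketch of note 3, assume a hypothetical supertable stb1 with a tag tag1: the following query downsamples each tag group separately, and each group's result set is sorted by time in ascending order:
```mysql
SELECT AVG(col1), MAX(col2) FROM stb1 WHERE ts > NOW - 1d INTERVAL(1h) FILL(PREV) GROUP BY tag1
```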

View File

@ -1,231 +0,0 @@
# Administrator
## Directory and Files
After TDengine is installed, by default, the following directories will be created:
| Directory/File | Description |
| ---------------------- | :------------------------------ |
| /etc/taos/taos.cfg | TDengine configuration file |
| /usr/local/taos/driver | TDengine dynamic link library |
| /var/lib/taos | TDengine default data directory |
| /var/log/taos | TDengine default log directory |
| /usr/local/taos/bin | TDengine executables |
### Executables
All TDengine executables are located at _/usr/local/taos/bin_ , including:
- `taosd`: TDengine server
- `taos`: TDengine Shell, the command-line interface
- `taosdump`: TDengine data export tool
- `rmtaos`: a script to uninstall TDengine
You can change the data directory and log directory settings through the system configuration file.
## Configuration on Server
`taosd` is running on the server side, you can change the system configuration file taos.cfg to customize its behavior. By default, taos.cfg is located at /etc/taos, but you can specify the path to configuration file via the command line parameter -c. For example: `taosd -c /home/user` means the configuration file will be read from directory /home/user.
This section lists only the most important configuration parameters. Please check taos.cfg to find all the configurable parameters. **Note: to make your new configurations work, you have to restart taosd after you change taos.cfg**.
- mgmtShellPort: TCP and UDP port between client and TDengine mgmt (default: 6030). Note: 5 successive UDP ports (6030-6034) starting from this number will be used.
- vnodeShellPort: TCP and UDP port between client and TDengine vnode (default: 6035). Note: 5 successive UDP ports (6035-6039) starting from this number will be used.
- httpPort: TCP port for RESTful service (default: 6020)
- dataDir: data directory, default is /var/lib/taos
- maxUsers: maximum number of users allowed
- maxDbs: maximum number of databases allowed
- maxTables: maximum number of tables allowed
- enableMonitor: turn on/off system monitoring, 0: off, 1: on
- logDir: log directory, default is /var/log/taos
- numOfLogLines: maximum number of lines in the log file
- debugFlag: log level, 131: only error and warnings, 135: all
In different scenarios, data characteristics are different. For example, the retention policy, data sampling period, record size, the number of devices, and data compression may be different. To gain the best performance, you can change the following configurations related to storage:
- days: number of days to cover for a data file
- keep: number of days to keep the data
- rows: number of rows of records in a block in data file.
- comp: compression algorithm, 0: off, 1: standard; 2: maximum compression
- ctime: period (seconds) to flush data to disk
- clog: flag to turn on/off Write Ahead Log, 0: off, 1: on
- tables: maximum number of tables allowed in a vnode
- cache: cache block size (bytes)
- tblocks: maximum number of cache blocks for a table
- ablocks: average number of cache blocks for a table
- precision: timestamp precision, us: microsecond, ms: millisecond; default is ms
For an application, there may be multiple data scenarios. The best design is to put all data with the same characteristics into one database. One application may have multiple databases, and every database has its own configuration to maximize the system performance. You can specify the above configurations related to storage when you create a database. For example:
```mysql
CREATE DATABASE demo DAYS 10 CACHE 16000 ROWS 2000
```
The above SQL statement will create a database demo, with 10 days for each data file, 16000 bytes for a cache block, and 2000 rows in a file block.
The configuration provided when creating a database will overwrite the configuration in taos.cfg.
## Configuration on Client
*taos* is the TDengine shell and a client that connects to taosd. TDengine uses the same configuration file taos.cfg for the client, with the default location at /etc/taos. You can change it by specifying the command line parameter -c when you run taos. For example, *taos -c /home/user* will read the configuration file taos.cfg from directory /home/user.
The parameters related to client configuration are listed below:
- masterIP: IP address of TDengine server
- charset: character set; default is the system charset. For data type nchar, TDengine uses Unicode to store the data, so the client needs to declare its character set.
- locale: system language setting
- defaultUser: default login user, default is root
- defaultPass: default password, default is taosdata
The TCP/UDP ports and the system debug/log configuration are the same as on the server side.
For the server IP, user name, and password, you can always specify them on the command line when you run taos; if not specified, they will be read from taos.cfg.
## User Management
The system administrator (user root) can add or remove a user, or change a password, from the TDengine shell. Commands are listed below:
Create a user; the password shall be quoted with single quotes.
```mysql
CREATE USER user_name PASS password
```
Remove a user
```mysql
DROP USER user_name
```
Change the password for a user
```mysql
ALTER USER user_name PASS password
```
List all users
```mysql
SHOW USERS
```
## Import Data
Inside the TDengine shell, you can import data into TDengine from either a script or CSV file
**Import from Script**
```
source <filename>
```
You can put all SQL statements in the file, one statement per line. A line starting with "#" is a comment and will be skipped. The system will execute the statements line by line automatically until the end.
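For example, a hypothetical script file batch.sql could look like the following and be executed with `source batch.sql`:
```
# create the target database and table first
create database if not exists demo;
use demo;
create table if not exists tb1 (ts timestamp, speed float);
# one SQL statement per line
insert into tb1 values ('2018-10-04 06:38:05.000', 10.3);
```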
**Import from CSV**
```mysql
insert into tb1 file 'path/data.csv'
```
A CSV file contains records for only one table, and the data structure shall be the same as the table's defined schema. The header line of the CSV file shall be removed.
For example, the following is the schema of sub-table d1001:
```mysql
taos> DESCRIBE d1001
Field | Type | Length | Note |
=================================================================================
ts | TIMESTAMP | 8 | |
current | FLOAT | 4 | |
voltage | INT | 4 | |
phase | FLOAT | 4 | |
location | BINARY | 64 | TAG |
groupid | INT | 4 | TAG |
```
The data in data.csv is formatted like this:
```csv
'2018-10-04 06:38:05.000',10.30000,219,0.31000
'2018-10-05 06:38:15.000',12.60000,218,0.33000
'2018-10-06 06:38:16.800',13.30000,221,0.32000
'2018-10-07 06:38:05.000',13.30000,219,0.33000
'2018-10-08 06:38:05.000',14.30000,219,0.34000
'2018-10-09 06:38:05.000',15.30000,219,0.35000
'2018-10-10 06:38:05.000',16.30000,219,0.31000
'2018-10-11 06:38:05.000',17.30000,219,0.32000
'2018-10-12 06:38:05.000',18.30000,219,0.31000
```
then the data can be imported into the database with this command:
```
taos> insert into d1001 file '~/data.csv';
Query OK, 9 row(s) affected (0.004763s)
```
## Export Data
You can export data either from TDengine shell or from tool taosdump.
**Export from TDengine Shell**
```mysql
select * from <tb_name> >> data.csv
```
The above SQL statement will dump the query result set into data.csv file.
**Export Using taosdump**
TDengine provides a data dumping tool taosdump. You can choose to dump a database, a table, all data, data within a time range only, or even just the metadata. For example:
- Export one or more tables in a DB: taosdump [OPTION…] dbname tbname …
- Export one or more DBs: taosdump [OPTION…] --databases dbname…
- Export all DBs (excluding system DB): taosdump [OPTION…] --all-databases
Run *taosdump --help* to get a full list of the options.
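For instance, the following hypothetical invocations export a database named demo, or only its table tb1:
```
taosdump --databases demo
taosdump demo tb1
```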
## Management of Connections, Streams, Queries
The system administrator can check, kill the ongoing connections, streams, or queries.
```
SHOW CONNECTIONS
```
It lists all connections. The first column shows connection-id from the client.
```
KILL CONNECTION <connection-id>
```
It kills the connection, where connection-id is the number in the first column shown by "SHOW CONNECTIONS".
```
SHOW QUERIES
```
It shows the ongoing queries. The first column shows the connection-id:query-no, where connection-id identifies the connection from the client, and query-no is the id assigned by the system.
```
KILL QUERY <query-id>
```
It kills the query, where query-id is the connection-id:query-no shown by "SHOW QUERIES". You can copy and paste it.
```
SHOW STREAMS
```
It shows the continuous queries. The first column shows the connection-id:stream-no, where connection-id identifies the connection from the client, and stream-no is the id assigned by the system.
```
KILL STREAM <stream-id>
```
It kills the continuous query, where stream-id is the connection-id:stream-no shown by "SHOW STREAMS". You can copy and paste it.
## System Monitor
TDengine runs a system monitor in the background. Once it is started, it will create a database sys automatically. The system monitor periodically collects metrics like CPU, memory, network, disk, and the number of requests, and writes them into database sys. TDengine also logs all important actions, like login, logout, create database, and drop database, and writes them into database sys.
You can check all the saved monitor information from database sys. By default, system monitor is turned on. But you can turn it off by changing the parameter in the configuration file.

View File

@ -1,80 +0,0 @@
# Advanced Features
## Continuous Query
Continuous Query is a query executed by TDengine periodically with a sliding window; it is a simplified stream computing driven by timers, not by events. A continuous query can be applied to a table or a STable, and the result set can be passed to the application directly via a callback function, or written into a new table in TDengine. The query is always executed on a specified time window (the window size is specified by the parameter interval), and this window slides forward as time flows (the sliding period is specified by the parameter sliding).
Continuous query is defined in TAOS SQL; there is nothing special about it. One of the best applications is downsampling. Once a continuous query is defined, at the end of each cycle the system will execute the query and pass the result to the application or write it to a database.
If historical data points are inserted into the stream, the query won't be re-executed, and the result set won't be updated. If the result set is passed to the application, the application needs to keep the status of the continuous query; the server won't maintain it. If the application restarts, it needs to decide the time from which the stream computing shall be resumed.
#### How to use continuous query
- Pass result set to application
Application shall use API taos_stream (details in connector section) to start the stream computing. Inside the API, the SQL syntax is:
```sql
SELECT aggregation FROM [table_name | stable_name]
INTERVAL(window_size) SLIDING(period)
```
where the new keyword INTERVAL specifies the window size, and SLIDING specifies the sliding period. If the parameter sliding is not specified, the sliding period will be the same as the window size. The minimum window size is 10ms. The sliding period shall not be larger than the window size; if you set a larger value, the system will adjust it to the window size automatically.
For example:
```sql
SELECT COUNT(*) FROM FOO_TABLE
INTERVAL(1M) SLIDING(30S)
```
The above SQL statement will count the number of records for the past 1-minute window every 30 seconds.
- Save the result into a database
If you want to save the result set of stream computing into a new table, the SQL shall be:
```sql
CREATE TABLE table_name AS
SELECT aggregation from [table_name | stable_name]
INTERVAL(window_size) SLIDING(period)
```
Also, you can set the time range to execute the continuous query. If no range is specified, the continuous query will be executed forever. For example, the following continuous query will be executed from now and will stop in one hour.
```sql
CREATE TABLE QUERY_RES AS
SELECT COUNT(*) FROM FOO_TABLE
WHERE TS > NOW AND TS <= NOW + 1H
INTERVAL(1M) SLIDING(30S)
```
### Manage the Continuous Query
Inside the TDengine shell, you can use the command "show streams" to list the ongoing continuous queries, and the command "kill stream" to kill a specific continuous query.
If you drop a table generated by the continuous query, the query will be removed too.
## Publisher/Subscriber
Time series data is a sequence of data points over time. Inside a table, the data points are stored in order of timestamp. Also, there is a data retention policy: data points will be removed once their lifetime has passed. From this point of view, a table in TDengine is just a standard message queue.
To reduce the development complexity and improve data consistency, TDengine provides the pub/sub functionality. To publish a message, you simply insert a record into a table. Compared with the popular messaging tool Kafka, you subscribe to a table or a SQL query statement instead of a topic. Once new data points arrive, TDengine will notify the application. The process is just like Kafka.
The detailed API will be introduced in the [connectors](https://www.taosdata.com/en/documentation/advanced-features/) section.
## Caching
TDengine allocates a fixed-size buffer in memory; newly arrived data will be written into the buffer first. Every device or table gets one or more memory blocks. For typical IoT scenarios, the hot data is always the newly arrived data, which is more important for timely analysis. Based on this observation, TDengine manages its cache blocks in a First-In-First-Out strategy. If there is not enough space in the buffer, the oldest data will be saved to hard disk first, then overwritten by newly arrived data. TDengine also guarantees every device can keep at least one block of data in the buffer.
By this design, the application can retrieve the latest data from each device super-fast, since they are all available in memory. You can use the last or last_row function to return the last data record. If a super table is used, it can return the last data records of all or a subset of devices. For example, to retrieve the latest temperature from thermometers located in Beijing, execute the following SQL
```mysql
select last(*) from thermometers where location='beijing'
```
With this design, a caching tool like Redis is not needed in the system, which reduces system complexity.
TDengine creates one or more virtual nodes (vnodes) in each data node. Each vnode contains data for multiple tables and has its own buffer. The buffer of one vnode is fully separated from the buffer of another, not shared; however, the tables within a vnode share the same buffer.
System configuration parameter cacheBlockSize configures the cache block size in bytes, and another parameter cacheNumOfBlocks configures the number of cache blocks. The total memory for the buffer of a vnode is $cacheBlockSize \times cacheNumOfBlocks$. Another system parameter numOfBlocksPerMeter configures the maximum number of cache blocks a table can use. When you create a database, you can specify these parameters.
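As a worked example with hypothetical values: if $cacheBlockSize = 16384$ bytes and $cacheNumOfBlocks = 4$, the total buffer for one vnode is $16384 \times 4 = 65536$ bytes (64 KB), and with $numOfBlocksPerMeter = 2$ a single table can occupy at most $16384 \times 2 = 32768$ bytes of it.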

View File

@ -1,101 +0,0 @@
# Data Model and Architecture
## Data Model
### A Typical IoT Scenario
In a typical IoT scenario, there are many types of devices. Each device is collecting one or multiple metrics. For a specific type of device, the collected data looks like the table below:
| Device ID | Time Stamp | Value 1 | Value 2 | Value 3 | Tag 1 | Tag 2 |
| :-------: | :-----------: | :-----: | :-----: | :-----: | :---: | :---: |
| D1001 | 1538548685000 | 10.3 | 219 | 0.31 | Red | Tesla |
| D1002 | 1538548684000 | 10.2 | 220 | 0.23 | Blue | BMW |
| D1003 | 1538548686500 | 11.5 | 221 | 0.35 | Black | Honda |
| D1004 | 1538548685500 | 13.4 | 223 | 0.29 | Red | Volvo |
| D1001 | 1538548695000 | 12.6 | 218 | 0.33 | Red | Tesla |
| D1004 | 1538548696600 | 11.8 | 221 | 0.28 | Black | Honda |
Each data record has a device ID, a timestamp, the collected metrics, and static tags associated with the device. Each device generates a data record on a pre-defined timer or when triggered by an event. It is a sequence of data points, like a stream.
### Data Characteristics
Being a series of data points over time, data points generated by devices, sensors, servers, or applications have strong common characteristics.
1. metrics are always structured data;
2. there are rarely delete/update operations on collected data;
3. there is only one single data source for one device or sensor;
4. the ratio of reads to writes is much lower than in typical Internet applications;
5. the user pays attention to the trend of data, not the specific value at a specific time;
6. there is always a data retention policy;
7. the data query is always executed in a given time range and a subset of devices;
8. real-time aggregation or analytics is mandatory;
9. traffic is predictable based on the number of devices and sampling frequency;
10. data volume is huge, a system may generate 10 billion data points in a day.
By utilizing the above characteristics, TDengine designs the storage and computing engine in a special and optimized way for time-series data. The system efficiency is improved significantly.
### Relational Database Model
Since time-series data is more likely to be structured data, TDengine adopts the traditional relational database model to process them. You need to create a database, create tables with schema definitions, then insert data points and execute queries to explore the data. Standard SQL is used, so there is no learning curve.
### One Table for One Device
Due to different network latencies, the data points from different devices may arrive at the server out of order. But for the same device, data points will arrive at the server in order if the system is designed well. To utilize this special feature, TDengine requires the user to create a table for each device (time-stream). For example, if there are over 10,000 smart meters, 10,000 tables shall be created. For the table above, 4 tables shall be created for devices D1001, D1002, D1003, and D1004, to store the data collected.
This strong requirement guarantees that the data points from a device can be saved in a continuous memory/hard disk space, block by block. If queries are applied to only one device in a time range, this design reduces the read latency significantly, since a whole block is owned by one single device. Write latency is significantly reduced too: since the data points generated by the same device arrive in order, a new data point is simply appended to a block. Cache block size and the number of rows of records in a file block can be configured to fit the scenarios.
### Best Practices
**Table**: TDengine suggests using the device ID as the table name (like D1001 in the above diagram). Each device may collect one or more metrics (like value1, value2, value3 in the diagram). Each metric has a column in the table, and the metric name can be used as the column name. The data type for a column can be int, float, double, tinyint, bigint, bool or binary. Sometimes a device may have multiple metric groups, each with a different sampling period; in that case, you shall create a table for each group of each device. The first column in the table must be the timestamp. TDengine uses the timestamp as the index, and won't build an index on any stored metric.
**Tags:** to support aggregation over multiple tables efficiently, the [STable (Super Table)](../super-table) concept is introduced by TDengine. A STable is used to represent the same type of device. The schema is used to define the collected metrics (like value1, value2, value3 in the diagram), and tags are used to define the static attributes of each table or device (like tag1, tag2 in the diagram). A table is created via a STable with specific tag values. All or a subset of tables in a STable can be aggregated by filtering tag values, as sketched below.
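A minimal sketch in TAOS SQL, using the hypothetical names from the diagram above: first create a STable for the device type with its metrics and tags, then create one table per device with concrete tag values:
```mysql
CREATE TABLE devices (ts TIMESTAMP, value1 FLOAT, value2 INT, value3 FLOAT) TAGS (tag1 BINARY(16), tag2 BINARY(16));
CREATE TABLE d1001 USING devices TAGS ('Red', 'Tesla');
```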
**Database:** different types of devices may generate data points in different patterns and shall be processed differently. For example, the sampling frequency, data retention policy, replication number, cache size, record size, and compression algorithm may be different. To make the system more efficient, TDengine suggests creating a different database with unique configurations for each scenario.
**Schemaless vs Schema:** compared with NoSQL databases, since a table with a schema definition must be created before data points can be inserted, flexibility is limited, especially when the schema has to change. But in most IoT scenarios, the schema is well defined and rarely changed, so the loss of flexibility won't be a big pain for developers or administrators. When the schema does have to change, TDengine allows the application to change it in a second, even with a huge amount of historical data.
TDengine does not impose a limitation on the number of tables, [STables](../super-table), or databases. You can create any number of STable or databases to fit the scenarios.
## Architecture
There are two main modules in the TDengine server, as shown in Picture 1: **Management Module (MGMT)** and **Data Module (DNODE)**. The whole TDengine architecture also includes a **TDengine Client Module**.
<center> <img src="../assets/structure.png"> </center>
<center> Picture 1 TDengine Architecture </center>
### MGMT Module
The MGMT module deals with the storage and querying of metadata, which includes information about users, databases, and tables. Applications connect to the MGMT module first when connecting to the TDengine server. When creating/dropping databases/tables, the request is sent to the MGMT module first to create/delete the metadata. Then the MGMT module sends requests to the data module to allocate/free the required resources. In the case of writing or querying, applications still need to visit the MGMT module to get the metadata, and then access the DNODE module accordingly.
### DNODE Module
The DNODE module is responsible for storing and querying data. For the sake of future scaling and highly efficient resource usage, TDengine applies virtualization to the resources it uses. TDengine introduces the concept of a virtual node (vnode), which is the unit of storage, resource allocation and data replication (enterprise edition). As shown in Picture 2, TDengine treats each data node as an aggregation of vnodes.
When a DB is created, the system will allocate a vnode. Each vnode contains multiple tables, but a table belongs to only one vnode. Each DB has one or more vnodes, but one vnode belongs to only one DB. Each vnode contains all the data in a set of tables. Each vnode has its own cache and directory to store data. Resources between different vnodes are exclusive, whether cache or file directory; however, resources within the same vnode are shared between all the tables in it. Through virtualization, TDengine can distribute resources reasonably to each vnode and improve resource usage and concurrency. The number of vnodes on a dnode is configurable according to its hardware resources.
<center> <img src="../assets/vnode.png"> </center>
<center> Picture 2 TDengine Virtualization </center>
### Client Module
The TDengine client module accepts requests (mainly in SQL form) from applications, converts the requests to internal representations, and sends them to the server side. TDengine supports multiple interfaces, which are all built on top of the TDengine client module.
For the communication between client and MGMT module, TCP/UDP is used, the port is set by the parameter mgmtShellPort in system configuration file taos.cfg, default is 6030. For the communication between client and DNODE module, TCP/UDP is used, the port is set by the parameter vnodeShellPort in the system configuration file, default is 6035.
## Writing Process
Picture 3 shows the full writing process of TDengine. TDengine uses the Write Ahead Log (WAL) strategy to assure data security and integrity. Data received from the client is written to the commit log first. When TDengine recovers from crashes caused by power loss or other situations, the commit log is used to recover data. After writing to the commit log, data will be written to the corresponding vnode cache, then an acknowledgment is sent to the application. There are two mechanisms that can flush data in the cache to disk for persistent storage:
1. **Flush driven by timer**: There is a backend timer which flushes data in cache periodically to disks. The period is configurable via parameter commitTime in system configuration file taos.cfg.
2. **Flush driven by data**: Data in the cache is also flushed to disks when the remaining buffer size falls below a threshold. Flush driven by data can reset the timer of flush driven by the timer.
<center> <img src="../assets/write_process.png"> </center>
<center> Picture 3 TDengine Writing Process </center>
A new commit log file will be opened when the commit process begins. When the commit process finishes, the old commit log file will be removed.
## Data Storage
TDengine data are saved in _/var/lib/taos_ directory by default. It can be changed to other directories by setting the parameter dataDir in system configuration file taos.cfg.
TDengine's metadata includes the database, table, user, super table and tag information. To reduce the latency, metadata are all buffered in the cache.
Data records saved in tables are sharded according to the time range. Data of tables in the same vnode in a certain time range are saved in the same file group. This sharding strategy can effectively improve data searching speed. By default, one group of files contains data for 10 days, which can be configured by *daysPerFile* in the configuration file or by the *DAYS* keyword in the *CREATE DATABASE* clause.
Data records are removed automatically once their lifetime is passed. The lifetime is configurable via parameter daysToKeep in the system configuration file. The default value is 3650 days.
Data in files are stored in blocks. A data block contains only one table's data. Records in the same data block are sorted according to the primary timestamp. To improve the compression ratio, records are stored column by column, and a different compression algorithm is applied based on each column's data type.

View File

@ -1,46 +0,0 @@
# FAQ
#### 1. How to upgrade TDengine from 1.X versions to 2.X and above?
Version 2.X is a complete refactoring of the previous version, and configuration files and data files are incompatible. Be sure to do the following before upgrading:
1. Delete the configuration file, and execute `sudo rm -rf /etc/taos/taos`
2. Delete the log file, and execute `sudo rm -rf /var/log/taos`
3. ENSURE THAT YOUR DATA IS NO LONGER NEEDED! Delete the data file, and execute `sudo rm -rf /var/lib/taos`
4. Enjoy the latest stable version of TDengine
5. If the data needs to be migrated or the data file is corrupted, please contact the official technical support team for assistance
#### 2. What can I do when I encounter the error "Unable to establish connection"?
The client may encounter connection errors. Please follow the steps below for troubleshooting:
1. Make sure that the client and server version numbers are exactly the same, and that the open-source Community edition and the Enterprise edition are not mixed.
2. On the server side, execute `systemctl status taosd` to check the status of *taosd* service. If *taosd* is not running, start it and retry connecting.
3. Make sure you have used the correct server IP address to connect to.
4. Ping the server. If no response is received, check your network connection.
5. Check the firewall setting, make sure the TCP/UDP ports from 6030-6039 are enabled.
6. For JDBC, ODBC, Python, Go connections on Linux, make sure the native library *libtaos.so* is located at /usr/local/lib/taos, and /usr/local/lib/taos is in the *LD_LIBRARY_PATH*.
7. For JDBC, ODBC, Python, Go connections on Windows, make sure *driver/c/taos.dll* is in the system search path (or you can copy taos.dll into *C:\Windows\System32*)
8. If the above steps do not help, try the network diagnostic tool *nc* to check whether the TCP/UDP ports work:
check UDP port: `nc -vuz {hostIP} {port}`
check TCP port on server: `nc -l {port}`
check TCP port on client: `nc {hostIP} {port}`
#### 3. Why do I get an "Invalid SQL" error when a query is syntactically correct?
If you are sure your query has correct syntax, please check the length of the SQL string. Before version 2.0, it shall be less than 64KB.
#### 4. Does TDengine support validation queries?
For the time being, TDengine does not have a specific set of validation queries. However, TDengine comes with a system monitoring database named 'sys', which can usually be used as a validation query object.
#### 5. Can I delete or update a record that has been written into TDengine?
The answer is NO. The design of TDengine is based on the assumption that records are generated by the connected devices, so you won't be allowed to change them. But TDengine provides a retention policy: data records will be removed once their lifetime has passed.
#### 6. What is the most efficient way to write data to TDengine?
TDengine supports several different writing schemes. The most efficient way to write data to TDengine is batch insertion, as sketched below. For details on the batch insertion syntax, please refer to [Taos SQL](../documentation/taos-sql)
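A minimal sketch, assuming the sub-table d1001 from the import example: several records are written in a single statement, which amortizes the per-request overhead.
```mysql
INSERT INTO d1001 VALUES ('2018-10-13 06:38:05.000', 19.30000, 219, 0.31000) ('2018-10-14 06:38:05.000', 20.10000, 220, 0.32000);
```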

View File

@ -220,10 +220,6 @@ int32_t bnAllocVnodes(SVgObj *pVgroup) {
}
static bool bnCheckVgroupReady(SVgObj *pVgroup, SVnodeGid *pRmVnode) {
if (pVgroup->lbTime + 5 * tsStatusInterval > tsAccessSquence) {
return false;
}
int32_t rmVnodeVer = 0;
for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
SVnodeGid *pVnode = pVgroup->vnodeGid + i;
@ -371,6 +367,7 @@ static bool bnMonitorBalance() {
for (int32_t dest = 0; dest < src; dest++) {
SDnodeObj *pDestDnode = tsBnDnodes.list[dest];
if (bnCheckDnodeInVgroup(pDestDnode, pVgroup)) continue;
if (taosGetTimestampMs() - pDestDnode->createdTime < 2000) continue;
float destScore = bnTryCalcDnodeScore(pDestDnode, 1);
if (srcScore + 0.0001 < destScore) continue;
@ -405,7 +402,7 @@ void bnReset() {
if (pDnode == NULL) break;
// while master change, should reset dnode to offline
mInfo("dnode:%d set access:%d to 0", pDnode->dnodeId, pDnode->lastAccess);
mInfo("dnode:%d set access:%" PRId64 " to 0", pDnode->dnodeId, pDnode->lastAccess);
pDnode->lastAccess = 0;
if (pDnode->status != TAOS_DN_STATUS_DROPPING) {
pDnode->status = TAOS_DN_STATUS_OFFLINE;
@ -499,7 +496,7 @@ static bool bnMontiorDropping() {
if (dnodeIsMasterEp(pDnode->dnodeEp)) continue;
if (mnodeGetDnodesNum() <= 1) continue;
mLInfo("dnode:%d, set to removing state for it offline:%d seconds", pDnode->dnodeId,
mLInfo("dnode:%d, set to removing state for it offline:%" PRId64 " seconds", pDnode->dnodeId,
tsAccessSquence - pDnode->lastAccess);
pDnode->status = TAOS_DN_STATUS_DROPPING;
@ -574,8 +571,8 @@ void bnCheckStatus() {
if (pDnode->status != TAOS_DN_STATUS_DROPPING && pDnode->status != TAOS_DN_STATUS_OFFLINE) {
pDnode->status = TAOS_DN_STATUS_OFFLINE;
pDnode->offlineReason = TAOS_DN_OFF_STATUS_MSG_TIMEOUT;
mInfo("dnode:%d, set to offline state, access seq:%d last seq:%d laststat:%d", pDnode->dnodeId, tsAccessSquence,
pDnode->lastAccess, pDnode->status);
mInfo("dnode:%d, set to offline state, access seq:%" PRId64 " last seq:%" PRId64 " laststat:%d", pDnode->dnodeId,
tsAccessSquence, pDnode->lastAccess, pDnode->status);
bnSetVgroupOffline(pDnode);
bnStartTimer(3000);
}

View File

@ -102,12 +102,12 @@ static void bnProcessTimer(void *handle, void *tmrId) {
if (tsBnThread.stop) return;
tsBnThread.timer = NULL;
tsAccessSquence++;
bnStartTimer(-1);
bnCheckStatus();
if (handle == NULL) {
++tsAccessSquence;
if (tsAccessSquence % tsBalanceInterval == 0) {
mDebug("balance function is scheduled by timer");
bnPostSignal();
@ -122,8 +122,7 @@ static void bnProcessTimer(void *handle, void *tmrId) {
void bnStartTimer(int32_t mseconds) {
if (tsBnThread.stop) return;
bool updateSoon = (mseconds != -1);
if (updateSoon) {
if (mseconds != -1) {
mTrace("balance function will be called after %d ms", mseconds);
taosTmrReset(bnProcessTimer, mseconds, (void *)(int64_t)mseconds, tsMnodeTmr, &tsBnThread.timer);
} else {

View File

@ -123,7 +123,8 @@ int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, i
*/
bool tscIsPointInterpQuery(SQueryInfo* pQueryInfo);
bool tscIsTWAQuery(SQueryInfo* pQueryInfo);
bool tscIsDiffQuery(SQueryInfo* pQueryInfo);
bool tscIsIrateQuery(SQueryInfo* pQueryInfo);
bool tscIsSessionWindowQuery(SQueryInfo* pQueryInfo);
bool tscIsSecondStageQuery(SQueryInfo* pQueryInfo);
bool tsIsArithmeticQueryOnAggResult(SQueryInfo* pQueryInfo);

View File

@ -51,10 +51,10 @@ JNIEXPORT jstring JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getTsCharset
/*
* Class: com_taosdata_jdbc_TSDBJNIConnector
* Method: getResultTimePrecision
* Signature: (J)J
* Method: getResultTimePrecisionImp
* Signature: (JJ)I
*/
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TDDBJNIConnector_getResultTimePrecision
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TDDBJNIConnector_getResultTimePrecisionImp
(JNIEnv *, jobject, jlong, jlong);
/*

View File

@ -113,7 +113,7 @@ static void jniGetGlobalMethod(JNIEnv *env) {
g_rowdataSetFloatFp = (*env)->GetMethodID(env, g_rowdataClass, "setFloat", "(IF)V");
g_rowdataSetDoubleFp = (*env)->GetMethodID(env, g_rowdataClass, "setDouble", "(ID)V");
g_rowdataSetStringFp = (*env)->GetMethodID(env, g_rowdataClass, "setString", "(ILjava/lang/String;)V");
g_rowdataSetTimestampFp = (*env)->GetMethodID(env, g_rowdataClass, "setTimestamp", "(IJ)V");
g_rowdataSetTimestampFp = (*env)->GetMethodID(env, g_rowdataClass, "setTimestamp", "(IJI)V");
g_rowdataSetByteArrayFp = (*env)->GetMethodID(env, g_rowdataClass, "setByteArray", "(I[B)V");
(*env)->DeleteLocalRef(env, rowdataClass);
@ -519,9 +519,11 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchRowImp(JNIEn
jniFromNCharToByteArray(env, (char *)row[i], length[i]));
break;
}
case TSDB_DATA_TYPE_TIMESTAMP:
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetTimestampFp, i, (jlong) * ((int64_t *)row[i]));
case TSDB_DATA_TYPE_TIMESTAMP: {
int precision = taos_result_precision(result);
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetTimestampFp, i, (jlong) * ((int64_t *)row[i]), precision);
break;
}
default:
break;
}
@ -672,7 +674,15 @@ JNIEXPORT jstring JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getTsCharset(J
return (*env)->NewStringUTF(env, (const char *)tsCharset);
}
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TDDBJNIConnector_getResultTimePrecision(JNIEnv *env, jobject jobj, jlong con,
/**
* Get Result Time Precision
* @param env vm
* @param jobj the TSDBJNIConnector java object
* @param con the c connection pointer
* @param res the TAOS_RES object, i.e. the SSqlObject
* @return precision 0:ms 1:us 2:ns
*/
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getResultTimePrecisionImp(JNIEnv *env, jobject jobj, jlong con,
jlong res) {
TAOS *tscon = (TAOS *)con;
if (tscon == NULL) {

View File

@ -951,7 +951,10 @@ static int insertStmtBindParamBatch(STscStmt* stmt, TAOS_MULTI_BIND* bind, int c
}
}
assert(colIdx == -1 || (colIdx >= 0 && colIdx < pBlock->numOfParams));
if (!(colIdx == -1 || (colIdx >= 0 && colIdx < pBlock->numOfParams))) {
tscError("0x%"PRIx64" invalid colIdx:%d", pStmt->pSql->self, colIdx);
return invalidOperationMsg(tscGetErrorMsgPayload(&stmt->pSql->cmd), "invalid param colIdx");
}
uint32_t totalDataSize = sizeof(SSubmitBlk) + (pCmd->batchSize + rowNum) * pBlock->rowSize;
if (totalDataSize > pBlock->nAllocSize) {
@ -1462,6 +1465,11 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
STMT_RET(TSDB_CODE_TSC_DISCONNECTED);
}
if (sql == NULL) {
tscError("sql is NULL");
STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "sql is NULL"));
}
if (pStmt->last != STMT_INIT) {
tscError("prepare status error, last:%d", pStmt->last);
STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "prepare status error"));
@ -1528,13 +1536,14 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags) {
STscStmt* pStmt = (STscStmt*)stmt;
SSqlObj* pSql = pStmt->pSql;
SSqlCmd* pCmd = &pSql->cmd;
if (stmt == NULL || pStmt->pSql == NULL || pStmt->taos == NULL) {
STMT_RET(TSDB_CODE_TSC_DISCONNECTED);
}
SSqlObj* pSql = pStmt->pSql;
SSqlCmd* pCmd = &pSql->cmd;
if (name == NULL) {
tscError("0x%"PRIx64" name is NULL", pSql->self);
STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "name is NULL"));
@ -1735,7 +1744,7 @@ int taos_stmt_bind_single_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind, in
STMT_RET(TSDB_CODE_TSC_DISCONNECTED);
}
if (bind == NULL || bind->num <= 0 || bind->num > INT16_MAX) {
if (bind == NULL || bind->num <= 0 || bind->num > INT16_MAX || colIdx < 0) {
tscError("0x%"PRIx64" invalid parameter", pStmt->pSql->self);
STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "invalid bind param"));
}

View File

@ -112,6 +112,7 @@ static int32_t validateDNodeConfig(SMiscInfo* pOptions);
static int32_t validateLocalConfig(SMiscInfo* pOptions);
static int32_t validateColumnName(char* name);
static int32_t setKillInfo(SSqlObj* pSql, struct SSqlInfo* pInfo, int32_t killType);
static int32_t setCompactVnodeInfo(SSqlObj* pSql, struct SSqlInfo* pInfo);
static bool validateOneTags(SSqlCmd* pCmd, TAOS_FIELD* pTagField);
static bool hasTimestampForPointInterpQuery(SQueryInfo* pQueryInfo);
@ -152,10 +153,7 @@ int16_t getNewResColId(SSqlCmd* pCmd) {
// formate "type | size | value"
bool serializeExprListToVariant(SArray* pList, tVariant **dst, int16_t colType) {
bool ret = false;
if (!pList || pList->size <= 0) {
return ret;
}
if (colType == TSDB_DATA_TYPE_DOUBLE || colType == TSDB_DATA_TYPE_FLOAT) {
if (!pList || pList->size <= 0 || colType < 0) {
return ret;
}
@ -429,18 +427,6 @@ int32_t tscValidateSqlInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
if(code != TSDB_CODE_SUCCESS) {
return code;
}
if (pInfo->pMiscInfo->tableType == TSDB_SUPER_TABLE) {
//// code = tscGetTableMeta(pSql, pTableMetaInfo);
//// if (code != TSDB_CODE_SUCCESS) {
//// return code;
//// }
//
// if (!UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
// return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
// }
}
} else if (pInfo->type == TSDB_SQL_DROP_DNODE) {
if (pzName->type == TK_STRING) {
pzName->n = strdequote(pzName->z);
@ -778,6 +764,11 @@ int32_t tscValidateSqlInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
pCmd->active = pCmd->pQueryInfo;
pCmd->command = pCmd->pQueryInfo->command;
STableMetaInfo* pTableMetaInfo1 = tscGetMetaInfo(pCmd->active, 0);
if (pTableMetaInfo1->pTableMeta != NULL) {
pSql->res.precision = tscGetTableInfo(pTableMetaInfo1->pTableMeta).precision;
}
return TSDB_CODE_SUCCESS; // do not build query message here
}
@ -809,7 +800,13 @@ int32_t tscValidateSqlInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
}
break;
}
case TSDB_SQL_COMPACT_VNODE:{
const char* msg = "invalid compact";
if (setCompactVnodeInfo(pSql, pInfo) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
}
break;
}
default:
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), "not support sql expression");
}
@ -1795,7 +1792,6 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS
assert(pSelNodeList != NULL && pCmd != NULL);
const char* msg1 = "too many items in selection clause";
const char* msg2 = "functions or others can not be mixed up";
const char* msg3 = "not support query expression";
const char* msg4 = "only support distinct one tag";
@ -2023,8 +2019,10 @@ int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, t
if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
SSchema colSchema = *tGetTbnameColumnSchema();
getColumnName(pItem, colSchema.name, colSchema.name, sizeof(colSchema.name) - 1);
char name[TSDB_COL_NAME_LEN] = {0};
getColumnName(pItem, name, colSchema.name, sizeof(colSchema.name) - 1);
tstrncpy(colSchema.name, name, TSDB_COL_NAME_LEN);
/*SExprInfo* pExpr = */tscAddFuncInSelectClause(pQueryInfo, startPos, TSDB_FUNC_TAGPRJ, &index, &colSchema, TSDB_COL_TAG, getNewResColId(pCmd));
} else {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
@ -2724,6 +2722,8 @@ void getColumnName(tSqlExprItem* pItem, char* resultFieldName, char* rawName, in
strncpy(rawName, pItem->pNode->token.z, len);
if (pItem->aliasName != NULL) {
int32_t aliasNameLen = (int32_t) strlen(pItem->aliasName);
len = (aliasNameLen < nameLength)? aliasNameLen:nameLength;
strncpy(resultFieldName, pItem->aliasName, len);
} else {
strncpy(resultFieldName, rawName, len);
@ -2970,7 +2970,12 @@ int32_t setKillInfo(SSqlObj* pSql, struct SSqlInfo* pInfo, int32_t killType) {
return TSDB_CODE_SUCCESS;
}
static int32_t setCompactVnodeInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
SSqlCmd* pCmd = &pSql->cmd;
pCmd->command = pInfo->type;
return TSDB_CODE_SUCCESS;
}
bool validateIpAddress(const char* ip, size_t size) {
char tmp[128] = {0}; // buffer to build null-terminated string
assert(size < 128);
@ -3056,8 +3061,8 @@ void tscRestoreFuncForSTableQuery(SQueryInfo* pQueryInfo) {
}
bool hasUnsupportFunctionsForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
const char* msg1 = "TWA/Diff not allowed to apply to super table directly";
const char* msg2 = "TWA/Diff only support group by tbname for super table query";
const char* msg1 = "TWA/Diff/Derivative/Irate not allowed to apply to super table directly";
const char* msg2 = "TWA/Diff/Derivative/Irate only support group by tbname for super table query";
const char* msg3 = "function not support for super table query";
// filter sql function not supported by metric query yet.
@ -3070,7 +3075,7 @@ bool hasUnsupportFunctionsForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo)
}
}
if (tscIsTWAQuery(pQueryInfo) || tscIsDiffQuery(pQueryInfo)) {
if (tscIsTWAQuery(pQueryInfo) || tscIsDiffDerivQuery(pQueryInfo) || tscIsIrateQuery(pQueryInfo)) {
if (pQueryInfo->groupbyExpr.numOfGroupCols == 0) {
invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
return true;
@ -4624,12 +4629,8 @@ static int32_t validateTagCondExpr(SSqlCmd* pCmd, tExprNode *p) {
free(tmp);
} else {
double tmp;
if (p->_node.optr == TSDB_RELATION_IN) {
retVal = validateParamOfRelationIn(vVariant, schemaType);
} else {
retVal = tVariantDump(vVariant, (char*)&tmp, schemaType, false);
}
}
if (retVal != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
@ -4960,7 +4961,10 @@ int32_t getTimeRange(STimeWindow* win, tSqlExpr* pRight, int32_t optr, int16_t t
win->skey = val;
} else if (optr == TK_EQ) {
win->ekey = win->skey = val;
} else if (optr == TK_NE) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
return TSDB_CODE_SUCCESS;
}
@ -6549,13 +6553,13 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, char*
size_t size = tscNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < size; ++i) {
SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
int32_t functId = pExpr->base.functionId;
int32_t f = pExpr->base.functionId;
/*
* group by normal columns.
* Check if the column projection is identical to the group by column or not
*/
if (functId == TSDB_FUNC_PRJ && pExpr->base.colInfo.colId != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
if (f == TSDB_FUNC_PRJ && pExpr->base.colInfo.colId != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
bool qualified = false;
for (int32_t j = 0; j < pQueryInfo->groupbyExpr.numOfGroupCols; ++j) {
SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, j);
@ -6570,12 +6574,12 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, char*
}
}
if (IS_MULTIOUTPUT(aAggs[functId].status) && functId != TSDB_FUNC_TOP && functId != TSDB_FUNC_BOTTOM &&
functId != TSDB_FUNC_DIFF && functId != TSDB_FUNC_TAGPRJ && functId != TSDB_FUNC_PRJ) {
if (IS_MULTIOUTPUT(aAggs[f].status) && f != TSDB_FUNC_TOP && f != TSDB_FUNC_BOTTOM &&
f != TSDB_FUNC_DIFF && f != TSDB_FUNC_DERIVATIVE && f != TSDB_FUNC_TAGPRJ && f != TSDB_FUNC_PRJ) {
return invalidOperationMsg(msg, msg1);
}
if (functId == TSDB_FUNC_COUNT && pExpr->base.colInfo.colIndex == TSDB_TBNAME_COLUMN_INDEX) {
if (f == TSDB_FUNC_COUNT && pExpr->base.colInfo.colIndex == TSDB_TBNAME_COLUMN_INDEX) {
return invalidOperationMsg(msg, msg1);
}
}
@ -7800,6 +7804,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
const char* msg4 = "interval query not supported, since the result of sub query not include valid timestamp column";
const char* msg5 = "only tag query not compatible with normal column filter";
const char* msg6 = "not support stddev/percentile in outer query yet";
const char* msg7 = "drivative requires timestamp column exists in subquery";
int32_t code = TSDB_CODE_SUCCESS;
@ -7849,11 +7854,27 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
}
}
// todo derivative function requires ts column exists in subquery
STableMeta* pTableMeta = tscGetMetaInfo(pQueryInfo, 0)->pTableMeta;
SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, 0);
if (tscNumOfExprs(pQueryInfo) > 1) {
SExprInfo* pExpr = tscExprGet(pQueryInfo, 1);
if (pExpr->base.functionId == TSDB_FUNC_DERIVATIVE && pSchema->type != TSDB_DATA_TYPE_TIMESTAMP) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7);
}
}
// validate the query filter condition info
if (pSqlNode->pWhere != NULL) {
if (validateWhereNode(pQueryInfo, &pSqlNode->pWhere, pSql) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
if (pTableMeta->tableInfo.precision == TSDB_TIME_PRECISION_MILLI) {
pQueryInfo->window.skey = pQueryInfo->window.skey / 1000;
pQueryInfo->window.ekey = pQueryInfo->window.ekey / 1000;
}
}
// validate the interval info
@ -7874,7 +7895,6 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
}
// set order by info
STableMeta* pTableMeta = tscGetMetaInfo(pQueryInfo, 0)->pTableMeta;
if (validateOrderbyNode(pCmd, pQueryInfo, pSqlNode, tscGetTableSchema(pTableMeta)) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
@ -8025,7 +8045,6 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
pQueryInfo->arithmeticOnAgg = tsIsArithmeticQueryOnAggResult(pQueryInfo);
pQueryInfo->orderProjectQuery = tscOrderedProjectionQueryOnSTable(pQueryInfo, 0);
pQueryInfo->diffQuery = tscIsDiffQuery(pQueryInfo);
SExprInfo** p = NULL;
int32_t numOfExpr = 0;
@ -8137,18 +8156,23 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSqlExpr* pS
return TSDB_CODE_SUCCESS;
} else if (pSqlExpr->tokenId == TK_SET) {
int32_t type = -1;
int32_t colType = -1;
STableMeta* pTableMeta = tscGetMetaInfo(pQueryInfo, 0)->pTableMeta;
if (pCols != NULL) {
SColIndex* idx = taosArrayGet(pCols, 0);
SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, idx->colIndex);
if (pSchema != NULL) {
type = pSchema->type;
colType = pSchema->type;
}
}
tVariant *pVal;
if (serializeExprListToVariant(pSqlExpr->pParam, &pVal, type) == false) {
if (colType >= TSDB_DATA_TYPE_TINYINT && colType <= TSDB_DATA_TYPE_BIGINT) {
colType = TSDB_DATA_TYPE_BIGINT;
} else if (colType == TSDB_DATA_TYPE_FLOAT || colType == TSDB_DATA_TYPE_DOUBLE) {
colType = TSDB_DATA_TYPE_DOUBLE;
}
if (serializeExprListToVariant(pSqlExpr->pParam, &pVal, colType) == false) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), "not support filter expression");
}
*pExpr = calloc(1, sizeof(tExprNode));

View File

@ -47,6 +47,31 @@ static int32_t getWaitingTimeInterval(int32_t count) {
return initial * ((2u)<<(count - 2));
}
static int32_t vgIdCompare(const void *lhs, const void *rhs) {
int32_t left = *(int32_t *)lhs;
int32_t right = *(int32_t *)rhs;
if (left == right) {
return 0;
} else {
return left > right ? 1 : -1;
}
}
static int32_t removeDupVgid(int32_t *src, int32_t sz) {
if (src == NULL || sz <= 0) {
return 0;
}
qsort(src, sz, sizeof(src[0]), vgIdCompare);
int32_t ret = 1;
for (int i = 1; i < sz; i++) {
if (src[i] != src[i - 1]) {
src[ret++] = src[i];
}
}
return ret;
}
static void tscSetDnodeEpSet(SRpcEpSet* pEpSet, SVgroupInfo* pVgroupInfo) {
assert(pEpSet != NULL && pVgroupInfo != NULL && pVgroupInfo->numOfEps > 0);
@ -1526,6 +1551,60 @@ int tscAlterDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
return TSDB_CODE_SUCCESS;
}
int tscBuildCompactMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
if (pInfo->list == NULL || taosArrayGetSize(pInfo->list) <= 0) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
STscObj *pObj = pSql->pTscObj;
SSqlCmd *pCmd = &pSql->cmd;
SArray *pList = pInfo->list;
int32_t size = (int32_t)taosArrayGetSize(pList);
int32_t *result = malloc(sizeof(int32_t) * size);
if (result == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
for (int32_t i = 0; i < size; i++) {
tSqlExprItem* pSub = taosArrayGet(pList, i);
tVariant* pVar = &pSub->pNode->value;
if (pVar->nType >= TSDB_DATA_TYPE_TINYINT && pVar->nType <= TSDB_DATA_TYPE_BIGINT) {
result[i] = (int32_t)(pVar->i64);
} else {
free(result);
return TSDB_CODE_TSC_INVALID_OPERATION;
}
}
int count = removeDupVgid(result, size);
pCmd->payloadLen = sizeof(SCompactMsg) + count * sizeof(int32_t);
pCmd->msgType = TSDB_MSG_TYPE_CM_COMPACT_VNODE;
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self);
free(result);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
SCompactMsg *pCompactMsg = (SCompactMsg *)pCmd->payload;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0);
if (tNameIsEmpty(&pTableMetaInfo->name)) {
pthread_mutex_lock(&pObj->mutex);
tstrncpy(pCompactMsg->db, pObj->db, sizeof(pCompactMsg->db));
pthread_mutex_unlock(&pObj->mutex);
} else {
tNameGetFullDbName(&pTableMetaInfo->name, pCompactMsg->db);
}
pCompactMsg->numOfVgroup = htons(count);
for (int32_t i = 0; i < count; i++) {
pCompactMsg->vgid[i] = htons(result[i]);
}
free(result);
return TSDB_CODE_SUCCESS;
}
int tscBuildRetrieveFromMgmtMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd;
@ -2279,6 +2358,10 @@ int tscProcessAlterDbMsgRsp(SSqlObj *pSql) {
UNUSED(pSql);
return 0;
}
int tscProcessCompactRsp(SSqlObj *pSql) {
UNUSED(pSql);
return TSDB_CODE_SUCCESS;
}
int tscProcessShowCreateRsp(SSqlObj *pSql) {
return tscLocalResultCommonBuilder(pSql, 1);
@ -2665,6 +2748,7 @@ void tscInitMsgsFp() {
tscBuildMsg[TSDB_SQL_ALTER_TABLE] = tscBuildAlterTableMsg;
tscBuildMsg[TSDB_SQL_UPDATE_TAGS_VAL] = tscBuildUpdateTagMsg;
tscBuildMsg[TSDB_SQL_ALTER_DB] = tscAlterDbMsg;
tscBuildMsg[TSDB_SQL_COMPACT_VNODE] = tscBuildCompactMsg;
tscBuildMsg[TSDB_SQL_CONNECT] = tscBuildConnectMsg;
tscBuildMsg[TSDB_SQL_USE_DB] = tscBuildUseDbMsg;
@ -2705,6 +2789,7 @@ void tscInitMsgsFp() {
tscProcessMsgRsp[TSDB_SQL_ALTER_TABLE] = tscProcessAlterTableMsgRsp;
tscProcessMsgRsp[TSDB_SQL_ALTER_DB] = tscProcessAlterDbMsgRsp;
tscProcessMsgRsp[TSDB_SQL_COMPACT_VNODE] = tscProcessCompactRsp;
tscProcessMsgRsp[TSDB_SQL_SHOW_CREATE_TABLE] = tscProcessShowCreateRsp;
tscProcessMsgRsp[TSDB_SQL_SHOW_CREATE_STABLE] = tscProcessShowCreateRsp;

View File

@ -283,6 +283,7 @@ static int tscUpdateSubscription(STscObj* pObj, SSub* pSub) {
SArray* tables = getTableList(pSql);
if (tables == NULL) {
pSub->lastSyncTime = 0; //force to get table list next time
return 0;
}
size_t numOfTables = taosArrayGetSize(tables);
@ -489,7 +490,15 @@ TAOS_RES *taos_consume(TAOS_SUB *tsub) {
SSub *pSub = (SSub *)tsub;
if (pSub == NULL) return NULL;
if (pSub->pSql->cmd.command == TSDB_SQL_RETRIEVE_EMPTY_RESULT) {
if (pSub->pTimer == NULL) {
int64_t duration = taosGetTimestampMs() - pSub->lastConsumeTime;
if (duration < (int64_t)(pSub->interval)) {
tscDebug("subscription consume too frequently, blocking...");
taosMsleep(pSub->interval - (int32_t)duration);
}
}
if (pSub->pSql->cmd.command == TSDB_SQL_RETRIEVE_EMPTY_RESULT) { //may reach here when retireve stable vgroup failed
SSqlObj* pSql = recreateSqlObj(pSub);
if (pSql == NULL) {
return NULL;
@ -503,6 +512,11 @@ TAOS_RES *taos_consume(TAOS_SUB *tsub) {
pSub->pSql = pSql;
pSql->pSubscription = pSub;
// no table list now, force to update it
tscDebug("begin table synchronization");
if (!tscUpdateSubscription(pSub->taos, pSub)) return NULL;
tscDebug("table synchronization completed");
}
tscSaveSubscriptionProgress(pSub);
@ -527,14 +541,6 @@ TAOS_RES *taos_consume(TAOS_SUB *tsub) {
tscDebug("subscribe:%s set next round subscribe skey:%"PRId64, pSub->topic, pQueryInfo->window.skey);
}
if (pSub->pTimer == NULL) {
int64_t duration = taosGetTimestampMs() - pSub->lastConsumeTime;
if (duration < (int64_t)(pSub->interval)) {
tscDebug("subscription consume too frequently, blocking...");
taosMsleep(pSub->interval - (int32_t)duration);
}
}
size_t size = taosArrayGetSize(pSub->progress) * sizeof(STableIdInfo);
size += sizeof(SQueryTableMsg) + 4096;
int code = tscAllocPayload(&pSql->cmd, (int)size);

View File

@ -3152,6 +3152,13 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) {
// it is the failure retry insert
if (pSql->pSubs != NULL) {
int32_t blockNum = (int32_t)taosArrayGetSize(pCmd->insertParam.pDataBlocks);
if (pSql->subState.numOfSub != blockNum) {
tscError("0x%"PRIx64" sub num:%d is not same with data block num:%d", pSql->self, pSql->subState.numOfSub, blockNum);
pRes->code = TSDB_CODE_TSC_APP_ERROR;
return pRes->code;
}
for(int32_t i = 0; i < pSql->subState.numOfSub; ++i) {
SSqlObj* pSub = pSql->pSubs[i];
SInsertSupporter* pSup = calloc(1, sizeof(SInsertSupporter));

View File

@ -288,16 +288,24 @@ static int taos_options_imp(TSDB_OPTION option, const char *pStr) {
if (strlen(tsLocale) == 0) { // locale does not set yet
char* defaultLocale = setlocale(LC_CTYPE, "");
// The locale of the current OS does not be set correctly, so the default locale cannot be acquired.
// The launch of current system will abort soon.
if (defaultLocale == NULL) {
tscError("failed to get default locale, please set the correct locale in current OS");
return -1;
}
tstrncpy(tsLocale, defaultLocale, TSDB_LOCALE_LEN);
}
// set the user specified locale
char *locale = setlocale(LC_CTYPE, pStr);
if (locale != NULL) {
if (locale != NULL) { // succeeded in setting the user specified locale
tscInfo("locale set, prev locale:%s, new locale:%s", tsLocale, locale);
cfg->cfgStatus = TAOS_CFG_CSTATUS_OPTION;
} else { // set the user-specified localed failed, use default LC_CTYPE as current locale
} else { // set the user specified locale failed, use default LC_CTYPE as current locale
locale = setlocale(LC_CTYPE, tsLocale);
tscInfo("failed to set locale:%s, current locale:%s", pStr, tsLocale);
}

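The taos_options_imp hunk establishes a two-step fallback: capture the OS default locale first (failing hard when none can be acquired), then try the user-specified locale and revert to the saved default when setlocale rejects it. A hedged Java sketch of the same pattern, using java.util.Locale as a rough stand-in for setlocale:

```java
import java.util.Locale;

public class LocaleFallback {
    // Returns the locale actually in effect: the requested one when it is
    // resolvable, otherwise the current default. Locale.forLanguageTag() is
    // only an approximation of setlocale() semantics.
    static Locale apply(String requested) {
        Locale current = Locale.getDefault();
        Locale candidate = Locale.forLanguageTag(requested);
        if (!"und".equals(candidate.toLanguageTag())) {
            Locale.setDefault(candidate);  // user-specified locale accepted
            return candidate;
        }
        System.err.println("failed to set locale: " + requested
                + ", current locale: " + current);
        return current;                    // keep the previous locale
    }
}
```
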
View File

@ -241,6 +241,7 @@ bool tscIsProjectionQueryOnSTable(SQueryInfo* pQueryInfo, int32_t tableIndex) {
functionId != TSDB_FUNC_ARITHM &&
functionId != TSDB_FUNC_TS_COMP &&
functionId != TSDB_FUNC_DIFF &&
functionId != TSDB_FUNC_DERIVATIVE &&
functionId != TSDB_FUNC_TS_DUMMY &&
functionId != TSDB_FUNC_TID_TAG) {
return false;
@ -477,15 +478,15 @@ bool tscIsTWAQuery(SQueryInfo* pQueryInfo) {
return false;
}
bool tscIsDiffQuery(SQueryInfo* pQueryInfo) {
size_t num = tscNumOfExprs(pQueryInfo);
for(int32_t i = 0; i < num; ++i) {
bool tscIsIrateQuery(SQueryInfo* pQueryInfo) {
size_t numOfExprs = tscNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < numOfExprs; ++i) {
SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
if (pExpr == NULL || pExpr->base.functionId == TSDB_FUNC_TS_DUMMY) {
if (pExpr == NULL) {
continue;
}
if (pExpr->base.functionId == TSDB_FUNC_DIFF) {
if (pExpr->base.functionId == TSDB_FUNC_IRATE) {
return true;
}
}
@ -531,7 +532,7 @@ bool isSimpleAggregateRv(SQueryInfo* pQueryInfo) {
return false;
}
if (tscIsDiffQuery(pQueryInfo)) {
if (tscIsDiffDerivQuery(pQueryInfo)) {
return false;
}
@ -3510,6 +3511,7 @@ static void tscSubqueryRetrieveCallback(void* param, TAOS_RES* tres, int code) {
if (pSql->res.code == TSDB_CODE_SUCCESS) {
(*pSql->fp)(pParentSql->param, pParentSql, pParentSql->res.numOfRows);
} else {
pParentSql->res.code = pSql->res.code;
tscAsyncResultOnError(pParentSql);
}
}
@ -4275,7 +4277,7 @@ int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAt
pQueryAttr->hasTagResults = hasTagValOutput(pQueryInfo);
pQueryAttr->stabledev = isStabledev(pQueryInfo);
pQueryAttr->tsCompQuery = isTsCompQuery(pQueryInfo);
pQueryAttr->diffQuery = tscIsDiffQuery(pQueryInfo);
pQueryAttr->diffQuery = tscIsDiffDerivQuery(pQueryInfo);
pQueryAttr->simpleAgg = isSimpleAggregateRv(pQueryInfo);
pQueryAttr->needReverseScan = tscNeedReverseScan(pQueryInfo);
pQueryAttr->stableQuery = QUERY_IS_STABLE_QUERY(pQueryInfo->type);

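The tscUtil.c hunks rename the DIFF-only predicate to tscIsDiffDerivQuery (now covering TSDB_FUNC_DERIVATIVE) and add tscIsIrateQuery; both scan the query's expression list for a function id. A sketch of that predicate shape in Java, where ExprInfo is a placeholder for the C SExprInfo struct:

```java
import java.util.List;

class ExprInfo {
    int functionId;  // placeholder for SExprInfo.base.functionId
}

class QueryPredicates {
    // True when any expression in the query uses the given function id,
    // the shape shared by tscIsIrateQuery and tscIsDiffDerivQuery above.
    static boolean containsFunction(List<ExprInfo> exprs, int functionId) {
        for (ExprInfo expr : exprs) {
            if (expr != null && expr.functionId == functionId) {
                return true;
            }
        }
        return false;
    }
}
```
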
View File

@ -7,7 +7,12 @@ FIND_LIBRARY(LIB_GTEST_STATIC_DIR libgtest.a /usr/lib/ /usr/local/lib)
IF (HEADER_GTEST_INCLUDE_DIR AND LIB_GTEST_STATIC_DIR)
MESSAGE(STATUS "gTest library found, build unit test")
INCLUDE_DIRECTORIES(${HEADER_GTEST_INCLUDE_DIR})
# GoogleTest requires at least C++11
SET(CMAKE_CXX_STANDARD 11)
INCLUDE_DIRECTORIES(/usr/include /usr/local/include)
LINK_DIRECTORIES(/usr/lib /usr/local/lib)
AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST)
ADD_EXECUTABLE(cliTest ${SOURCE_LIST})

View File

@ -51,6 +51,7 @@ enum {
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_ALTER_ACCT, "alter-acct" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_ALTER_TABLE, "alter-table" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_ALTER_DB, "alter-db" )
TSDB_DEFINE_SQL_TYPE(TSDB_SQL_SYNC_DB_REPLICA, "sync db-replica")
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CREATE_MNODE, "create-mnode" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_DROP_MNODE, "drop-mnode" )
@ -63,6 +64,7 @@ enum {
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_KILL_QUERY, "kill-query" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_KILL_STREAM, "kill-stream" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_KILL_CONNECTION, "kill-connection" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_COMPACT_VNODE, "compact-vnode" )
// SQL below is for read operation
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_READ, "read" )

View File

@ -470,6 +470,9 @@ void buildFilterSetFromBinary(void **q, const char *buf, int32_t len) {
SBufferReader br = tbufInitReader(buf, len, false);
uint32_t type = tbufReadUint32(&br);
SHashObj *pObj = taosHashInit(256, taosGetDefaultHashFunction(type), true, false);
taosHashSetEqualFp(pObj, taosGetDefaultEqualFunction(type));
int dummy = -1;
int32_t sz = tbufReadInt32(&br);
for (int32_t i = 0; i < sz; i++) {

View File

@ -8,9 +8,8 @@ IF (TD_MVN_INSTALLED)
ADD_CUSTOM_COMMAND(OUTPUT ${JDBC_CMD_NAME}
POST_BUILD
COMMAND mvn -Dmaven.test.skip=true install -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.30.jar ${LIBRARY_OUTPUT_PATH}
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.31.jar ${LIBRARY_OUTPUT_PATH}
COMMAND mvn -Dmaven.test.skip=true clean -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
COMMENT "build jdbc driver")
ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${JDBC_CMD_NAME})
ENDIF ()

View File

@ -5,7 +5,7 @@
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.30</version>
<version>2.0.31</version>
<packaging>jar</packaging>
<name>JDBCDriver</name>

View File

@ -3,7 +3,7 @@
<modelVersion>4.0.0</modelVersion>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.30</version>
<version>2.0.31</version>
<packaging>jar</packaging>
<name>JDBCDriver</name>
<url>https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc</url>
@ -122,7 +122,7 @@
<exclude>**/FailOverTest.java</exclude>
<exclude>**/InvalidResultSetPointerTest.java</exclude>
<exclude>**/RestfulConnectionTest.java</exclude>
<exclude>**/TD4144Test.java</exclude>
<exclude>**/ConnectMultiTaosdByRestfulWithDifferentTokenTest.java</exclude>
</excludes>
<testFailureIgnore>true</testFailureIgnore>
</configuration>

View File

@ -1,5 +1,7 @@
package com.taosdata.jdbc;
import com.taosdata.jdbc.rs.enums.TimestampFormat;
import java.sql.*;
import java.util.Enumeration;
import java.util.Map;
@ -18,7 +20,7 @@ public abstract class AbstractConnection extends WrapperImpl implements Connecti
for (String propName : propNames) {
clientInfoProps.setProperty(propName, properties.getProperty(propName));
}
String timestampFormat = properties.getProperty(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT, "STRING");
String timestampFormat = properties.getProperty(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT, String.valueOf(TimestampFormat.STRING));
clientInfoProps.setProperty(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT, timestampFormat);
}
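
The hunk above replaces the hard-coded "STRING" default with the TimestampFormat enum when reading PROPERTY_KEY_TIMESTAMP_FORMAT. On the application side the property is supplied at connect time; a hedged usage sketch (URL, host, and credentials are placeholders):

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;
import com.taosdata.jdbc.TSDBDriver;

public class TimestampFormatExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // one of STRING (the default), TIMESTAMP, or UTC
        props.setProperty(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT, "UTC");
        Connection conn = DriverManager.getConnection(
                "jdbc:TAOS-RS://localhost:6041/test?user=root&password=taosdata", props);
        // ... timestamp columns now come back in UTC string form ...
        conn.close();
    }
}
```
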
@ -516,7 +518,6 @@ public abstract class AbstractConnection extends WrapperImpl implements Connecti
public void abort(Executor executor) throws SQLException {
if (isClosed())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
// do nothing
}

View File

@ -9,8 +9,6 @@ public abstract class AbstractStatement extends WrapperImpl implements Statement
protected List<String> batchedArgs;
private int fetchSize;
@Override
public abstract ResultSet executeQuery(String sql) throws SQLException;

View File

@ -32,6 +32,7 @@ public class TSDBError {
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_INVALID_SQL, "invalid sql");
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_NUMERIC_VALUE_OUT_OF_RANGE, "numeric value out of range");
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE_IN_TDENGINE, "unknown taos type in tdengine");
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN_TIMESTAMP_PERCISION, "unknown timestamp precision");
/**************************************************/
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN, "unknown error");

View File

@ -25,6 +25,7 @@ public class TSDBErrorNumbers {
public static final int ERROR_INVALID_SQL = 0x2313; // invalid sql
public static final int ERROR_NUMERIC_VALUE_OUT_OF_RANGE = 0x2314; // numeric value out of range
public static final int ERROR_UNKNOWN_TAOS_TYPE_IN_TDENGINE = 0x2315; //unknown taos type in tdengine
public static final int ERROR_UNKNOWN_TIMESTAMP_PERCISION = 0x2316; // unknown timestamp precision
public static final int ERROR_UNKNOWN = 0x2350; //unknown error
@ -62,6 +63,7 @@ public class TSDBErrorNumbers {
errorNumbers.add(ERROR_INVALID_SQL);
errorNumbers.add(ERROR_NUMERIC_VALUE_OUT_OF_RANGE);
errorNumbers.add(ERROR_UNKNOWN_TAOS_TYPE_IN_TDENGINE);
errorNumbers.add(ERROR_UNKNOWN_TIMESTAMP_PERCISION);
/*****************************************************/
errorNumbers.add(ERROR_SUBSCRIBE_FAILED);

View File

@ -216,6 +216,16 @@ public class TSDBJNIConnector {
private native int fetchBlockImp(long connection, long resultSet, TSDBResultSetBlockData blockData);
/**
* Get Result Time Precision.
* @return 0: ms, 1: us, 2: ns
*/
public int getResultTimePrecision(long sqlObj) {
return this.getResultTimePrecisionImp(this.taos, sqlObj);
}
private native int getResultTimePrecisionImp(long connection, long result);
/**
* Execute close operation from C to release connection pointer by JNI
*

View File

@ -38,7 +38,6 @@ import java.util.regex.Pattern;
public class TSDBPreparedStatement extends TSDBStatement implements PreparedStatement {
private String rawSql;
private Object[] parameters;
private boolean isPrepared;
private ArrayList<ColumnInfo> colData;
private ArrayList<TableTagInfo> tableTags;
@ -47,8 +46,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
private String tableName;
private long nativeStmtHandle = 0;
private volatile TSDBParameterMetaData parameterMetaData;
TSDBPreparedStatement(TSDBConnection connection, String sql) {
super(connection);
init(sql);
@ -61,7 +58,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
}
}
parameters = new Object[parameterCnt];
this.isPrepared = true;
}
if (parameterCnt > 1) {
@ -76,11 +72,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
preprocessSql();
}
@Override
public int[] executeBatch() throws SQLException {
return super.executeBatch();
}
/*
* Some of the SQLs sent by other popular frameworks or tools like Spark contain syntax that cannot be parsed by
* the TDengine client. Thus, some simple parsers/filters are intentionally added in this JDBC implementation in
@ -137,29 +128,15 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
/***** for inner queries *****/
}
/**
* Populate parameters into prepared sql statements
*
* @return a string of the native sql statement for TSDB
*/
private String getNativeSql(String rawSql) throws SQLException {
return Utils.getNativeSql(rawSql, this.parameters);
}
@Override
public ResultSet executeQuery() throws SQLException {
if (!isPrepared)
return executeQuery(this.rawSql);
final String sql = getNativeSql(this.rawSql);
final String sql = Utils.getNativeSql(this.rawSql, this.parameters);
return executeQuery(sql);
}
@Override
public int executeUpdate() throws SQLException {
if (!isPrepared)
return executeUpdate(this.rawSql);
String sql = getNativeSql(this.rawSql);
String sql = Utils.getNativeSql(this.rawSql, this.parameters);
return executeUpdate(sql);
}
@ -282,26 +259,15 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
@Override
public boolean execute() throws SQLException {
if (!isPrepared)
return execute(this.rawSql);
final String sql = getNativeSql(this.rawSql);
final String sql = Utils.getNativeSql(this.rawSql, this.parameters);
return execute(sql);
}
@Override
public void addBatch() throws SQLException {
if (this.batchedArgs == null) {
batchedArgs = new ArrayList<>();
}
if (!isPrepared) {
addBatch(this.rawSql);
} else {
String sql = this.getConnection().nativeSQL(this.rawSql);
String sql = Utils.getNativeSql(this.rawSql, this.parameters);
addBatch(sql);
}
}
@Override
public void setCharacterStream(int parameterIndex, Reader reader, int length) throws SQLException {

View File

@ -147,30 +147,14 @@ public class TSDBResultSetRowData {
case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
case TSDBConstants.TSDB_DATA_TYPE_BINARY:
return Integer.parseInt((String) obj);
case TSDBConstants.TSDB_DATA_TYPE_UTINYINT: {
Byte value = (byte) obj;
if (value < 0)
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_NUMERIC_VALUE_OUT_OF_RANGE);
return value;
}
case TSDBConstants.TSDB_DATA_TYPE_USMALLINT: {
short value = (short) obj;
if (value < 0)
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_NUMERIC_VALUE_OUT_OF_RANGE);
return value;
}
case TSDBConstants.TSDB_DATA_TYPE_UINT: {
int value = (int) obj;
if (value < 0)
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_NUMERIC_VALUE_OUT_OF_RANGE);
return value;
}
case TSDBConstants.TSDB_DATA_TYPE_UBIGINT: {
long value = (long) obj;
if (value < 0)
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_NUMERIC_VALUE_OUT_OF_RANGE);
return Long.valueOf(value).intValue();
}
case TSDBConstants.TSDB_DATA_TYPE_UTINYINT:
return parseUnsignedTinyIntToInt(obj);
case TSDBConstants.TSDB_DATA_TYPE_USMALLINT:
return parseUnsignedSmallIntToInt(obj);
case TSDBConstants.TSDB_DATA_TYPE_UINT:
return parseUnsignedIntegerToInt(obj);
case TSDBConstants.TSDB_DATA_TYPE_UBIGINT:
return parseUnsignedBigIntToInt(obj);
case TSDBConstants.TSDB_DATA_TYPE_FLOAT:
return ((Float) obj).intValue();
case TSDBConstants.TSDB_DATA_TYPE_DOUBLE:
@ -180,6 +164,35 @@ public class TSDBResultSetRowData {
}
}
private byte parseUnsignedTinyIntToInt(Object obj) throws SQLException {
byte value = (byte) obj;
if (value < 0)
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_NUMERIC_VALUE_OUT_OF_RANGE);
return value;
}
private short parseUnsignedSmallIntToInt(Object obj) throws SQLException {
short value = (short) obj;
if (value < 0)
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_NUMERIC_VALUE_OUT_OF_RANGE);
return value;
}
private int parseUnsignedIntegerToInt(Object obj) throws SQLException {
int value = (int) obj;
if (value < 0)
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_NUMERIC_VALUE_OUT_OF_RANGE);
return value;
}
private int parseUnsignedBigIntToInt(Object obj) throws SQLException {
long value = (long) obj;
if (value < 0)
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_NUMERIC_VALUE_OUT_OF_RANGE);
return Long.valueOf(value).intValue();
}
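
The extracted parseUnsigned*ToInt helpers keep the original guard: Java has no unsigned primitives, so an unsigned value whose high bit is set arrives as a negative signed primitive, and the driver reports it as out of range instead of reinterpreting it. A small illustration of what the guard protects against:

```java
public class UnsignedDemo {
    public static void main(String[] args) {
        byte raw = (byte) 200;                        // wire value of unsigned TINYINT 200
        System.out.println(raw);                      // -56: the signed reinterpretation
        System.out.println(Byte.toUnsignedInt(raw));  // 200: the logical value
        // The helpers above throw ERROR_NUMERIC_VALUE_OUT_OF_RANGE for raw < 0,
        // i.e. unsigned values above the signed maximum are treated as unsupported.
    }
}
```
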
/**
* $$$ this method is invoked by databaseMetaDataResultSet and so on, which use an index starting from 1 in the JDBC API
*/
@ -216,7 +229,7 @@ public class TSDBResultSetRowData {
case TSDBConstants.TSDB_DATA_TYPE_BINARY:
return Long.parseLong((String) obj);
case TSDBConstants.TSDB_DATA_TYPE_UTINYINT: {
Byte value = (byte) obj;
byte value = (byte) obj;
if (value < 0)
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_NUMERIC_VALUE_OUT_OF_RANGE);
return value;
@ -414,26 +427,40 @@ public class TSDBResultSetRowData {
* $$$ this method is invoked by databaseMetaDataResultSet and so on, which use an index starting from 1 in the JDBC API
*/
public void setTimestampValue(int colIndex, long value) {
setTimestamp(colIndex - 1, value);
setTimestamp(colIndex - 1, value, 0);
}
/**
* !!! this method is invoked by a JNI method and the index starts from 0 in C implementations
* @param precision 0 : ms, 1 : us, 2 : ns
*/
public void setTimestamp(int col, long ts) {
//TODO: this implementation contains a logical error:
// when precision is us, the (long ts) is a 16-digit number
// when precision is ms, the (long ts) is a 13-digit number
// we need a JNI function like this:
// public void setTimestamp(int col, long epochSecond, long nanoAdjustment)
if (ts < 1_0000_0000_0000_0L) {
data.set(col, new Timestamp(ts));
} else {
long epochSec = ts / 1000_000l;
long nanoAdjustment = ts % 1000_000l * 1000l;
Timestamp timestamp = Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment));
data.set(col, timestamp);
public void setTimestamp(int col, long ts, int precision) {
long milliseconds = 0;
int fracNanoseconds = 0;
switch (precision) {
case 0: {
milliseconds = ts;
fracNanoseconds = (int)(ts*1_000_000%1_000_000_000);
break;
}
case 1: {
milliseconds = ts/1_000;
fracNanoseconds = (int)(ts*1_000%1_000_000_000);
break;
}
case 2: {
milliseconds = ts/1_000_000;
fracNanoseconds = (int)(ts%1_000_000_000);
break;
}
default: {
throw new IllegalArgumentException("precision is not valid. precision: " + precision);
}
}
Timestamp tsObj = new Timestamp(milliseconds);
tsObj.setNanos(fracNanoseconds);
data.set(col, tsObj);
}
public Timestamp getTimestamp(int col, int nativeType) {

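The rewritten setTimestamp splits a raw epoch value into whole milliseconds plus a fractional-nanoseconds component according to the precision reported by the new getResultTimePrecision JNI call (0: ms, 1: us, 2: ns). A self-contained sketch of the same arithmetic with a worked example; toTimestamp is a hypothetical helper name, not part of the connector:

```java
import java.sql.Timestamp;

public class PrecisionDemo {
    // Mirrors the switch in setTimestamp above.
    static Timestamp toTimestamp(long ts, int precision) {
        long millis;
        int nanos;
        switch (precision) {
            case 0: millis = ts;             nanos = (int) (ts % 1_000 * 1_000_000); break; // ms
            case 1: millis = ts / 1_000;     nanos = (int) (ts % 1_000_000 * 1_000); break; // us
            case 2: millis = ts / 1_000_000; nanos = (int) (ts % 1_000_000_000);     break; // ns
            default: throw new IllegalArgumentException("precision is not valid: " + precision);
        }
        Timestamp result = new Timestamp(millis);
        result.setNanos(nanos);  // replaces the ms fraction with the full nano fraction
        return result;
    }

    public static void main(String[] args) {
        // 1600000000123456 us is the instant 2020-09-13T12:26:40.123456Z
        System.out.println(toTimestamp(1_600_000_000_123_456L, 1).getNanos()); // 123456000
    }
}
```
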
View File

@ -0,0 +1,8 @@
package com.taosdata.jdbc.enums;
public enum TimestampPrecision {
MS,
US,
NS,
UNKNOWN
}

View File

@ -17,16 +17,18 @@ public class RestfulConnection extends AbstractConnection {
private final int port;
private final String url;
private volatile String database;
private final String token;
/******************************************************/
private boolean isClosed;
private final DatabaseMetaData metadata;
public RestfulConnection(String host, String port, Properties props, String database, String url) {
public RestfulConnection(String host, String port, Properties props, String database, String url, String token) {
super(props);
this.host = host;
this.port = Integer.parseInt(port);
this.database = database;
this.url = url;
this.token = token;
this.metadata = new RestfulDatabaseMetaData(url, props.getProperty(TSDBDriver.PROPERTY_KEY_USER), this);
}
@ -66,6 +68,7 @@ public class RestfulConnection extends AbstractConnection {
return this.metadata;
}
// getters
public String getHost() {
return host;
}
@ -81,4 +84,8 @@ public class RestfulConnection extends AbstractConnection {
public String getUrl() {
return url;
}
public String getToken() {
return token;
}
}

View File

@ -38,15 +38,11 @@ public class RestfulDriver extends AbstractDriver {
String port = props.getProperty(TSDBDriver.PROPERTY_KEY_PORT, "6041");
String database = props.containsKey(TSDBDriver.PROPERTY_KEY_DBNAME) ? props.getProperty(TSDBDriver.PROPERTY_KEY_DBNAME) : null;
String loginUrl = "http://" + props.getProperty(TSDBDriver.PROPERTY_KEY_HOST) + ":"
+ props.getProperty(TSDBDriver.PROPERTY_KEY_PORT) + "/rest/login/"
+ props.getProperty(TSDBDriver.PROPERTY_KEY_USER) + "/"
+ props.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD) + "";
String loginUrl = "http://" + host + ":" + port + "/rest/login/" + props.getProperty(TSDBDriver.PROPERTY_KEY_USER) + "/" + props.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD) + "";
try {
String user = URLEncoder.encode(props.getProperty(TSDBDriver.PROPERTY_KEY_USER), "UTF-8");
String password = URLEncoder.encode(props.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD), "UTF-8");
loginUrl = "http://" + props.getProperty(TSDBDriver.PROPERTY_KEY_HOST) + ":"
+ props.getProperty(TSDBDriver.PROPERTY_KEY_PORT) + "/rest/login/" + user + "/" + password + "";
loginUrl = "http://" + props.getProperty(TSDBDriver.PROPERTY_KEY_HOST) + ":" + props.getProperty(TSDBDriver.PROPERTY_KEY_PORT) + "/rest/login/" + user + "/" + password + "";
} catch (UnsupportedEncodingException e) {
e.printStackTrace();
}
@ -55,12 +51,12 @@ public class RestfulDriver extends AbstractDriver {
JSONObject jsonResult = JSON.parseObject(result);
String status = jsonResult.getString("status");
String token = jsonResult.getString("desc");
HttpClientPoolUtil.token = token;
if (!status.equals("succ")) {
throw new SQLException(jsonResult.getString("desc"));
}
RestfulConnection conn = new RestfulConnection(host, port, props, database, url);
RestfulConnection conn = new RestfulConnection(host, port, props, database, url, token);
if (database != null && !database.trim().replaceAll("\\s", "").isEmpty()) {
Statement stmt = conn.createStatement();
stmt.execute("use " + database);

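The RestfulDriver hunk stops stashing the login token in the mutable global HttpClientPoolUtil.token and instead hands it to the RestfulConnection, which forwards it on every request. A hedged sketch of the underlying REST handshake the driver performs; host, port, and credentials are placeholders, and the regex stands in for the fastjson parsing the driver actually uses:

```java
import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class RestLoginSketch {
    public static void main(String[] args) throws Exception {
        // 1) login: GET /rest/login/<user>/<password>; the token comes back in "desc"
        URL login = new URL("http://localhost:6041/rest/login/root/taosdata");
        String body = readAll(login.openConnection().getInputStream());
        // body looks like {"status":"succ","code":0,"desc":"<token>"}
        String token = body.replaceAll(".*\"desc\":\"([^\"]+)\".*", "$1");

        // 2) query: POST the SQL with the token in the Authorization header
        HttpURLConnection req = (HttpURLConnection)
                new URL("http://localhost:6041/rest/sql").openConnection();
        req.setRequestMethod("POST");
        req.setRequestProperty("Authorization", "Taosd " + token);
        req.setDoOutput(true);
        req.getOutputStream().write("show databases".getBytes(StandardCharsets.UTF_8));
        System.out.println(readAll(req.getInputStream()));
    }

    private static String readAll(InputStream in) throws Exception {
        try (in) {
            return new String(in.readAllBytes(), StandardCharsets.UTF_8);
        }
    }
}
```

Carrying the token per connection rather than in a static field lets two connections hold different credentials at once, which is presumably what the newly referenced ConnectMultiTaosdByRestfulWithDifferentTokenTest exercises.
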
View File

@ -44,7 +44,7 @@ public class RestfulPreparedStatement extends RestfulStatement implements Prepar
if (!isPrepared)
return executeQuery(this.rawSql);
final String sql = getNativeSql(this.rawSql);
final String sql = Utils.getNativeSql(this.rawSql, this.parameters);
return executeQuery(sql);
}
@ -55,20 +55,10 @@ public class RestfulPreparedStatement extends RestfulStatement implements Prepar
if (!isPrepared)
return executeUpdate(this.rawSql);
final String sql = getNativeSql(this.rawSql);
final String sql = Utils.getNativeSql(rawSql, this.parameters);
return executeUpdate(sql);
}
/****
* Convert rawSql into an executable SQL statement by substituting the variables held in the parameters field,
* e.g. for insert into ?.? (?,?,?) using ?.? (?,?,?) tags(?, ?, ?) values(?, ?, ?)
* @param rawSql may be an insert, select, or other statement that uses ? as a placeholder
* @return
*/
private String getNativeSql(String rawSql) {
return Utils.getNativeSql(rawSql, this.parameters);
}
@Override
public void setNull(int parameterIndex, int sqlType) throws SQLException {
if (isClosed())
@ -224,16 +214,13 @@ public class RestfulPreparedStatement extends RestfulStatement implements Prepar
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
if (!isPrepared)
return execute(this.rawSql);
final String sql = getNativeSql(rawSql);
final String sql = Utils.getNativeSql(rawSql, this.parameters);
return execute(sql);
}
@Override
public void addBatch() throws SQLException {
if (isClosed())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
final String sql = getNativeSql(this.rawSql);
final String sql = Utils.getNativeSql(rawSql, this.parameters);
addBatch(sql);
}

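RestfulPreparedStatement now calls Utils.getNativeSql directly: the RESTful transport has no server-side prepared statements, so bound parameters are inlined into the raw SQL before it is sent. A simplified sketch of that substitution, assuming string values need quoting; the real Utils.getNativeSql also handles the insert-using-tags form described in the deleted Javadoc:

```java
public class InlineParams {
    // Replace each ? with the corresponding bound parameter. A naive sketch:
    // it ignores ? inside string literals, which a real implementation must not.
    static String inline(String rawSql, Object[] params) {
        StringBuilder sb = new StringBuilder();
        int p = 0;
        for (char c : rawSql.toCharArray()) {
            if (c == '?' && p < params.length) {
                Object v = params[p++];
                sb.append(v instanceof String ? "'" + v + "'" : String.valueOf(v));
            } else {
                sb.append(c);
            }
        }
        return sb.toString();
    }

    public static void main(String[] args) {
        System.out.println(inline("insert into t values (?, ?)",
                new Object[]{1623998133000L, 23.5}));
        // -> insert into t values (1623998133000, 23.5)
    }
}
```
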
View File

@ -6,6 +6,8 @@ import com.google.common.primitives.Ints;
import com.google.common.primitives.Longs;
import com.google.common.primitives.Shorts;
import com.taosdata.jdbc.*;
import com.taosdata.jdbc.enums.TimestampPrecision;
import com.taosdata.jdbc.rs.enums.TimestampFormat;
import com.taosdata.jdbc.utils.Utils;
import java.math.BigDecimal;
@ -46,6 +48,7 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
columnNames.clear();
columns.clear();
this.resultSet.clear();
this.metaData = new RestfulResultSetMetaData(this.database, null, this);
return;
}
// get head
@ -131,7 +134,6 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
}
}
private Object parseColumnData(JSONArray row, int colIndex, int taosType) throws SQLException {
switch (taosType) {
case TSDBConstants.TSDB_DATA_TYPE_NULL:
@ -150,44 +152,8 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
return row.getFloat(colIndex);
case TSDBConstants.TSDB_DATA_TYPE_DOUBLE:
return row.getDouble(colIndex);
case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP: {
if (row.get(colIndex) == null)
return null;
String timestampFormat = this.statement.getConnection().getClientInfo(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT);
if ("TIMESTAMP".equalsIgnoreCase(timestampFormat)) {
Long value = row.getLong(colIndex);
//TODO: this implementation has a bug if the timestamp is bigger than 9999_9999_9999_9
if (value < 1_0000_0000_0000_0L)
return new Timestamp(value);
long epochSec = value / 1000_000l;
long nanoAdjustment = value % 1000_000l * 1000l;
return Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment));
}
if ("UTC".equalsIgnoreCase(timestampFormat)) {
String value = row.getString(colIndex);
long epochSec = Timestamp.valueOf(value.substring(0, 19).replace("T", " ")).getTime() / 1000;
int fractionalSec = Integer.parseInt(value.substring(20, value.length() - 5));
long nanoAdjustment = 0;
if (value.length() > 28) {
// us timestamp: yyyy-MM-ddTHH:mm:ss.SSSSSS+0x00
nanoAdjustment = fractionalSec * 1000l;
} else {
// ms timestamp: yyyy-MM-ddTHH:mm:ss.SSS+0x00
nanoAdjustment = fractionalSec * 1000_000l;
}
ZoneOffset zoneOffset = ZoneOffset.of(value.substring(value.length() - 5));
Instant instant = Instant.ofEpochSecond(epochSec, nanoAdjustment).atOffset(zoneOffset).toInstant();
return Timestamp.from(instant);
}
String value = row.getString(colIndex);
if (value.length() <= 23) // ms timestamp: yyyy-MM-dd HH:mm:ss.SSS
return row.getTimestamp(colIndex);
// us timestamp: yyyy-MM-dd HH:mm:ss.SSSSSS
long epochSec = Timestamp.valueOf(value.substring(0, 19)).getTime() / 1000;
long nanoAdjustment = Integer.parseInt(value.substring(20)) * 1000l;
Timestamp timestamp = Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment));
return timestamp;
}
case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP:
return parseTimestampColumnData(row, colIndex);
case TSDBConstants.TSDB_DATA_TYPE_BINARY:
return row.getString(colIndex) == null ? null : row.getString(colIndex).getBytes();
case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
@ -197,7 +163,66 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
}
}
public class Field {
private Timestamp parseTimestampColumnData(JSONArray row, int colIndex) throws SQLException {
if (row.get(colIndex) == null)
return null;
String tsFormatUpperCase = this.statement.getConnection().getClientInfo(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT).toUpperCase();
TimestampFormat timestampFormat = TimestampFormat.valueOf(tsFormatUpperCase);
switch (timestampFormat) {
case TIMESTAMP: {
Long value = row.getLong(colIndex);
//TODO: this implementation has a bug if the timestamp is bigger than 9999_9999_9999_9
if (value < 1_0000_0000_0000_0L)
return new Timestamp(value);
long epochSec = value / 1000_000l;
long nanoAdjustment = value % 1000_000l * 1000l;
return Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment));
}
case UTC: {
String value = row.getString(colIndex);
long epochSec = Timestamp.valueOf(value.substring(0, 19).replace("T", " ")).getTime() / 1000;
int fractionalSec = Integer.parseInt(value.substring(20, value.length() - 5));
long nanoAdjustment;
if (value.length() > 31) {
// ns timestamp: yyyy-MM-ddTHH:mm:ss.SSSSSSSSS+0x00
nanoAdjustment = fractionalSec;
} else if (value.length() > 28) {
// us timestamp: yyyy-MM-ddTHH:mm:ss.SSSSSS+0x00
nanoAdjustment = fractionalSec * 1000L;
} else {
// ms timestamp: yyyy-MM-ddTHH:mm:ss.SSS+0x00
nanoAdjustment = fractionalSec * 1000_000L;
}
ZoneOffset zoneOffset = ZoneOffset.of(value.substring(value.length() - 5));
Instant instant = Instant.ofEpochSecond(epochSec, nanoAdjustment).atOffset(zoneOffset).toInstant();
return Timestamp.from(instant);
}
case STRING:
default: {
String value = row.getString(colIndex);
TimestampPrecision precision = Utils.guessTimestampPrecision(value);
if (precision == TimestampPrecision.MS) {
// ms timestamp: yyyy-MM-dd HH:mm:ss.SSS
return row.getTimestamp(colIndex);
}
if (precision == TimestampPrecision.US) {
// us timestamp: yyyy-MM-dd HH:mm:ss.SSSSSS
long epochSec = Timestamp.valueOf(value.substring(0, 19)).getTime() / 1000;
long nanoAdjustment = Integer.parseInt(value.substring(20)) * 1000L;
return Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment));
}
if (precision == TimestampPrecision.NS) {
// ns timestamp: yyyy-MM-dd HH:mm:ss.SSSSSSSSS
long epochSec = Timestamp.valueOf(value.substring(0, 19)).getTime() / 1000;
long nanoAdjustment = Integer.parseInt(value.substring(20));
return Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment));
}
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_TIMESTAMP_PERCISION);
}
}
}
public static class Field {
String name;
int type;
int length;
@ -211,6 +236,7 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
this.note = note;
this.taos_type = taos_type;
}
}
@Override
@ -334,6 +360,7 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
wasNull = true;
return 0;
}
wasNull = false;
if (value instanceof Timestamp) {
return ((Timestamp) value).getTime();

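parseTimestampColumnData above dispatches first on the configured TimestampFormat and, inside the UTC branch, on the raw string length to tell ms, us, and ns apart (assuming a 5-character zone suffix). A worked example of the length thresholds:

```java
public class UtcLengthDispatch {
    public static void main(String[] args) {
        // with a 5-char offset suffix such as "+0800":
        //   "2021-06-18T16:55:33.123+0800"       -> 28 chars -> ms
        //   "2021-06-18T16:55:33.123456+0800"    -> 31 chars -> us
        //   "2021-06-18T16:55:33.123456789+0800" -> 34 chars -> ns
        String value = "2021-06-18T16:55:33.123456+0800";
        int len = value.length();                                // 31
        String unit = len > 31 ? "ns" : (len > 28 ? "us" : "ms");
        System.out.println(unit);                                // prints "us"
    }
}
```
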
View File

@ -8,20 +8,22 @@ import java.sql.SQLException;
import java.sql.Timestamp;
import java.sql.Types;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
public class RestfulResultSetMetaData extends WrapperImpl implements ResultSetMetaData {
private final String database;
private ArrayList<RestfulResultSet.Field> fields;
private List<RestfulResultSet.Field> fields;
private final RestfulResultSet resultSet;
public RestfulResultSetMetaData(String database, ArrayList<RestfulResultSet.Field> fields, RestfulResultSet resultSet) {
this.database = database;
this.fields = fields;
this.fields = fields == null ? Collections.emptyList() : fields;
this.resultSet = resultSet;
}
public ArrayList<RestfulResultSet.Field> getFields() {
public List<RestfulResultSet.Field> getFields() {
return fields;
}

View File

@ -83,7 +83,7 @@ public class RestfulStatement extends AbstractStatement {
}
if (SqlSyntaxValidator.isUseSql(sql)) {
HttpClientPoolUtil.execute(url, sql);
HttpClientPoolUtil.execute(url, sql, this.conn.getToken());
this.database = sql.trim().replace("use", "").trim();
this.conn.setCatalog(this.database);
result = false;
@ -116,7 +116,7 @@ public class RestfulStatement extends AbstractStatement {
if ("UTC".equalsIgnoreCase(timestampFormat))
url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlutc";
String result = HttpClientPoolUtil.execute(url, sql);
String result = HttpClientPoolUtil.execute(url, sql, this.conn.getToken());
JSONObject resultJson = JSON.parseObject(result);
if (resultJson.getString("status").equals("error")) {
throw TSDBError.createSQLException(resultJson.getInteger("code"), resultJson.getString("desc"));
@ -130,7 +130,7 @@ public class RestfulStatement extends AbstractStatement {
if (!SqlSyntaxValidator.isValidForExecuteUpdate(sql))
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_FOR_EXECUTE_UPDATE, "not a valid sql for executeUpdate: " + sql);
String result = HttpClientPoolUtil.execute(url, sql);
String result = HttpClientPoolUtil.execute(url, sql, this.conn.getToken());
JSONObject jsonObject = JSON.parseObject(result);
if (jsonObject.getString("status").equals("error")) {
throw TSDBError.createSQLException(jsonObject.getInteger("code"), jsonObject.getString("desc"));

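RestfulStatement now passes the connection's token on every HttpClientPoolUtil.execute call and picks the REST endpoint from the timestampFormat client property; the hunk shows only the UTC case mapping to /rest/sqlutc. A hedged sketch of the dispatch, where routing TIMESTAMP to /rest/sqlt is an assumption based on the TDengine REST API rather than something visible in this hunk:

```java
public class EndpointDispatch {
    static String selectUrl(String host, int port, String timestampFormat) {
        String endpoint = "/rest/sql";                       // STRING (default)
        if ("UTC".equalsIgnoreCase(timestampFormat)) {
            endpoint = "/rest/sqlutc";                       // shown in the hunk
        } else if ("TIMESTAMP".equalsIgnoreCase(timestampFormat)) {
            endpoint = "/rest/sqlt";                         // assumed
        }
        return "http://" + host + ":" + port + endpoint;
    }
}
```
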
View File

@ -0,0 +1,7 @@
package com.taosdata.jdbc.rs.enums;
public enum TimestampFormat {
STRING,
TIMESTAMP,
UTC
}

View File

@ -16,43 +16,32 @@ import org.apache.http.protocol.HTTP;
import org.apache.http.protocol.HttpContext;
import org.apache.http.util.EntityUtils;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
public class HttpClientPoolUtil {
public static PoolingHttpClientConnectionManager cm = null;
public static CloseableHttpClient httpClient = null;
public static String token = "cm9vdDp0YW9zZGF0YQ==";
/**
* Default content type
*/
private static final String DEFAULT_CONTENT_TYPE = "application/json";
/**
* Default request timeout: 30s
*/
private static final int DEFAULT_TIME_OUT = 15000;
private static final int count = 32;
private static final int totalCount = 1000;
private static final int Http_Default_Keep_Time = 15000;
/**
* Initialize the connection pool
*/
private static final String DEFAULT_CONTENT_TYPE = "application/json";
private static final int DEFAULT_TIME_OUT = 15000;
private static final int DEFAULT_MAX_PER_ROUTE = 32;
private static final int DEFAULT_MAX_TOTAL = 1000;
private static final int DEFAULT_HTTP_KEEP_TIME = 15000;
private static PoolingHttpClientConnectionManager connectionManager;
private static CloseableHttpClient httpClient;
private static synchronized void initPools() {
if (httpClient == null) {
cm = new PoolingHttpClientConnectionManager();
cm.setDefaultMaxPerRoute(count);
cm.setMaxTotal(totalCount);
httpClient = HttpClients.custom().setKeepAliveStrategy(defaultStrategy).setConnectionManager(cm).build();
connectionManager = new PoolingHttpClientConnectionManager();
connectionManager.setDefaultMaxPerRoute(DEFAULT_MAX_PER_ROUTE);
connectionManager.setMaxTotal(DEFAULT_MAX_TOTAL);
httpClient = HttpClients.custom().setKeepAliveStrategy(DEFAULT_KEEP_ALIVE_STRATEGY).setConnectionManager(connectionManager).build();
}
}
/**
* HTTP connection keepAlive settings
*/
private static ConnectionKeepAliveStrategy defaultStrategy = (response, context) -> {
private static ConnectionKeepAliveStrategy DEFAULT_KEEP_ALIVE_STRATEGY = (response, context) -> {
HeaderElementIterator it = new BasicHeaderElementIterator(response.headerIterator(HTTP.CONN_KEEP_ALIVE));
int keepTime = Http_Default_Keep_Time * 1000;
int keepTime = DEFAULT_HTTP_KEEP_TIME * 1000;
while (it.hasNext()) {
HeaderElement headerElement = it.nextElement();
String param = headerElement.getName();
@ -76,7 +65,7 @@ public class HttpClientPoolUtil {
* @param data request payload
* @return responseBody
*/
public static String execute(String uri, String data) {
public static String execute(String uri, String data, String token) {
long startTime = System.currentTimeMillis();
HttpEntity httpEntity = null;
HttpEntityEnclosingRequestBase method = null;
@ -90,7 +79,7 @@ public class HttpClientPoolUtil {
method.setHeader("Connection", "keep-alive");
method.setHeader("Authorization", "Taosd " + token);
method.setEntity(new StringEntity(data, Charset.forName("UTF-8")));
method.setEntity(new StringEntity(data, StandardCharsets.UTF_8));
HttpContext context = HttpClientContext.create();
CloseableHttpResponse httpResponse = httpClient.execute(method, context);
httpEntity = httpResponse.getEntity();
@ -175,28 +164,18 @@ public class HttpClientPoolUtil {
httpEntity = httpResponse.getEntity();
if (httpEntity != null) {
responseBody = EntityUtils.toString(httpEntity, "UTF-8");
// logger.info("请求URL: " + uri + "+ 返回状态码:" + httpResponse.getStatusLine().getStatusCode());
}
} catch (Exception e) {
if (method != null) {
method.abort();
}
e.printStackTrace();
// logger.error("execute get request exception, url:" + uri + ", exception:" + e.toString() + ",cost time(ms):"
// + (System.currentTimeMillis() - startTime));
System.out.println("log:调用 HttpClientPoolUtil execute get request exception, url:" + uri + ", exception:" + e.toString() + ",cost time(ms):"
+ (System.currentTimeMillis() - startTime));
} finally {
if (httpEntity != null) {
try {
EntityUtils.consumeQuietly(httpEntity);
} catch (Exception e) {
// e.printStackTrace();
// logger.error("close response exception, url:" + uri + ", exception:" + e.toString()
// + ",cost time(ms):" + (System.currentTimeMillis() - startTime));
new Exception("close response exception, url:" + uri + ", exception:" + e.toString()
+ ",cost time(ms):" + (System.currentTimeMillis() - startTime))
.printStackTrace();
new Exception("close response exception, url:" + uri + ", exception:" + e.toString() + ",cost time(ms):" + (System.currentTimeMillis() - startTime)).printStackTrace();
}
}
}

Some files were not shown because too many files have changed in this diff.