Merge branch '3.0' into feature/3_liaohj

This commit is contained in:
Haojun Liao 2022-07-15 13:36:14 +08:00
commit 595c7dd4f3
876 changed files with 2238 additions and 121581 deletions

View File

@@ -1,54 +0,0 @@
IF (CMAKE_VERSION VERSION_LESS 3.0)
PROJECT(TDengine CXX)
SET(PROJECT_VERSION_MAJOR "${LIB_MAJOR_VERSION}")
SET(PROJECT_VERSION_MINOR "${LIB_MINOR_VERSION}")
SET(PROJECT_VERSION_PATCH "${LIB_PATCH_VERSION}")
SET(PROJECT_VERSION "${LIB_VERSION_STRING}")
ELSE ()
CMAKE_POLICY(SET CMP0048 NEW)
PROJECT(TDengine VERSION "${LIB_VERSION_STRING}" LANGUAGES CXX)
ENDIF ()
IF (${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
ELSE ()
CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
ENDIF ()
SET(TD_ACCOUNT FALSE)
SET(TD_ADMIN FALSE)
SET(TD_GRANT FALSE)
SET(TD_MQTT FALSE)
SET(TD_TSDB_PLUGINS FALSE)
SET(TD_STORAGE FALSE)
SET(TD_TOPIC FALSE)
SET(TD_MODULE FALSE)
SET(TD_COVER FALSE)
SET(TD_MEM_CHECK FALSE)
SET(TD_PAGMODE_LITE FALSE)
SET(TD_SOMODE_STATIC FALSE)
SET(TD_POWER FALSE)
SET(TD_GODLL FALSE)
SET(TD_COMMUNITY_DIR ${PROJECT_SOURCE_DIR})
MESSAGE(STATUS "Community directory: " ${TD_COMMUNITY_DIR})
INCLUDE(cmake/input.inc)
INCLUDE(cmake/platform.inc)
IF (TD_WINDOWS OR TD_DARWIN)
SET(TD_SOMODE_STATIC TRUE)
ENDIF ()
INCLUDE(cmake/define.inc)
INCLUDE(cmake/env.inc)
INCLUDE(cmake/version.inc)
INCLUDE(cmake/install.inc)
ADD_SUBDIRECTORY(deps)
ADD_SUBDIRECTORY(src)
ADD_SUBDIRECTORY(tests)
INCLUDE(CPack)

View File

@@ -1,137 +0,0 @@
<!DOCTYPE html><html lang='en'><head><title>Documentation | TAOS Data</title><meta name='description' content='TDengine is an open-source big data platform designed and optimized for IoT, connected vehicles, industrial IoT, and IT operations monitoring. Beyond its core time-series database, which is more than 10x faster, it also provides caching, data subscription, and stream computing to minimize development and operations effort.'><link rel='canonical' href='https://www.taosdata.com/cn/documentation/administrator-ch/index.php'/><script src='../lib/jquery-3.4.1.min.js' type='application/javascript'></script><link href='../lib/bootstrap.min.css' rel='stylesheet'><link href='../styles/base.min.css' rel='stylesheet'><link rel='stylesheet' href='../lib/docs/taosdataprettify.css'><link rel='stylesheet' href='../lib/docs/docs.css'><script src='../lib/docs/prettify.js'></script><script src='../lib/docs/prettyprint-sql.js'></script></head><body><script>$('#documentation-href').addClass('active')</script><div class='container-fluid'><main class='content-wrapper'><section class='documentation'><a href='../index.html'>Back</a><h1>System Administration</h1>
<a class='anchor' id='文件目录结构'></a><h2>Directory and File Structure</h2>
<p>After TDengine is installed, the following directories and files are created in the operating system by default:</p>
<figure><table>
<thead>
<tr>
<th>Directory/File</th>
<th style="text-align:left;">Description</th>
</tr>
</thead>
<tbody>
<tr>
<td>/etc/taos/taos.cfg</td>
<td style="text-align:left;">Default TDengine configuration file</td>
</tr>
<tr>
<td>/usr/local/taos/driver</td>
<td style="text-align:left;">TDengine dynamic-link library directory</td>
</tr>
<tr>
<td>/var/lib/taos</td>
<td style="text-align:left;">Default TDengine data directory; the location can be changed in the configuration file</td>
</tr>
<tr>
<td>/var/log/taos</td>
<td style="text-align:left;">Default TDengine log directory; the location can be changed in the configuration file</td>
</tr>
<tr>
<td>/usr/local/taos/bin</td>
<td style="text-align:left;">TDengine executable directory</td>
</tr>
</tbody>
</table></figure>
<a class='anchor' id='可执行文件'></a><h3>Executables</h3>
<p>All TDengine executables are installed under <em>/usr/local/taos/bin</em> by default, including:</p>
<ul>
<li><em>taosd</em>: the TDengine server executable</li>
<li><em>taos</em>: the TDengine shell executable</li>
<li><em>taosdump</em>: the data export tool</li>
<li><em>rmtaos</em>: a script that uninstalls TDengine; run it with caution</li>
</ul>
<p>You can configure different data and log directories by editing the system configuration file taos.cfg.</p>
<a class='anchor' id='服务端配置'></a><h2>Server Configuration</h2>
<p>The TDengine background service is provided by taosd. Its configuration parameters can be modified in the file taos.cfg to suit different deployment scenarios. The default location of the configuration file is the /etc/taos directory; a different directory can be specified with the -c command-line option of taosd. For example, taosd -c /home/user tells taosd to look for the configuration file in /home/user.</p>
<p>Only a few important parameters are listed below (an illustrative taos.cfg fragment follows the list); see the comments in the configuration file for the full list, and the preceding chapters for a detailed description of each parameter. <strong>Note: changed settings take effect only after the <em>taosd</em> service is restarted.</strong></p>
<ul>
<li>internalIp: the IP address used to provide service; defaults to the first IP address of the host</li>
<li>mgmtShellPort: the TCP/UDP port used for communication between management nodes and clients; default 6030. The 5 consecutive ports starting at this number are reserved for UDP communication (i.e. UDP uses [6030-6034]), and TCP also uses port [6030].</li>
<li>vnodeShellPort: the TCP/UDP port used for communication between data nodes and clients; default 6035. The 5 consecutive ports starting at this number are reserved for UDP communication (i.e. UDP uses [6035-6039]), and TCP also uses port [6035].</li>
<li>httpPort: the TCP port on which data nodes expose the RESTful service; default [6020]</li>
<li>dataDir: the data directory; defaults to /var/lib/taos</li>
<li>maxUsers: the maximum number of users</li>
<li>maxDbs: the maximum number of databases</li>
<li>maxTables: the maximum number of tables</li>
<li>enableMonitor: system monitoring switch; 0: off, 1: on</li>
<li>logDir: the log directory; defaults to /var/log/taos</li>
<li>numOfLogLines: the maximum number of lines per log file</li>
<li>debugFlag: the debug-log level (131: errors and warnings only; 135: debug information; 143: very detailed debug information)</li>
</ul>
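<p>Purely as an illustration, a taos.cfg fragment overriding a few of the parameters above might look as follows (the values are arbitrary examples, not recommended settings):</p>
<pre><code># data and log locations
dataDir        /data/taos
logDir         /data/taos/log
# service ports
mgmtShellPort  6030
vnodeShellPort 6035
httpPort       6020
# turn system monitoring on
enableMonitor  1</code></pre>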
<p>Data from different applications often has different characteristics: retention period, number of replicas, collection interval, record size, number of collection points, compression, and so on can all differ. To achieve the best storage efficiency, TDengine provides the following storage-related configuration parameters:</p>
<ul>
<li>days: the time span covered by a single data file, in days</li>
<li>keep: the number of days data is retained in the database</li>
<li>rows: the number of records per file block</li>
<li>comp: the compression flag; 0: off, 1: one-stage compression, 2: two-stage compression</li>
<li>ctime: the maximum interval, in seconds, between data being written to memory and being committed to disk</li>
<li>clog: the commit-log (WAL) flag; 0: off, 1: on</li>
<li>tables: the maximum number of tables allowed per vnode</li>
<li>cache: the size of a memory block, in bytes</li>
<li>tblocks: the maximum number of memory blocks per table</li>
<li>ablocks: the average number of memory blocks per table</li>
<li>precision: the timestamp precision flag; ms for milliseconds, us for microseconds</li>
</ul>
<p>A single application scenario may contain data with several different characteristics. The best design is to put tables with the same characteristics in the same database, so an application may use several databases, each configured with its own storage parameters, giving the system optimal performance. TDengine allows an application to specify the above storage parameters when a database is created; any parameter given there overrides the corresponding system-wide setting. For example, consider the following SQL:</p>
<pre><code> create database demo days 10 cache 16000 ablocks 4</code></pre>
<p>It creates a database named demo in which each data file keeps 10 days of data, each memory block is 16000 bytes, and each table uses 4 memory blocks on average, while all other parameters stay identical to the system configuration.</p>
<a class='anchor' id='客户端配置'></a><h2>Client Configuration</h2>
<p>The interactive client application of TDengine is taos, which shares the configuration file taos.cfg with taosd. When running taos, use the -c option to point at the configuration directory; for example, taos -c /home/cfg uses the parameters in /home/cfg/taos.cfg, and the default directory is /etc/taos. For more on using taos, see <a href="#_TDengine_Shell命令行程序">the shell command-line program</a>. This section covers the parameters in taos.cfg that the taos client uses.</p>
<p>Client configuration parameters:</p>
<ul>
<li>masterIP: the IP address of the server the client connects to by default</li>
<li>charset: the character set used by the client, default UTF-8. TDengine stores nchar data as unicode, so the client must tell the server which character set it uses, i.e. the character set of the client's operating system.</li>
<li>locale: the system locale; on Linux this setting is shared between client and server</li>
<li>defaultUser: the default login user, default value root</li>
<li>defaultPass: the default login password, default value taosdata</li>
</ul>
<p>The TCP/UDP port and logging parameters are identical to the server-side parameters.</p>
<p>When starting taos, you can also supply the IP address, port, user name and password on the command line; otherwise they are read from taos.cfg.</p>
<a class='anchor' id='用户管理'></a><h2>User Management</h2>
<p>A system administrator can add and delete users and change passwords from the CLI. The SQL syntax is:</p>
<pre><code>CREATE USER user_name PASS password</code></pre>
<p>Create a user, specifying the user name and password; the password must be enclosed in single quotes.</p>
<pre><code>DROP USER user_name</code></pre>
<p>Delete a user (root only).</p>
<pre><code>ALTER USER user_name PASS password </code></pre>
<p>Change a user's password; enclose the password in single quotes to prevent it from being converted to lower case.</p>
<pre><code>SHOW USERS</code></pre>
<p>List all users.</p>
<a class='anchor' id='数据导入'></a><h2>Data Import</h2>
<p>TDengine provides two convenient ways to import data: from a script file and from a data file.</p>
<p><strong>Import from a script file</strong></p>
<p>The TDengine shell supports the source filename command, which runs the SQL statements in a file as a batch. You can put statements that create databases and tables and write data into one file, one statement per line, and run source in the shell to execute them in order. Lines starting with # are treated as comments and ignored by the shell.</p>
<p><strong>Import from a data file</strong></p>
<p>TDengine can also import data from CSV files into existing tables in the shell. Each CSV file belongs to exactly one table, and the data format in the CSV file must match the structure of the target table. The syntax is:</p>
<pre><code class="mysql language-mysql">insert into tb1 file a.csv b.csv tb2 c.csv …
import into tb1 file a.csv b.csv tb2 c.csv …</code></pre>
<a class='anchor' id='数据导出'></a><h2>Data Export</h2>
<p>To make data export easy, TDengine offers two approaches: exporting a table, and exporting with taosdump.</p>
<p><strong>Export a table to a CSV file</strong></p>
<p>To export the data of one table or one STable, run the following in the shell:</p>
<pre><code>select * from &lt;tb_name&gt; &gt;&gt; a.csv</code></pre>
<p>The data of the table is then exported in CSV format to the file a.csv.</p>
<p><strong>Export with taosdump</strong></p>
<p>TDengine ships with the database export tool taosdump. It can export all databases, one database, or one table of a database; all data or a time range of data; or even just the table definitions. Usage:</p>
<ul>
<li>Export one or more tables of a database: taosdump [OPTION…] dbname tbname …</li>
<li>Export one or more databases: taosdump [OPTION…] --databases dbname…</li>
<li>Export all databases (excluding the monitoring database): taosdump [OPTION…] --all-databases</li>
</ul>
<p>Run taosdump --help for a more detailed description of its usage.</p>
<a class='anchor' id='系统连接、任务查询管理'></a><h2>Managing Connections, Queries and Streams</h2>
<p>A system administrator can inspect the system's connections, running queries and stream computations from the CLI, and can close connections and stop running queries and stream computations. The SQL syntax is:</p>
<pre><code>SHOW CONNECTIONS</code></pre>
<p>List database connections; one column shows ip:port, the IP address and port of the connection.</p>
<pre><code>KILL CONNECTION &lt;connection-id&gt;</code></pre>
<p>Force-close a database connection; connection-id is the ip:port string shown by SHOW CONNECTIONS, e.g. "192.168.0.1:42198", which can simply be copied and pasted.</p>
<pre><code>SHOW QUERIES</code></pre>
<p>List running queries; one column shows ip:port:id, the IP address and port of the application that issued the query plus the ID assigned by the system.</p>
<pre><code>KILL QUERY &lt;query-id&gt;</code></pre>
<p>Force-stop a query; query-id is the ip:port:id string shown by SHOW QUERIES, e.g. "192.168.0.1:42198:11", which can simply be copied and pasted.</p>
<pre><code>SHOW STREAMS</code></pre>
<p>List stream computations; one column shows ip:port:id, the IP address and port that started the stream plus the ID assigned by the system.</p>
<pre><code>KILL STREAM &lt;stream-id&gt;</code></pre>
<p>Force-stop a stream computation; stream-id is the ip:port:id string shown by SHOW STREAMS, e.g. "192.168.0.1:42198:18", which can simply be copied and pasted.</p>
<a class='anchor' id='系统监控'></a><h2>System Monitoring</h2>
<p>After TDengine starts, it automatically creates a monitoring database, SYS, and periodically writes the server's CPU, memory, disk space, bandwidth, request count, disk I/O speed, slow queries and other information into it. TDengine also records logs of important system operations (such as logins and creating or deleting databases) together with error and alarm messages in the SYS database. A system administrator can inspect this database directly from the CLI, or view the monitoring information through a graphical web interface.</p>
<p>Collection of this monitoring information is on by default; it can be switched off or on with the enableMonitor option in the configuration file.</p><a href='../index.html'>Back</a></section></main></div><?php include($s.'/footer.php'); ?><script>$('pre').addClass('prettyprint linenums');PR.prettyPrint()</script><script src='lib/docs/liner.js'></script></body></html>

View File

@@ -1,64 +0,0 @@
<!DOCTYPE html><html lang='en'><head><title>Documentation | TAOS Data</title><link rel='canonical' href='https://www.taosdata.com/cn/documentation/advanced-features-ch/index.php'/><script src='../lib/jquery-3.4.1.min.js' type='application/javascript'></script><link href='../lib/bootstrap.min.css' rel='stylesheet'><link href='../styles/base.min.css' rel='stylesheet'><link rel='stylesheet' href='../lib/docs/taosdataprettify.css'><link rel='stylesheet' href='../lib/docs/docs.css'><script src='../lib/docs/prettify.js'></script><script src='../lib/docs/prettyprint-sql.js'></script></head><body><script>$('#documentation-href').addClass('active')</script><div class='container-fluid'><main class='content-wrapper'><section class='documentation'><a href='../index.html'>Back</a><h1>Advanced Features</h1>
<a class='anchor' id='连续查询(Continuous-Query)'></a><h2>Continuous Query</h2>
<p>A continuous query is a query that TDengine executes automatically and periodically, computing over a sliding window: a simplified, time-driven form of stream computing. TDengine can run a continuous query over a table or a super table at regular intervals, and can either push the result to the user or write it back into TDengine. Each execution covers one time window, and the window slides forward over time. When defining a continuous query, both the time window (parameter interval) and the forward sliding time (parameter sliding) must be specified.</p>
<p>TDengine's continuous queries are time-driven, are defined directly in TAOS SQL, and require no extra plumbing. They make it easy to produce results per time window and thereby down-sample the raw collected data. Once a continuous query is defined in TAOS SQL, TDengine automatically launches the query at the end of each complete time period and pushes the computed result to the user or writes it back to TDengine.</p>
<p>TDengine's continuous queries differ from the time-window computations of typical stream-computing systems in the following ways:</p>
<ul>
<li><p>Unlike stream computing, which delivers results in real time, a continuous query computes only after the time window closes. For example, with a period of one day, the result of the current day is produced only after 23:59:59.</p></li>
<li><p>If historical records are written into a time interval that has already been computed, the continuous query does not recompute and does not push a new result to the user. In write-back mode it does not update results already written to TDengine.</p></li>
<li><p>In push mode, the server does not cache the client's computation state and provides no exactly-once semantics. If the application crashes, the restarted continuous query recomputes only from the most recent complete time window after the restart time. In write-back mode, however, TDengine guarantees the validity and continuity of the written-back data.</p></li>
</ul>
<h4>Using Continuous Queries</h4>
<p>Defining a continuous query in TAOS SQL requires calling the API taos_stream on the application side to launch it. For example, to count the records in table FOO_TABLE once per minute with a forward sliding time of 30 seconds, the SQL is:</p>
<pre><code class="sql language-sql">SELECT COUNT(*)
FROM FOO_TABLE
INTERVAL(1M) SLIDING(30S)</code></pre>
<p>Here the time window is one minute and the forward sliding time is 30 seconds. You can also omit sliding; the system then slides forward by one full time window before starting the next computation, i.e. the window length equals the sliding time:</p>
<pre><code class="sql language-sql">SELECT COUNT(*)
FROM FOO_TABLE
INTERVAL(1M)</code></pre>
<p>To write the results of a continuous query back into the database, use SQL of the following form:</p>
<pre><code class="sql language-sql">CREATE TABLE QUERY_RES
AS
SELECT COUNT(*)
FROM FOO_TABLE
INTERVAL(1M) SLIDING(30S)</code></pre>
<p>The system automatically creates the table QUERY_RES and writes the results of the continuous query into it. Note that the sliding time may not exceed the time window: if a sliding time larger than the window is specified, the system silently clamps it to the window length. In the SQL above, a sliding time greater than one minute would be forced to one minute.</p>
<p>TDengine also lets you specify an end time for a continuous query. If no end time is given, the query runs forever; if one is given, the query stops once the system time passes it. In the SQL below, the continuous query runs for one hour and then stops automatically.</p>
<pre><code class="sql language-sql">CREATE TABLE QUERY_RES
AS
SELECT COUNT(*)
FROM FOO_TABLE
WHERE TS &gt; NOW AND TS &lt;= NOW + 1H
INTERVAL(1M) SLIDING(30S) </code></pre>
<p>Also note that the minimum time window is 10 milliseconds; there is no upper limit on the window length.</p>
<h4>Managing Continuous Queries</h4>
<p>In the console, use the <em>show streams</em> command to list all running continuous queries, and the <em>kill stream</em> command to terminate one. In write-back mode, dropping the target table also stops and closes the continuous query automatically. Finer-grained and more convenient management commands will be provided in later releases.</p>
<a class='anchor' id='数据订阅(Publisher/Subscriber)'></a><h2>Data Subscription (Publisher/Subscriber)</h2>
<p>Because the data is naturally a time series, a TDengine insert is logically equivalent to a publish in a messaging system: both add a new timestamped record to the system. TDengine also stores data internally in strictly monotonically increasing time order. In essence, every table in TDengine can be viewed as a standard message queue.</p>
<p>TDengine has a lightweight message subscription and push service built in. Using the APIs provided by the system, a user can subscribe to a table or a super table in the database. The subscription logic and operational state are maintained entirely by the client, which periodically polls the server for new records and delivers them to the application as they arrive.</p>
<p>The state of a subscription is kept by the client, not by the TDengine server. Therefore, if the application restarts, it is up to the application to decide from which point in time to resume fetching the latest data.</p>
<h4>API Reference</h4>
<p>The main subscription APIs are:</p>
<ul>
<li><p><code>TAOS_SUB *taos_subscribe(char *host, char *user, char *pass, char *db, char *table, int64_t time, int mseconds)</code></p><p>Starts a subscription. Parameters:</p></li><ul>
<li><p>host: IP address of the host</p></li>
<li><p>user: database login user name</p></li>
<li><p>pass: password</p></li>
<li><p>db: database name</p></li>
<li><p>table: name of the (super) table</p></li>
<li><p>time: start time as a Unix epoch timestamp in milliseconds, i.e. milliseconds since January 1, 1970; pass 0 to subscribe from the current time</p></li>
<li><p>mseconds: polling interval for checking for database updates, in milliseconds; 1000 ms is typical. The return value is a pointer to a TAOS_SUB structure; NULL indicates failure.</p></li>
</ul><li><p><code>TAOS_ROW taos_consume(TAOS_SUB *tsub)</code>
</p><p>Fetches subscription results; the application usually calls it inside an infinite loop. When a new record arrives in the database, this API returns it; when there is none, it blocks. A NULL return value indicates a system error. Parameter:</p></li><ul><li><p>tsub: the pointer returned by taos_subscribe.</p></li></ul><li><p><code>void taos_unsubscribe(TAOS_SUB *tsub)</code></p><p>Cancels the subscription. Always call it when the application exits, to avoid leaking resources.</p></li>
<li><p><code>int taos_num_fields(TAOS_SUB *tsub)</code></p><p>Returns the number of columns in a returned row.</p></li>
<li><p><code>TAOS_FIELD *taos_fetch_fields(TAOS_SUB *tsub)</code></p><p>Returns the attributes of each column (data type, name, length); used together with taos_num_fields to parse each returned row. A sketch of the typical consume loop follows below.</p></li></ul>
<p>Example code: see the sample programs in the installation package.</p>
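<p>A minimal consume-loop sketch using only the calls listed above (the host, credentials, database and table names are placeholders, not taken from this page):</p>
<pre><code class="C language-C">#include &lt;stdio.h&gt;
#include &lt;taos.h&gt;

int main() {
  // subscribe from the current time (time = 0), polling once per second
  TAOS_SUB *tsub = taos_subscribe("127.0.0.1", "root", "taosdata",
                                  "db", "tb", 0, 1000);
  if (tsub == NULL) return 1;            // NULL means the subscription failed
  int cols = taos_num_fields(tsub);      // number of columns per returned row
  for (int i = 0; i &lt; 100; i++) {        // a real application would loop forever
    TAOS_ROW row = taos_consume(tsub);   // blocks until a new record arrives
    if (row == NULL) break;              // NULL signals a system error
    printf("received a row with %d columns\n", cols);
  }
  taos_unsubscribe(tsub);                // always release the subscription
  return 0;
}</code></pre>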
<a class='anchor' id='缓存-(Cache)'></a><h2>Cache</h2>
<p>TDengine adopts a time-driven, first-in-first-out (FIFO) cache management strategy, also called write-driven cache management. It differs from the read-driven model (least-recently-used, LRU): the most recently written data is kept directly in the system cache, and when the cache reaches a threshold, the oldest data is written to disk in batches. Generally speaking, users of IoT data care most about the data just produced, i.e. the current state. TDengine exploits this by keeping the most recently arrived, current-state data in the cache.</p>
<p>TDengine serves data through its query functions with millisecond latency. Keeping the most recently arrived data in the cache allows queries over the latest record or batch of records to be answered very quickly, improving the overall query responsiveness of the database. In this sense, with suitable configuration, TDengine can act as a data cache in its own right, removing the need to deploy a separate caching system and thereby simplifying the architecture and reducing operational cost. Note that when TDengine restarts, its cache is emptied: all previously cached data is flushed to disk in batches, and unlike a dedicated key-value cache, TDengine does not reload previously cached data back into the cache.</p>
<p>TDengine allocates a fixed-size region of memory as its cache, sized according to application needs and hardware resources. With an appropriate cache configuration, TDengine delivers very high write and query performance. Each virtual node (vnode) in TDengine is given its own cache pool when created. Each vnode manages its own pool; pools are not shared between vnodes, but all tables belonging to the same vnode share that vnode's pool.</p>
<p>A cache pool consists of many cache blocks; total cache capacity is determined by the block size and the number of blocks. The parameter cacheBlockSize sets the size of each block, and cacheNumOfBlocks sets the number of blocks available to each vnode, so the total cache consumed by one vnode is cacheBlockSize x cacheNumOfBlocks (for example, 100 blocks of 16384 bytes each give 100 x 16384 = 1638400 bytes, about 1.6 MB, per vnode). The parameter numOfBlocksPerMeter sets how many blocks each table may use; TDengine requires at least 2 cache blocks per table, so cacheNumOfBlocks must be no less than twice the number of tables in the vnode, i.e. cacheNumOfBlocks ≥ sessionsPerVnode x 2. In most cases cacheBlockSize can be left at its default; a cache block should hold at least a few dozen records for TDengine to write data efficiently.</p>
<p>The function last lets you quickly fetch the latest record of a table or a super table, which is convenient for showing the live state or latest reading of every device on a dashboard. For example:</p>
<pre><code class="mysql language-mysql">select last(degree) from thermometer where location='beijing';</code></pre>
<p>This SQL statement retrieves the last recorded temperature of all sensors located in Beijing.</p><a href='../index.html'>Back</a></section></main></div><?php include($s.'/footer.php'); ?><script>$('pre').addClass('prettyprint linenums');PR.prettyPrint()</script><script src='lib/docs/liner.js'></script></body></html>

Binary files not shown (14 deleted images; sizes: 43, 120, 75, 26, 44, 67, 60, 49, 21, 22, 66, 25, 6.8 and 24 KiB).

View File

@@ -1,93 +0,0 @@
<!DOCTYPE html><html lang='en'><head><title>Documentation | TAOS Data</title><link rel='canonical' href='https://www.taosdata.com/cn/documentation/connections-with-other-tools-ch/index.php'/><script src='../lib/jquery-3.4.1.min.js' type='application/javascript'></script><link href='../lib/bootstrap.min.css' rel='stylesheet'><link href='../styles/base.min.css' rel='stylesheet'><link rel='stylesheet' href='../lib/docs/taosdataprettify.css'><link rel='stylesheet' href='../lib/docs/docs.css'><script src='../lib/docs/prettify.js'></script><script src='../lib/docs/prettyprint-sql.js'></script></head><body><script>$('#documentation-href').addClass('active')</script><div class='container-fluid'><main class='content-wrapper'><section class='documentation'><a href='../index.html'>Back</a><h1>Connecting with Other Tools</h1>
<a class='anchor' id='Telegraf'></a><h2>Telegraf</h2>
<p>TDengine integrates quickly with the open-source data collection system <a href="https://www.influxdata.com/time-series-platform/telegraf/">Telegraf</a>, with no code required.</p>
<a class='anchor' id='安装Telegraf'></a><h3>Installing Telegraf</h3>
<p>TDengine currently supports Telegraf 1.7.4 and later. Download the package for your operating system from the Telegraf site and install it: https://portal.influxdata.com/downloads</p>
<a class='anchor' id='配置Telegraf'></a><h3>Configuring Telegraf</h3>
<p>Edit the TDengine-related settings in the Telegraf configuration file /etc/telegraf/telegraf.conf (a sketch follows after the lists below).</p>
<p>In the output plugins section, add an [[outputs.http]] entry:</p>
<ul>
<li>url: http://ip:6020/telegraf/udb, where ip is the IP address of any server in the TDengine cluster, 6020 is the port of the TDengine RESTful interface, telegraf is a fixed keyword, and udb is the name of the database used to store the collected data (it can be created beforehand).</li>
<li>method: "POST" </li>
<li>username: user name for logging in to TDengine</li>
<li>password: password for logging in to TDengine</li>
<li>data_format: "json"</li>
<li>json_timestamp_units: "1ms"</li>
</ul>
<p>In the agent section:</p>
<ul>
<li>hostname: a machine name that distinguishes the collecting devices; it must be unique</li>
<li>metric_batch_size: 30, the maximum number of records Telegraf writes per batch. Increasing it lowers Telegraf's request frequency, but the value must not exceed 50 for TDengine.</li>
</ul>
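<p>Put together, and purely as an illustration (the IP address, database name and credentials are placeholders), the two telegraf.conf fragments described above might look like this:</p>
<pre><code>[agent]
  hostname = "sensor-host-01"   # must be unique per collecting device
  metric_batch_size = 30        # must not exceed 50 for TDengine

[[outputs.http]]
  url = "http://192.168.0.1:6020/telegraf/udb"
  method = "POST"
  username = "root"
  password = "taosdata"
  data_format = "json"
  json_timestamp_units = "1ms"</code></pre>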
<p>For how to collect data with Telegraf and more information on using it, see the official Telegraf <a href="https://docs.influxdata.com/telegraf/v1.11/">documentation</a>.</p>
<a class='anchor' id='Grafana'></a><h2>Grafana</h2>
<p>TDengine integrates quickly with the open-source visualization system <a href="https://www.grafana.com/">Grafana</a> to build a data monitoring and alerting system, with no code required. The contents of TDengine tables can be visualized on dashboards.</p>
<a class='anchor' id='安装Grafana'></a><h3>Installing Grafana</h3>
<p>TDengine currently supports Grafana 5.2.4 and later. Download the package for your operating system from the Grafana site and install it: https://grafana.com/grafana/download</p>
<a class='anchor' id='配置Grafana'></a><h3>Configuring Grafana</h3>
<p>The Grafana plugin for TDengine is in the /usr/local/taos/connector/grafana directory of the installation package.</p>
<p>Taking CentOS 7.2 as an example, copy the tdengine directory to /var/lib/grafana/plugins and restart grafana.</p>
<a class='anchor' id='使用Grafana'></a><h3>Using Grafana</h3>
<p>Log in to the Grafana server directly at localhost:3000 (user name/password: admin/admin) and configure a TDengine data source as shown below; TDengine then appears in the data-source drop-down list.</p>
<p><img src="../assets/clip_image001.png" alt="img" /></p>
<p>In the HTTP settings of the TDengine data source, set Host to the IP address of any server in the TDengine cluster plus the port of the TDengine RESTful interface (6020). If TDengine and Grafana are deployed on the same machine, enter http://localhost:6020.</p>
<p>Also configure the TDengine user name and password, then click the Save&amp;Test button shown below to save.</p>
<p><img src="../assets/clip_image001-2474914.png" alt="img" /></p>
<p>The newly created TDengine data source then appears in Grafana's data-source list:</p>
<p><img src="../assets/clip_image001-2474939.png" alt="img" /></p>
<p>Based on the steps above, the TDengine data source can be used when creating a dashboard, as shown below:</p>
<p><img src="../assets/clip_image001-2474961.png" alt="img" /></p>
<p>Click the Add Query button to add a new query.</p>
<p>In the INPUT SQL box, enter the query statement. Its result set should be curve data of two columns and many rows, e.g. SELECT count(*) FROM sys.cpu WHERE ts&gt;=from and ts&lt;to interval(interval), where from, to and interval are built-in variables of the TDengine plugin representing the query range and time interval obtained from the Grafana panel.</p>
<p>ALIAS BY is the alias of the query; clicking the GENERATE SQL button shows the SQL statement that is sent to TDengine, as shown below:</p>
<p><img src="../assets/clip_image001-2474987.png" alt="img" /></p>
<p>For how to build monitoring dashboards and more information on Grafana, see the official Grafana <a href="https://grafana.com/docs/">documentation</a>.</p>
<a class='anchor' id='Matlab'></a><h2>Matlab</h2>
<p>MatLab can connect to TDengine directly through the JDBC driver included in the installation package and fetch data into the local workspace.</p>
<a class='anchor' id='MatLab的JDBC接口适配'></a><h3>Adapting MatLab to the JDBC Interface</h3>
<p>Adapting MatLab takes a few steps; the following uses MatLab 2017a on Windows 10 as an example:</p>
<ul>
<li>Copy the driver JDBCDriver-1.0.0-dist.jar from the TDengine package to ${matlab_root}\MATLAB\R2017a\java\jar\toolbox</li>
<li>Copy the file taos.lib from the TDengine package to ${matlab_root}\MATLAB\R2017a\lib\win64</li>
<li>Add the new driver jar to MatLab's classpath by adding the following line to the file ${matlab_root}\MATLAB\R2017a\toolbox\local\classpath.txt:</li>
</ul>
<p> <code>$matlabroot/java/jar/toolbox/JDBCDriver-1.0.0-dist.jar</code></p>
<ul>
<li>Create a file javalibrarypath.txt under ${user_home}\AppData\Roaming\MathWorks\MATLAB\R2017a\ and add the path of taos.dll to it. For example, if taos.dll was copied to C:\Windows\System32 during installation, add the following line to javalibrarypath.txt:</li>
</ul>
<p> <code>C:\Windows\System32</code></p>
<a class='anchor' id='在MatLab中连接TDengine获取数据'></a><h3>Connecting to TDengine from MatLab</h3>
<p>After completing the configuration above, open MatLab.</p>
<ul>
<li><p>Create a connection:</p>
<p><code>conn = database('db', 'root', 'taosdata', 'com.taosdata.jdbc.TSDBDriver', 'jdbc:TSDB://127.0.0.1:0/')</code></p></li>
<li><p>Run a query:</p>
<p><code>sql0 = ['select * from tb'];</code></p>
<p><code>data = select(conn, sql0);</code></p></li>
<li><p>Insert a record:</p>
<p><code>sql1 = ['insert into tb values (now, 1)'];</code></p>
<p><code>exec(conn, sql1)</code></p></li>
</ul>
<p>For more detailed examples, see the file examples\Matlab\TDengineDemo.m in the installation package.</p>
<a class='anchor' id='R'></a><h2>R</h2>
<p>R can connect to TDengine through its JDBC interface. First install the JDBC package for R: start the R environment and run:</p>
<pre><code class="R language-R">install.packages('RJDBC', repos='http://cran.us.r-project.org')</code></pre>
<p>After installation, load the <em>RJDBC</em> package with the <code>library('RJDBC')</code> command.</p>
<p>Then load the TDengine JDBC driver:</p>
<pre><code class="R language-R">drv&lt;-JDBC("com.taosdata.jdbc.TSDBDriver","JDBCDriver-1.0.0-dist.jar", identifier.quote="\"")</code></pre>
<p>If this succeeds, no error message is shown. Then try connecting to the database:</p>
<pre><code class="R language-R">conn&lt;-dbConnect(drv,"jdbc:TSDB://192.168.0.1:0/?user=root&amp;password=taosdata","root","taosdata")</code></pre>
<p>Replace the IP address above with the correct one. If no error message appears, the connection succeeded; otherwise adjust the connection command according to the error. TDengine supports the following functions of the <em>RJDBC</em> package:</p>
<ul>
<li>dbWriteTable(conn, "test", iris, overwrite=FALSE, append=TRUE): write the data frame iris into the table test. overwrite must be set to FALSE and append must be TRUE, and the data frame iris must match the structure of the table test.</li>
<li>dbGetQuery(conn, "select count(*) from test"): run a query</li>
<li>dbSendUpdate(conn, "use db"): run any non-query SQL statement, e.g. dbSendUpdate(conn, "use db"), or write data with dbSendUpdate(conn, "insert into t1 values(now, 99)").</li>
<li>dbReadTable(conn, "test"): read the data of table test</li>
<li>dbDisconnect(conn): close the connection</li>
<li>dbRemoveTable(conn, "test"): drop the table test</li>
</ul>
<p>The TDengine client does not yet support the following functions:</p>
<ul>
<li>dbExistsTable(conn, "test"): check whether the table test exists</li>
<li>dbListTables(conn): list all tables of the connection</li>
</ul><a href='../index.html'>Back</a></section></main></div><?php include($s.'/footer.php'); ?><script>$('pre').addClass('prettyprint linenums');PR.prettyPrint()</script><script src='lib/docs/liner.js'></script></body></html>

View File

@@ -1,262 +0,0 @@
<!DOCTYPE html><html lang='en'><head><title>Documentation | TAOS Data</title><link rel='canonical' href='https://www.taosdata.com/cn/documentation/connector-ch/index.php'/><script src='../lib/jquery-3.4.1.min.js' type='application/javascript'></script><link href='../lib/bootstrap.min.css' rel='stylesheet'><link href='../styles/base.min.css' rel='stylesheet'><link rel='stylesheet' href='../lib/docs/taosdataprettify.css'><link rel='stylesheet' href='../lib/docs/docs.css'><script src='../lib/docs/prettify.js'></script><script src='../lib/docs/prettyprint-sql.js'></script></head><body><script>$('#documentation-href').addClass('active')</script><div class='container-fluid'><main class='content-wrapper'><section class='documentation'><a href='../index.html'>Back</a><h1>Connectors</h1>
<p>TDengine offers a rich set of application development interfaces, including C/C++, Java, Python, RESTful and Go, so users can develop applications quickly.</p>
<a class='anchor' id='C/C++-Connector'></a><h2>C/C++ Connector</h2>
<p>The C/C++ API is similar to MySQL's C API. To use it, an application includes the TDengine header file <em>taos.h</em> (installed under <em>/usr/local/taos/include</em>):</p>
<pre><code class="C language-C">#include &lt;taos.h&gt;</code></pre>
<p>At build time, link against the TDengine shared library <em>libtaos.so</em> (installed under /usr/local/taos/driver); with gcc, add -ltaos. For every API, a return value of <em>-1</em> or <em>NULL</em> indicates failure.</p>
<a class='anchor' id='C/C++同步API'></a><h3>C/C++ Synchronous APIs</h3>
<p>Traditional database APIs are synchronous: after calling an API, the application blocks until the server returns the result. TDengine supports the following synchronous APIs (a usage sketch follows after the list):</p>
<ul>
<li><p><code>TAOS *taos_connect(char *ip, char *user, char *pass, char *db, int port)</code></p>
<p>Creates a database connection and initializes the connection context. The caller supplies the IP address of the TDengine management node, the user name, the password, the database name and the port. The database name may be omitted, in which case the connection still succeeds and the user can create databases through it; if a database name is given, that database must already exist and becomes the default. A NULL return value indicates failure. The application must keep the returned pointer for subsequent API calls.</p></li>
<li><p><code>void taos_close(TAOS *taos)</code></p>
<p>Closes a connection; taos is the pointer returned by taos_connect.</p></li>
<li><p><code>int taos_query(TAOS *taos, char *sqlstr)</code></p>
<p>Executes an SQL statement, which may be a DQL, DML or DDL statement. taos is the pointer obtained from taos_connect(). A return value of -1 indicates failure.</p></li>
<li><p><code>TAOS_RES *taos_use_result(TAOS *taos)</code></p>
<p>Retrieves the result set of the corresponding query.</p></li>
<li><p><code>TAOS_ROW taos_fetch_row(TAOS_RES *res)</code></p>
<p>Fetches the data of the result set row by row.</p></li>
<li><p><code>int taos_num_fields(TAOS_RES *res)</code></p>
<p>Returns the number of columns in the result set.</p></li>
<li><p><code>TAOS_FIELD *taos_fetch_fields(TAOS_RES *res)</code></p>
<p>Returns the attributes (data type, name, byte count) of each column of the result set; used together with taos_num_fields to parse the tuple (one row) returned by taos_fetch_row.</p></li>
<li><p><code>void taos_free_result(TAOS_RES *res)</code></p>
<p>Releases the result set and its resources. Always call this API when done with a query, or the application may leak memory.</p></li>
<li><p><code>void taos_init()</code></p>
<p>Initializes the environment. If the application does not call it explicitly, it is invoked automatically by taos_connect, so applications normally need not call it by hand.</p></li>
<li><p><code>char *taos_errstr(TAOS *taos)</code></p>
<p>Returns the reason the most recent API call failed, as a string.</p></li>
<li><p><code>char *taos_errno(TAOS *taos)</code></p>
<p>Returns the reason the most recent API call failed, as an error code.</p></li>
<li><p><code>int taos_options(TSDB_OPTION option, const void * arg, ...)</code></p>
<p>Sets client options; currently only the time zone (<em>TSDB_OPTION_TIMEZONE</em>) and the encoding (<em>TSDB_OPTION_LOCALE</em>) are supported. Both default to the current operating-system settings.</p></li>
</ul>
<p>The 12 APIs above are the most important ones in the C/C++ interface; for the remaining helper APIs, see the <em>taos.h</em> file.</p>
<p><strong>Note:</strong> within a single database connection, only one thread may call the APIs at any given time; otherwise the behavior is undefined and may crash the client. A client application can open multiple connections for multi-threaded writes or queries.</p>
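<p>A minimal end-to-end sketch using only the synchronous calls documented above (the address, credentials and table name are placeholders):</p>
<pre><code class="C language-C">#include &lt;stdio.h&gt;
#include &lt;taos.h&gt;

int main() {
  TAOS *taos = taos_connect("127.0.0.1", "root", "taosdata", "db", 0);
  if (taos == NULL) return 1;                  // NULL means the connection failed

  if (taos_query(taos, "select * from tb") &lt; 0) {  // -1 indicates failure
    printf("query failed: %s\n", taos_errstr(taos));
    taos_close(taos);
    return 1;
  }
  TAOS_RES *res = taos_use_result(taos);       // result set of the query above
  int ncols = taos_num_fields(res);
  TAOS_FIELD *fields = taos_fetch_fields(res); // per-column type/name/length

  TAOS_ROW row;
  int nrows = 0;
  while ((row = taos_fetch_row(res)) != NULL) nrows++;
  printf("fetched %d rows of %d columns (first column: %s)\n",
         nrows, ncols, fields[0].name);

  taos_free_result(res);                       // always release the result set
  taos_close(taos);
  return 0;
}</code></pre>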
<a class='anchor' id='C/C++异步API'></a><h3>C/C++ Asynchronous APIs</h3>
<p>Besides the synchronous APIs, TDengine offers higher-performance asynchronous APIs for inserting and querying data. On identical hardware and software, the asynchronous API inserts data 2 to 4 times faster than the synchronous one. Asynchronous calls are non-blocking: they return immediately, before the requested database operation actually completes, so the calling thread can go on to other work, improving the performance of the whole application. Asynchronous APIs are especially advantageous under heavy network latency.</p>
<p>Each asynchronous API requires the application to supply a callback. The first two callback parameters are the same for all of them; the third depends on the API. The first parameter, param, is provided by the application when it makes the asynchronous call and is handed back in the callback so the application can recover the context of the operation; its exact use is up to the implementation. The second parameter is the result set of the SQL operation: if it is empty (e.g. for an insert), no records are returned; if it is not empty (e.g. for a select), records are returned.</p>
<p>The asynchronous APIs demand a bit more of the developer and can be adopted selectively depending on the scenario. The three most important asynchronous APIs are listed below; a short callback sketch follows at the end of this subsection:</p>
<ul>
<li><p><code>void taos_query_a(TAOS *taos, char *sqlstr, void (*fp)(void *param, TAOS_RES *, int code), void *param);</code></p>
<p>Executes an SQL statement asynchronously. taos is the connection structure returned by taos_connect; sqlstr is the SQL statement to run; fp is the user-defined callback; param is an application-supplied parameter passed to the callback. The third callback parameter, code, indicates whether the operation succeeded: 0 for success, a negative number for failure (call taos_errstr for the reason). When defining the callback, the application mainly works with the second parameter, TAOS_RES *, the result set returned by the query.</p></li>
<li><p><code>void taos_fetch_rows_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, int numOfRows), void *param);</code></p>
<p>Fetches the result of an asynchronous query in batches; used only together with taos_query_a. Here <em>res</em> is the result-set structure pointer delivered to the taos_query_a callback, and fp is the callback. param is a user-defined structure passed through to the callback. numOfRows is the number of rows in the fetched batch, not the total number of tuples matching the query. Inside the callback, the application iterates forward through the batch with taos_fetch_row. When all records of the batch have been read, the application must call taos_fetch_rows_a again from the callback to fetch the next batch, until the returned row count numOfRows is zero (all results returned) or negative (query error).</p></li>
<li><p><code>void taos_fetch_row_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), void *param);</code></p>
<p>Fetches one record asynchronously. res is the result-set structure pointer delivered to the taos_query_a callback; fp is the callback; param is an application-supplied callback parameter. On callback, the third parameter, TAOS_ROW, points to one row of data. Unlike taos_fetch_rows_a, the application obtains one tuple without calling the synchronous API taos_fetch_row, which is simpler but has lower throughput than the batched API.</p></li>
</ul>
<p>All asynchronous TDengine APIs use a non-blocking calling model. An application can open many tables from multiple threads and query or insert into each of them concurrently. Note, however, that <strong>the client application must strictly serialize operations on the same table</strong>: while an insert or query on a table is still pending (has not returned), a second insert or query on that table must not be issued.</p>
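<p>A sketch of the taos_query_a / taos_fetch_rows_a callback chain described above (connection setup is omitted; the table name and SQL are placeholders):</p>
<pre><code class="C language-C">#include &lt;stdio.h&gt;
#include &lt;taos.h&gt;

// called once per fetched batch; re-arms itself until no rows remain
void fetch_cb(void *param, TAOS_RES *res, int numOfRows) {
  if (numOfRows &lt;= 0) return;        // 0: done; negative: query error
  for (int i = 0; i &lt; numOfRows; i++) {
    TAOS_ROW row = taos_fetch_row(res);   // iterate inside the batch
    (void)row;                            // process the row here
  }
  taos_fetch_rows_a(res, fetch_cb, param); // request the next batch
}

// called when the query itself completes
void query_cb(void *param, TAOS_RES *res, int code) {
  if (code &lt; 0) return;              // negative code: the query failed
  taos_fetch_rows_a(res, fetch_cb, param); // start fetching batches
}

void run_async_query(TAOS *taos) {
  taos_query_a(taos, "select * from tb", query_cb, NULL); // returns immediately
}</code></pre>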
<a class='anchor' id='C/C++-连续查询接口'></a><h3>C/C++ Continuous Query Interface</h3>
<p>TDengine provides a time-driven real-time stream-computing API: at a specified interval it can run various real-time aggregations over one or more tables (data streams). The interface is simple, with only an API to open a stream and one to close it (a sketch follows below):</p>
<ul>
<li><p><code>TAOS_STREAM *taos_open_stream(TAOS *taos, char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), int64_t stime, void *param)</code></p>
<p>Creates a data stream. taos is the structure pointer returned by taos_connect; sqlstr is the SQL statement (query statements only); fp is a user-defined callback invoked each time a stream computation completes, in which the user implements the business logic; param is an application-supplied parameter handed back on callback; stime is the start time of the stream computation: 0 means start now, and a non-zero value means start from the given time (UTC, in milliseconds since 1970/1/1). A non-NULL return value indicates success; NULL indicates failure. TDengine passes the query result (TAOS_ROW), the query state (TAOS_RES) and the user-defined parameter (param) to the callback, inside which the user can obtain the result-set column count with taos_num_fields and the per-column data types with taos_fetch_fields.</p></li>
<li><p><code>void taos_close_stream (TAOS_STREAM *tstr)</code></p>
<p>Closes a data stream; the argument is the value returned by taos_open_stream. Always close the stream when stopping the computation.</p></li>
</ul>
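<p>A sketch of opening and closing a stream with the two calls above (the per-minute count query reuses the continuous-query example from the Advanced Features chapter; all names are placeholders):</p>
<pre><code class="C language-C">#include &lt;stdio.h&gt;
#include &lt;taos.h&gt;

// invoked once per completed time window
void stream_cb(void *param, TAOS_RES *res, TAOS_ROW row) {
  int ncols = taos_num_fields(res);      // columns in the stream result
  printf("window result arrived, %d columns\n", ncols);
}

void run_stream(TAOS *taos) {
  // stime = 0: start the computation from the current time
  TAOS_STREAM *stream = taos_open_stream(
      taos, "select count(*) from FOO_TABLE interval(1m) sliding(30s)",
      stream_cb, 0, NULL);
  if (stream == NULL) return;            // NULL indicates failure
  // ... let the stream run; when done, always close it:
  taos_close_stream(stream);
}</code></pre>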
<a class='anchor' id='C/C++-数据订阅接口'></a><h3>C/C++ Data Subscription Interface</h3>
<p>The subscription API currently supports subscribing to one table and continuously fetching the latest data written to it by periodic polling.</p>
<ul>
<li><p><code>TAOS_SUB *taos_subscribe(char *host, char *user, char *pass, char *db, char *table, long time, int mseconds)</code></p>
<p>Starts a subscription. The parameters are the IP address of the TDengine management node, the user name, the password, the database and the table name; time is the subscription start time in milliseconds since January 1, 1970, as a long integer, with 0 meaning subscribe from the current time; mseconds is the polling interval for checking for database updates, in milliseconds, with 1000 ms recommended. The return value is a pointer to a TAOS_SUB structure; NULL indicates failure.</p></li>
<li><p><code>TAOS_ROW taos_consume(TAOS_SUB *tsub)</code></p>
<p>Fetches the latest message; an application usually calls it in an infinite loop. tsub is the value returned by taos_subscribe. When the database has a new record, the API returns it as one row; when there is none, it blocks. A NULL return value indicates a system error, and the caller should check whether the system is still running normally.</p></li>
<li><p><code>void taos_unsubscribe(TAOS_SUB *tsub)</code></p>
<p>Cancels a subscription; tsub is the value returned by taos_subscribe. Call this API when the application exits, or resources will leak.</p></li>
<li><p><code>int taos_num_fields(TAOS_SUB *tsub)</code></p>
<p>Returns the number of columns in a returned row.</p></li>
<li><p><code>TAOS_FIELD *taos_fetch_fields(TAOS_RES *res)</code></p>
<p>Returns the attributes (data type, name, byte count) of each column; used together with taos_num_fields to parse a returned row.</p></li>
</ul>
<a class='anchor' id='Java-Connector'></a><h2>Java Connector</h2>
<a class='anchor' id='JDBC接口'></a><h3>JDBC Interface</h3>
<p>Java developers building enterprise applications can use the JDBC driver provided by TDengine. It implements a subset of the standard JDBC specification, follows the JDBC 3.0 API, and works with the common Java development frameworks. The TDengine JDBC driver is not yet published to an online artifact repository such as Maven Central, so developers must install the driver package <code>taos-jdbcdriver-x.x.x-dist.jar</code> into their development environment's dependency repository by hand.</p>
<p>The TDengine driver package depends on a different native library, written in C, on each operating system. On Linux it depends on a native library named <code>libtaos.so</code> (.so is short for "Shared Object"); after TDengine is installed, <code>libtaos.so</code> is automatically copied to <code>/usr/local/lib/taos</code>, a directory on Linux's default library search path. On Windows, the JDBC driver depends on a native library named <code>taos.dll</code> (.dll is short for "Dynamic Link Library"); after the client is installed, the JDBC driver package is placed under <code>C:/TDengine/driver/JDBC/</code> by default, the dynamic library <code>taos.dll</code> it depends on is placed under <code>C:/TDengine/driver/C</code>, and <code>taos.dll</code> is automatically copied to the system search path <code>C:/Windows/System32</code>.</p>
<p>The TDengine JDBC driver follows the standard JDBC specification, so developers can consult Oracle's official JDBC documentation for the definitions and usage of the interfaces and methods. The driver differs slightly from traditional database drivers in its connection configuration and in the methods it supports.</p>
<p>The JDBC URL format for TDengine is:</p>
<p><code>jdbc:TSDB://{host_ip}:{port}/{database_name}?[user={user}|&amp;password={password}|&amp;charset={charset}|&amp;cfgdir={config_dir}|&amp;locale={locale}|&amp;timezone={timezone}]</code></p>
<p>where the parts in <code>{}</code> are required and those in <code>[]</code> are optional. The configuration parameters are:</p>
<ul>
<li>user: user name for logging in to TDengine; default value root</li>
<li>password: login password; default value taosdata</li>
<li>charset: character set used by the client; defaults to the system character set</li>
<li>cfgdir: client configuration directory; defaults to <code>/etc/taos</code> on Linux and <code>C:/TDengine/cfg</code> on Windows</li>
<li>locale: client locale; defaults to the current system locale</li>
<li>timezone: client time zone; defaults to the current system time zone</li>
</ul>
<p>All of these parameters can also be specified when creating a connection through the java.sql.DriverManager class, for example:</p>
<pre><code class="java language-java">import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;
import com.taosdata.jdbc.TSDBDriver;
public Connection getConn() throws Exception{
  Class.forName("com.taosdata.jdbc.TSDBDriver");
  String jdbcUrl = "jdbc:TAOS://127.0.0.1:0/db?user=root&amp;password=taosdata";
  Properties connProps = new Properties();
  connProps.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root");
  connProps.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata");
  connProps.setProperty(TSDBDriver.PROPERTY_KEY_CONFIG_DIR, "/etc/taos");
  connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
  connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
  connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIMEZONE, "UTC-8");
  Connection conn = DriverManager.getConnection(jdbcUrl, connProps);
  return conn;
}</code></pre>
<p>Except for cfgdir, all of these parameters can also be set in the client configuration file taos.cfg. Parameters declared in the call to java.sql.DriverManager take the highest precedence, followed by those in the JDBC URL, with the configuration file lowest. For example, if charset is set both in taos.cfg and in the JDBC URL, the JDBC URL value is used.</p>
<p>Although the TDengine JDBC driver stays as close as possible to relational-database drivers, differences between the objects and capabilities of a time-series database and a relational database mean that TDengine's Java API cannot match the standard completely. Developers with extensive relational-database experience who are new to TDengine should note the following:</p>
<ul>
<li>TDengine does not support deleting or updating individual records, so the driver provides no corresponding methods</li>
<li>TDengine currently does not support join or union across tables, so those parts of the API are unsupported too</li>
<li>TDengine supports batch writes, but only at the SQL-statement level, not at the API level: batching is achieved by writing special SQL statements</li>
<li>TDengine currently does not support nested queries: each Connection instance may have at most one open ResultSet; if a new query is executed while a ResultSet is still open, TSDBJDBCDriver automatically closes the previous one</li>
</ul>
<p>For errors raised by TDengine operations, the enum class TSDBError.java in the JDBCDriver package lists the error messages and error codes. For more example code, see the sample project <code>JDBCDemo</code> shipped with TDengine.</p>
<a class='anchor' id='Python-Connector'></a><h2>Python Connector</h2>
<a class='anchor' id='安装准备'></a><h3>Prerequisites</h3>
<ul>
<li>TDengine is installed; if the client runs on Windows, the Windows version of the TDengine client must be installed</li>
<li>Python 2.7 or &gt;= 3.4 is installed</li>
<li>pip is installed</li>
</ul>
<a class='anchor' id='安装'></a><h3>Installation</h3>
<a class='anchor' id='Linux'></a><h4>Linux</h4>
<p>The installation packages for python2 and python3 are in the src/connector/python folder of the source tree; install them with pip:</p>
<pre><code class="cmd language-cmd">pip install src/connector/python/linux/python2/</code></pre>
<p>or</p>
<pre><code>pip install src/connector/python/linux/python3/</code></pre>
<a class='anchor' id='Windows'></a><h4>Windows</h4>
<p>With the Windows TDengine client installed, copy the file "C:\TDengine\driver\taos.dll" to the "C:\windows\system32" directory, then open a Windows <em>cmd</em> prompt:</p>
<pre><code>cd C:\TDengine\connector\python\windows</code></pre>
<pre><code>pip install python2\</code></pre>
<p>or</p>
<pre><code>cd C:\TDengine\connector\python\windows</code></pre>
<pre><code>pip install python3\</code></pre>
<p>* If <em>pip</em> is not available on the machine, copy the taos folder under src/connector/python/windows/python3 or src/connector/python/windows/python2 into the application's directory instead.</p>
<a class='anchor' id='代码示例'></a><h4>代码示例</h4>
<li>导入TDengine客户端模块</li>
<pre><code class="python language-python">import taos </code></pre>
<li>获取连接</li>
<pre><code>
conn = taos.connect(host="127.0.0.1", user="root", password="taosdata", config="/etc/taos")
c1 = conn.cursor()
</code></pre>
<p>* <em>host 是TDengine 服务端所有IP, config 为客户端配置文件所在目录</em></p>
<li>写入数据</li>
<pre><code>
import datetime
# 创建数据库
c1.execute('create database db')
c1.execute('use db')
# 建表
c1.execute('create table tb (ts timestamp, temperature int, humidity float)')
# 插入数据
start_time = datetime.datetime(2019, 11, 1)
affected_rows = c1.execute('insert into tb values (\'%s\', 0, 0.0)' %start_time)
# 批量插入数据
time_interval = datetime.timedelta(seconds=60)
sqlcmd = ['insert into tb values']
for irow in range(1,11):
start_time += time_interval
sqlcmd.append('(\'%s\', %d, %f)' %(start_time, irow, irow*1.2))
affected_rows = c1.execute(' '.join(sqlcmd))
</code></pre>
<li>查询数据</li>
<code><pre>
c1.execute('select * from tb')
# 拉取查询结果
data = c1.fetchall()
# 返回的结果是一个列表,每一行构成列表的一个元素
numOfRows = c1.rowcount
numOfCols = c1.descriptions
for irow in range(numOfRows):
print("Row%d: ts=%s, temperature=%d, humidity=%f" %(irow, data[irow][0], data[irow][1],data[irow][2])
# 直接使用cursor 循环拉取查询结果
c1.execute('select * from tb')
for data in c1:
print("ts=%s, temperature=%d, humidity=%f" %(data[0], data[1],data[2])
</pre></code>
<li>关闭连接</li>
<code><pre>
c1.close()
conn.close()
</pre></code>
<a class='anchor' id='帮助信息''></a><h4>帮助信息</h4>
<p>Use Python's built-in help to inspect the module, or see the sample programs under code/examples/python. Some frequently used classes and methods:</p>
<ul>
<li><p><em>TaosConnection</em></p>
<p>See <code>help(taos.TDengineConnection)</code> in Python</p></li>
<li><p><em>TaosCursor</em></p>
<p>See <code>help(taos.TDengineCursor)</code> in Python</p></li>
<li><p>connect method</p>
<p>Creates an instance of taos.TDengineConnection.</p></li>
</ul>
<a class='anchor' id='RESTful-Connector'></a><h2>RESTful Connector</h2>
<p>To support development on all kinds of platforms, TDengine provides an API that follows REST design principles: the RESTful API. To keep the learning curve minimal, and unlike the RESTful designs of other databases, TDengine operates the database directly through the SQL statement contained in the BODY of an HTTP POST request, requiring only a single URL.</p>
<a class='anchor' id='HTTP请求格式'></a><h3>HTTP Request Format</h3>
<p> <code>http://&lt;ip&gt;:&lt;PORT&gt;/rest/sql</code></p>
<p> Parameters:</p>
<p> IP: any host in the cluster</p>
<p> PORT: the httpPort setting in the configuration file, 6020 by default </p>
<p>For example, http://192.168.0.1:6020/rest/sql is the URL pointing at the host with IP address 192.168.0.1.</p>
<p>The header of the HTTP request must carry authentication information; the standalone version of TDengine supports only the Basic authentication mechanism.</p>
<p>The BODY of the HTTP request is a complete SQL statement. Table names in the SQL statement must carry their database prefix, e.g. &lt;db-name&gt;.&lt;tb-name&gt;. If a table name has no database prefix, the system returns an error, because the HTTP module is a simple forwarder with no notion of a current database.</p>
<p>Use curl to issue an HTTP request as follows:</p>
<pre><code>curl -H 'Authorization: Basic &lt;TOKEN&gt;' -d '&lt;SQL&gt;' &lt;ip&gt;:&lt;PORT&gt;/rest/sql</code></pre>
<p>or</p>
<pre><code>curl -u username:password -d '&lt;SQL&gt;' &lt;ip&gt;:&lt;PORT&gt;/rest/sql</code></pre>
<p>where <code>TOKEN</code> is <code>{username}:{password}</code> encoded in Base64; for example, <code>root:taosdata</code> encodes to <code>cm9vdDp0YW9zZGF0YQ==</code>.</p>
<a class='anchor' id='HTTP返回格式'></a><h3>HTTP Response Format</h3>
<p>The response is JSON, for example:</p>
<pre><code>{
    "status": "succ",
    "head": ["column1","column2", …],
    "data": [
        ["2017-12-12 23:44:25.730", 1],
        ["2017-12-12 22:44:25.728", 4]
    ],
    "rows": 2
} </code></pre>
<p>Notes:</p>
<ul>
<li>The first field, "status", tells whether the operation succeeded or failed;</li>
<li>The second field, "head", is the table definition; if no result set is returned, it contains the single column "affected_rows";</li>
<li>The third field, "data", holds the returned rows, one per line; if no result set is returned, it is just [[affected_rows]];</li>
<li>The fourth field, "rows", gives the total number of rows returned.</li>
</ul>
<a class='anchor' id='使用示例'></a><h3>Examples</h3>
<ul>
<li><p>Query all records of table t1 in the demo database; the curl command is:</p>
<p><code>curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.t1' 192.168.0.1:6020/rest/sql</code></p>
<p>Response:</p></li>
</ul>
<pre><code>{
    "status": "succ",
    "head": ["column1","column2","column3"],
    "data": [
        ["2017-12-12 23:44:25.730", 1, 2.3],
        ["2017-12-12 22:44:25.728", 4, 5.6]
    ],
    "rows": 2
}</code></pre>
<ul>
<li><p>Create the database demo:</p>
<p><code>curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'create database demo' 192.168.0.1:6020/rest/sql</code></p>
<p>Response:</p></li>
</ul>
<pre><code>{
    "status": "succ",
    "head": ["affected_rows"],
    "data": [[1]],
    "rows": 1
}</code></pre>
<a class='anchor' id='Go-Connector'></a><h2>Go Connector</h2>
<p>TDengine provides a Go driver, the "taosSql" package, which implements Go's "database/sql/driver" interface. After installation, the Go client driver is found in the /usr/local/taos/connector/go directory. Copy the driver package directory /usr/local/taos/connector/go/src/taosSql into the src directory of your application project, then import it to access TDengine through the interfaces defined in "database/sql":</p>
<pre><code class="Go language-Go">import (
    "database/sql"
    _ "taosSql"
)</code></pre>
<p>The taosSql driver uses cgo to call TDengine's synchronous C/C++ interfaces, so the client application blocks until each database operation completes. Within a single database connection, only one thread may call the APIs at a time; a client application can open multiple connections for multi-threaded writes or queries.</p>
<p>For more usage details, see the example source code in the download directory.</p><a href='../index.html'>Back</a></section></main></div><?php include($s.'/footer.php'); ?><script>$('pre').addClass('prettyprint linenums');PR.prettyPrint()</script><script src='lib/docs/liner.js'></script></body></html>

View File

@@ -1,128 +0,0 @@
<!DOCTYPE html><html lang='en'><head><title>Documentation | TAOS Data</title><link rel='canonical' href='https://www.taosdata.com/cn/documentation/data-model-and-architecture-ch/index.php'/><script src='../lib/jquery-3.4.1.min.js' type='application/javascript'></script><link href='../lib/bootstrap.min.css' rel='stylesheet'><link href='../styles/base.min.css' rel='stylesheet'><link rel='stylesheet' href='../lib/docs/taosdataprettify.css'><link rel='stylesheet' href='../lib/docs/docs.css'><script src='../lib/docs/prettify.js'></script><script src='../lib/docs/prettyprint-sql.js'></script></head><body><script>$('#documentation-href').addClass('active')</script><div class='container-fluid'><main class='content-wrapper'><section class='documentation'><a href='../index.html'>Back</a><h1>Data Model and Design</h1>
<a class='anchor' id='数据模型'></a><h2>Data Model</h2>
<a class='anchor' id='物联网典型场景'></a><h3>A Typical IoT Scenario</h3>
<p>In typical IoT, connected-vehicle and operations-monitoring scenarios there are usually several different types of data-collecting devices, each collecting one or more physical quantities, and many individual devices of the same type deployed at different locations. A big-data system has to gather all the collected data and then compute and analyze it. For one type of device, the collected data resembles the following table:</p>
<figure><table>
<thead>
<tr>
<th style="text-align:center;">Device ID</th>
<th style="text-align:center;">Time Stamp</th>
<th style="text-align:center;">Value 1</th>
<th style="text-align:center;">Value 2</th>
<th style="text-align:center;">Value 3</th>
<th style="text-align:center;">Tag 1</th>
<th style="text-align:center;">Tag 2</th>
</tr>
</thead>
<tbody>
<tr>
<td style="text-align:center;">D1001</td>
<td style="text-align:center;">1538548685000</td>
<td style="text-align:center;">10.3</td>
<td style="text-align:center;">219</td>
<td style="text-align:center;">0.31</td>
<td style="text-align:center;">Red</td>
<td style="text-align:center;">Tesla</td>
</tr>
<tr>
<td style="text-align:center;">D1002</td>
<td style="text-align:center;">1538548684000</td>
<td style="text-align:center;">10.2</td>
<td style="text-align:center;">220</td>
<td style="text-align:center;">0.23</td>
<td style="text-align:center;">Blue</td>
<td style="text-align:center;">BMW</td>
</tr>
<tr>
<td style="text-align:center;">D1003</td>
<td style="text-align:center;">1538548686500</td>
<td style="text-align:center;">11.5</td>
<td style="text-align:center;">221</td>
<td style="text-align:center;">0.35</td>
<td style="text-align:center;">Black</td>
<td style="text-align:center;">Honda</td>
</tr>
<tr>
<td style="text-align:center;">D1004</td>
<td style="text-align:center;">1538548685500</td>
<td style="text-align:center;">13.4</td>
<td style="text-align:center;">223</td>
<td style="text-align:center;">0.29</td>
<td style="text-align:center;">Red</td>
<td style="text-align:center;">Volvo</td>
</tr>
<tr>
<td style="text-align:center;">D1001</td>
<td style="text-align:center;">1538548695000</td>
<td style="text-align:center;">12.6</td>
<td style="text-align:center;">218</td>
<td style="text-align:center;">0.33</td>
<td style="text-align:center;">Red</td>
<td style="text-align:center;">Tesla</td>
</tr>
<tr>
<td style="text-align:center;">D1004</td>
<td style="text-align:center;">1538548696600</td>
<td style="text-align:center;">11.8</td>
<td style="text-align:center;">221</td>
<td style="text-align:center;">0.28</td>
<td style="text-align:center;">Black</td>
<td style="text-align:center;">Honda</td>
</tr>
</tbody>
</table></figure>
<p>Every record carries the device ID, a timestamp, the collected measurements, and the static tags associated with the device. Each device collects data either when triggered externally or at a configured interval. The collected data points are time-ordered and form a data stream.</p>
<a class='anchor' id='数据特征'></a><h3>Data Characteristics</h3>
<p>Besides being time series, a closer look shows that IoT, connected-vehicle and operations-monitoring data have several other distinctive characteristics:</p>
<ol>
<li>the data is structured;</li>
<li>the data is rarely updated or deleted;</li>
<li>no transaction processing in the traditional database sense is needed;</li>
<li>compared with internet applications, there are many more writes than reads;</li>
<li>traffic is steady and can be predicted from the number of devices and the collection frequency;</li>
<li>users care about trends over a period, not the value at one particular point in time;</li>
<li>data has a retention period;</li>
<li>queries and analyses are always over a time range and a geographic region;</li>
<li>besides storage and queries, many kinds of statistics and real-time computations are needed;</li>
<li>the data volume is huge: a single day's collection can exceed 10 billion records.</li>
</ol>
<p>By fully exploiting these characteristics with a storage and computation design specially optimized for time-series data, TDengine raises the system's processing capability dramatically.</p>
<a class='anchor' id='关系型数据库模型'></a><h3>A Relational Database Model</h3>
<p>Since the collected data is generally structured, and to lower the learning curve, TDengine manages data with a traditional relational database model. Users therefore create a database first, then tables, and only then can they insert or query data.</p>
<a class='anchor' id='一个设备一张表'></a><h3>One Table per Device</h3>
<p>To fully exploit the time-series nature and the other characteristics of its data, TDengine requires <strong>a separate table for each data collection point</strong>: with ten million smart meters, ten million tables are created, and D1001, D1002, D1003 and D1004 in the table above each get their own table storing the time series collected at that point. This design guarantees that the data of one collection point is stored contiguously, block by block, on the storage medium, greatly reducing random reads and improving read and query speed by orders of magnitude. Moreover, because different devices generate data completely independently and each device produces only its own data, each table has exactly one writer, so writes can be lock-free and therefore much faster. And since the data of one collection point arrives in time order, writes can be implemented as appends, raising write throughput further still.</p>
<a class='anchor' id='数据建模最佳实践'></a><h3>Data-Modeling Best Practices</h3>
<p><strong>Table</strong>: TDengine recommends using the name of the data collection point (e.g. D1001 above) as the table name. A collection point may collect several physical quantities at once (value1, value2, value3 above); each quantity becomes one column of the table, with a data type such as integer, float or string. In addition, the first column of the table must be a timestamp, i.e. of type timestamp. Some devices have several groups of measurements, each collected at a different frequency; in that case, create several tables for the same device. TDengine automatically indexes the collected data by timestamp and builds no index on the measurements themselves. Data is stored column-wise.</p>
<p><strong>Super Table (STable)</strong>: for collection points of the same type, to keep their schemas consistent and to make aggregation easy, define a super table (STable, see chapter 10) first and then define the tables. Each collection point usually also has static tag information (Tag 1, Tag 2 above) such as device model and color; these static values are not stored on the data nodes with the collected data but are kept with the super table on the metadata node. The static tags serve as filter conditions for aggregations across collection points.</p>
<p><strong>Database</strong>: different collection points often have different data characteristics, such as collection frequency, retention period, number of replicas and field sizes. So that TDengine can work at maximum efficiency in every scenario, TDengine recommends creating tables with different characteristics in different databases. When creating a database, besides the standard SQL options, the application can specify the retention period, the number of replicas, the cache size, the file block size, compression and other parameters (see chapter 19).</p>
<p><strong>Schemaless vs Schema</strong>: compared with the various NoSQL engines, having to define a schema makes inserting data less flexible. But in typical time-series scenarios such as IoT and finance the schema rarely changes, so the reduced flexibility is not a problem in practice. In return, processing structured data lets TDengine improve query and analysis performance by orders of magnitude.</p>
<p>TDengine places no limit on the number of databases, super tables or tables, and their number has no effect on performance; applications can create whatever their scenario requires. A short modeling sketch follows below.</p>
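<p>Purely as an illustration of the database / super table / table layering described above, executed here through the C interface documented in the Connectors chapter (the STable syntax and all names are illustrative assumptions, not taken from this page):</p>
<pre><code class="C language-C">#include &lt;taos.h&gt;

void model_sketch(TAOS *taos) {
  // one database for this class of data, with its own retention period
  taos_query(taos, "create database power keep 365");
  taos_query(taos, "use power");
  // one super table for the device type: measured columns + static tags
  taos_query(taos, "create table meters (ts timestamp, current float, "
                   "voltage int) tags (location binary(20), model int)");
  // one table per collection point, created from the super table
  taos_query(taos, "create table d1001 using meters tags ('beijing', 1)");
  taos_query(taos, "create table d1002 using meters tags ('shanghai', 2)");
}</code></pre>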
<a class='anchor' id='主要模块'></a><h2>Main Modules</h2>
<p>As shown in the figure, the TDengine service consists of two main modules: the <strong>management node module (MGMT)</strong> and the <strong>data node module (DNODE)</strong>. The complete TDengine also includes a <strong>client module</strong>.</p>
<p><center> <img src="../assets/structure.png"> </center>
<center> Figure 1: TDengine architecture </center></p>
<a class='anchor' id='管理节点模块'></a><h3>Management Node Module</h3>
<p>The management node module is mainly responsible for storing and querying metadata, including user management and the creation, deletion and lookup of databases and tables. When an application connects to TDengine, it connects to the management node first. Requests to create or delete databases and tables are also sent to the management node module first; it creates or deletes the metadata and then asks the data node module to allocate or release the required resources. When writing or querying data, the application likewise contacts the management node module first to obtain metadata, and then accesses the data node module according to that metadata.</p>
<a class='anchor' id='数据节点模块'></a><h3>Data Node Module</h3>
<p>Storing written data and serving queries is the job of the data node module. For more efficient resource usage and easier future horizontal scaling, TDengine virtualizes data nodes internally, introducing the virtual node (vnode) as the unit of storage, resource allocation and data replication. As shown in Figure 2, virtualization turns one dnode into a collection of virtual nodes.</p>
<p>When a database is created, the system allocates vnodes automatically. Each vnode stores the data of a certain number of tables, but a table lives in exactly one vnode and never spans vnodes. A vnode belongs to exactly one database, while a database may own one or more vnodes. Vnodes do not share resources with each other: every vnode has its own cache and its own storage directory on disk, and within one vnode both cache and disk storage are shared by all of its tables. Through virtualization, TDengine can distribute the limited physical resources of a dnode sensibly across vnodes, greatly raising resource utilization and concurrency. The number of virtual nodes on a physical machine can be configured according to its hardware resources.</p>
<p><center> <img src="../assets/vnode.png"> </center>
<center> Figure 2: TDengine virtualization </center></p>
<a class='anchor' id='客户端模块'></a><h3>Client Module</h3>
<p>The TDengine client module parses the SQL statements coming from the application, converts them into internal structures and sends them to the server. All of TDengine's interfaces are built on top of the client module. The client talks to the management module over TCP/UDP on the port configured by mgmtShellPort (default 6030), and to the data node module over TCP/UDP on the port configured by vnodeShellPort (default 6035). Both ports can be customized in <a href="../administrator/#Configuration-on-Server">the system configuration file taos.cfg</a>.</p>
<a class='anchor' id='写入流程'></a><h2>Write Flow</h2>
<p>TDengine's complete write flow is shown in Figure 3. To guarantee the safety and integrity of written data, TDengine uses a write-ahead log: data arriving from a client is validated and then written to the write-ahead log first, so that TDengine can recover the data from the log when the service restarts, e.g. after a power failure, avoiding data loss. After the write-ahead log, the data is written into the cache of the corresponding vnode, and the server then sends an acknowledgment of the successful write to the client. TDengine has two mechanisms that flush cached data to disk for persistent storage:</p>
<p><center> <img src="../assets/write_process.png"> </center>
<center> Figure 3: TDengine write flow </center></p>
<ol>
<li><strong>Time-driven flush</strong>: TDengine periodically writes the data in the vnode cache to disk, by default once per hour. The flush interval is set by the commitTime parameter in taos.cfg.</li>
<li><strong>Data-driven flush</strong>: when the data cached in a vnode reaches a certain volume, TDengine also starts a flush thread to empty the cache so as not to block subsequent writes. A data-driven flush resets the timer of the periodic flush.</li>
</ol>
<p>When flushing, TDengine opens a new write-ahead log file and deletes the old one after the flush, so the log files never grow without bound. TDengine manages the cache first-in-first-out, guaranteeing that every table's latest data is in the cache.</p>
<a class='anchor' id='数据存储'></a><h2>Data Storage</h2>
<p>TDengine stores all data under /var/lib/taos/; the location can be changed with the dataDir configuration parameter.</p>
<p>TDengine's metadata includes information about databases, tables, users and so on, together with each super table and the tag data of every table. To speed up access, all metadata is cached.</p>
<p>The written data is sharded on disk along the time dimension: data of the tables in one vnode that falls into the same time range is stored in the same file group. This sharding scheme greatly simplifies queries along the time dimension and speeds them up. By default, each data file on disk holds 10 days of data; this can be changed with the daysPerFile configuration parameter.</p>
<p>All table data has a retention period; once it is exceeded (the default is 3650 days), the data is deleted automatically by the system. The period can be changed with the daysToKeep configuration parameter.</p>
<p>Data is stored in files in blocks. Each data block contains the data of only one table, sorted in ascending order by the timestamp primary key. Within a block, data is stored by column, so values of the same column sit together; different data types also use different compression methods, greatly improving the compression ratio and saving storage space.</p>
<p>There are three kinds of data files: data files, which hold the actual data blocks and are append-only; head files, which hold the index of the data blocks in the corresponding data file; and last files, which hold the most recently written data. On each flush, the data in a last file is merged with the in-memory data, and the result is written either to a data file or to a last file.</p><a href='../index.html'>Back</a></section></main></div><?php include($s.'/footer.php'); ?><script>$('pre').addClass('prettyprint linenums');PR.prettyPrint()</script><script src='lib/docs/liner.js'></script></body></html>

View File

@@ -1,33 +0,0 @@
<!DOCTYPE html><html lang='en'><head><title>Documentation | TAOS Data</title><link rel='canonical' href='https://www.taosdata.com/cn/documentation/faq-ch/index.php'/><script src='../lib/jquery-3.4.1.min.js' type='application/javascript'></script><link href='../lib/bootstrap.min.css' rel='stylesheet'><link href='../styles/base.min.css' rel='stylesheet'><link rel='stylesheet' href='../lib/docs/taosdataprettify.css'><link rel='stylesheet' href='../lib/docs/docs.css'><script src='../lib/docs/prettify.js'></script><script src='../lib/docs/prettyprint-sql.js'></script></head><body><script>$('#documentation-href').addClass('active')</script><div class='container-fluid'><main class='content-wrapper'><section class='documentation'><a href='../index.html'>Back</a><h1>FAQ</h1>
<h4>1. What should I do when I get the error "failed to connect to server"?</h4>
<p>When the client hits a connection failure, check the following:</p>
<ol>
<li>On the server, run <code>systemctl status taosd</code> to check the state of <em>taosd</em>; start <em>taosd</em> if it is not running</li>
<li>Make sure the client connects with the correct server IP address</li>
<li>Ping the server IP; if there is no response, check your network</li>
<li>Check the firewall settings and make sure TCP/UDP ports 6030-6039 are open</li>
<li>For JDBC, ODBC, Python, Go and similar connections on Linux, make sure <em>libtaos.so</em> is in the directory <em>/usr/local/lib/taos</em>, and that <em>/usr/local/lib/taos</em> is on the library search path <em>LD_LIBRARY_PATH</em></li>
<li>For JDBC, ODBC, Python, Go and similar connections on Windows, make sure <em>driver/c/taos.dll</em> is on the system search path (placing <em>taos.dll</em> in the directory <em>C:\Windows\System32</em> is recommended)</li>
<li>If the failure still cannot be isolated, use the command-line tool nc to check whether the TCP and UDP connections on the given port work:
check whether a UDP port connection works: <code>nc -vuz {hostIP} {port}</code>
check whether the server-side TCP port works: <code>nc -l {port}</code>
check whether the client-side TCP connection works: <code>nc {hostIP} {port}</code></li>
</ol>
<h4>2. Why do I get an "Invalid SQL" error although the syntax is correct?</h4>
<p>If you are sure the syntax is correct, check whether the SQL statement is longer than 64K; exceeding that limit also produces this error.</p>
<h4>3. Why does dropping a super table keep failing?</h4>
<p>Make sure no tables remain under the super table; the system refuses to drop a super table that still has tables.</p>
<h4>4. Are validation queries supported?</h4>
<p>TDengine has no dedicated set of validation queries yet; we suggest using the system-monitoring database "log" for this purpose.</p>
<h4>5. Can I delete or update a record?</h4>
<p>No. TDengine is designed for data collected by connected devices and does not permit modification. However, TDengine provides a retention policy: records are deleted automatically once they exceed the retention period.</p>
<h4>6. How do I create a table with more than 250 columns?</h4>
<p>TDengine allows at most 250 columns per table. If you really need more, we suggest splitting the wide table logically into several smaller tables according to the characteristics of the data.</p>
<h4>7. What is the most efficient way to write data?</h4>
<p>Batched inserts. Each insert statement can write multiple records into one table, or multiple records into several tables at once; a sketch follows below.</p>
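<p>A minimal sketch of such a batched insert through the C interface (the table names d1001/d1002 and the values are placeholders):</p>
<pre><code class="C language-C">#include &lt;taos.h&gt;

void batch_insert(TAOS *taos) {
  // several records for one table, plus records for a second table,
  // all in a single insert statement
  taos_query(taos,
      "insert into d1001 values (now, 10.2) (now + 1s, 10.3) "
      "d1002 values (now, 9.8)");
}</code></pre>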
<h4>8. Chinese characters in nchar data inserted on Windows come out garbled. How do I fix this?</h4>
<p>When inserting nchar data containing Chinese on Windows, first make sure the system region is set to China (configurable in the Control Panel); the <code>taos</code> client in cmd should then work correctly. If you are developing a Java application in an IDE such as Eclipse or IntelliJ, make sure the file encoding in the IDE is set to GBK (the default encoding type in Java), then initialize the client configuration when creating the Connection, as follows:</p>
<pre><code class="java language-java">Class.forName("com.taosdata.jdbc.TSDBDriver");
Properties properties = new Properties();
properties.setProperty(TSDBDriver.LOCALE_KEY, "UTF-8");
Connection connection = DriverManager.getConnection(url, properties);</code></pre><a href='../index.html'>Back</a></section></main></div><?php include($s.'/footer.php'); ?><script>$('pre').addClass('prettyprint linenums');PR.prettyPrint()</script><script src='lib/docs/liner.js'></script></body></html>

View File

@@ -1,88 +0,0 @@
<!DOCTYPE html><html lang='en'><head><title>Documentation | TAOS Data</title><link rel='canonical' href='https://www.taosdata.com/cn/documentation/getting-started-ch/index.php'/><script src='../lib/jquery-3.4.1.min.js' type='application/javascript'></script><link href='../lib/bootstrap.min.css' rel='stylesheet'><link href='../styles/base.min.css' rel='stylesheet'><link rel='stylesheet' href='../lib/docs/taosdataprettify.css'><link rel='stylesheet' href='../lib/docs/docs.css'><script src='../lib/docs/prettify.js'></script><script src='../lib/docs/prettyprint-sql.js'></script></head><body><script>$('#documentation-href').addClass('active')</script><div class='container-fluid'><main class='content-wrapper'><section class='documentation'><a href='../index.html'>Back</a><h1>Getting Started</h1>
<a class='anchor' id='快速上手'></a><h2>Quick Start</h2>
<p>TDengine currently runs only on Linux. You can install it either from <a href="#通过源码安装">source</a> or from an <a href="#通过安装包安装">installation package</a>.</p>
<a class='anchor' id='通过源码安装'></a><h3>Install from Source</h3>
<p>Please visit the <a href="https://github.com/taosdata/TDengine">TDengine GitHub page</a> to download the source code and install it.</p>
<a class='anchor' id='通过安装包安装'></a><h3>Install from a Package</h3>
<p>Three installation packages are provided; pick the one you need. Installing TDengine is very simple: from download to successful installation takes only a few seconds.</p>
<ul id='packageList'>
<li><a id='tdengine-rpm' style='color:var(--b2)'>TDengine RPM package (1.5M)</a></li>
<li><a id='tdengine-deb' style='color:var(--b2)'>TDengine DEB package (1.7M)</a></li>
<li><a id='tdengine-tar' style='color:var(--b2)'>TDengine Tarball (3.0M)</a></li>
</ul>
<p>For now, TDengine can only be installed on Linux systems that use <a href="https://en.wikipedia.org/wiki/Systemd"><code>systemd</code></a> for service management; support for other Linux systems is under development. Use the <code>which</code> command to check whether <code>systemd</code> is present:</p>
<pre><code class="cmd language-cmd">which systemd</code></pre>
<p>If the <code>systemd</code> command is not found, consider <a href="#通过源码安装">installing from source</a>.</p>
<a class='anchor' id='启动并体验TDengine'></a><h2>Start and Experience TDengine</h2>
<p>After installation, use the <code>systemctl</code> command to start TDengine's service process:</p>
<pre><code class="cmd language-cmd">systemctl start taosd</code></pre>
<p>Check whether the service is working:</p>
<pre><code class="cmd language-cmd">systemctl status taosd</code></pre>
<p>If the TDengine service is running normally, you can access and experience TDengine through its command-line program <code>taos</code>.</p>
<p><strong>Note: <em>systemctl</em> requires <em>root</em> privileges; if you are not <em>root</em>, prefix the command with <em>sudo</em>.</strong></p>
<a class='anchor' id='TDengine命令行程序'></a><h2>The TDengine Command-Line Program</h2>
<p>To run the TDengine command-line program, simply execute <code>taos</code> in a Linux terminal:</p>
<pre><code class="cmd language-cmd">taos</code></pre>
<p>If the TDengine shell connects to the server successfully, it prints a welcome message and version information; otherwise it prints an error message (see the <a href="https://www.taosdata.com/cn/faq/">FAQ</a> for troubleshooting connection failures). The TDengine shell prompt is:</p>
<pre><code class="cmd language-cmd">taos&gt;</code></pre>
<p>In the TDengine shell, you can create and drop databases and tables and run inserts and queries through SQL commands. A statement must end with a semicolon to be executed. Example:</p>
<pre><code class="mysql language-mysql">create database db;
use db;
create table t (ts timestamp, speed int);
insert into t values ('2019-07-15 00:00:00', 10);
insert into t values ('2019-07-15 01:00:00', 20);
select * from t;
          ts          |   speed   |
===================================
 19-07-15 00:00:00.000|         10|
 19-07-15 01:00:00.000|         20|
Query OK, 2 row(s) in set (0.001700s)</code></pre>
<p>除执行SQL语句外系统管理员还可以从TDengine终端检查系统运行状态添加删除用户账号等。</p>
<a class='anchor' id='命令行参数'></a><h3>命令行参数</h3>
<p>您可通过配置命令行参数来改变TDengine终端的行为。以下为常用的几个命令行参数</p>
<ul>
<li>-c, --config-dir: 指定配置文件目录,默认为<em>/etc/taos</em></li>
<li>-h, --host: 指定服务的IP地址默认为本地服务</li>
<li>-s, --commands: 在不进入终端的情况下运行TDengine命令</li>
<li>-u, -- user: 链接TDengine服务器的用户名缺省为root</li>
<li>-p, --password: 链接TDengine服务器的密码缺省为taosdata</li>
<li>-?, --help: 打印出所有命令行参数</li>
</ul>
<p>示例:</p>
<pre><code class="cmd language-cmd">taos -h 192.168.0.1 -s "use db; show tables;"</code></pre>
<a class='anchor' id='运行SQL命令脚本'></a><h3>运行SQL命令脚本</h3>
<p>TDengine终端可以通过<code>source</code>命令来运行SQL命令脚本.</p>
<pre><code>taos&gt; source &lt;filename&gt;;</code></pre>
<a class='anchor' id='Shell小技巧'></a><h3>Shell小技巧</h3>
<ul>
<li>可以使用上下光标键查看已经历史输入的命令</li>
<li>修改用户密码。在shell中使用alter user命令</li>
<li>ctrl+c 中止正在进行中的查询</li>
<li>执行<code>RESET QUERY CACHE</code>清空本地缓存的表的schema</li>
</ul>
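<p>A minimal sketch of the two commands mentioned above, run inside the <code>taos</code> shell; the user name and password here are placeholders, not values from this page:</p>
<pre><code class="mysql language-mysql">alter user root pass 'taosdata';
RESET QUERY CACHE;</code></pre>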
<a class='anchor' id='主要功能'></a><h2>Main Features</h2>
<p>The core of TDengine is a time-series database. To reduce development complexity and system maintenance effort, TDengine also provides caching, message queuing, subscription, stream computing, and other capabilities. In more detail, it offers:</p>
<ul>
<li>Insert and query data with a SQL-like language</li>
<li>C/C++, Java (JDBC), Python, Go, RESTful, and Node.js programming interfaces</li>
<li>Ad-hoc query and analysis through Python/R/MATLAB or the TDengine shell</li>
<li>Stream computing based on sliding windows, via scheduled continuous queries</li>
<li>Super tables for more flexible and efficient aggregation across multiple time series</li>
<li>Aggregation over one or multiple time series along the time axis</li>
<li>Data subscription: applications are notified as soon as new data arrives</li>
<li>Caching: the latest data of every time series or device is served quickly from memory</li>
<li>Fully transparent handling of historical and real-time data, with no need to treat them differently</li>
<li>Connectors for third-party tools such as Telegraf and Grafana</li>
<li>A complete set of configuration options and tools for easier TDengine administration</li>
</ul>
<p>The enterprise edition of TDengine additionally provides:</p>
<ul>
<li>Linear horizontal scalability for higher processing speed and data capacity</li>
<li>High reliability with no single point of failure, delivering carrier-grade service</li>
<li>Automatic synchronization of multiple replicas, including across data centers</li>
<li>Multi-level storage to lower the cost of handling historical data</li>
<li>A user-friendly administration console and tools for simpler management</li>
</ul>
<p>TDengine is a time-series data processing engine designed and optimized for IoT, connected vehicles, industrial IoT, operations monitoring, and similar scenarios. Compared with other solutions, its insert and query speeds are more than 10x faster. A single core can ingest one million data points per second and read ten million data points per second. Thanks to columnar storage and optimized compression algorithms, its storage footprint is less than 1/10 that of a conventional database.</p>
<a class='anchor' id='深入了解TDengine'></a><h2>Learn More About TDengine</h2>
<p>Please continue with the <a href="../documentation">documentation</a> to learn more about TDengine.</p><a href='../index.html'>Back</a></section></main></div><?php include($s.'/footer.php'); ?><script>$('pre').addClass('prettyprint linenums');PR.prettyPrint()</script><script src='lib/docs/liner.js'></script></body></html>

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long


@ -1,269 +0,0 @@
.documentation strong {
font-weight:600;
}
.documentation {
overflow:hidden;
margin-bottom: 10rem;
}
.documentation a {
font-size:1em;
text-decoration: none;
}
.documentation > a > h2 {
cursor:pointer;
color:var(--sg1);
}
.documentation > a >h2:hover {
color:var(--b2);
}
.documentation a:hover {
text-decoration: none;
}
.documentation pre {
margin-top: 0;
margin-bottom: 7px;
overflow: auto;
-ms-overflow-style: scrollbar;
margin-top: 7px;
}
pre * {
font-family:monospace !important
}
.documentation a {
color:var(--b2);
padding-bottom: 2px;
position: relative;
font-style: normal;
cursor: pointer;
}
.documentation a:hover,a:focus {
text-decoration: none;
color:var(--b2);
}
.documentation a::before {
content: "";
left: 0;
background-color: var(--b2);
width: 0%;
height: 1px;
top:-webkit-calc(1em + 8px);
top:calc(1em + 8px);
position: absolute;
z-index: 2;
-webkit-transition: background-color 0.2s, height 0.2s, top 0.2s, width 0.2s;
-o-transition: background-color 0.2s, height 0.2s, top 0.2s, width 0.2s;
transition: background-color 0.2s, height 0.2s, top 0.2s, width 0.2s;
}
.documentation a:hover::before, .documentation a:focus::before {
content: "";
left: 0;
background-color: var(--b2);
width: 100%;
height: 1px;
top:-webkit-calc(1em + 8px);
top:calc(1em + 8px);
position: absolute;
z-index: 2;
-webkit-transition: background-color 0.2s, height 0.2s, top 0.2s, width 0.2s;
-o-transition: background-color 0.2s, height 0.2s, top 0.2s, width 0.2s;
transition: background-color 0.2s, height 0.2s, top 0.2s, width 0.2s;
text-decoration: none;
}
.documentation img {
width:100%;
max-width:640px;
margin-left: 50%;
-webkit-transform: translate(-50%,0);
-ms-transform: translate(-50%,0);
transform: translate(-50%,0);
}
h1,
h2,
h3,
h4,
h5,
h6 {
position: relative;
margin-bottom: 0.5rem;
font-weight: 500;
line-height: 1.4;
cursor: text;
}
h1:hover a.anchor,
h2:hover a.anchor,
h3:hover a.anchor,
h4:hover a.anchor,
h5:hover a.anchor,
h6:hover a.anchor {
text-decoration: none;
}
h1 tt,
h1 code {
font-size: inherit;
}
h2 tt,
h2 code {
font-size: inherit;
}
h3 tt,
h3 code {
font-size: inherit;
}
h4 tt,
h4 code {
font-size: inherit;
}
h5 tt,
h5 code {
font-size: inherit;
}
h6 tt,
h6 code {
font-size: inherit;
}
h1 {
font-size: 2.5rem;
line-height: 1.8;
}
h2 {
font-size: 1.7rem;
line-height: 1.8;
padding-left: 0.5em;
}
.documentation h2::before {
content:"";
height:1em;
display: block;
width:3px;
margin-left: -0.5em;
margin-top: 0.4em;
position: absolute;
background-color: var(--b1);
}
h3 {
font-size: 1.4rem;
line-height: 1.43;
}
h4 {
font-size: 1.25rem;
}
h5 {
font-size: 1rem;
}
h6 {
font-size: 1rem;
color: #777;
}
p {
margin-bottom:0.5rem;
font-size:1em;
margin-top:0;
font-weight:300;
}
ol,ul,dl {
margin-top:0;
margin-bottom: 1rem;
}
li p {
margin-bottom: 0;
}
blockquote,
table{
margin: 0.8em 0;
width:100%;
}
figure table{
overflow: scroll;
}
hr {
height: 2px;
padding: 0;
margin: 16px 0;
background-color: #e7e7e7;
border: 0 none;
overflow: hidden;
-webkit-box-sizing: content-box;
box-sizing: content-box;
}
li p.first {
display: inline-block;
}
ul,
ol {
padding-left: 30px;
}
ul:first-child,
ol:first-child {
margin-top: 0;
}
ul:last-child,
ol:last-child {
margin-bottom: 0;
}
blockquote {
border-left: 4px solid #dfe2e5;
padding: 0 15px;
color: #777777;
}
blockquote blockquote {
padding-right: 0;
}
table {
padding: 0;
word-break: initial;
}
table tr {
border-top: 1px solid #dfe2e5;
margin: 0;
padding: 0;
}
table tr:nth-child(2n),
thead {
background-color: #f8f8f8;
}
table tr th {
font-weight: bold;
border: 1px solid #dfe2e5;
border-bottom: 0;
text-align: left;
margin: 0;
padding: 6px 13px;
}
table tr td {
border: 1px solid #dfe2e5;
text-align: left;
margin: 0;
padding: 6px 13px;
}
table tr th:first-child,
table tr td:first-child {
margin-top: 0;
}
table tr th:last-child,
table tr td:last-child {
margin-bottom: 0;
}
h1 code,h2 code, h3 code, h4 code, h5 code, h6 code,
p code, li code, td code,
tt {
border: 1px solid #e7eaed;
background-color: #f8f8f8;
-webkit-border-radius: 3px;
border-radius: 3px;
padding: 0;
font-size: 0.9em;
color:var(--sg1);
font-family:monospace;
background-color: #f3f4f4;
padding: 0 2px 0 2px;
}
/*Tell prettyprinted code not to follow above*/
.prettyprint code{
border:none;
background-color:transparent;
font-size:inherit;
padding:0 1px 0 0px;
}


@ -1,19 +0,0 @@
/*JS to determine how many lines are used in a pre/code block and set the CSS class accordingly. MUST be placed after elements with the prettyprint class are loaded.*/
$('.prettyprint').toArray().forEach(function(element){
// 25.2px is the rendered line height, so this approximates the line count
let linenums = element.clientHeight / 25.2;
if (linenums > 99) {
// three-digit line numbers need the widest gutter
$(element).addClass('threec');
}
else if (linenums > 9) {
// two-digit line numbers need a wider gutter
$(element).addClass('twoc');
}
});


@ -1,46 +0,0 @@
!function(){/*
Copyright (C) 2006 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
"undefined"!==typeof window&&(window.PR_SHOULD_USE_CONTINUATION=!0);
(function(){function T(a){function d(e){var a=e.charCodeAt(0);if(92!==a)return a;var c=e.charAt(1);return(a=w[c])?a:"0"<=c&&"7">=c?parseInt(e.substring(1),8):"u"===c||"x"===c?parseInt(e.substring(2),16):e.charCodeAt(1)}function f(e){if(32>e)return(16>e?"\\x0":"\\x")+e.toString(16);e=String.fromCharCode(e);return"\\"===e||"-"===e||"]"===e||"^"===e?"\\"+e:e}function c(e){var c=e.substring(1,e.length-1).match(RegExp("\\\\u[0-9A-Fa-f]{4}|\\\\x[0-9A-Fa-f]{2}|\\\\[0-3][0-7]{0,2}|\\\\[0-7]{1,2}|\\\\[\\s\\S]|-|[^-\\\\]","g"));
e=[];var a="^"===c[0],b=["["];a&&b.push("^");for(var a=a?1:0,g=c.length;a<g;++a){var h=c[a];if(/\\[bdsw]/i.test(h))b.push(h);else{var h=d(h),k;a+2<g&&"-"===c[a+1]?(k=d(c[a+2]),a+=2):k=h;e.push([h,k]);65>k||122<h||(65>k||90<h||e.push([Math.max(65,h)|32,Math.min(k,90)|32]),97>k||122<h||e.push([Math.max(97,h)&-33,Math.min(k,122)&-33]))}}e.sort(function(e,a){return e[0]-a[0]||a[1]-e[1]});c=[];g=[];for(a=0;a<e.length;++a)h=e[a],h[0]<=g[1]+1?g[1]=Math.max(g[1],h[1]):c.push(g=h);for(a=0;a<c.length;++a)h=
c[a],b.push(f(h[0])),h[1]>h[0]&&(h[1]+1>h[0]&&b.push("-"),b.push(f(h[1])));b.push("]");return b.join("")}function m(e){for(var a=e.source.match(RegExp("(?:\\[(?:[^\\x5C\\x5D]|\\\\[\\s\\S])*\\]|\\\\u[A-Fa-f0-9]{4}|\\\\x[A-Fa-f0-9]{2}|\\\\[0-9]+|\\\\[^ux0-9]|\\(\\?[:!=]|[\\(\\)\\^]|[^\\x5B\\x5C\\(\\)\\^]+)","g")),b=a.length,d=[],g=0,h=0;g<b;++g){var k=a[g];"("===k?++h:"\\"===k.charAt(0)&&(k=+k.substring(1))&&(k<=h?d[k]=-1:a[g]=f(k))}for(g=1;g<d.length;++g)-1===d[g]&&(d[g]=++E);for(h=g=0;g<b;++g)k=a[g],
"("===k?(++h,d[h]||(a[g]="(?:")):"\\"===k.charAt(0)&&(k=+k.substring(1))&&k<=h&&(a[g]="\\"+d[k]);for(g=0;g<b;++g)"^"===a[g]&&"^"!==a[g+1]&&(a[g]="");if(e.ignoreCase&&q)for(g=0;g<b;++g)k=a[g],e=k.charAt(0),2<=k.length&&"["===e?a[g]=c(k):"\\"!==e&&(a[g]=k.replace(/[a-zA-Z]/g,function(a){a=a.charCodeAt(0);return"["+String.fromCharCode(a&-33,a|32)+"]"}));return a.join("")}for(var E=0,q=!1,l=!1,n=0,b=a.length;n<b;++n){var p=a[n];if(p.ignoreCase)l=!0;else if(/[a-z]/i.test(p.source.replace(/\\u[0-9a-f]{4}|\\x[0-9a-f]{2}|\\[^ux]/gi,
""))){q=!0;l=!1;break}}for(var w={b:8,t:9,n:10,v:11,f:12,r:13},r=[],n=0,b=a.length;n<b;++n){p=a[n];if(p.global||p.multiline)throw Error(""+p);r.push("(?:"+m(p)+")")}return new RegExp(r.join("|"),l?"gi":"g")}function U(a,d){function f(a){var b=a.nodeType;if(1==b){if(!c.test(a.className)){for(b=a.firstChild;b;b=b.nextSibling)f(b);b=a.nodeName.toLowerCase();if("br"===b||"li"===b)m[l]="\n",q[l<<1]=E++,q[l++<<1|1]=a}}else if(3==b||4==b)b=a.nodeValue,b.length&&(b=d?b.replace(/\r\n?/g,"\n"):b.replace(/[ \t\r\n]+/g,
" "),m[l]=b,q[l<<1]=E,E+=b.length,q[l++<<1|1]=a)}var c=/(?:^|\s)nocode(?:\s|$)/,m=[],E=0,q=[],l=0;f(a);return{a:m.join("").replace(/\n$/,""),c:q}}function J(a,d,f,c,m){f&&(a={h:a,l:1,j:null,m:null,a:f,c:null,i:d,g:null},c(a),m.push.apply(m,a.g))}function V(a){for(var d=void 0,f=a.firstChild;f;f=f.nextSibling)var c=f.nodeType,d=1===c?d?a:f:3===c?W.test(f.nodeValue)?a:d:d;return d===a?void 0:d}function G(a,d){function f(a){for(var l=a.i,n=a.h,b=[l,"pln"],p=0,q=a.a.match(m)||[],r={},e=0,t=q.length;e<
t;++e){var z=q[e],v=r[z],g=void 0,h;if("string"===typeof v)h=!1;else{var k=c[z.charAt(0)];if(k)g=z.match(k[1]),v=k[0];else{for(h=0;h<E;++h)if(k=d[h],g=z.match(k[1])){v=k[0];break}g||(v="pln")}!(h=5<=v.length&&"lang-"===v.substring(0,5))||g&&"string"===typeof g[1]||(h=!1,v="src");h||(r[z]=v)}k=p;p+=z.length;if(h){h=g[1];var A=z.indexOf(h),C=A+h.length;g[2]&&(C=z.length-g[2].length,A=C-h.length);v=v.substring(5);J(n,l+k,z.substring(0,A),f,b);J(n,l+k+A,h,K(v,h),b);J(n,l+k+C,z.substring(C),f,b)}else b.push(l+
k,v)}a.g=b}var c={},m;(function(){for(var f=a.concat(d),l=[],n={},b=0,p=f.length;b<p;++b){var w=f[b],r=w[3];if(r)for(var e=r.length;0<=--e;)c[r.charAt(e)]=w;w=w[1];r=""+w;n.hasOwnProperty(r)||(l.push(w),n[r]=null)}l.push(/[\0-\uffff]/);m=T(l)})();var E=d.length;return f}function x(a){var d=[],f=[];a.tripleQuotedStrings?d.push(["str",/^(?:\'\'\'(?:[^\'\\]|\\[\s\S]|\'{1,2}(?=[^\']))*(?:\'\'\'|$)|\"\"\"(?:[^\"\\]|\\[\s\S]|\"{1,2}(?=[^\"]))*(?:\"\"\"|$)|\'(?:[^\\\']|\\[\s\S])*(?:\'|$)|\"(?:[^\\\"]|\\[\s\S])*(?:\"|$))/,
null,"'\""]):a.multiLineStrings?d.push(["str",/^(?:\'(?:[^\\\']|\\[\s\S])*(?:\'|$)|\"(?:[^\\\"]|\\[\s\S])*(?:\"|$)|\`(?:[^\\\`]|\\[\s\S])*(?:\`|$))/,null,"'\"`"]):d.push(["str",/^(?:\'(?:[^\\\'\r\n]|\\.)*(?:\'|$)|\"(?:[^\\\"\r\n]|\\.)*(?:\"|$))/,null,"\"'"]);a.verbatimStrings&&f.push(["str",/^@\"(?:[^\"]|\"\")*(?:\"|$)/,null]);var c=a.hashComments;c&&(a.cStyleComments?(1<c?d.push(["com",/^#(?:##(?:[^#]|#(?!##))*(?:###|$)|.*)/,null,"#"]):d.push(["com",/^#(?:(?:define|e(?:l|nd)if|else|error|ifn?def|include|line|pragma|undef|warning)\b|[^\r\n]*)/,
null,"#"]),f.push(["str",/^<(?:(?:(?:\.\.\/)*|\/?)(?:[\w-]+(?:\/[\w-]+)+)?[\w-]+\.h(?:h|pp|\+\+)?|[a-z]\w*)>/,null])):d.push(["com",/^#[^\r\n]*/,null,"#"]));a.cStyleComments&&(f.push(["com",/^\/\/[^\r\n]*/,null]),f.push(["com",/^\/\*[\s\S]*?(?:\*\/|$)/,null]));if(c=a.regexLiterals){var m=(c=1<c?"":"\n\r")?".":"[\\S\\s]";f.push(["lang-regex",RegExp("^(?:^^\\.?|[+-]|[!=]=?=?|\\#|%=?|&&?=?|\\(|\\*=?|[+\\-]=|->|\\/=?|::?|<<?=?|>>?>?=?|,|;|\\?|@|\\[|~|{|\\^\\^?=?|\\|\\|?=?|break|case|continue|delete|do|else|finally|instanceof|return|throw|try|typeof)\\s*("+
("/(?=[^/*"+c+"])(?:[^/\\x5B\\x5C"+c+"]|\\x5C"+m+"|\\x5B(?:[^\\x5C\\x5D"+c+"]|\\x5C"+m+")*(?:\\x5D|$))+/")+")")])}(c=a.types)&&f.push(["typ",c]);c=(""+a.keywords).replace(/^ | $/g,"");c.length&&f.push(["kwd",new RegExp("^(?:"+c.replace(/[\s,]+/g,"|")+")\\b"),null]);d.push(["pln",/^\s+/,null," \r\n\t\u00a0"]);c="^.[^\\s\\w.$@'\"`/\\\\]*";a.regexLiterals&&(c+="(?!s*/)");f.push(["lit",/^@[a-z_$][a-z_$@0-9]*/i,null],["typ",/^(?:[@_]?[A-Z]+[a-z][A-Za-z_$@0-9]*|\w+_t\b)/,null],["pln",/^[a-z_$][a-z_$@0-9]*/i,
null],["lit",/^(?:0x[a-f0-9]+|(?:\d(?:_\d+)*\d*(?:\.\d*)?|\.\d\+)(?:e[+\-]?\d+)?)[a-z]*/i,null,"0123456789"],["pln",/^\\[\s\S]?/,null],["pun",new RegExp(c),null]);return G(d,f)}function L(a,d,f){function c(a){var b=a.nodeType;if(1==b&&!t.test(a.className))if("br"===a.nodeName.toLowerCase())m(a),a.parentNode&&a.parentNode.removeChild(a);else for(a=a.firstChild;a;a=a.nextSibling)c(a);else if((3==b||4==b)&&f){var e=a.nodeValue,d=e.match(q);d&&(b=e.substring(0,d.index),a.nodeValue=b,(e=e.substring(d.index+
d[0].length))&&a.parentNode.insertBefore(l.createTextNode(e),a.nextSibling),m(a),b||a.parentNode.removeChild(a))}}function m(a){function c(a,b){var e=b?a.cloneNode(!1):a,k=a.parentNode;if(k){var k=c(k,1),d=a.nextSibling;k.appendChild(e);for(var f=d;f;f=d)d=f.nextSibling,k.appendChild(f)}return e}for(;!a.nextSibling;)if(a=a.parentNode,!a)return;a=c(a.nextSibling,0);for(var e;(e=a.parentNode)&&1===e.nodeType;)a=e;b.push(a)}for(var t=/(?:^|\s)nocode(?:\s|$)/,q=/\r\n?|\n/,l=a.ownerDocument,n=l.createElement("li");a.firstChild;)n.appendChild(a.firstChild);
for(var b=[n],p=0;p<b.length;++p)c(b[p]);d===(d|0)&&b[0].setAttribute("value",d);var w=l.createElement("ol");w.className="linenums";d=Math.max(0,d-1|0)||0;for(var p=0,r=b.length;p<r;++p)n=b[p],n.className="L"+(p+d)%10,n.firstChild||n.appendChild(l.createTextNode("\u00a0")),w.appendChild(n);a.appendChild(w)}function t(a,d){for(var f=d.length;0<=--f;){var c=d[f];I.hasOwnProperty(c)?D.console&&console.warn("cannot override language handler %s",c):I[c]=a}}function K(a,d){a&&I.hasOwnProperty(a)||(a=/^\s*</.test(d)?
"default-markup":"default-code");return I[a]}function M(a){var d=a.j;try{var f=U(a.h,a.l),c=f.a;a.a=c;a.c=f.c;a.i=0;K(d,c)(a);var m=/\bMSIE\s(\d+)/.exec(navigator.userAgent),m=m&&8>=+m[1],d=/\n/g,t=a.a,q=t.length,f=0,l=a.c,n=l.length,c=0,b=a.g,p=b.length,w=0;b[p]=q;var r,e;for(e=r=0;e<p;)b[e]!==b[e+2]?(b[r++]=b[e++],b[r++]=b[e++]):e+=2;p=r;for(e=r=0;e<p;){for(var x=b[e],z=b[e+1],v=e+2;v+2<=p&&b[v+1]===z;)v+=2;b[r++]=x;b[r++]=z;e=v}b.length=r;var g=a.h;a="";g&&(a=g.style.display,g.style.display="none");
try{for(;c<n;){var h=l[c+2]||q,k=b[w+2]||q,v=Math.min(h,k),A=l[c+1],C;if(1!==A.nodeType&&(C=t.substring(f,v))){m&&(C=C.replace(d,"\r"));A.nodeValue=C;var N=A.ownerDocument,u=N.createElement("span");u.className=b[w+1];var B=A.parentNode;B.replaceChild(u,A);u.appendChild(A);f<h&&(l[c+1]=A=N.createTextNode(t.substring(v,h)),B.insertBefore(A,u.nextSibling))}f=v;f>=h&&(c+=2);f>=k&&(w+=2)}}finally{g&&(g.style.display=a)}}catch(y){D.console&&console.log(y&&y.stack||y)}}var D="undefined"!==typeof window?
window:{},B=["break,continue,do,else,for,if,return,while"],F=[[B,"auto,case,char,const,default,double,enum,extern,float,goto,inline,int,long,register,restrict,short,signed,sizeof,static,struct,switch,typedef,union,unsigned,void,volatile"],"catch,class,delete,false,import,new,operator,private,protected,public,this,throw,true,try,typeof"],H=[F,"alignas,alignof,align_union,asm,axiom,bool,concept,concept_map,const_cast,constexpr,decltype,delegate,dynamic_cast,explicit,export,friend,generic,late_check,mutable,namespace,noexcept,noreturn,nullptr,property,reinterpret_cast,static_assert,static_cast,template,typeid,typename,using,virtual,where"],
O=[F,"abstract,assert,boolean,byte,extends,finally,final,implements,import,instanceof,interface,null,native,package,strictfp,super,synchronized,throws,transient"],P=[F,"abstract,add,alias,as,ascending,async,await,base,bool,by,byte,checked,decimal,delegate,descending,dynamic,event,finally,fixed,foreach,from,get,global,group,implicit,in,interface,internal,into,is,join,let,lock,null,object,out,override,orderby,params,partial,readonly,ref,remove,sbyte,sealed,select,set,stackalloc,string,select,uint,ulong,unchecked,unsafe,ushort,value,var,virtual,where,yield"],
F=[F,"abstract,async,await,constructor,debugger,enum,eval,export,from,function,get,import,implements,instanceof,interface,let,null,of,set,undefined,var,with,yield,Infinity,NaN"],Q=[B,"and,as,assert,class,def,del,elif,except,exec,finally,from,global,import,in,is,lambda,nonlocal,not,or,pass,print,raise,try,with,yield,False,True,None"],R=[B,"alias,and,begin,case,class,def,defined,elsif,end,ensure,false,in,module,next,nil,not,or,redo,rescue,retry,self,super,then,true,undef,unless,until,when,yield,BEGIN,END"],
B=[B,"case,done,elif,esac,eval,fi,function,in,local,set,then,until"],S=/^(DIR|FILE|array|vector|(de|priority_)?queue|(forward_)?list|stack|(const_)?(reverse_)?iterator|(unordered_)?(multi)?(set|map)|bitset|u?(int|float)\d*)\b/,W=/\S/,X=x({keywords:[H,P,O,F,"caller,delete,die,do,dump,elsif,eval,exit,foreach,for,goto,if,import,last,local,my,next,no,our,print,package,redo,require,sub,undef,unless,until,use,wantarray,while,BEGIN,END",Q,R,B],hashComments:!0,cStyleComments:!0,multiLineStrings:!0,regexLiterals:!0}),
I={};t(X,["default-code"]);t(G([],[["pln",/^[^<?]+/],["dec",/^<!\w[^>]*(?:>|$)/],["com",/^<\!--[\s\S]*?(?:-\->|$)/],["lang-",/^<\?([\s\S]+?)(?:\?>|$)/],["lang-",/^<%([\s\S]+?)(?:%>|$)/],["pun",/^(?:<[%?]|[%?]>)/],["lang-",/^<xmp\b[^>]*>([\s\S]+?)<\/xmp\b[^>]*>/i],["lang-js",/^<script\b[^>]*>([\s\S]*?)(<\/script\b[^>]*>)/i],["lang-css",/^<style\b[^>]*>([\s\S]*?)(<\/style\b[^>]*>)/i],["lang-in.tag",/^(<\/?[a-z][^<>]*>)/i]]),"default-markup htm html mxml xhtml xml xsl".split(" "));t(G([["pln",/^[\s]+/,
null," \t\r\n"],["atv",/^(?:\"[^\"]*\"?|\'[^\']*\'?)/,null,"\"'"]],[["tag",/^^<\/?[a-z](?:[\w.:-]*\w)?|\/?>$/i],["atn",/^(?!style[\s=]|on)[a-z](?:[\w:-]*\w)?/i],["lang-uq.val",/^=\s*([^>\'\"\s]*(?:[^>\'\"\s\/]|\/(?=\s)))/],["pun",/^[=<>\/]+/],["lang-js",/^on\w+\s*=\s*\"([^\"]+)\"/i],["lang-js",/^on\w+\s*=\s*\'([^\']+)\'/i],["lang-js",/^on\w+\s*=\s*([^\"\'>\s]+)/i],["lang-css",/^style\s*=\s*\"([^\"]+)\"/i],["lang-css",/^style\s*=\s*\'([^\']+)\'/i],["lang-css",/^style\s*=\s*([^\"\'>\s]+)/i]]),["in.tag"]);
t(G([],[["atv",/^[\s\S]+/]]),["uq.val"]);t(x({keywords:H,hashComments:!0,cStyleComments:!0,types:S}),"c cc cpp cxx cyc m".split(" "));t(x({keywords:"null,true,false"}),["json"]);t(x({keywords:P,hashComments:!0,cStyleComments:!0,verbatimStrings:!0,types:S}),["cs"]);t(x({keywords:O,cStyleComments:!0}),["java"]);t(x({keywords:B,hashComments:!0,multiLineStrings:!0}),["bash","bsh","csh","sh"]);t(x({keywords:Q,hashComments:!0,multiLineStrings:!0,tripleQuotedStrings:!0}),["cv","py","python"]);t(x({keywords:"caller,delete,die,do,dump,elsif,eval,exit,foreach,for,goto,if,import,last,local,my,next,no,our,print,package,redo,require,sub,undef,unless,until,use,wantarray,while,BEGIN,END",
hashComments:!0,multiLineStrings:!0,regexLiterals:2}),["perl","pl","pm"]);t(x({keywords:R,hashComments:!0,multiLineStrings:!0,regexLiterals:!0}),["rb","ruby"]);t(x({keywords:F,cStyleComments:!0,regexLiterals:!0}),["javascript","js","ts","typescript"]);t(x({keywords:"all,and,by,catch,class,else,extends,false,finally,for,if,in,is,isnt,loop,new,no,not,null,of,off,on,or,return,super,then,throw,true,try,unless,until,when,while,yes",hashComments:3,cStyleComments:!0,multilineStrings:!0,tripleQuotedStrings:!0,
regexLiterals:!0}),["coffee"]);t(G([],[["str",/^[\s\S]+/]]),["regex"]);var Y=D.PR={createSimpleLexer:G,registerLangHandler:t,sourceDecorator:x,PR_ATTRIB_NAME:"atn",PR_ATTRIB_VALUE:"atv",PR_COMMENT:"com",PR_DECLARATION:"dec",PR_KEYWORD:"kwd",PR_LITERAL:"lit",PR_NOCODE:"nocode",PR_PLAIN:"pln",PR_PUNCTUATION:"pun",PR_SOURCE:"src",PR_STRING:"str",PR_TAG:"tag",PR_TYPE:"typ",TAOSDATA_FUNCTION:"td-fun",TAOSDATA_DATATYPE:"td-dtp",TAOSDATA_TERMINAL:"tem",TAOSDATA_OPTION:"td-opt",prettyPrintOne:D.prettyPrintOne=function(a,d,f){f=f||!1;d=d||null;var c=document.createElement("div");c.innerHTML="<pre>"+a+"</pre>";
c=c.firstChild;f&&L(c,f,!0);M({j:d,m:f,h:c,l:1,a:null,i:null,c:null,g:null});return c.innerHTML},prettyPrint:D.prettyPrint=function(a,d){function f(){for(var c=D.PR_SHOULD_USE_CONTINUATION?b.now()+250:Infinity;p<x.length&&b.now()<c;p++){for(var d=x[p],l=g,n=d;n=n.previousSibling;){var m=n.nodeType,u=(7===m||8===m)&&n.nodeValue;if(u?!/^\??prettify\b/.test(u):3!==m||/\S/.test(n.nodeValue))break;if(u){l={};u.replace(/\b(\w+)=([\w:.%+-]+)/g,function(a,b,c){l[b]=c});break}}n=d.className;if((l!==g||r.test(n))&&
!e.test(n)){m=!1;for(u=d.parentNode;u;u=u.parentNode)if(v.test(u.tagName)&&u.className&&r.test(u.className)){m=!0;break}if(!m){d.className+=" prettyprinted";m=l.lang;if(!m){var m=n.match(w),q;!m&&(q=V(d))&&z.test(q.tagName)&&(m=q.className.match(w));m&&(m=m[1])}if(B.test(d.tagName))u=1;else var u=d.currentStyle,y=t.defaultView,u=(u=u?u.whiteSpace:y&&y.getComputedStyle?y.getComputedStyle(d,null).getPropertyValue("white-space"):0)&&"pre"===u.substring(0,3);y=l.linenums;(y="true"===y||+y)||(y=(y=n.match(/\blinenums\b(?::(\d+))?/))?
y[1]&&y[1].length?+y[1]:!0:!1);y&&L(d,y,u);M({j:m,h:d,m:y,l:u,a:null,i:null,c:null,g:null})}}}p<x.length?D.setTimeout(f,250):"function"===typeof a&&a()}for(var c=d||document.body,t=c.ownerDocument||document,c=[c.getElementsByTagName("pre"),c.getElementsByTagName("code"),c.getElementsByTagName("xmp")],x=[],q=0;q<c.length;++q)for(var l=0,n=c[q].length;l<n;++l)x.push(c[q][l]);var c=null,b=Date;b.now||(b={now:function(){return+new Date}});var p=0,w=/\blang(?:uage)?-([\w.]+)(?!\S)/,r=/\bprettyprint\b/,
e=/\bprettyprinted\b/,B=/pre|xmp/i,z=/^code$/i,v=/^(?:pre|code|xmp)$/i,g={};f()}},H=D.define;"function"===typeof H&&H.amd&&H("google-code-prettify",[],function(){return Y})})();}()


@ -1,31 +0,0 @@
PR['registerLangHandler'](
PR['createSimpleLexer'](
[
// Whitespace
[PR['PR_PLAIN'], /^[\t\n\r \xA0]+/, null, '\t\n\r \xA0'],
// A double or single quoted, possibly multi-line, string.
[PR['PR_STRING'], /^(?:"(?:[^\"\\]|\\.)*"|'(?:[^\'\\]|\\.)*')/, null,
'"\'']
],
[
// A comment is either a line comment that starts with two dashes, or
// two dashes preceding a long bracketed block.
[PR['PR_COMMENT'], /^(?:--[^\r\n]*|\/\*[\s\S]*?(?:\*\/|$))/],
[PR['PR_KEYWORD'], /^(?:ADD|ALL|ALTER|AND|ANY|APPLY|AS|ASC|AUTHORIZATION|BACKUP|BEGIN|BETWEEN|BREAK|BROWSE|BULK|BY|CASCADE|CASE|CHECK|CHECKPOINT|CLOSE|CLUSTERED|COALESCE|COLLATE|COLUMN|COMMIT|COMPUTE|CONNECT|CONSTRAINT|CONTAINS|CONTAINSTABLE|CONTINUE|CONVERT|CREATE|CROSS|CURRENT|CURRENT_DATE|CURRENT_TIME|CURRENT_TIMESTAMP|CURRENT_USER|CURSOR|DATABASE|DBCC|DEALLOCATE|DECLARE|DEFAULT|DELETE|DENY|DESC|DISK|DISTINCT|DISTRIBUTED|DROP|DUMMY|DUMP|ELSE|END|ERRLVL|ESCAPE|EXCEPT|EXEC|EXECUTE|EXISTS|EXIT|FETCH|FILE|FILL|FILLFACTOR|FOLLOWING|FOR|FOREIGN|FREETEXT|FREETEXTTABLE|FROM|FULL|FUNCTION|GOTO|GRANT|GROUP|HAVING|HOLDLOCK|IDENTITY|IDENTITYCOL|IDENTITY_INSERT|IF|IN|INDEX|INNER|INSERT|INTERSECT|INTO|IS|JOIN|KEY|KILL|LEFT|LIKE|LINENO|LOAD|MATCH|MATCHED|MERGE|NATURAL|NATIONAL|NOCHECK|NONCLUSTERED|NOCYCLE|NOT|NULL|NULLIF|OF|OFF|OFFSETS|ON|OPEN|OPENDATASOURCE|OPENQUERY|OPENROWSET|OPENXML|OPTION|OR|ORDER|OUTER|OVER|PARTITION|PERCENT|PIVOT|PLAN|PRECEDING|PRIMARY|PRINT|PROC|PROCEDURE|PUBLIC|RAISERROR|READ|READTEXT|RECONFIGURE|REFERENCES|REPLICATION|RESTORE|RESTRICT|RETURN|REVOKE|RIGHT|ROLLBACK|ROWCOUNT|ROWGUIDCOL|RULE|SAVE|SCHEMA|SELECT|SESSION_USER|SET|SETUSER|SHUTDOWN|SLIDING|SOME|START|STATISTICS|SYSTEM_USER|TABLE|TAGS|TEXTSIZE|THEN|TO|TRAN|TRANSACTION|TRIGGER|TRUNCATE|TSEQUAL|UNBOUNDED|UNION|UNIQUE|UNPIVOT|UPDATE|UPDATETEXT|USE|USER|USING|VALUES|VARYING|VIEW|WAITFOR|WHEN|WHERE|WHILE|WITH|WITHIN|WRITETEXT|XML|ID|STRING|INTEGER|OR|AND|NOT|EQ|NE|ISNULL|NOTNULL|IS|LIKE|GLOB|BETWEEN|IN|GT|GE|LT|LE|BITAND|BITOR|LSHIFT|RSHIFT|PLUS|MINUS|DIVIDE|TIMES|STAR|SLASH|REM|CONCAT|UMINUS|UPLUS|BITNOT|SHOW|DATABASES|MNODES|USERS|MODULES|QUERIES|CONNECTIONS|STREAMS|CONFIGS|SCORES|GRANTS|DOT|TABLES|METRICS|VGROUPS|DROP|TABLE|DATABASE|IP|USER|USE|DESCRIBE|ALTER|PASS|PRIVILEGE|LOCAL|IF|EXISTS|REPLICA|DAYS|KEEP|ROWS|CACHE|ABLOCKS|TBLOCKS|CTIME|CLOG|COMP|LP|RP|TAGS|USING|AS|COMMA|SELECT|FROM|VARIABLE|INTERVAL|FILL|SLIDING|ORDER|BY|ASC|DESC|GROUP|LIMIT|OFFSET|WHERE|NOW|INSERT|INTO|VALUES|RESET|QUERY|ADD|COLUMN|TAG|CHANGE|SET|KILL|CONNECTION|STREAM|ABORT|AFTER|ATTACH|BEFORE|BEGIN|CASCADE|CLUSTER|CONFLICT|COPY|DEFERRED|DELIMITERS|DETACH|EACH|END|EXPLAIN|FAIL|FOR|IGNORE|IMMEDIATE|INITIALLY|INSTEAD|MATCH|KEY|OF|RAISE|REPLACE|RESTRICT|ROW|STATEMENT|TRIGGER|VIEW|ALL|SEMI|NONE|PREV|LINEAR|IMPORT|METRIC|TBNAME|JOIN|STABLE|STABLES|SLIMIT|SOFFSET|HAVING|PRECISION|STREAMS|NULL)(?=[^\w-]|$)/i, null],
// TDengine aggregate and selection functions
[PR['TAOSDATA_FUNCTION'], /^(?:APERCENTILE|AVG|BOTTOM|COUNT|DIFF|FIRST|HISTOGRAM|INTERP|LAST|LAST_ROW|LEASTSQUARES|MAX|MIN|PERCENTILE|SPREAD|STDDEV|SUM|TOP|WAVG)(?=[^\w-]|$)/i, null],
[PR['TAOSDATA_OPTION'],
/^(?:ABLOCKS|CACHE|CLOG|COMP|CTIME|DAYS|KEEP|PRECISION|REPLICA|ROWS|TABLES|TBLOCKS)(?=[^\w-]|$)/i,null],
[PR['TAOSDATA_DATATYPE'],
/^(?:BIGINT|BINARY|BOOL|DOUBLE|FLOAT|INT|NCHAR|SMALLINT|TINYINT)(?=[^\w-]|$)/i, null],
// A number is a hex integer literal, a decimal real literal, or in
// scientific notation.
[PR['PR_LITERAL'],
/^[+-]?(?:0x[\da-f]+|(?:(?:\.\d+|\d+(?:\.\d*)?)(?:e[+\-]?\d+)?))/i],
// An identifier
[PR['PR_PLAIN'], /^[a-z_][\w-]*/i],
// A run of punctuation
[PR['PR_PUNCTUATION'], /^[^\w\t\n\r \xA0\"\'][^\w\t\n\r \xA0+\-\"\']*/]
]
),
['sql','mysql']);


@ -1,9 +0,0 @@
PR['registerLangHandler'](
PR['createSimpleLexer'](
[
// The terminal "language" has no syntax to tokenize: style the
// entire input as one run of terminal output.
[PR['TAOSDATA_TERMINAL'], /^[\s\S]+/]
]
),
['terminal','term']);


@ -1,231 +0,0 @@
pre.prettyprint ol {
list-style-type: none;
margin-left: 0;
}
pre.prettyprint ol > li {
counter-increment: customlistcounter;
}
pre.prettyprint ol > li:first-child:before {
border-top-left-radius: 0.25rem;
}
pre.prettyprint ol > li:last-child:before {
border-bottom-left-radius: 0.25rem;
}
pre.prettyprint ol > li:before {
content: counter(customlistcounter) " ";
font-weight: 300;
display: inline-block;
position: absolute;
transform:translateX(-38px);
width: 27px;
text-align: right;
background-color:var(--white);
padding-bottom: 0.1px;
}
pre.prettyprint ol > li:nth-last-child(1)::before {
padding-bottom: 0px !important;
}
pre.prettyprint.twoc ol > li:before {
transform:translateX(-45px);
width:34px;
}
pre.prettyprint.twoc {
padding-left:35px;
}
pre.prettyprint.threec ol > li:before {
transform:translateX(-53px);
width:42px;
}
pre.prettyprint.threec {
padding-left:43px;
}
ol:first-child {
counter-reset: customlistcounter;
}
pre.prettyprint ol {
padding-right: 4px;
}
pre .atn,
pre .kwd,
pre .tag {
font-weight: 400
}
pre * {
font-family:monospace;
}
pre.prettyprint li {
background-color:rgb(244,245,246);
}
pre.prettyprint {
display: block;
background-color:rgb(244,245,246);
border-radius:0.25rem;
padding-left: 27px;
/*each additional digit needs 8px*/
width:100%;
border:1px solid #e7eaed;
color:#d58936;
}
/* TAOSDATA Specific */
pre.lang-blank span {
color:var(--sg1);
}
pre.lang-blank {
}
pre.lang-term span{
color: var(--white) ;
}
pre.lang-term ol {
background-color: var(--sg1);
}
pre.lang-term ol.linenums {
border-left:1px solid var(--sg1);
}
pre.lang-term li {
background-color:var(--sg1);
}
/*Functions*/
pre .td-fun {
color:#f24352;
}
/*Options*/
pre .td-opt {
/*color:mediumpurple;*/
color:#5882bc;
}
/*Datatypes*/
pre .td-dtp {
color:darkcyan;
}
pre .nocode {
background-color: var(--white);
color: var(--sg1);
}
/*Strings*/
pre .str {
color: #690;
}
/*Keywords*/
pre .kwd {
color: #5882bc;
}
/*Comments*/
pre .com {
color: slategray;
}
/*Type*/
pre .typ {
color: #9c5fc6;
}
/*Literals*/
pre .lit {
color: #91001f;
}
/*Plain Text*/
pre .pln {
color: #d58936;
}
/*Punctuation*/
pre .pun {
color: rgb(51,66,78);
}
pre .tag {
color: khaki
}
pre .atn {
color: #bdb76b
}
pre .atv {
color: #ffa0a0
}
pre .dec {
color: #98fb98
}
ol.linenums {
margin-top: 0;
margin-bottom: 0;
color: #AEAEAE;
border-left:1px solid var(--b1);
padding-left: 0px;
}
pre li {
padding-left: 0.6rem;
}
li.L0,
li.L1,
li.L2,
li.L3,
li.L5,
li.L6,
li.L7,
li.L8 {
list-style-type: none
}
@media print {
pre.prettyprint {
background-color: none
}
code .str,
pre .str {
color: #690;
}
code .kwd,
pre .kwd {
color: #5882bc;
font-weight: 400
}
code .com,
pre .com {
color: #600;
font-style: italic
}
code .typ,
pre .typ {
color: #404;
font-weight: 400
}
code .lit,
pre .lit {
color: #044
}
code .pun,
pre .pun {
color: #440
}
code .pln,
pre .pln {
color: #000
}
code .tag,
pre .tag {
color: #006;
font-weight: 400
}
code .atn,
pre .atn {
color: #404
}
code .atv,
pre .atv {
color: #060
}
}

File diff suppressed because one or more lines are too long


@ -1,159 +0,0 @@
<!DOCTYPE html><html lang='en'><head><title>Documentation | TAOS Data</title><meta name='description' content='TDengine is an open-source big data platform designed and optimized for IoT, connected vehicles, industrial IoT, and IT operations. Beyond a time-series database core that is more than 10x faster, it provides caching, data subscription, stream computing, and other features that minimize development and maintenance effort.'><meta name='keywords' content='Big Data, open source, IoT, connected vehicles, industrial IoT, IT operations, time-series database, cache, data subscription, message queue, stream computing, TAOS Data, TDengine'><meta name='title' content='Documentation | TAOS Data'><meta property='og:site_name' content='TAOS Data'/><meta property='og:title' content='Documentation | TAOS Data'/><meta property='og:type' content='article'/><meta property='og:url' content='https://www.taosdata.com/cn/documentation/more-on-system-architecture-ch/index.php'/><meta property='og:description' content='TDengine is an open-source big data platform designed and optimized for IoT, connected vehicles, industrial IoT, and IT operations. Beyond a time-series database core that is more than 10x faster, it provides caching, data subscription, stream computing, and other features that minimize development and maintenance effort.' /><link rel='canonical' href='https://www.taosdata.com/cn/documentation/more-on-system-architecture-ch/index.php'/><script src='../lib/jquery-3.4.1.min.js' type='application/javascript'></script><link href='../lib/bootstrap.min.css' rel='stylesheet'><link href='../styles/base.min.css' rel='stylesheet'><link rel='stylesheet' href='../lib/docs/taosdataprettify.css'><link rel='stylesheet' href='../lib/docs/docs.css'><script src='../lib/docs/prettify.js'></script><script src='../lib/docs/prettyprint-sql.js'></script></head><body><script>$('#documentation-href').addClass('active')</script><div class='container-fluid'><main class='content-wrapper'><section class='documentation'><a href='../index.html'>Back</a><h1>The Technical Design of TDengine</h1>
<a class='anchor' id='存储设计'></a><h2>Storage Design</h2>
<p>Data storage in TDengine consists mainly of <strong>metadata storage</strong> and <strong>written data storage</strong>. The following sections describe the storage structure of each kind of data in detail.</p>
<a class='anchor' id='元数据的存储'></a><h3>Metadata Storage</h3>
<p>Metadata in TDengine covers the databases, super tables, and other objects in TDengine. By default it is stored under the <em>/var/lib/taos/mgmt/</em> directory, which is laid out as follows:</p>
<pre><code>/var/lib/taos/
+--mgmt/
+--db.db
+--meters.db
+--user.db
+--vgroups.db</code></pre>
<p>Metadata records are laid out sequentially in the file. Each record represents one metadata entity in TDengine (a database, a table, and so on). Metadata files are append-only: even a metadata deletion is recorded by appending a deletion record to the data file.</p>
<a class='anchor' id='写入数据的存储'></a><h3>Written Data Storage</h3>
<p>Data written to TDengine is sharded on disk along the time dimension. Data of the tables in one vnode that falls within the same time range is stored in the same file group, such as the v0f1804* files in the tree below. This sharding scheme greatly simplifies queries along the time dimension and improves query speed. By default each file on disk holds 10 days of data; you can adjust this with the database's <em>daysPerFile</em> configuration option. Inside a file, data is stored in blocks. Each data block contains data of only one table, sorted in ascending order by the timestamp primary key. Within a block, data is stored column by column, so values of the same type sit together, which greatly improves the compression ratio and saves storage space. TDengine applies different compression algorithms to different data types to achieve the best result, including Simple8B, delta-of-delta, RLE, and LZ4.</p>
<p>TDengine data files are stored under <em>/var/lib/taos/data/</em> by default, while <em>/var/lib/taos/tsdb/</em> holds vnode information, information about the tables in each vnode, and links to the data files. The full directory structure looks like this:</p>
<pre><code>/var/lib/taos/
+--tsdb/
| +--vnode0
| +--meterObj.v0
| +--db/
| +--v0f1804.head-&gt;/var/lib/taos/data/vnode0/v0f1804.head1
| +--v0f1804.data-&gt;/var/lib/taos/data/vnode0/v0f1804.data
| +--v0f1804.last-&gt;/var/lib/taos/data/vnode0/v0f1804.last1
| +--v0f1805.head-&gt;/var/lib/taos/data/vnode0/v0f1805.head1
| +--v0f1805.data-&gt;/var/lib/taos/data/vnode0/v0f1805.data
| +--v0f1805.last-&gt;/var/lib/taos/data/vnode0/v0f1805.last1
| :
+--data/
+--vnode0/
+--v0f1804.head1
+--v0f1804.data
+--v0f1804.last1
+--v0f1805.head1
+--v0f1805.data
+--v0f1805.last1
:</code></pre>
<h4>The meterObj File</h4>
<p>Each vnode has exactly one <em>meterObj</em> file. It stores basic information about the vnode (creation time, configuration, statistics, and so on) as well as information about the tables in that vnode. Its structure is as follows:</p>
<pre><code>&lt;file start&gt;
[file header]
[offset and length of table record 1]
[offset and length of table record 2]
...
[offset and length of table record N]
[table record 1]
[table record 2]
...
[table record N]
[table records]
&lt;file end&gt;</code></pre>
<p>The file header is 512 bytes and mainly holds basic vnode information. Each table record is the on-disk representation of one table belonging to that vnode.</p>
<h4>The head File</h4>
<p>The head file stores the index of the data blocks in the corresponding data file. It is organized as follows:</p>
<pre><code>&lt;file start&gt;
[file header]
[offset of table 1]
[offset of table 2]
...
[offset of table N]
[data index of table 1]
[data index of table 2]
...
[data index of table N]
&lt;file end&gt;</code></pre>
<p>The offset list at the beginning of the file gives, for each table, the offset at which that table's data index block starts. Each table's data index is stored contiguously in the head file, so when reading a single table TDengine can load all of the table's block indexes into memory in one shot, greatly improving read speed. A table's data index block is organized as follows:</p>
<pre><code>[index block information]
[index of data block 1]
[index of data block 2]
...
[index of data block N]</code></pre>
<p>The index block information records the number of data blocks and other descriptive data. Each data block index corresponds to a single data block in the data file or the last file, and records which file the block lives in, the offset at which the block starts, the range of the timestamp primary key within the block, and so on. The block indexes inside an index block are ordered by time range, meaning that all timestamps in the block referenced by index M are later than those of index M-1. This pre-sorted layout lets TDengine use binary search when querying by timestamp, greatly improving query speed.</p>
<h4>The data File</h4>
<p>The data file stores the actual data blocks. It is append-only, and is organized as follows:</p>
<pre><code>&lt;file start&gt;
[file header]
[data block 1]
[data block 2]
...
[data block N]
&lt;file end&gt;</code></pre>
<p>Each data block belongs to exactly one table in the vnode, and its rows are ordered by the timestamp primary key. Data inside a block is organized by column, so values of the same type are stored together, which eases compression and reading. Each data block is organized as follows:</p>
<pre><code>[column 1 information]
[column 2 information]
...
[column N information]
[column 1 data]
[column 2 data]
...
[column N data]</code></pre>
<p>The column information includes the column's type, its compression algorithm, and the offset and length of the column's data in the file. It also includes pre-computed results for that column's data within the block, so that a filtering query can decide from the pre-computed results whether the block needs to be read at all, greatly improving read speed.</p>
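<p>A hypothetical sketch of how these pre-computed per-block results help; the table and column names below are assumptions for illustration, not part of TDengine. For a filter like this one, any block whose stored maximum of degree is below the threshold can be skipped without reading the block body:</p>
<pre><code class="mysql language-mysql">SELECT COUNT(*) FROM thermometer WHERE degree &gt; 35;</code></pre>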
<h4>The last File</h4>
<p>To prevent data block fragmentation and to improve query speed and compression ratio, TDengine introduces the last file. When a data block about to be flushed to disk contains fewer rows than a threshold, TDengine first writes it into the last file for temporary storage. The next time data needs to be flushed, the rows in the last file are read back, merged with the new data into a new block, and written to the data file. The last file is organized like the data file.</p>
<a class='anchor' id='TDengine数据存储小结'></a><h3>Summary of TDengine Data Storage</h3>
<p>Through its innovative architecture and storage design, TDengine makes effective use of hardware resources. On one hand, TDengine's virtualization makes horizontal scaling and backup easy. On the other hand, storing table data sorted by the timestamp primary key, combined with the columnar layout, gives TDengine strong advantages in writing, querying, and compression.</p>
<a class='anchor' id='查询处理'></a><h2>Query Processing</h2>
<a class='anchor' id='概述'></a><h3>Overview</h3>
<p>TDengine provides a wide variety of query functions for tables and super tables. Besides regular aggregate queries, it also offers time-window queries and statistical aggregation over time-series data. Query processing in TDengine is carried out cooperatively by the client, the management node, and the data nodes. The query-related functions and modules of each component are as follows:</p>
<p>Client (Client App). The client consists of the TAOS SQL parser (SQL Parser), the query executor (Query Executor), the second-stage aggregator (Result Merger), and the continuous query manager (Continuous Query Manager). The SQL parser parses and validates a SQL statement and turns it into an abstract syntax tree; the query executor converts the tree into query execution logic and, based on the query conditions, splits it into a two-level query: a metadata query against the management node and a data query against the data nodes. Since TAOS SQL currently provides no complex nested queries or pipelined query processing, there is no need for query plan optimization or logical-to-physical plan conversion. The second-stage aggregator merges the independent results returned by the data nodes into the final result. The continuous query manager manages user-created continuous queries, launching them on schedule and, as required, writing results back into TDengine or returning them to the application. The client is also responsible for retrying failed queries, canceling query requests, keeping the connection heartbeat alive, and reporting query status to the management node.</p>
<p>Management node (Management Node). The management node holds the metadata of all data in the cluster, provides clients with the metadata they need for a query, and splits query requests according to cluster load. A super table contains the information of all tables created through it, so the management node's query executor (Query Executor) handles tag (TAG) queries and returns to the client the tables that satisfy the tag filter. The management node also maintains the cluster's query status (Query Status Manager), keeping in memory a temporary record of all queries currently executing; when a client issues the <em>show queries</em> command, information about the queries currently running in the system is returned to it.</p>
<p>Data node (Data Node). The data node stores the actual database content and schedules query execution through the query executor, the query processing scheduler, and a query task queue (Query Task Queue). Query requests received from clients are placed in the queue, from which the query executor takes and runs them. The query optimizer (Query Optimizer) performs basic optimizations, and the data node's query executor (Query Executor) scans the qualifying data units and returns the computed results. The data node also responds to management information and commands from the management node; for example, after a <em>kill query</em> command, the running query task must be stopped immediately.</p>
<p><center> <img src="../assets/fig1.png"> </center>
<center>Figure 1. Query processing architecture (query-related components only)</center></p>
<a class='anchor' id='普通查询处理'></a><h3>Regular Query Processing</h3>
<p>The client, management node, and data nodes cooperate to complete the full query processing flow in TDengine. We use a concrete SQL query to walk through it. The SQL statement below asks the super table <em>FOO_SUPER_TABLE</em> for the total number of records, across all of its tables, whose timestamps fall within January 12, 2019 and whose tag TAG_LOC is 'beijing':</p>
<pre><code class="sql language-sql">SELECT COUNT(*)
FROM FOO_SUPER_TABLE
WHERE TAG_LOC = 'beijing' AND TS &gt;= '2019-01-12 00:00:00' AND TS &lt; '2019-01-13 00:00:00'</code></pre>
<p>First, the client calls the TAOS SQL parser to parse and validate the SQL statement, builds the syntax tree, and extracts the query target: the super table <em>FOO_SUPER_TABLE</em>. The parser then requests the corresponding metadata from the management node (Management Node), sending the filter (TAG_LOC='beijing') along with the request.</p>
<p>The management node receives the metadata request, locates the basic information of the super table <em>FOO_SUPER_TABLE</em>, and applies the filter to all tables created through that super table. For the tables satisfying the condition (TAG_LOC='beijing'), i.e. those whose <em>TAG_LOC</em> tag column is 'beijing', its query executor returns the metadata of the qualifying objects (tables or super table) to the client.</p>
<p>Once the client has the metadata of <em>FOO_SUPER_TABLE</em>, its query executor issues query requests to the nodes holding the relevant data, following the data distribution recorded in the metadata. The timestamp range filter (TS &gt;= '2019-01-12 00:00:00' AND TS &lt; '2019-01-13 00:00:00') is sent to all of the data nodes involved.</p>
<p>A data node receives the query from the client, converts it to an internal representation, optimizes it, and puts it into the task execution queue to await execution by the query executor. When the query result is obtained, it is returned to the client. Each data node executes its query independently, relying only on its own data and resources.</p>
<p>After all data nodes involved in the query have returned results, the client aggregates the result sets once more (in this example, all partial results are summed), and the accumulated value is the final result. Not every query needs this second-stage aggregation; for example, a column projection does not.</p>
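<p>A hedged illustration of this difference, reusing the names from the example above: the first statement needs the client-side Result Merger to sum per-node partial counts, while the second is a pure projection whose rows stream back from each data node with no second-stage aggregation.</p>
<pre><code class="mysql language-mysql">-- partial COUNT(*) results from each data node are merged on the client
SELECT COUNT(*) FROM FOO_SUPER_TABLE WHERE TAG_LOC = 'beijing';
-- a projection: rows are returned directly, no merge step needed
SELECT TS FROM FOO_SUPER_TABLE WHERE TAG_LOC = 'beijing';</code></pre>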
<a class='anchor' id='REST查询处理'></a><h3>REST Query Processing</h3>
<p>Besides the C/C++, Python, and JDBC interfaces, TDengine also provides a REST interface over HTTP. Unlike development with an application-side client, when the REST interface is used all query processing happens on the server side: the user's application takes no part in the database's computation, and when processing finishes the result is returned to the user over HTTP in JSON format.</p>
<p><center> <img src="../assets/fig2.png"> </center>
<center>Figure 2. REST query architecture</center></p>
<p>When a user issues a query through the HTTP-based REST interface, the HTTP request first connects to the HTTP connector (Connector) on a data node; REST's signing mechanism then uses a token to ensure the request's authenticity. On receiving the request, the data node's HTTP connector invokes its embedded client program to issue the query. The embedded client parses the SQL statement passed in through the HTTP connector, requests metadata from the management node as needed, sends query requests to this machine or to other nodes in the cluster, and finally aggregates the results as required. Once the HTTP connector has received the SQL request, the remaining processing is exactly the same as with an application-side client. Finally, the query result is converted to a JSON string and returned to the client in the HTTP response.</p>
<p>Note that throughout the HTTP flow the user application no longer takes part in query processing; it only sends SQL requests over HTTP and receives results in JSON. Also note that every data node embeds an HTTP connector and a client program, so a request to any data node in the cluster can be answered over HTTP with the user's query result.</p>
<a class='anchor' id='技术特征'></a><h3>Technical Characteristics</h3>
<p>Because TDengine stores data and tags separately, the redundancy of stored tag data is greatly reduced. Tag data is attached directly to each table and managed in a fully in-memory structure, which enables fast query processing: tag queries over tens of millions of tags return within milliseconds. Filtering on tags first effectively reduces the amount of data the second query stage must touch. To raise query performance further, and exploiting the fact that IoT data is immutable, TDengine records statistics such as the maximum, minimum, and sum on every stored data block. If a query touches all data of a block, the pre-computed results are used directly and the block body is not read. Since the pre-computed section is far smaller than the actual data stored on disk, for query workloads bottlenecked on disk I/O, using pre-computed results dramatically reduces read I/O and speeds up the query pipeline.</p>
<p>Because TDengine stores data by column, when a block is read from disk for computation, only the queried columns are read, never unrelated data, minimizing reads. Moreover, thanks to the columnar layout, a data node scans column blocks sequentially, making full use of the CPU's L2 cache and greatly accelerating scans. In addition, some queries return results before the whole query completes; in a column projection, for example, the data node returns the first batch of results to the client as soon as it is produced. Likewise, during processing, the data node acknowledges the query to the client as soon as the request arrives, starts processing, and responds to the user again only when execution completes.</p>
<a class='anchor' id='TDengine集群设计'></a><h2>TDengine Cluster Design</h2>
<a class='anchor' id='1集群与主要逻辑单元'></a><h3>1. Cluster and Main Logical Units</h3>
<p>TDengine is designed on the assumptions that hardware and software are unreliable and will fail, and that no single machine has enough capacity to process massive data volumes. Therefore, from day one TDengine has been designed as a distributed, highly reliable architecture: fully decentralized and horizontally scalable, so that the failure of one or several servers, or a software error, does not affect the service. Through node virtualization assisted by automatic load balancing, TDengine makes maximal use of the compute and storage resources of a heterogeneous cluster. Moreover, as long as the replica count is greater than one, hardware or software upgrades and even IDC migration require no cluster downtime, which greatly protects normal operation and reduces the workload of administrators and operators.</p>
<p>The example figure below shows eight physical nodes, each logically divided into multiple virtual nodes. The basic concepts of the system are introduced below.</p>
<p><img src="../assets/nodes.png" alt="assets/nodes.png" /></p>
<p><strong>Physical node (dnode)</strong>: a physical server in the cluster or a virtual machine on a cloud platform. For security and communication efficiency, a physical node can be configured with two NICs or two IP addresses: one NIC for intra-cluster communication, whose IP address is the <strong>privateIp</strong>, and another for communication with applications outside the cluster, whose IP address is the <strong>publicIp</strong>. On some cloud platforms (such as Alibaba Cloud) the external IP address is mapped, so the publicIp also has a corresponding internal address, the <strong>internalIp</strong> (different from the privateIp). For a physical node with only one IP address, publicIp, privateIp, and internalIp are all the same address with no distinction. Exactly one taosd instance runs on each dnode.</p>
<p><strong>Virtual data node (vnode)</strong>: an independently running basic logical unit on top of a physical node. Time-series data writes, storage, and queries all execute inside virtual nodes (V in the figure), and the collected time-series data is stored on vnodes. A vnode contains a fixed number of tables. When a new table is created, the system checks whether a new vnode needs to be created. The number of vnodes a physical node can host depends on its hardware resources. A vnode belongs to exactly one DB, but a DB can have multiple vnodes.</p>
<p><strong>Virtual data node group (vgroup)</strong>: vnodes on different physical nodes can form a virtual data node group (vnode group), such as V0 on dnode0, V1 on dnode1, and V2 on dnode6 in the figure above. Vnodes in the same vgroup are managed in a master/slave fashion. Writes can only happen on the master, and the data is replicated to the slaves asynchronously, so copies of the data exist on multiple physical nodes. If the master goes down, the other nodes detect it and elect a new master for the vgroup, which continues handling data requests and keeps the system reliable. The number of vnodes in a vgroup is the data's replica count: if a DB's replica count is N, the system must have at least N physical nodes. The replica count is set with the replica parameter when creating the DB and defaults to 1. With TDengine, data safety relies on replication, so expensive storage such as disk arrays is no longer needed.</p>
<p><strong>Virtual management node (mnode)</strong>: monitors and maintains the running state of all nodes and balances load among them (M in the figure). The virtual management node also stores and manages metadata (users, databases, tables, static tags, and so on), so it is also called the Meta Node. A TDengine cluster can be configured with multiple mnodes (at most 5), which automatically form a management node cluster (M0, M1, M2 in the figure). mnodes are managed in a master/slave fashion and synchronize data with strong consistency. The mnode cluster is created automatically by the system with no manual intervention. Each dnode hosts at most one mnode, and every dnode knows the IP addresses of all mnodes in the cluster.</p>
<p><strong>taosc</strong>: a software module, the driver TDengine provides to applications, embedded in the JDBC and ODBC drivers and in the C connector library. Applications interact with the cluster through taosc rather than directly. This module fetches and caches metadata, forwards insert and query requests to the correct virtual node, and performs the final level of aggregation, sorting, and filtering before returning results to the application. For the JDBC, ODBC, and C/C++ interfaces, this module runs on the application's machine and consumes very little resource. To support the fully distributed REST interface, an instance of taosc runs on every dnode of the TDengine cluster.</p>
<p><strong>Service address</strong>: a TDengine cluster can hold one, several, or even thousands of physical nodes. An application only needs to connect to the publicIp of any physical node in the cluster. When launching the CLI application taos, the -h option takes exactly this publicIp.</p>
<p><strong>master/secondIp</strong>: every dnode must be configured with a masterIp. After a dnode starts, it sends a join-cluster request to the configured masterIp. The masterIp is the privateIp of any node in the already created cluster; for the first node of the cluster it is that node's own privateIp. To improve the chance of a successful connection, each dnode can also be configured with a secondIp, likewise the privateIp of any node in the created cluster. If a node fails to connect to the masterIp, it tries the secondIp.</p>
<p>After a dnode starts, it learns the cluster's mnode IP list and periodically sends status information to the mnodes.</p>
<p>vnodes and mnodes are only a logical division; both are merely different threads of the executable taosd, require no separate software installation, and need no special configuration. The minimal system configuration is one physical node on which vnode, mnode, and taosc all exist and run normally; a single node, however, cannot guarantee high reliability.</p>
<a class='anchor' id='2一典型的操作流程'></a><h3>2. A Typical Operation Flow</h3>
<p>To explain the relationship between vnode, mnode, taosc, and the application, and the role each plays, we walk through the typical operation of writing data.</p>
<p><img src="../assets/Picture2.png" alt="Picture1" /></p>
<ol>
<li>The application issues an insert request through JDBC, ODBC, or another API.</li>
<li>taosc checks its cache for the table's meta data. If found, go straight to step 4. If not, taosc sends a get meta-data request to an mnode.</li>
<li>The mnode returns the table's meta-data to taosc. The meta-data contains the table's schema plus the table's vgroup information (the vnode IDs and the IP addresses of their dnodes; if the replica count is N, there are N vnodeID/IP pairs). If taosc gets no response from the mnode for a long time and multiple mnodes exist, taosc sends the request to the next mnode.</li>
<li>taosc sends the insert request to the master vnode.</li>
<li>After inserting the data, the vnode acknowledges to taosc that the insert succeeded. If taosc gets no response from the vnode for a long time, taosc treats that node as offline; in that case, if the target database has multiple replicas, taosc sends the insert request to the next vnode in the vgroup.</li>
<li>taosc notifies the application that the write succeeded.</li>
</ol>
<p>For steps 2 and 3: when taosc starts it does not know any mnode's IP address, so it sends the request to the configured public service IP of the cluster. If the dnode receiving the request hosts no mnode, the dnode replies with the mnode IP address list (with multiple dnodes, the list can contain multiple mnode IPs), and taosc re-sends the get meta-data request to one of the new mnode IPs.</p>
<p>For steps 4 and 5: without a cache, taosc does not know who is master in the virtual node group, so it assumes the first vnodeID/IP is the master and sends the request there. If the receiving vnode is not the master, it replies with who the master is, and taosc re-sends the request to the suggested master vnode. Once the insert succeeds, taosc caches the master node's information.</p>
<p>The above is the flow for inserting data; the flows for queries and computations are exactly the same. taosc encapsulates and hides all of this complexity, so the application never deals with redirection, fetching meta data, or other details; it is fully transparent.</p>
<p>Thanks to taosc caching, the mnode needs to be visited only the first time a table is operated on, so the mnode never becomes a bottleneck. But because the schema may change and the vgroup may change (for instance when load balancing occurs), taosc refreshes its cache automatically at intervals.</p>
<a class='anchor' id='3数据分区'></a><h3>3. Data Partitioning</h3>
<p>vnodes (virtual data nodes) store the collected time-series data, and queries and computations also run on them. To ease load balancing and data recovery and to support heterogeneous environments, TDengine splits a physical node into several vnodes according to its compute and storage resources. These vnodes are managed automatically by TDengine and are completely transparent to applications.</p>
<p>For a single data collection point, regardless of its data volume, one vnode (or one vnode group, if the replica count is greater than 1) has ample compute and storage resources to handle it: if one 16-byte record is generated per second, the raw data produced in a year is under 0.5 GB (16 B/s × roughly 31.5 million seconds per year ≈ 0.47 GB). TDengine therefore stores all data of a table in one vnode and never spreads one collection point's data over two or more dnodes. A vnode can hold the data of multiple tables; the number of tables a vnode can hold is set by the configuration parameter tables and defaults to 2000. By design, all tables in one vnode belong to the same DB. So the number of vnodes (or vgroups) a database DB needs equals the number of tables in the database divided by tables.</p>
<p>Creating a DB does not allocate resources immediately. When a table is created, the system checks whether an already-allocated vnode has spare capacity; if so, the table is created in that vnode right away. If not, the system creates a new vnode on a dnode chosen by current load, then creates the table. If the DB has multiple replicas, the system creates not a single vnode but a whole vgroup (virtual data node group). The system places no limit on the number of vnodes; it is bounded only by the physical node's own compute and storage resources.</p>
<p>The tables parameter should be set with the concrete scenario in mind, and can be customized per DB at creation time. It should be neither too large nor too small. Too small (in the extreme, one vnode per collection point) leads to too many data files; too large forfeits the advantages of virtualization. Given the cluster's compute resources, the total number of vnodes in the system should be more than twice the number of CPU cores.</p>
<a class='anchor' id='4负载均衡'></a><h3>4. Load Balancing</h3>
<p>Every dnode (physical node) periodically reports its status (disk space, memory size, CPU, network, number of virtual nodes, and so on) to the mnode (virtual management node), so the mnode knows the state of the entire cluster. Based on this overall state, when the mnode finds a dnode overloaded, it moves one or more vnodes from that dnode to other dnodes. While a move is in progress, the service continues and data inserts, queries, and computations are unaffected. After load balancing finishes, the application need not restart; it connects to the new vnode automatically.</p>
<p>If the mnode receives no status report from a dnode for a while, it considers the dnode offline. If the offline time exceeds a threshold (set by the configuration parameter offlineThreshold), the dnode is forcibly removed from the cluster by the mnode. For the vnodes on that dnode, if the replica count is greater than one, the system automatically creates new replicas on other dnodes to preserve the data's replica count.</p>
<p><strong>Note</strong>: the cluster feature is currently limited to the enterprise edition.</p><a href='../index.html'>Back</a></section></main></div><?php include($s.'/footer.php'); ?><script>$('pre').addClass('prettyprint linenums');PR.prettyPrint()</script><script src='lib/docs/liner.js'></script></body></html>

File diff suppressed because it is too large Load Diff

File diff suppressed because one or more lines are too long


@ -1,110 +0,0 @@
<!DOCTYPE html><html lang='en'><head><title>Documentation | TAOS Data</title><meta name='description' content='TDengine is an open-source big data platform designed and optimized for IoT, connected vehicles, industrial IoT, and IT operations. Beyond a time-series database core that is more than 10x faster, it provides caching, data subscription, stream computing, and other features that minimize development and maintenance effort.'><meta name='keywords' content='Big Data, open source, IoT, connected vehicles, industrial IoT, IT operations, time-series database, cache, data subscription, message queue, stream computing, TAOS Data, TDengine'><meta name='title' content='Documentation | TAOS Data'><meta property='og:site_name' content='TAOS Data'/><meta property='og:title' content='Documentation | TAOS Data'/><meta property='og:type' content='article'/><meta property='og:url' content='https://www.taosdata.com/cn/documentation/super-table-ch/index.php'/><meta property='og:description' content='TDengine is an open-source big data platform designed and optimized for IoT, connected vehicles, industrial IoT, and IT operations. Beyond a time-series database core that is more than 10x faster, it provides caching, data subscription, stream computing, and other features that minimize development and maintenance effort.' /><link rel='canonical' href='https://www.taosdata.com/cn/documentation/super-table-ch/index.php'/><script src='../lib/jquery-3.4.1.min.js' type='application/javascript'></script><link href='../lib/bootstrap.min.css' rel='stylesheet'><link href='../styles/base.min.css' rel='stylesheet'><link rel='stylesheet' href='../lib/docs/taosdataprettify.css'><link rel='stylesheet' href='../lib/docs/docs.css'><script src='../lib/docs/prettify.js'></script><script src='../lib/docs/prettyprint-sql.js'></script></head><body><script>$('#documentation-href').addClass('active')</script><div class='container-fluid'><main class='content-wrapper'><section class='documentation'><a href='../index.html'>Back</a><h1>Super Table (STable): Multi-Table Aggregation</h1>
<p>TDengine requires a separate table for each data collection point, which greatly improves insert and query performance, but also makes the number of tables explode, complicating table maintenance as well as aggregation and statistics across tables. To lower application development effort, TDengine introduces the super table, STable (Super Table).</p>
<a class='anchor' id='什么是超级表'></a><h2>What Is a Super Table</h2>
<p>An STable is an abstraction over data collection points of the same type, a set of collection instances of the same kind, containing multiple subtables with identical data structure. For its subtables, an STable defines a table schema and a set of tags: the schema is the data columns of a record and their data types; the tag names and data types are defined by the STable, while the tag values record static information of each subtable and are used to group and filter subtables. A subtable is essentially an ordinary table, consisting of a timestamp primary key and several data columns, with each row recording actual data; its query operations are exactly the same as for ordinary tables. The difference is that each subtable belongs to one super table and carries a set of tag values defined by the STable. One STable can be defined per type of collection device. The data model defines the type of each data column (temperature, pressure, voltage, current, real-time GPS position, and so on), while the tag information belongs to the meta data (serial number, model, location of the collection device, and so on); tags are static and are metadata of the table. When creating a table (a data collection point), the user specifies its STable (collection type) and, optionally, the tag values; tags can also be added or modified later.</p>
<p>TDengine extends standard SQL to define an STable, using the keyword tags to specify the tag information. The syntax is:</p>
<pre><code class="mysql language-mysql">CREATE TABLE &lt;stable_name&gt; (&lt;field_name&gt; TIMESTAMP, field_name1 field_type,…) TAGS(tag_name tag_type, …) </code></pre>
<p>Here tag_name is a tag name and tag_type its data type. A tag can use any data type supported by TDengine except timestamp; at most 6 tags are allowed, and a tag name must not collide with a system keyword or another column name. For example:</p>
<pre><code class="mysql language-mysql">create table thermometer (ts timestamp, degree float)
tags (location binary(20), type int)</code></pre>
<p>The SQL above creates an STable named thermometer with the tags location and type.</p>
<p>When creating a table for a collection point, you specify the STable it belongs to and its tag values. The syntax is:</p>
<pre><code class="mysql language-mysql">CREATE TABLE &lt;tb_name&gt; USING &lt;stb_name&gt; TAGS (tag_value1,...)</code></pre>
<p>Continuing the thermometer example above, the statement creating the table for a single thermometer is:</p>
<pre><code class="mysql language-mysql">create table t1 using thermometer tags ('beijing', 10)</code></pre>
<p>The SQL above creates a table named t1 from the template thermometer; its schema is exactly thermometer's schema, with the tag location set to 'beijing' and the tag type set to 10.</p>
<p>A user can create an unlimited number of tables with different tags from one STable; in this sense, an STable is a collection of tables with the same data model but different tags. As with ordinary tables, users can create, drop, and inspect super tables. Most query operations that apply to ordinary tables also apply to STables, including all kinds of aggregation and projection/selection functions. In addition, tag filter conditions can be set so that an aggregate query runs over only part of the tables in an STable, greatly simplifying application development.</p>
<p>TDengine indexes a table's primary key (the timestamp) and for now provides no index on other collected quantities in the data model (such as temperature or pressure). Each data collection point collects many records, but each point's tags are just a single record, so tag storage has no redundancy and the overall tag data volume is limited. TDengine stores tag data completely separately from the collected dynamic data and maintains a high-performance in-memory index structure over STable tags, providing full and fast operations on tags. Users can add, delete, modify, and query tags (Create, Retrieve, Update, Delete: CRUD) as needed.</p>
<p>An STable belongs to a database: one STable belongs to exactly one database, but a database can have one or more STables, and an STable can have many subtables.</p>
<a class='anchor' id='超级表管理'></a><h2>Super Table Management</h2>
<ul>
<li><p>Create a super table</p>
<pre><code class="mysql language-mysql">CREATE TABLE &lt;stable_name&gt; (&lt;field_name&gt; TIMESTAMP, field_name1 field_type,…) TAGS(tag_name tag_type, …)</code></pre>
<p>Similar to the CREATE TABLE syntax, but the names and types of the TAGS fields must be specified.</p>
<p>Notes:</p>
<ol>
<li>The total length of the TAGS columns must not exceed 512 bytes;</li>
<li>A TAGS column cannot be of type timestamp;</li>
<li>A TAGS column name must not collide with any other column name;</li>
<li>A TAGS column name must not be a reserved keyword.</li></ol></li>
<li><p>Show the created super tables</p>
<pre><code class="mysql language-mysql">show stables;</code></pre>
<p>Shows all STables in the database and related information: the STable name, creation time, number of columns, number of tags (TAG), and the number of tables created through the STable.</p></li>
<li><p>Drop a super table</p>
<pre><code class="mysql language-mysql">DROP TABLE &lt;stable_name&gt;</code></pre>
<p>Note: dropping an STable drops all tables created through it.</p></li>
<li><p>List the tables of an STable that satisfy a filter</p>
<pre><code class="mysql language-mysql">SELECT TBNAME,[TAG_NAME,…] FROM &lt;stable_name&gt; WHERE &lt;tag_name&gt; &lt;[=|=&lt;|&gt;=|&lt;&gt;] values..&gt; ([AND|OR] …)</code></pre>
<p>Lists the tables that belong to an STable and satisfy the filter. Note: TBNAME is a keyword that displays the names of the subtables created through the STable; tag conditions can be used in the query. Concrete examples follow this list.</p>
<pre><code class="mysql language-mysql">SELECT COUNT(TBNAME) FROM &lt;stable_name&gt; WHERE &lt;tag_name&gt; &lt;[=|=&lt;|&gt;=|&lt;&gt;] values..&gt; ([AND|OR] …)</code></pre>
<p>Counts the subtables that belong to an STable and satisfy the filter.</p></li>
</ul>
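<p>A hypothetical illustration of the two statements above, using the thermometer STable defined on this page (the tag values are taken from the usage example further below):</p>
<pre><code class="mysql language-mysql">SELECT TBNAME, location FROM thermometer WHERE type = 1;
SELECT COUNT(TBNAME) FROM thermometer WHERE location = 'beijing';</code></pre>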
<a class='anchor' id='写数据时自动建子表'></a><h2>Auto-Creating Subtables on Write</h2>
<p>In some special scenarios, the writer does not know whether a device's table exists at write time. In that case the auto-create syntax can be used: while writing, it creates any missing subtable with the schema defined by the super table; if the table already exists, no new table is created. Note that auto-create can only create subtables, never super tables, so the super table must already be defined. The auto-create syntax closely resembles insert/import; the only difference is that the statement carries the super table and tag information. The syntax is:</p>
<pre><code class="mysql language-mysql">INSERT INTO &lt;tb_name&gt; USING &lt;stb_name&gt; TAGS (&lt;tag1_value&gt;, ...) VALUES (field_value, ...) (field_value, ...) ...;</code></pre>
<p>Inserts one or more records into table tb_name. If table tb_name does not exist, it is created with the schema defined by super table stb_name and the user-specified tag values (tag1_value, ...), and the given values are written into it. If tb_name already exists, the create step is skipped; the system does not check whether tb_name's tags match the user-specified tag values, i.e. the tags of an existing table are not updated.</p>
<pre><code class="mysql language-mysql">INSERT INTO &lt;tb1_name&gt; USING &lt;stb1_name&gt; TAGS (&lt;tag1_value1&gt;, ...) VALUES (&lt;field1_value1&gt;, ...) (&lt;field1_value2&gt;, ...) ... &lt;tb_name2&gt; USING &lt;stb_name2&gt; TAGS(&lt;tag1_value2&gt;, ...) VALUES (&lt;field1_value1&gt;, ...) ...;</code></pre>
<p>Inserts one or more records into multiple tables (tb1_name, tb2_name, and so on), specifying each table's super table for auto-creation.</p>
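<p>A minimal hypothetical example of the auto-create form, reusing the thermometer super table from this page; the subtable name, tag values, and row values below are assumptions:</p>
<pre><code class="mysql language-mysql">INSERT INTO therm5 USING thermometer TAGS ('chengdu', 2)
VALUES ('2018-01-01 00:00:00.000', 22);</code></pre>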
<a class='anchor' id='STable中TAG管理'></a><h2>Managing TAGs in an STable</h2>
<p>Except for changing a tag's value, which operates on a subtable, all tag operations (adding a tag, dropping a tag, and so on) can only be applied to the STable, not to an individual subtable. After a tag is added to an STable, all tables built on that STable automatically gain the new tag; for numeric tags, the default value of the newly added tag is 0. An example of the subtable-level operation appears after this list.</p>
<ul>
<li><p>Add a new tag</p>
<pre><code class="mysql language-mysql">ALTER TABLE &lt;stable_name&gt; ADD TAG &lt;new_tag_name&gt; &lt;TYPE&gt;</code></pre>
<p>Adds a new tag to the STable and specifies its type. The total number of tags must not exceed 6.</p></li>
<li><p>Drop a tag</p>
<pre><code class="mysql language-mysql">ALTER TABLE &lt;stable_name&gt; DROP TAG &lt;tag_name&gt;</code></pre>
<p>Drops a tag from the super table; after the drop, all subtables under the super table automatically drop that tag as well.</p>
<p>Note: the first tag column cannot be dropped; an STable must keep at least one tag.</p></li>
<li><p>Rename a tag</p>
<pre><code class="mysql language-mysql">ALTER TABLE &lt;stable_name&gt; CHANGE TAG &lt;old_tag_name&gt; &lt;new_tag_name&gt;</code></pre>
<p>Renames a tag of the super table; after the rename, all subtables under the super table automatically pick up the new tag name.</p></li>
<li><p>Change a subtable's tag value</p>
<pre><code class="mysql language-mysql">ALTER TABLE &lt;table_name&gt; SET TAG &lt;tag_name&gt;=&lt;new_tag_value&gt;</code></pre></li>
</ul>
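<p>A hypothetical example of changing one subtable's tag value, using the therm4 subtable created in the usage example below; the new value is an assumption for illustration:</p>
<pre><code class="mysql language-mysql">ALTER TABLE therm4 SET TAG location='hangzhou';</code></pre>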
<a class='anchor' id='STable多表聚合'></a><h2>Multi-Table Aggregation over an STable</h2>
<p>Aggregate queries can run over all subtables created through an STable, with filtering on any of the TAG values and grouping of the results by values in TAGS (fuzzy matching on binary-typed tags is not supported yet). The syntax is:</p>
<pre><code class="mysql language-mysql">SELECT function&lt;field_name&gt;,…
FROM &lt;stable_name&gt;
WHERE &lt;tag_name&gt; &lt;[=|&lt;=|&gt;=|&lt;&gt;] values..&gt; ([AND|OR] …)
INTERVAL (&lt;time range&gt;)
GROUP BY &lt;tag_name&gt;, &lt;tag_name&gt;
ORDER BY &lt;tag_name&gt; &lt;asc|desc&gt;
SLIMIT &lt;group_limit&gt;
SOFFSET &lt;group_offset&gt;
LIMIT &lt;record_limit&gt;
OFFSET &lt;record_offset&gt;</code></pre>
<p><strong>Notes</strong></p>
<p>For super table aggregation, TDengine currently supports the following aggregate/selection functions: sum, count, avg, first, last, min, max, top, bottom, plus projection over all or part of the columns; usage is the same as in single-table queries. Other kinds of aggregate computation and arithmetic expressions are not supported yet, and currently no function or computation may be nested.</p>
<p>A query without GROUP BY aggregates, over time, all tables under the super table that satisfy the filter; by default the output is in monotonically increasing timestamp order, and ORDER BY _c0 ASC|DESC selects ascending or descending timestamp order for the result. An aggregate query with GROUP BY &lt;tag_name&gt; groups by tags, aggregates each group's data separately, and outputs one aggregate result per group; the inter-group order can be specified with ORDER BY &lt;tag_name&gt;, and within each group the time series is monotonically increasing.</p>
<p>SLIMIT/SOFFSET specify paging across groups, i.e. the maximum number of groups in the result set and the starting group. LIMIT/OFFSET specify paging within a group, i.e. the maximum number of records output per group and the starting record.</p>
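<p>A hypothetical paging query following the template above; the thermometer names come from the usage example below, and the SLIMIT/LIMIT figures are arbitrary. It returns at most 2 location groups and at most 100 rows within each group:</p>
<pre><code class="mysql language-mysql">SELECT AVG(degree)
FROM thermometer
WHERE ts &gt;= now-1d
INTERVAL(1h)
GROUP BY location
SLIMIT 2 SOFFSET 0
LIMIT 100 OFFSET 0</code></pre>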
<a class='anchor' id='STable使用示例'></a><h2>STable Usage Example</h2>
<p>We use temperature sensors collecting time-series data to demonstrate STables. In this example, a table is created for each thermometer, named after the thermometer's ID; the reading time is recorded as ts and the collected value as degree. Tags mark each collector's region and type, to make later queries convenient. All thermometers collect the same quantities, so we define the schema with an STable.</p>
<a class='anchor' id='定义STable表结构并使用它创建子表'></a><h3>Define the STable Schema and Create Subtables from It</h3>
<p>The statement creating the STable is:</p>
<pre><code class="mysql language-mysql">CREATE TABLE thermometer (ts timestamp, degree double)
TAGS(location binary(20), type int)</code></pre>
<p>Suppose there are four temperature collectors across the three regions Beijing, Tianjin, and Shanghai, and three collector types; we can create a table for each collector as follows:</p>
<pre><code class="mysql language-mysql">CREATE TABLE therm1 USING thermometer TAGS ('beijing', 1);
CREATE TABLE therm2 USING thermometer TAGS ('beijing', 2);
CREATE TABLE therm3 USING thermometer TAGS ('tianjin', 1);
CREATE TABLE therm4 USING thermometer TAGS ('shanghai', 3);</code></pre>
<p>Here therm1, therm2, therm3, and therm4 are the four concrete subtables of the super table thermometer, i.e. ordinary tables. Take therm1 as an example: it holds the data of collector therm1, its schema is fully defined by thermometer, and its tags location="beijing", type=1 mean that therm1 is located in Beijing and is a type-1 thermometer.</p>
<a class='anchor' id='写入数据'></a><h3>Write Data</h3>
<p>Note that data cannot be written through the STable directly; it must be written to each subtable. We write one record into each of the four tables therm1, therm2, therm3, and therm4:</p>
<pre><code class="mysql language-mysql">INSERT INTO therm1 VALUES ('2018-01-01 00:00:00.000', 20);
INSERT INTO therm2 VALUES ('2018-01-01 00:00:00.000', 21);
INSERT INTO therm3 VALUES ('2018-01-01 00:00:00.000', 24);
INSERT INTO therm4 VALUES ('2018-01-01 00:00:00.000', 23);</code></pre>
<a class='anchor' id='按标签聚合查询'></a><h3>Aggregate by Tag</h3>
<p>Query the number of samples count(*), average temperature avg(degree), maximum temperature max(degree), and minimum temperature min(degree) of the temperature sensors located in the two regions Beijing (beijing) and Tianjin (tianjin), grouping the results by region (location) and sensor type (type).</p>
<pre><code class="mysql language-mysql">SELECT COUNT(*), AVG(degree), MAX(degree), MIN(degree)
FROM thermometer
WHERE location='beijing' or location='tianjin'
GROUP BY location, type </code></pre>
<a class='anchor' id='按时间周期聚合查询'></a><h3>Aggregate by Time Window</h3>
<p>Query the number of samples count(*), average temperature avg(degree), maximum temperature max(degree), and minimum temperature min(degree) over the last 24 hours (24h) from the temperature sensors located outside Beijing, aggregating the samples over 10-minute windows, and grouping the results again by region (location) and sensor type (type).</p>
<pre><code class="mysql language-mysql">SELECT COUNT(*), AVG(degree), MAX(degree), MIN(degree)
FROM thermometer
WHERE location&lt;&gt;'beijing' and ts&gt;=now-1d
INTERVAL(10M)
GROUP BY location, type</code></pre><a href='../index.html'>Back</a></section></main></div><?php include($s.'/footer.php'); ?><script>$('pre').addClass('prettyprint linenums');PR.prettyPrint()</script><script src='lib/docs/liner.js'></script></body></html>


@ -1,388 +0,0 @@
<!DOCTYPE html><html lang='cn'><head><title>文档 | 涛思数据</title><meta name='description' content='TDengine是一个开源的专为物联网、车联网、工业互联网、IT运维等设计和优化的大数据平台。除核心的快10倍以上的时序数据库功能外还提供缓存、数据订阅、流式计算等功能最大程度减少研发和运维的工作量。'><meta name='keywords' content='大数据Big Data开源物联网车联网工业互联网IT运维, 时序数据库缓存数据订阅消息队列流式计算开源涛思数据TAOS Data, TDengine'><meta name='title' content='文档 | 涛思数据'><meta property='og:site_name' content='涛思数据'/><meta property='og:title' content='文档 | 涛思数据'/><meta property='og:type' content='article'/><meta property='og:url' content='https://www.taosdata.com/cn/documentation/taos-sql-ch/index.php'/><meta property='og:description' content='TDengine是一个开源的专为物联网、车联网、工业互联网、IT运维等设计和优化的大数据平台。除核心的快10倍以上的时序数据库功能外还提供缓存、数据订阅、流式计算等功能最大程度减少研发和运维的工作量。' /><link rel='canonical' href='https://www.taosdata.com/cn/documentation/taos-sql-ch/index.php'/><script src='../lib/jquery-3.4.1.min.js' type='application/javascript'></script><link href='../lib/bootstrap.min.css' rel='stylesheet'><link href='../styles/base.min.css' rel='stylesheet'><link rel='stylesheet' href='../lib/docs/taosdataprettify.css'><link rel='stylesheet' href='../lib/docs/docs.css'><script src='../lib/docs/prettify.js'></script><script src='../lib/docs/prettyprint-sql.js'></script></head><body><script>$('#documentation-href').addClass('active')</script><div class='container-fluid'><main class='content-wrapper'><section class='documentation'><a href='../index.html'>回去</a><h1>TAOS SQL</h1>
<p>TDengine提供类似SQL语法用户可以在TDengine Shell中使用SQL语句操纵数据库也可以通过C/C++, Java(JDBC), Python, Go等各种程序来执行SQL语句。 </p>
<p>本章节SQL语法遵循如下约定</p>
<ul>
<li>< > 里的内容是用户需要输入的,但不要输入&lt;&gt;本身</li>
<li>[ ]表示内容为可选项,但不能输入[]本身</li>
<li>| 表示多选一,选择其中一个即可,但不能输入|本身</li>
<li>… 表示前面的项可重复多个</li>
</ul>
<a class='anchor' id='支持的数据类型'></a><h2>Supported Data Types</h2>
<p>The most important concept in TDengine is the timestamp: one is required whenever records are created or inserted, and whenever historical records are queried. Timestamps follow these rules:</p>
<ul>
<li>The time format is YYYY-MM-DD HH:mm:ss.MS, with a default resolution of milliseconds, e.g. 2017-08-12 18:25:58.128</li>
<li>The built-in function now returns the current time of the server</li>
<li>When inserting a record, a timestamp of 0 is replaced with the current server time</li>
<li>Epoch time: a timestamp can also be a long integer counting milliseconds since 1970-01-01 08:00:00.000 (UTC+8)</li>
<li>Time values support arithmetic; for example, now-2h pushes the query time back two hours (the latest 2 hours). Supported units after the number are a (milliseconds), s (seconds), m (minutes), h (hours), d (days), w (weeks), n (months) and y (years). For example, select * from t1 where ts &gt; now-2w and ts &lt;= now-1w queries exactly one week of data from two weeks ago; a combined sketch of these forms follows below</li>
</ul>
<p>The default timestamp precision in TDengine is milliseconds, but microsecond precision can be enabled via the configuration parameter enableMicrosecond.</p>
<p>在TDengine中普通表的数据模型中可使用以下10种数据类型。 </p>
<figure><table>
<thead>
<tr>
<th></th>
<th style="text-align:center;">类型</th>
<th>Bytes</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td>1</td>
<td style="text-align:center;">TIMESTAMP</td>
<td>8</td>
<td>Timestamp, with millisecond precision at the finest. Counted from 1970-01-01 08:00:00.000 (UTC+8); earlier times are not allowed.</td>
</tr>
<tr>
<td>2</td>
<td style="text-align:center;">INT</td>
<td>4</td>
<td>Integer, range [-2^31+1, 2^31-1]; -2^31 is reserved as the Null value</td>
</tr>
<tr>
<td>3</td>
<td style="text-align:center;">BIGINT</td>
<td>8</td>
<td>Long integer, range [-2^59, 2^59]</td>
</tr>
<tr>
<td>4</td>
<td style="text-align:center;">FLOAT</td>
<td>4</td>
<td>浮点型有效位数6-7范围 [-3.4E38, 3.4E38]</td>
</tr>
<tr>
<td>5</td>
<td style="text-align:center;">DOUBLE</td>
<td>8</td>
<td>双精度浮点型有效位数15-16范围 [-1.7E308, 1.7E308]</td>
</tr>
<tr>
<td>6</td>
<td style="text-align:center;">BINARY</td>
<td>自定义</td>
<td>用于记录字符串最长不能超过504 bytes。binary仅支持字符串输入字符串两端使用单引号引用否则英文全部自动转化为小写。使用时须指定大小如binary(20)定义了最长为20个字符的字符串每个字符占1byte的存储空间。如果用户字符串超出20字节将被自动截断。对于字符串内的单引号可以用转义字符反斜线加单引号来表示<strong>\</strong></td>
</tr>
<tr>
<td>7</td>
<td style="text-align:center;">SMALLINT</td>
<td>2</td>
<td>Short integer, range [-32767, 32767]</td>
</tr>
<tr>
<td>8</td>
<td style="text-align:center;">TINYINT</td>
<td>1</td>
<td>Single-byte integer, range [-127, 127]</td>
</tr>
<tr>
<td>9</td>
<td style="text-align:center;">BOOL</td>
<td>1</td>
<td>Boolean, {true, false}</td>
</tr>
<tr>
<td>10</td>
<td style="text-align:center;">NCHAR</td>
<td>自定义</td>
<td>Used to store non-ASCII strings such as Chinese characters; each nchar character occupies 4 bytes of storage. The string must be enclosed in single quotes, and a single quote inside the string is written with the escape sequence <strong>\'</strong>. A size must be given when the type is used: a column of type nchar(10) stores at most 10 nchar characters and always occupies 40 bytes. Longer input is silently truncated.</td>
</tr>
</tbody>
</table></figure>
<p><strong>Tips</strong>: TDengine is case-insensitive for English characters in SQL statements and converts them to lower case before execution. Case-sensitive strings and passwords therefore have to be enclosed in single quotes. A short sketch follows below.</p>
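<p>As a quick illustration of the type table and the quoting rule above, here is a hedged sketch; the table demo_types and its column names are hypothetical:</p>
<pre><code class="mysql language-mysql">CREATE TABLE demo_types (ts TIMESTAMP, cnt INT, degree DOUBLE, ok BOOL, tag_str BINARY(20), note NCHAR(10));
-- single quotes keep the case of the string value intact
INSERT INTO demo_types VALUES (now, 10, 23.5, true, 'CaseKept', 'sensor');</code></pre>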
<a class='anchor' id='数据库管理'></a><h2>Database Management</h2>
<ul>
<li><p><strong>Create a database</strong> </p>
<pre><code class="mysql language-mysql">CREATE DATABASE [IF NOT EXISTS] db_name [KEEP keep]</code></pre>
<p>Creates a database. <code>KEEP</code> is the number of days the database retains its data, 3650 days (10 years) by default; data older than the limit is removed automatically. More storage-related database parameters are described in <a href="../administrator/#服务端配置">Administrator</a>. A combined usage sketch follows this list.</p></li>
<li><p><strong>Use a database</strong></p>
<pre><code class="mysql language-mysql">USE db_name</code></pre>
<p>Uses/switches to a database</p></li>
<li><p><strong>Drop a database</strong></p>
<pre><code class="mysql language-mysql">DROP DATABASE [IF EXISTS] db_name</code></pre>
<p>Drops a database. All tables it contains are deleted; use with caution</p></li>
<li><p><strong>List all databases in the system</strong></p>
<pre><code class="mysql language-mysql">SHOW DATABASES</code></pre></li>
</ul>
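<p>The sketch below strings the statements above together; the database name iot_db and the KEEP value are hypothetical:</p>
<pre><code class="mysql language-mysql">CREATE DATABASE IF NOT EXISTS iot_db KEEP 365;  -- keep the data for one year
USE iot_db;
SHOW DATABASES;
DROP DATABASE IF EXISTS iot_db;</code></pre>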
<a class='anchor' id='表管理'></a><h2>Table Management</h2>
<ul>
<li><p><strong>Create a table</strong></p>
<pre><code class="mysql language-mysql">CREATE TABLE [IF NOT EXISTS] tb_name (timestamp_field_name TIMESTAMP, field1_name data_type1 [, field2_name data_type2 ...])</code></pre>
<p>Notes: 1) the first column must be of type TIMESTAMP, and the system automatically makes it the primary key; 2) a row may not exceed 4096 bytes; 3) for binary or nchar columns the maximum size in bytes must be given, e.g. binary(20) means at most 20 bytes. A combined sketch follows this list.</p></li>
<li><p><strong>Drop a table</strong></p>
<pre><code class="mysql language-mysql">DROP TABLE [IF EXISTS] tb_name</code></pre></li>
<li><p><strong>List all tables of the current database</strong></p>
<pre><code class="mysql language-mysql">SHOW TABLES [LIKE tb_name_wildcard]</code></pre>
<p>Lists all tables of the current database. A wildcard pattern can be used in LIKE to match names: 1) % (percent sign) matches 0 or more characters; 2) _ (underscore) matches exactly one character.</p></li>
<li><p><strong>Get the schema of a table</strong></p>
<pre><code class="mysql language-mysql">DESCRIBE tb_name</code></pre></li>
<li><p><strong>Add a column</strong></p>
<pre><code class="mysql language-mysql">ALTER TABLE tb_name ADD COLUMN field_name data_type</code></pre></li>
<li><p><strong>Drop a column</strong></p>
<pre><code class="mysql language-mysql">ALTER TABLE tb_name DROP COLUMN field_name </code></pre>
<p>If a table was created from a <a href="../super-table/">super table (STable)</a>, schema changes can only be applied to the STable, and they then take effect for every table created from it. Tables not created from a STable can be altered directly</p>
<p><strong>Tips</strong>: tables in the current database (selected with use db_name) need no database prefix in SQL statements. To operate on a table in another database, prefix the table name with the database name, e.g. demo.tb1 refers to table tb1 in database demo.</p></li>
</ul>
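<p>A hedged sketch of the statements above; the table sensor and its columns are hypothetical:</p>
<pre><code class="mysql language-mysql">CREATE TABLE IF NOT EXISTS sensor (ts TIMESTAMP, degree DOUBLE, note BINARY(20));
DESCRIBE sensor;
ALTER TABLE sensor ADD COLUMN pm25 SMALLINT;
SHOW TABLES LIKE 'sen%';
DROP TABLE IF EXISTS sensor;</code></pre>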
<a class='anchor' id='数据写入'></a><h2>Writing Data</h2>
<ul>
<li><p><strong>Insert a record</strong></p>
<pre><code class="mysql language-mysql">INSERT INTO tb_name VALUES (field_value, ...);</code></pre>
<p>Inserts one record into table tb_name</p></li>
<li><p><strong>Insert a record into specified columns</strong></p>
<pre><code class="mysql language-mysql">INSERT INTO tb_name (field1_name, ...) VALUES(field1_value, ...)</code></pre>
<p>Inserts one record into the specified columns of table tb_name. Columns not listed in the statement are filled with NULL; the primary-key timestamp may not be NULL.</p></li>
<li><p><strong>Insert multiple records</strong></p>
<pre><code class="mysql language-mysql">INSERT INTO tb_name VALUES (field1_value1, ...) (field1_value2, ...)...;</code></pre>
<p>Inserts multiple records into table tb_name</p></li>
<li><p><strong>Insert multiple records into specified columns</strong></p>
<pre><code class="mysql language-mysql">INSERT INTO tb_name (field1_name, ...) VALUES(field1_value1, ...) (field1_value2, ...)</code></pre>
<p>Inserts multiple records into the specified columns of table tb_name</p></li>
<li><p><strong>Insert multiple records into multiple tables</strong></p>
<pre><code class="mysql language-mysql">INSERT INTO tb1_name VALUES (field1_value1, ...)(field1_value2, ...)...
            tb2_name VALUES (field1_value1, ...)(field1_value2, ...)...;</code></pre>
<p>Inserts multiple records into tables tb1_name and tb2_name at the same time</p></li>
<li><p><strong>Insert multiple records into specified columns of multiple tables</strong></p>
<pre><code class="mysql language-mysql">INSERT INTO tb1_name (tb1_field1_name, ...) VALUES (field1_value1, ...) (field1_value1, ...)
            tb2_name (tb2_field1_name, ...) VALUES(field1_value1, ...) (field1_value2, ...)</code></pre>
<p>Inserts multiple records into the specified columns of tables tb1_name and tb2_name at the same time </p></li>
</ul>
<p>注意对同一张表插入的新记录的时间戳必须递增否则会跳过插入该条记录。如果时间戳为0系统将自动使用服务器当前时间作为该记录的时间戳。</p>
<p><strong>IMPORT</strong>如果需要将时间戳小于最后一条记录时间的记录写入到数据库中可使用IMPORT替代INSERT命令IMPORT的语法与INSERT完全一样。如果同时IMPORT多条记录需要保证一批记录是按时间戳排序好的。</p>
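<p>A minimal sketch of back-filling with IMPORT, assuming the hypothetical table t1(ts TIMESTAMP, val INT) already holds newer records:</p>
<pre><code class="mysql language-mysql">-- would be skipped by INSERT because the timestamp is older than the latest record
IMPORT INTO t1 VALUES ('2018-01-01 00:00:00.000', 1);
-- a batch import must be sorted by timestamp
IMPORT INTO t1 VALUES ('2018-01-01 00:00:01.000', 2) ('2018-01-01 00:00:02.000', 3);</code></pre>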
<a class='anchor' id='数据查询'></a><h2>Data Query</h2>
<a class='anchor' id='查询语法是:'></a><h3>Query syntax:</h3>
<pre><code class="mysql language-mysql">SELECT {* | expr_list} FROM tb_name
[WHERE where_condition]
[ORDER BY _c0 { DESC | ASC }]
[LIMIT limit [, OFFSET offset]]
[&gt;&gt; export_file]
SELECT function_list FROM tb_name
[WHERE where_condition]
[LIMIT limit [, OFFSET offset]]
[&gt;&gt; export_file]</code></pre>
<ul>
<li>Use * to return all columns, or list specific column names. Arithmetic expressions over numeric columns are allowed, and output columns can be given aliases</li>
<li>The WHERE clause filters numeric values with logical comparisons and strings with wildcards</li>
<li>Results are sorted by the first column, the timestamp, in ascending order by default; descending order can be requested (_c0 refers to the first, timestamp column). Sorting by any other column with ORDER BY is not allowed.</li>
<li>LIMIT controls the number of output rows and OFFSET the starting row. LIMIT/OFFSET is applied to the result set after ORDER BY.</li>
<li>With "&gt;&gt;" the result set can be exported to a specified file</li>
</ul>
<a class='anchor' id='支持的条件过滤操作'></a><h3>Supported filter operations</h3>
<figure><table>
<thead>
<tr>
<th>Operation</th>
<th>Note</th>
<th>Applicable Data Types</th>
</tr>
</thead>
<tbody>
<tr>
<td>&gt;</td>
<td>larger than</td>
<td><strong><code>timestamp</code></strong> and all numeric types</td>
</tr>
<tr>
<td>&lt;</td>
<td>smaller than</td>
<td><strong><code>timestamp</code></strong> and all numeric types</td>
</tr>
<tr>
<td>&gt;=</td>
<td>larger than or equal to</td>
<td><strong><code>timestamp</code></strong> and all numeric types</td>
</tr>
<tr>
<td>&lt;=</td>
<td>smaller than or equal to</td>
<td><strong><code>timestamp</code></strong> and all numeric types</td>
</tr>
<tr>
<td>=</td>
<td>equal to</td>
<td>all types</td>
</tr>
<tr>
<td>&lt;&gt;</td>
<td>not equal to</td>
<td>all types</td>
</tr>
<tr>
<td>%</td>
<td>match with any char sequences</td>
<td><strong><code>binary</code></strong> <strong><code>nchar</code></strong></td>
</tr>
<tr>
<td>_</td>
<td>match with a single char</td>
<td><strong><code>binary</code></strong> <strong><code>nchar</code></strong></td>
</tr>
</tbody>
</table></figure>
<ol>
<li>To filter on several columns at once, combine the conditions with the keyword AND; conditions joined with OR are not supported yet.</li>
<li>A single column supports only one filter interval. For example, value&gt;20 AND value&lt;30 is a valid condition, whereas value&lt;20 AND value&lt;&gt;5 is not.</li>
</ol>
<a class='anchor' id='Some-Examples'></a><h3>Some Examples</h3>
<ul>
<li><p>For the examples below, table tb1 is created with:</p>
<pre><code class="mysql language-mysql">CREATE TABLE tb1 (ts timestamp, col1 int, col2 float, col3 binary(50))</code></pre></li>
<li><p>Query all records of tb1 from the past hour:</p>
<pre><code class="mysql language-mysql">SELECT * FROM tb1 WHERE ts &gt;= NOW - 1h</code></pre></li>
<li><p>Query the records of tb1 between 2018-06-01 08:00:00.000 and 2018-06-02 08:00:00.000 whose col3 string ends with 'nny', sorted by timestamp in descending order:</p>
<pre><code class="mysql language-mysql">SELECT * FROM tb1 WHERE ts &gt; '2018-06-01 08:00:00.000' AND ts &lt;= '2018-06-02 08:00:00.000' AND col3 LIKE '%nny' ORDER BY ts DESC</code></pre></li>
<li><p>Query the sum of col1 and col2 aliased as complex, for timestamps later than 2018-06-01 08:00:00.000 and col2 greater than 1.2; output only 10 records, skipping the first 5:</p>
<pre><code class="mysql language-mysql">SELECT (col1 + col2) AS 'complex' FROM tb1 WHERE ts &gt; '2018-06-01 08:00:00.000' and col2 &gt; 1.2 LIMIT 10 OFFSET 5</code></pre></li>
<li><p>Count the records of the past 10 minutes whose col2 is greater than 3.14, and export the result to the file <code>/home/testoutput.csv</code>:</p>
<pre><code class="mysql language-mysql">SELECT COUNT(*) FROM tb1 WHERE ts &gt;= NOW - 10m AND col2 &gt; 3.14 &gt;&gt; /home/testoutput.csv</code></pre></li>
</ul>
<a class='anchor' id='SQL函数'></a><h2>SQL Functions</h2>
<a class='anchor' id='聚合函数'></a><h3>Aggregate Functions</h3>
<p>TDengine supports aggregation queries over data. The supported aggregate and extraction functions are listed below:</p>
<ul>
<li><p><strong>COUNT</strong></p>
<pre><code class="mysql language-mysql">SELECT COUNT([*|field_name]) FROM tb_name [WHERE clause]</code></pre>
<p>Function: counts the number of rows of a table/STable, or the number of non-NULL values of a column.<br />
Return type: long integer (INT64).<br />
Applicable columns: all.<br />
Applies to: tables, STables.<br />
Notes: 1) the asterisk (*) can be used instead of a column name and returns the total number of records; 2) for columns that contain no NULL values, the result is the same for every column; 3) for a specific column, the number of its non-NULL values is returned. A combined usage sketch follows this list. </p></li>
<li><p><strong>AVG</strong></p>
<pre><code class="mysql language-mysql">SELECT AVG(field_name) FROM tb_name [WHERE clause]</code></pre>
<p>Function: the average value of a column of a table/STable.<br />
Return type: double.<br />
Applicable columns: all except timestamp, binary, nchar and bool.<br />
Applies to: tables, STables. </p></li>
<li><p><strong>WAVG</strong></p>
<pre><code class="mysql language-mysql">SELECT WAVG(field_name) FROM tb_name [WHERE clause]</code></pre>
<p>Function: the time-weighted average of a column of a table/STable over a period of time.<br />
Return type: double.<br />
Applicable columns: all except timestamp, binary, nchar and bool.<br />
Applies to: tables, STables.</p></li>
<li><p><strong>SUM</strong></p>
<pre><code class="mysql language-mysql">SELECT SUM(field_name) FROM tb_name [WHERE clause]</code></pre>
<p>Function: the sum of a column of a table/STable.<br />
Return type: double or long integer (INT64).<br />
Applicable columns: all except timestamp, binary, nchar and bool.<br />
Applies to: tables, STables.</p></li>
<li><p><strong>STDDEV</strong></p>
<pre><code class="mysql language-mysql">SELECT STDDEV(field_name) FROM tb_name [WHERE clause]</code></pre>
<p>Function: the standard deviation of a column of a table.<br />
Return type: double.<br />
Applicable columns: all except timestamp, binary, nchar and bool.<br />
Applies to: tables.</p></li>
<li><p><strong>LEASTSQUARES</strong></p>
<pre><code class="mysql language-mysql">SELECT LEASTSQUARES(field_name) FROM tb_name [WHERE clause]</code></pre>
<p>Function: the least-squares fitted line of a column against the primary key (timestamp).<br />
Return type: a string expression (slope, intercept).<br />
Applicable columns: all except timestamp, binary, nchar and bool.<br />
Note: the timestamp is the independent variable, the column value the dependent variable.<br />
Applies to: tables.</p></li>
</ul>
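<p>A hedged combined sketch of the aggregate functions above, assuming the hypothetical table sensor(ts TIMESTAMP, degree DOUBLE):</p>
<pre><code class="mysql language-mysql">SELECT COUNT(*), AVG(degree), SUM(degree), STDDEV(degree) FROM sensor WHERE ts &gt;= now-1d;
SELECT LEASTSQUARES(degree) FROM sensor;  -- fitted line of degree against the timestamp</code></pre>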
<a class='anchor' id='选择函数'></a><h3>Selection Functions</h3>
<ul>
<li><p><strong>MIN</strong></p>
<pre><code class="mysql language-mysql">SELECT MIN(field_name) FROM {tb_name | stb_name} [WHERE clause]</code></pre>
<p>Function: the minimum value of a column of a table/STable.<br />
Return type: same as the applied column.<br />
Applicable columns: all except timestamp, binary, nchar and bool.</p></li>
<li><p><strong>MAX</strong></p>
<pre><code class="mysql language-mysql">SELECT MAX(field_name) FROM { tb_name | stb_name } [WHERE clause]</code></pre>
<p>Function: the maximum value of a column of a table/STable.<br />
Return type: same as the applied column.<br />
Applicable columns: all except timestamp, binary, nchar and bool.</p></li>
<li><p><strong>FIRST</strong></p>
<pre><code class="mysql language-mysql">SELECT FIRST(field_name) FROM { tb_name | stb_name } [WHERE clause]</code></pre>
<p>Function: the earliest non-NULL value written to a column of a table/STable.<br />
Return type: same as the applied column.<br />
Applicable columns: all.<br />
Notes: 1) use FIRST(*) to return the earliest non-NULL value of every column; 2) if a column contains only NULL values, NULL is returned for it; 3) if all columns contain only NULL values, no result is returned.</p></li>
<li><p><strong>LAST</strong></p>
<pre><code class="mysql language-mysql">SELECT LAST(field_name) FROM { tb_name | stb_name } [WHERE clause]</code></pre>
<p>Function: the latest non-NULL value written to a column of a table/STable.<br />
Return type: same as the applied column.<br />
Applicable columns: all.<br />
Notes: 1) use LAST(*) to return the latest non-NULL value of every column; 2) if a column contains only NULL values, NULL is returned for it; if all columns contain only NULL values, no result is returned.</p></li>
<li><p><strong>TOP</strong></p>
<pre><code class="mysql language-mysql">SELECT TOP(field_name, K) FROM { tb_name | stb_name } [WHERE clause]</code></pre>
<p>Function: the <em>k</em> largest non-NULL values of a column of a table/STable. If several values tie, the one with the smaller timestamp is returned.<br />
Return type: same as the applied column.<br />
Applicable columns: all except timestamp, binary, nchar and bool.<br />
Notes: 1) the valid range of <em>k</em> is 1≤<em>k</em>≤100; 2) the associated timestamp column is returned as well. A usage sketch follows this list. </p></li>
<li><p><strong>BOTTOM</strong></p>
<pre><code class="mysql language-mysql">SELECT BOTTOM(field_name, K) FROM { tb_name | stb_name } [WHERE clause]</code></pre>
<p>Function: the <em>k</em> smallest non-NULL values of a column of a table/STable. If several values tie, the one with the smaller timestamp is returned.<br />
Return type: same as the applied column.<br />
Applicable columns: all except timestamp, binary, nchar and bool.<br />
Notes: 1) the valid range of <em>k</em> is 1≤<em>k</em>≤100; 2) the associated timestamp column is returned as well.</p></li>
<li><p><strong>PERCENTILE</strong></p>
<pre><code class="mysql language-mysql">SELECT PERCENTILE(field_name, P) FROM { tb_name | stb_name } [WHERE clause]</code></pre>
<p>Function: the percentile value of a column of a table.<br />
Return type: double.<br />
Applicable columns: all except timestamp, binary, nchar and bool.<br />
Note: the valid range of <em>P</em> is 0≤<em>P</em>≤100; P=0 is equivalent to MIN and P=100 to MAX.</p></li>
<li><p><strong>LAST_ROW</strong></p>
<pre><code class="mysql language-mysql">SELECT LAST_ROW(field_name) FROM { tb_name | stb_name }</code></pre>
<p>Function: the last record of a table (STable).<br />
Return type: same as the applied column.<br />
Applicable columns: all.<br />
Note: unlike LAST, LAST_ROW does not accept a time range; it always returns the last record.</p></li>
</ul>
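<p>A hedged sketch of the selection functions, again assuming the hypothetical table sensor(ts TIMESTAMP, degree DOUBLE):</p>
<pre><code class="mysql language-mysql">SELECT TOP(degree, 3) FROM sensor WHERE ts &gt;= now-1d;  -- the 3 highest readings plus their timestamps
SELECT PERCENTILE(degree, 50) FROM sensor;             -- the median reading
SELECT LAST_ROW(degree) FROM sensor;                   -- the latest record, regardless of time range</code></pre>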
<a class='anchor' id='计算函数'></a><h3>Computation Functions</h3>
<ul>
<li><p><strong>DIFF</strong></p>
<pre><code class="mysql language-mysql">SELECT DIFF(field_name) FROM tb_name [WHERE clause]</code></pre>
<p>Function: the difference between each value of a column and the value in the preceding row.<br />
Return type: same as the applied column.<br />
Applicable columns: all except timestamp, binary, nchar and bool.<br />
Note: the number of output rows is the number of rows in the range minus one; the first row has no output.</p></li>
<li><p><strong>SPREAD</strong></p>
<pre><code class="mysql language-mysql">SELECT SPREAD(field_name) FROM { tb_name | stb_name } [WHERE clause]</code></pre>
<p>Function: the difference between the maximum and the minimum value of a column of a table/STable.<br />
Return type: double.<br />
Applicable columns: all except binary, nchar and bool.<br />
Note: can also be applied to a TIMESTAMP column, in which case it gives the time span covered by the records.</p></li>
<li><p><strong>Arithmetic operations</strong></p>
<pre><code class="mysql language-mysql">SELECT field_name [+|-|*|/|%][Value|field_name] FROM { tb_name | stb_name } [WHERE clause]</code></pre>
<p>Function: addition, subtraction, multiplication, division and modulo over one or more columns of a table/STable.<br />
Return type: double.<br />
Applicable columns: all except timestamp, binary, nchar and bool.<br />
Notes: 1) calculations across two or more columns are supported, with parentheses controlling precedence; 2) NULL values do not take part in the calculation: if an operand of a row is NULL, the result of that row is NULL. A short sketch follows this list.</p></li>
</ul>
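<p>A short hedged sketch of the computation functions, assuming the hypothetical table sensor(ts TIMESTAMP, degree DOUBLE):</p>
<pre><code class="mysql language-mysql">SELECT DIFF(degree) FROM sensor;           -- change between consecutive readings
SELECT SPREAD(ts) FROM sensor;             -- time span covered by the records
SELECT degree * 9 / 5 + 32 FROM sensor;    -- arithmetic over a column: Celsius to Fahrenheit</code></pre>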
<a class='anchor' id='时间维度聚合'></a><h2>Time-dimension Aggregation</h2>
<p>TDengine supports aggregation over time windows: the records of a table are cut into windows along the time axis and aggregated per window. For example, a thermometer may sample once per second while the average temperature per 10 minutes is wanted. Such down-sampling uses the following syntax:</p>
<pre><code class="mysql language-mysql">SELECT function_list FROM tb_name
[WHERE where_condition]
INTERVAL (interval)
[FILL ({NONE | VALUE | PREV | NULL | LINEAR})]
SELECT function_list FROM stb_name
[WHERE where_condition]
INTERVAL (interval)
[FILL ({ VALUE | PREV | NULL | LINEAR})]
[GROUP BY tags]</code></pre>
<ul>
<li>The window length is given by the INTERVAL keyword; the minimum interval is 10 milliseconds (10a). The aggregate and selection functions allowed in such a query are limited to single-value functions: count, avg, sum, stddev, leastsquares, percentile, min, max, first and last; functions with multi-row output (e.g. top, bottom, diff) and arithmetic operations are not allowed</li>
<li>The WHERE clause specifies the start and end time of the query plus other filter conditions </li>
<li>The FILL clause specifies how to fill a time window that has no data. The fill modes are:</li>
</ul>
<ol>
<li><p>NONE: no filling (the default mode).</p></li>
<li><p>VALUE: fill with a fixed value, which has to be given, e.g. fill(value, 1.23).</p></li>
<li><p>NULL: fill with NULL, e.g. fill(null).</p></li>
<li><p>PREV: fill with the previous non-NULL value, e.g. fill(prev). </p></li>
</ol>
<p>Notes: </p>
<ol>
<li>A FILL clause can generate a huge amount of filled output, so always specify the time range of the query. For each query, the system returns at most 10 million interpolated results.</li>
<li>In a time-dimension aggregation, the returned timestamps increase strictly monotonically.</li>
<li>If the query target is a STable, the aggregate functions apply to the data of all its tables that satisfy the value filters. If the query has no GROUP BY clause, the returned timestamps increase strictly monotonically; with a GROUP BY clause, the timestamps within one group are not guaranteed to increase monotonically.</li>
</ol>
<p><strong>Example:</strong> the temperature table is created with the following statement:</p>
<pre><code class="mysql language-mysql">create table sensor(ts timestamp, degree double, pm25 smallint) </code></pre>
<p>针对传感器采集的数据以10分钟为一个阶段计算过去24小时的温度数据的平均值、最大值、温度的中位数、以及随着时间变化的温度走势拟合直线。如果没有计算值用前一个非NULL值填充。</p>
<pre><code class="mysql language-mysql">SELECT AVG(degree),MAX(degree),LEASTSQUARES(degree), PERCENTILE(degree, 50) FROM sensor
WHERE TS&gt;=NOW-1d
INTERVAL(10m)
FILL(PREV);</code></pre><a href='../index.html'>Back</a></section></main></div><?php include($s.'/footer.php'); ?><script>$('pre').addClass('prettyprint linenums');PR.prettyPrint()</script><script src='lib/docs/liner.js'></script></body></html>

View File

@ -1,137 +0,0 @@
<!DOCTYPE html><html lang='en'><head><title>Documentation | Taos Data</title><meta name='description' content='TDengine is an open-source big data platform for IoT. Along with a 10x faster time-series database, it provides caching, stream computing, message queuing, and other functionalities. It is designed and optimized for Internet of Things, Connected Cars, and Industrial IoT. Read the documentation for TDengine here to get started right away.'><meta name='keywords' content='TDengine, Big Data, Open Source, IoT, Connected Cars, Industrial IoT, time-series database, caching, stream computing, message queuing, IT infrastructure monitoring, application performance monitoring, Internet of Things,TAOS Data, Documentation, programming, coding, syntax, frequently asked questions, questions, faq'><meta name='title' content='Documentation | Taos Data'><meta property='og:site_name' content='Taos Data'/><meta property='og:title' content='Documentation | Taos Data'/><meta property='og:type' content='article'/><meta property='og:url' content='https://www.taosdata.com/en/documentation/administrator/index.php'/><meta property='og:description' content='TDengine is an open-source big data platform for IoT. Along with a 10x faster time-series database, it provides caching, stream computing, message queuing, and other functionalities. It is designed and optimized for Internet of Things, Connected Cars, and Industrial IoT. Read the documentation for TDengine here to get started right away.' /><link rel='canonical' href='https://www.taosdata.com/en/documentation/administrator/index.php'/><script src='../lib/jquery-3.4.1.min.js' type='application/javascript'></script><link href='../lib/bootstrap.min.css' rel='stylesheet'><link href='../styles/base.min.css' rel='stylesheet'><link rel='stylesheet' href='../lib/docs/taosdataprettify.css'><link rel='stylesheet' href='../lib/docs/docs.css'><script src='../lib/docs/prettify.js'></script><script src='../lib/docs/prettyprint-sql.js'></script></head><body><script>$('#documentation-href').addClass('active')</script><div class='container-fluid'><main class='content-wrapper'><section class='documentation'><a href='../index.html'>Back</a><h1>Administrator</h1>
<a class='anchor' id='Directory-and-Files'></a><h2>Directory and Files</h2>
<p>After TDengine is installed, by default, the following directories will be created:</p>
<figure><table>
<thead>
<tr>
<th>Directory/File</th>
<th style="text-align:left;">Description</th>
</tr>
</thead>
<tbody>
<tr>
<td>/etc/taos/taos.cfg</td>
<td style="text-align:left;">TDengine configuration file</td>
</tr>
<tr>
<td>/usr/local/taos/driver</td>
<td style="text-align:left;">TDengine dynamic link library</td>
</tr>
<tr>
<td>/var/lib/taos</td>
<td style="text-align:left;">TDengine default data directory</td>
</tr>
<tr>
<td>/var/log/taos</td>
<td style="text-align:left;">TDengine default log directory</td>
</tr>
<tr>
<td>/usr/local/taos/bin</td>
<td style="text-align:left;">TDengine executables</td>
</tr>
</tbody>
</table></figure>
<a class='anchor' id='Executables'></a><h3>Executables</h3>
<p>All TDengine executables are located at <em>/usr/local/taos/bin</em> , including:</p>
<ul>
<li><code>taosd</code>: the TDengine server</li>
<li><code>taos</code>: the TDengine Shell, the command-line interface</li>
<li><code>taosdump</code>: the TDengine data export tool</li>
<li><code>rmtaos</code>: a script to uninstall TDengine</li>
</ul>
<p>You can change the data directory and log directory settings through the system configuration file.</p>
<a class='anchor' id='Configuration-on-Server'></a><h2>Configuration on Server</h2>
<p><code>taosd</code> runs on the server side. You can change the system configuration file taos.cfg to customize its behavior. By default, taos.cfg is located at /etc/taos, but you can specify a different path via the command line parameter -c. For example: <code>taosd -c /home/user</code> means the configuration file will be read from directory /home/user.</p>
<p>This section lists only the most important configuration parameters. Please check taos.cfg to find all the configurable parameters. <strong>Note: to make your new configurations work, you have to restart taosd after you change taos.cfg</strong>.</p>
<ul>
<li>mgmtShellPort: TCP and UDP port between client and TDengine mgmt (default: 6030). Note: 5 successive UDP ports (6030-6034) starting from this number will be used.</li>
<li>vnodeShellPort: TCP and UDP port between client and TDengine vnode (default: 6035). Note: 5 successive UDP ports (6035-6039) starting from this number will be used.</li>
<li>httpPort: TCP port for RESTful service (default: 6020)</li>
<li>dataDir: data directory, default is /var/lib/taos</li>
<li>maxUsers: maximum number of users allowed</li>
<li>maxDbs: maximum number of databases allowed</li>
<li>maxTables: maximum number of tables allowed</li>
<li>enableMonitor: turn on/off system monitoring, 0: off, 1: on</li>
<li>logDir: log directory, default is /var/log/taos</li>
<li>numOfLogLines: maximum number of lines in the log file</li>
<li>debugFlag: log level, 131: only error and warnings, 135: all</li>
</ul>
<p>In different scenarios, data characteristics are different. For example, the retention policy, data sampling period, record size, the number of devices, and data compression may be different. To gain the best performance, you can change the following configurations related to storage: </p>
<ul>
<li>days: number of days to cover for a data file</li>
<li>keep: number of days to keep the data</li>
<li>rows: number of records in a block of the data file</li>
<li>comp: compression algorithm, 0: off, 1: standard; 2: maximum compression</li>
<li>ctime: period (seconds) to flush data to disk</li>
<li>clog: flag to turn on/off Write Ahead Log, 0: off, 1: on </li>
<li>tables: maximum number of tables allowed in a vnode</li>
<li>cache: cache block size (bytes)</li>
<li>tblocks: maximum number of cache blocks for a table</li>
<li>ablocks: average number of cache blocks for a table </li>
<li>precision: timestamp precision, us: microsecond, ms: millisecond; default is ms</li>
</ul>
<p>For an application, there may be multiple data scenarios. The best design is to put all data with the same characteristics into one database. One application may have multiple databases, and every database has its own configuration to maximize the system performance. You can specify the above configurations related to storage when you create a database. For example: </p>
<pre><code class="mysql language-mysql">CREATE DATABASE demo DAYS 10 CACHE 16000 ROWS 2000 </code></pre>
<p>The above SQL statement will create a database demo, with 10 days for each data file, 16000 bytes for a cache block, and 2000 rows in a file block.</p>
<p>The configuration provided when creating a database will overwrite the configuration in taos.cfg. </p>
<a class='anchor' id='Configuration-on-Client'></a><h2>Configuration on Client</h2>
<p><em>taos</em> is the TDengine shell, a client that connects to taosd. TDengine uses the same configuration file taos.cfg for the client, with the default location at /etc/taos. You can change it by specifying the command line parameter -c when you run taos. For example, <em>taos -c /home/user</em> reads the configuration file taos.cfg from directory /home/user.</p>
<p>The parameters related to client configuration are listed below: </p>
<ul>
<li>masterIP: IP address of TDengine server</li>
<li>charset: character set, default is the system's character set. For data type nchar, TDengine uses Unicode to store the data, so the client needs to tell the server its character set.</li>
<li>locale: system language setting</li>
<li>defaultUser: default login user, default is root</li>
<li>defaultPass: default password, default is taosdata</li>
</ul>
<p>For TCP/UDP port, and system debug/log configuration, it is the same as the server side.</p>
<p>For server IP, user name, password, you can always specify them in the command line when you run taos. If they are not specified, they will be read from the taos.cfg</p>
<a class='anchor' id='User-Management'></a><h2>User Management</h2>
<p>System administrator (user root) can add, remove a user, or change the password from the TDengine shell. Commands are listed below:</p>
<p>Create a user; the password shall be quoted with single quotes.</p>
<pre><code class="mysql language-mysql">CREATE USER user_name PASS 'password'</code></pre>
<p>Remove a user</p>
<pre><code class="mysql language-mysql">DROP USER user_name</code></pre>
<p>Change the password for a user</p>
<pre><code class="mysql language-mysql">ALTER USER user_name PASS password </code></pre>
<p>List all users</p>
<pre><code class="mysql language-mysql">SHOW USERS</code></pre>
<a class='anchor' id='Import-Data'></a><h2>Import Data</h2>
<p>Inside the TDengine shell, you can import data into TDengine from either a script or CSV file</p>
<p><strong>Import from Script</strong></p>
<pre><code>source &lt;filename&gt;</code></pre>
<p>Inside the file, put all the SQL statements, one statement per line. Lines starting with "#" are treated as comments and skipped. The system executes the statements line by line automatically until the end of the file. </p>
<p><strong>Import from CSV</strong></p>
<pre><code class="mysql language-mysql">insert into tb1 file a.csv b.csv tb2 c.csv …
import into tb1 file a.csv b.csv tb2 c.csv …</code></pre>
<p>Each csv file contains records for only one table, and the data structure shall be the same as the defined schema for the table. </p>
<a class='anchor' id='Export-Data'></a><h2>Export Data</h2>
<p>You can export data either from TDengine shell or from tool taosdump.</p>
<p><strong>Export from TDengine Shell</strong></p>
<pre><code class="mysql language-mysql">select * from &lt;tb_name&gt; &gt;&gt; a.csv</code></pre>
<p>The above SQL statement will dump the query result set into a csv file. </p>
<p><strong>Export Using taosdump</strong></p>
<p>TDengine provides a data dumping tool, taosdump. You can choose to dump a database, a table, all data, or only the data in a time range, or even only the metadata. For example:</p>
<ul>
<li>Export one or more tables in a DB: taosdump [OPTION…] dbname tbname …</li>
<li>Export one or more DBs: taosdump [OPTION…] --databases dbname…</li>
<li>Export all DBs (excluding system DB): taosdump [OPTION…] --all-databases</li>
</ul>
<p>Run <em>taosdump --help</em> to get a full list of the options.</p>
<a class='anchor' id='Management-of-Connections,-Streams,-Queries'></a><h2>Management of Connections, Streams, Queries</h2>
<p>The system administrator can check, kill the ongoing connections, streams, or queries. </p>
<pre><code>SHOW CONNECTIONS</code></pre>
<p>It lists all connections; one column shows the ip:port of the client. </p>
<pre><code>KILL CONNECTION &lt;connection-id&gt;</code></pre>
<p>It kills the connection, where connection-id is the ip:port shown by "SHOW CONNECTIONS". You can copy and paste it.</p>
<pre><code>SHOW QUERIES</code></pre>
<p>It shows the ongoing queries; the column ip:port:id shows the ip:port of the client and the id assigned by the system.</p>
<pre><code>KILL QUERY &lt;query-id&gt;</code></pre>
<p>It kills the query, where query-id is the ip:port:id shown by "SHOW QUERIES". You can copy and paste it.</p>
<pre><code>SHOW STREAMS</code></pre>
<p>It shows the continuous queries; one column shows ip:port:id, where ip:port is the connection from the client and id is assigned by the system.</p>
<pre><code>KILL STREAM &lt;stream-id&gt;</code></pre>
<p>It kills the continuous query, where stream-id is the ip:port:id shown by "SHOW STREAMS". You can copy and paste it.</p>
<a class='anchor' id='System-Monitor'></a><h2>System Monitor</h2>
<p>TDengine runs a system monitor in the background. Once started, it automatically creates a database named sys. The system monitor periodically collects metrics like CPU, memory, network and disk usage and the number of requests, and writes them into database sys. TDengine also logs all important actions, like login, logout, create database and drop database, into database sys. </p>
<p>You can check all the saved monitor information from database sys. By default, system monitor is turned on. But you can turn it off by changing the parameter in the configuration file.</p><a href='../index.html'>Back</a></section></main></div><?php include($s.'/footer.php'); ?><script>$('pre').addClass('prettyprint linenums');PR.prettyPrint()</script><script src='lib/docs/liner.js'></script></body></html>

View File

@ -1,41 +0,0 @@
<!DOCTYPE html><html lang='en'><head><title>Documentation | Taos Data</title><meta name='description' content='TDengine is an open-source big data platform for IoT. Along with a 10x faster time-series database, it provides caching, stream computing, message queuing, and other functionalities. It is designed and optimized for Internet of Things, Connected Cars, and Industrial IoT. Read the documentation for TDengine here to get started right away.'><meta name='keywords' content='TDengine, Big Data, Open Source, IoT, Connected Cars, Industrial IoT, time-series database, caching, stream computing, message queuing, IT infrastructure monitoring, application performance monitoring, Internet of Things,TAOS Data, Documentation, programming, coding, syntax, frequently asked questions, questions, faq'><meta name='title' content='Documentation | Taos Data'><meta property='og:site_name' content='Taos Data'/><meta property='og:title' content='Documentation | Taos Data'/><meta property='og:type' content='article'/><meta property='og:url' content='https://www.taosdata.com/en/documentation/advanced-features/index.php'/><meta property='og:description' content='TDengine is an open-source big data platform for IoT. Along with a 10x faster time-series database, it provides caching, stream computing, message queuing, and other functionalities. It is designed and optimized for Internet of Things, Connected Cars, and Industrial IoT. Read the documentation for TDengine here to get started right away.' /><link rel='canonical' href='https://www.taosdata.com/en/documentation/advanced-features/index.php'/><script src='../lib/jquery-3.4.1.min.js' type='application/javascript'></script><link href='../lib/bootstrap.min.css' rel='stylesheet'><link href='../styles/base.min.css' rel='stylesheet'><link rel='stylesheet' href='../lib/docs/taosdataprettify.css'><link rel='stylesheet' href='../lib/docs/docs.css'><script src='../lib/docs/prettify.js'></script><script src='../lib/docs/prettyprint-sql.js'></script></head><body><script>$('#documentation-href').addClass('active')</script><div class='container-fluid'><main class='content-wrapper'><section class='documentation'><a href='../index.html'>Back</a><h1>Advanced Features</h1>
<a class='anchor' id='Continuous-Query'></a><h2>Continuous Query</h2>
<p>Continuous Query is a query executed by TDengine periodically over a sliding window; it is a simplified form of stream computing driven by timers, not by events. A continuous query can be applied to a table or a STable, and the result set can be passed to the application directly via a callback function, or written into a new table in TDengine. The query is always executed on a specified time window (the window size is specified by the parameter interval), and this window slides forward as time flows (the sliding period is specified by the parameter sliding). </p>
<p>A continuous query is defined in plain TAOS SQL; there is nothing special about it. One of its best applications is downsampling. Once defined, at the end of each cycle the system executes the query and passes the result to the application or writes it to a database. </p>
<p>If historical data points are inserted into the stream, the query won't be re-executed, and the result set won't be updated. If the result set is passed to the application, the application needs to keep the status of the continuous query; the server won't maintain it. If the application restarts, it needs to decide the time from which the stream computing shall resume.</p>
<h4>How to use continuous query</h4>
<ul>
<li><p>Pass result set to application</p>
<p>Application shall use API taos_stream (details in connector section) to start the stream computing. Inside the API, the SQL syntax is:</p>
<pre><code class="sql language-sql">SELECT aggregation FROM [table_name | stable_name]
INTERVAL(window_size) SLIDING(period)</code></pre>
<p>where the new keyword INTERVAL specifies the window size, and SLIDING specifies the sliding period. If parameter sliding is not specified, the sliding period will be the same as window size. The minimum window size is 10ms. The sliding period shall not be larger than the window size. If you set a value larger than the window size, the system will adjust it to window size automatically.</p>
<p>For example:</p>
<pre><code class="sql language-sql">SELECT COUNT(*) FROM FOO_TABLE
INTERVAL(1M) SLIDING(30S)</code></pre>
<p>The above SQL statement will count the number of records for the past 1-minute window every 30 seconds.</p></li>
<li><p>Save the result into a database</p>
<p>If you want to save the result set of stream computing into a new table, the SQL shall be: </p>
<pre><code class="sql language-sql">CREATE TABLE table_name AS
SELECT aggregation from [table_name | stable_name]
INTERVAL(window_size) SLIDING(period)</code></pre>
<p>Also, you can set the time range to execute the continuous query. If no range is specified, the continuous query will be executed forever. For example, the following continuous query will be executed from now and will stop in one hour.</p>
<pre><code class="sql language-sql">CREATE TABLE QUERY_RES AS
SELECT COUNT(*) FROM FOO_TABLE
WHERE TS &gt; NOW AND TS &lt;= NOW + 1H
INTERVAL(1M) SLIDING(30S) </code></pre></li>
</ul>
<a class='anchor' id='Manage-the-Continuous-Query'></a><h3>Manage the Continuous Query</h3>
<p>Inside TDengine shell, you can use the command "show streams" to list the ongoing continuous queries, the command "kill stream" to kill a specific continuous query. </p>
<p>If you drop a table generated by the continuous query, the query will be removed too.</p>
<a class='anchor' id='Publisher/Subscriber'></a><h2>Publisher/Subscriber</h2>
<p>Time series data is a sequence of data points over time. Inside a table, the data points are stored in order of timestamp. In addition, under the data retention policy, data points are removed once their lifetime has passed. Seen this way, a table in TDengine is just a standard message queue. </p>
<p>To reduce the development complexity and improve data consistency, TDengine provides the pub/sub functionality. To publish a message, you simply insert a record into a table. Compared with popular messaging tool Kafka, you subscribe to a table or a SQL query statement, instead of a topic. Once new data points arrive, TDengine will notify the application. The process is just like Kafka. </p>
<p>The detailed API will be introduced in the <a href="https://www.taosdata.com/en/documentation/advanced-features/">connectors</a> section. </p>
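<p>On the publishing side this is plain SQL. A minimal hedged sketch follows; the table msg_queue and its schema are purely hypothetical, and the subscribing side uses the client API instead:</p>
<pre><code class="mysql language-mysql">CREATE TABLE msg_queue (ts TIMESTAMP, payload BINARY(128));
-- publishing a message is just inserting a record
INSERT INTO msg_queue VALUES (now, 'device 7 went offline');</code></pre>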
<a class='anchor' id='Caching'></a><h2>Caching</h2>
<p>TDengine allocates a fixed-size buffer in memory, and newly arrived data is written into the buffer first. Every device or table gets one or more memory blocks. For typical IoT scenarios, the hot data is the newly arrived data, which matters most for timely analysis. Based on this observation, TDengine manages the cache blocks in a First-In-First-Out manner: if there is not enough space in the buffer, the oldest data is saved to hard disk first and then overwritten by newly arrived data. TDengine also guarantees that every device keeps at least one block of data in the buffer. </p>
<p>By this design, the application can retrieve the latest data of each device super-fast, since it is all available in memory. Use the last or last_row function to return the last data record; with a super table, they can return the last records of all or a subset of the devices. For example, to retrieve the latest temperature from thermometers located in Beijing, execute the following SQL </p>
<pre><code class="mysql language-mysql">select last(*) from thermometers where location=beijing</code></pre>
<p>By this design, a caching tool like Redis is not needed in the system, which reduces system complexity. </p>
<p>TDengine creates one or more virtual nodes(vnode) in each data node. Each vnode contains data for multiple tables and has its own buffer. The buffer of a vnode is fully separated from the buffer of another vnode, not shared. But the tables in a vnode share the same buffer. </p>
<p>The system configuration parameter cacheBlockSize sets the cache block size in bytes, and another parameter, cacheNumOfBlocks, sets the number of cache blocks. The total buffer memory of a vnode is cacheBlockSize × cacheNumOfBlocks; for example, 16384-byte blocks with 4 blocks give a 64 KB buffer per vnode. Another system parameter, numOfBlocksPerMeter, caps the number of cache blocks a single table can use. You can specify these parameters when you create a database. </p>

Binary file not shown.

Before

Width:  |  Height:  |  Size: 43 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 120 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 75 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 26 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 44 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 67 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 60 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 49 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 21 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 22 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 66 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 25 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 6.8 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 24 KiB

View File

@ -1,93 +0,0 @@
<!DOCTYPE html><html lang='en'><head><title>Documentation | Taos Data</title><meta name='description' content='TDengine is an open-source big data platform for IoT. Along with a 10x faster time-series database, it provides caching, stream computing, message queuing, and other functionalities. It is designed and optimized for Internet of Things, Connected Cars, and Industrial IoT. Read the documentation for TDengine here to get started right away.'><meta name='keywords' content='TDengine, Big Data, Open Source, IoT, Connected Cars, Industrial IoT, time-series database, caching, stream computing, message queuing, IT infrastructure monitoring, application performance monitoring, Internet of Things,TAOS Data, Documentation, programming, coding, syntax, frequently asked questions, questions, faq'><meta name='title' content='Documentation | Taos Data'><meta property='og:site_name' content='Taos Data'/><meta property='og:title' content='Documentation | Taos Data'/><meta property='og:type' content='article'/><meta property='og:url' content='https://www.taosdata.com/en/documentation/connections-with-other-tools/index.php'/><meta property='og:description' content='TDengine is an open-source big data platform for IoT. Along with a 10x faster time-series database, it provides caching, stream computing, message queuing, and other functionalities. It is designed and optimized for Internet of Things, Connected Cars, and Industrial IoT. Read the documentation for TDengine here to get started right away.' /><link rel='canonical' href='https://www.taosdata.com/en/documentation/connections-with-other-tools/index.php'/><script src='../lib/jquery-3.4.1.min.js' type='application/javascript'></script><link href='../lib/bootstrap.min.css' rel='stylesheet'><link href='../styles/base.min.css' rel='stylesheet'><link rel='stylesheet' href='../lib/docs/taosdataprettify.css'><link rel='stylesheet' href='../lib/docs/docs.css'><script src='../lib/docs/prettify.js'></script><script src='../lib/docs/prettyprint-sql.js'></script></head><body><script>$('#documentation-href').addClass('active')</script><div class='container-fluid'><main class='content-wrapper'><section class='documentation'><a href='../index.html'>Back</a><h1>Connect with other tools</h1>
<a class='anchor' id='Telegraf'></a><h2>Telegraf</h2>
<p>TDengine is easy to integrate with <a href="https://www.influxdata.com/time-series-platform/telegraf/">Telegraf</a>, an open-source server agent for collecting and sending metrics and events, without more development.</p>
<a class='anchor' id='Install-Telegraf'></a><h3>Install Telegraf</h3>
<p>At present, TDengine supports Telegraf newer than version 1.7.4. Users can go to the <a href="https://portal.influxdata.com/downloads">download link</a> and choose the proper package to install on your system.</p>
<a class='anchor' id='Configure-Telegraf'></a><h3>Configure Telegraf</h3>
<p>Telegraf is configured by changing items in the configuration file <em>/etc/telegraf/telegraf.conf</em>.</p>
<p>In the <strong>output plugins</strong> section, add an <em>[[outputs.http]]</em> item: </p>
<ul>
<li><em>url</em>: http://ip:6020/telegraf/udb, in which <em>ip</em> is the IP address of any node in the TDengine cluster, port 6020 is the RESTful API port used by TDengine, and <em>udb</em> is the name of the database to save the data to, which needs to be created beforehand.</li>
<li><em>method</em>: "POST" </li>
<li><em>username</em>: username to login TDengine</li>
<li><em>password</em>: password to login TDengine </li>
<li><em>data_format</em>: "json"</li>
<li><em>json_timestamp_units</em>: "1ms"</li>
</ul>
<p>In the <strong>agent</strong> section:</p>
<ul>
<li>hostname: used to distinguish different machines; it needs to be unique.</li>
<li>metric_batch_size: 30, the maximum number of records Telegraf is allowed to write per batch. The larger the value, the less frequently requests are sent. For TDengine, the value should be less than 50.</li>
</ul>
<p>Please refer to the <a href="https://docs.influxdata.com/telegraf/v1.11/">Telegraf docs</a> for more information.</p>
<a class='anchor' id='Grafana'></a><h2>Grafana</h2>
<p><a href="https://grafana.com">Grafana</a> is an open-source system for time-series data display. It is easy to integrate TDengine and Grafana to build a monitor system. Data saved in TDengine can be fetched and shown on the Grafana dashboard.</p>
<a class='anchor' id='Install-Grafana'></a><h3>Install Grafana</h3>
<p>For now, TDengine only supports Grafana newer than version 5.2.4. Users can go to the <a href="https://grafana.com/grafana/download">Grafana download page</a> for the proper package to download.</p>
<a class='anchor' id='Configure-Grafana'></a><h3>Configure Grafana</h3>
<p>TDengine Grafana plugin is in the <em>/usr/local/taos/connector/grafana</em> directory.
Taking Centos 7.2 as an example, just copy TDengine directory to <em>/var/lib/grafana/plugins</em> directory and restart Grafana.</p>
<a class='anchor' id='Use-Grafana'></a><h3>Use Grafana</h3>
<p>Users can log in to the Grafana server (username/password: admin/admin) at localhost:3000 and configure TDengine as a data source. As shown in the picture below, TDengine appears as a data source option in the box:</p>
<p><img src="../assets/clip_image001.png" alt="img" /></p>
<p>When choosing TDengine as the data source, the Host in the HTTP configuration should be set to the IP address of any node of a TDengine cluster, with port 6020. For example, when TDengine and Grafana are on the same machine, use http://localhost:6020. </p>
<p>Besides, users also should set the username and password used to log into TDengine. Then click <em>Save&Test</em> button to save.</p>
<p><img src="../assets/clip_image001-2474914.png" alt="img" /></p>
<p>Then, TDengine as a data source should show in the Grafana data source list.</p>
<p><img src="../assets/clip_image001-2474939.png" alt="img" /></p>
<p>Then, users can create Dashboards in Grafana using TDengine as the data source:</p>
<p><img src="../assets/clip_image001-2474961.png" alt="img" /></p>
<p>Click the <em>Add Query</em> button to add a query and input the SQL command you want to run in the <em>INPUT SQL</em> text box. The SQL command should return a two-column, multi-row result, such as <em>SELECT count(*) FROM sys.cpu WHERE ts&gt;=from and ts&lt;to interval(interval)</em>, in which <em>from</em>, <em>to</em> and <em>interval</em> are TDengine built-in variables representing the query time range and time interval.</p>
<p><em>ALIAS BY</em> field is to set the query alias. Click <em>GENERATE SQL</em> to send the command to TDengine:</p>
<p><img src="../assets/clip_image001-2474987.png" alt="img" /></p>
<p>Please refer to the <a href="https://grafana.com/docs/">Grafana official document</a> for more information about Grafana.</p>
<a class='anchor' id='Matlab'></a><h2>Matlab</h2>
<p>Matlab can connect to and retrieve data from TDengine by TDengine JDBC Driver.</p>
<a class='anchor' id='MatLab-and-TDengine-JDBC-adaptation'></a><h3>MatLab and TDengine JDBC adaptation</h3>
<p>Several steps are required to adapt Matlab to TDengine. Taking adapting Matlab2017a on Windows10 as an example:</p>
<ol>
<li>Copy the file <em>JDBCDriver-1.0.0-dist.jar</em> in the TDengine package to the directory <em>${matlab_root_dir}\MATLAB\R2017a\java\jar\toolbox</em></li>
<li>Copy the file <em>taos.lib</em> in the TDengine package to <em>${matlab_root_dir}\MATLAB\R2017a\lib\win64</em></li>
<li>Add the .jar package just copied to the Matlab classpath by appending the line below to the end of the file <em>${matlab_root_dir}\MATLAB\R2017a\toolbox\local\classpath.txt</em></li>
</ol>
<p> <code>$matlabroot/java/jar/toolbox/JDBCDriver-1.0.0-dist.jar</code></p>
<ol start="4">
<li>Create a file called <em>javalibrarypath.txt</em> in the directory <em>${user_home}\AppData\Roaming\MathWorks\MATLAB\R2017a</em>, and add the <em>taos.dll</em> path in the file. For example, if the file <em>taos.dll</em> is in the directory <em>C:\Windows\System32</em>, then add the following line in file <em>javalibrarypath.txt</em>:</li>
</ol>
<p> <code>C:\Windows\System32</code></p>
<a class='anchor' id='TDengine-operations-in-Matlab'></a><h3>TDengine operations in Matlab</h3>
<p>After correct configuration, open Matlab:</p>
<ul>
<li><p>build a connection</p>
<p><code>conn = database('db', 'root', 'taosdata', 'com.taosdata.jdbc.TSDBDriver', 'jdbc:TSDB://127.0.0.1:0/')</code></p></li>
<li><p>Query</p>
<p><code>sql0 = ['select * from tb'];</code></p>
<p><code>data = select(conn, sql0);</code></p></li>
<li><p>Insert a record:</p>
<p><code>sql1 = ['insert into tb values (now, 1)'];</code></p>
<p><code>exec(conn, sql1)</code></p></li>
</ul>
<p>Please refer to the file <em>examples\Matlab\TDengineDemo.m</em> for more information.</p>
<a class='anchor' id='R'></a><h2>R</h2>
<p>Users can use R language to access the TDengine server with the JDBC interface. At first, install JDBC package in R:</p>
<pre><code class="R language-R">install.packages('rJDBC', repos='http://cran.us.r-project.org')</code></pre>
<p>Then use <em>library</em> function to load the package:</p>
<pre><code class="R language-R">library('RJDBC')</code></pre>
<p>Then load the TDengine JDBC driver:</p>
<pre><code class="R language-R">drv&lt;-JDBC("com.taosdata.jdbc.TSDBDriver","JDBCDriver-1.0.0-dist.jar", identifier.quote="\"")</code></pre>
<p>If succeed, no error message will display. Then use the following command to try a database connection:</p>
<pre><code class="R language-R">conn&lt;-dbConnect(drv,"jdbc:TSDB://192.168.0.1:0/?user=root&amp;password=taosdata","root","taosdata")</code></pre>
<p>Please replace the IP address in the command above to the correct one. If no error message is shown, then the connection is established successfully. TDengine supports below functions in <em>RJDBC</em> package:</p>
<ul>
<li><em>dbWriteTable(conn, "test", iris, overwrite=FALSE, append=TRUE)</em>: write the data in a data frame <em>iris</em> to the table <em>test</em> in the TDengine server. Parameter <em>overwrite</em> must be <em>false</em>. <em>append</em> must be <em>TRUE</em> and the schema of the data frame <em>iris</em> should be the same as the table <em>test</em>.</li>
<li><em>dbGetQuery(conn, "select count(*) from test")</em>: run a query command</li>
<li><em>dbSendUpdate(conn, "use db")</em>: run any non-query command.</li>
<li><em>dbReadTable(conn, "test"</em>): read all the data in table <em>test</em></li>
<li><em>dbDisconnect(conn)</em>: close a connection</li>
<li><em>dbRemoveTable(conn, "test")</em>: remove table <em>test</em></li>
</ul>
<p>Below functions are <strong>not supported</strong> currently:</p>
<ul>
<li><em>dbExistsTable(conn, "test")</em>: whether table <em>test</em> exists</li>
<li><em>dbListTables(conn)</em>: list all tables in the connection</li>
</ul><a href='../index.html'>Back</a></section></main></div><?php include($s.'/footer.php'); ?><script>$('pre').addClass('prettyprint linenums');PR.prettyPrint()</script><script src='lib/docs/liner.js'></script></body></html>

View File

@ -1,354 +0,0 @@
<!DOCTYPE html><html lang='en'><head><title>Documentation | Taos Data</title><meta name='description' content='TDengine is an open-source big data platform for IoT. Along with a 10x faster time-series database, it provides caching, stream computing, message queuing, and other functionalities. It is designed and optimized for Internet of Things, Connected Cars, and Industrial IoT. Read the documentation for TDengine here to get started right away.'><meta name='keywords' content='TDengine, Big Data, Open Source, IoT, Connected Cars, Industrial IoT, time-series database, caching, stream computing, message queuing, IT infrastructure monitoring, application performance monitoring, Internet of Things,TAOS Data, Documentation, programming, coding, syntax, frequently asked questions, questions, faq'><meta name='title' content='Documentation | Taos Data'><meta property='og:site_name' content='Taos Data'/><meta property='og:title' content='Documentation | Taos Data'/><meta property='og:type' content='article'/><meta property='og:url' content='https://www.taosdata.com/en/documentation/connector/index.php'/><meta property='og:description' content='TDengine is an open-source big data platform for IoT. Along with a 10x faster time-series database, it provides caching, stream computing, message queuing, and other functionalities. It is designed and optimized for Internet of Things, Connected Cars, and Industrial IoT. Read the documentation for TDengine here to get started right away.' /><link rel='canonical' href='https://www.taosdata.com/en/documentation/connector/index.php'/><script src='../lib/jquery-3.4.1.min.js' type='application/javascript'></script><link href='../lib/bootstrap.min.css' rel='stylesheet'><link href='../styles/base.min.css' rel='stylesheet'><link rel='stylesheet' href='../lib/docs/taosdataprettify.css'><link rel='stylesheet' href='../lib/docs/docs.css'><script src='../lib/docs/prettify.js'></script><script src='../lib/docs/prettyprint-sql.js'></script></head><body><script>$('#documentation-href').addClass('active')</script><div class='container-fluid'><main class='content-wrapper'><section class='documentation'><a href='../index.html'>Back</a><h1>TDengine connectors</h1>
<p>TDengine provides many connectors for development, including C/C++, JAVA, Python, RESTful, Go, Node.JS, etc.</p>
<a class='anchor' id='C/C++-API'></a><h2>C/C++ API</h2>
<p>C/C++ APIs are similar to the MySQL APIs. Applications should include TDengine head file <em>taos.h</em> to use C/C++ APIs by adding the following line in code:</p>
<pre><code class="C language-C">#include &lt;taos.h&gt;</code></pre>
<p>Make sure TDengine library <em>libtaos.so</em> is installed and use <em>-ltaos</em> option to link the library when compiling. The return values of all APIs are <em>-1</em> or <em>NULL</em> for failure.</p>
<a class='anchor' id='C/C++-sync-API'></a><h3>C/C++ sync API</h3>
<p>Sync APIs are those APIs waiting for responses from the server after sending a request. TDengine has the following sync APIs:</p>
<ul>
<li><p><code>TAOS *taos_connect(char *ip, char *user, char *pass, char *db, int port)</code></p>
<p>Open a connection to a TDengine server. The parameters are <em>ip</em> (IP address of the server), <em>user</em> (username to login), <em>pass</em> (password to login), <em>db</em> (database to use after connection) and <em>port</em> (port number to connect). The parameter <em>db</em> can be NULL for no database to use after connection. Otherwise, the database should exist before connection or a connection error is reported. The handle returned by this API should be kept for future use.</p></li>
<li><p><code>void taos_close(TAOS *taos)</code></p>
<p>Close a connection to a TDengine server by the handle returned by <em>taos_connect</em></p></li>
<li><p><code>int taos_query(TAOS *taos, char *sqlstr)</code></p>
<p>The API used to run a SQL command. The command can be DQL or DML. The parameter <em>taos</em> is the handle returned by <em>taos_connect</em>. Return value <em>-1</em> means failure.</p></li>
<li><p><code>TAOS_RES *taos_use_result(TAOS *taos)</code></p>
<p>Use the result after running <em>taos_query</em>. The handle returned should be kept for future fetch.</p></li>
<li><p><code>TAOS_ROW taos_fetch_row(TAOS_RES *res)</code></p>
<p>Fetch a row of return results through <em>res</em>, the handle returned by <em>taos_use_result</em>.</p></li>
<li><p><code>int taos_num_fields(TAOS_RES *res)</code></p>
<p>Get the number of fields in the return result.</p></li>
<li><p><code>TAOS_FIELD *taos_fetch_fields(TAOS_RES *res)</code></p>
<p>Fetch the description of each field. The description includes the property of data type, field name, and bytes. The API should be used with <em>taos_num_fields</em> to fetch a row of data.</p></li>
<li><p><code>void taos_free_result(TAOS_RES *res)</code></p>
<p>Free the resources used by a result set. Make sure to call this API after fetching results or memory leak would happen.</p></li>
<li><p><code>void taos_init()</code></p>
<p>Initialize the environment variable used by the TDengine client. Calling this API is not necessary since it is invoked in <em>taos_connect</em> by default.</p></li>
<li><p><code>char *taos_errstr(TAOS *taos)</code></p>
<p>Return the reason of the last API call failure. The return value is a string.</p></li>
<li><p><code>int *taos_errno(TAOS *taos)</code></p>
<p>Return the error code of the last API call failure. The return value is an integer.</p></li>
<li><p><code>int taos_options(TSDB_OPTION option, const void * arg, ...)</code></p>
<p>Set client options. The parameter <em>option</em> supports values of <em>TSDB_OPTION_CONFIGDIR</em> (configuration directory), <em>TSDB_OPTION_SHELL_ACTIVITY_TIMER</em>, <em>TSDB_OPTION_LOCALE</em> (client locale) and <em>TSDB_OPTION_TIMEZONE</em> (client timezone).</p></li>
</ul>
<p>These 12 APIs are the most important and most frequently used ones. Users can check the <em>taos.h</em> file for more API information.</p>
<p><strong>Note</strong>: The connection to a TDengine server is not multi-thread safe. So a connection can only be used by one thread.</p>
<a class='anchor' id='C/C++-async-API'></a><h3>C/C++ async API</h3>
<p>In addition to sync APIs, TDengine also provides async APIs, which are more efficient. Async APIs return right away without waiting for a response from the server, allowing the application to continue with other tasks without blocking. They are especially useful over a poor network.</p>
<p>All async APIs require callback functions. The callback functions have the format:</p>
<pre><code class="C language-C">void fp(void *param, TAOS_RES * res, TYPE param3)</code></pre>
<p>The first two parameters of the callback function are the same for all async APIs. The third parameter is different for different APIs. Generally, the first parameter is the handle provided to the API for action. The second parameter is a result handle.</p>
<ul>
<li><p><code>void taos_query_a(TAOS *taos, char *sqlstr, void (*fp)(void *param, TAOS_RES *, int code), void *param);</code></p>
<p>The async query interface. <em>taos</em> is the handle returned by <em>taos_connect</em> interface. <em>sqlstr</em> is the SQL command to run. <em>fp</em> is the callback function. <em>param</em> is the parameter required by the callback function. The third parameter of the callback function <em>code</em> is <em>0</em> (for success) or a negative number (for failure, call taos_errstr to get the error as a string). Applications mainly handle with the second parameter, the returned result set.</p></li>
<li><p><code>void taos_fetch_rows_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, int numOfRows), void *param);</code></p>
<p>The async API to fetch a batch of rows, which should only be used with a <em>taos_query_a</em> call. The parameter <em>res</em> is the result handle returned by <em>taos_query_a</em>. <em>fp</em> is the callback function. <em>param</em> is a user-defined structure to pass to <em>fp</em>. The parameter <em>numOfRows</em> is the number of result rows in the current fetch cycle. In the callback function, applications should call <em>taos_fetch_row</em> to get records from the result handle. After getting a batch of results, applications should continue to call <em>taos_fetch_rows_a</em> API to handle the next batch, until the <em>numOfRows</em> is <em>0</em> (for no more data to fetch) or <em>-1</em> (for failure).</p></li>
<li><p><code>void taos_fetch_row_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), void *param);</code></p>
<p>The async API to fetch a result row. <em>res</em> is the result handle. <em>fp</em> is the callback function. <em>param</em> is a user-defined structure to pass to <em>fp</em>. The third parameter of the callback function is a single result row, which is different from that of <em>taos_fetch_rows_a</em> API. With this API, it is not necessary to call <em>taos_fetch_row</em> to retrieve each result row, which is handier than <em>taos_fetch_rows_a</em> but less efficient.</p></li>
</ul>
<p>Applications may apply operations on multiple tables. However, <strong>it is important to make sure the operations on the same table are serialized</strong>. That means after sending an insert request in a table to the server, no operations on the table are allowed before a response is received.</p>
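<p>Below is a sketch of how these async APIs chain together, assuming a connection obtained as in the sync example above; control returns immediately after <em>taos_query_a</em>, and rows are consumed batch by batch in the callbacks:</p>
<pre><code class="C language-C">#include &lt;taos.h&gt;

void fetch_cb(void *param, TAOS_RES *res, int numOfRows) {
  if (numOfRows &gt; 0) {
    for (int i = 0; i &lt; numOfRows; i++) {
      TAOS_ROW row = taos_fetch_row(res);  // consume one row of this batch
      (void)row;  // process the row here
    }
    taos_fetch_rows_a(res, fetch_cb, param);  // request the next batch
  }
  // numOfRows == 0: no more data; numOfRows == -1: failure
}

void query_cb(void *param, TAOS_RES *res, int code) {
  if (code == 0) {
    taos_fetch_rows_a(res, fetch_cb, param);  // start fetching batches
  }
}

// after taos_connect:
//   taos_query_a(taos, "select * from t1", query_cb, NULL);</code></pre>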
<a class='anchor' id='C/C++-continuous-query-interface'></a><h3>C/C++ continuous query interface</h3>
<p>TDengine provides APIs for continuous query driven by time, which run queries periodically in the background. There are only two APIs:</p>
<ul>
<li><p><code>TAOS_STREAM *taos_open_stream(TAOS *taos, char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), int64_t stime, void *param, void (*callback)(void *));</code></p>
<p>The API is used to create a continuous query.</p></li>
<li><p><em>taos</em>: the connection handle returned by <em>taos_connect</em>.</p></li>
<li><p><em>sqlstr</em>: the SQL string to run. Only query commands are allowed.</p></li>
<li><p><em>fp</em>: the callback function to run after a query</p></li>
<li><p><em>param</em>: a parameter passed to <em>fp</em></p></li>
<li><p><em>stime</em>: the time when the stream starts, in epoch milliseconds. If <em>0</em> is given, the start time is set to the current time.</p></li>
<li><p><em>callback</em>: a callback function to run when the continuous query stops automatically.</p>
<p>The API is expected to return a handle for success. Otherwise, a NULL pointer is returned.</p></li>
<li><p><code>void taos_close_stream (TAOS_STREAM *tstr)</code></p>
<p>Close the continuous query by the handle returned by <em>taos_open_stream</em>. Make sure to call this API when the continuous query is not needed anymore.</p></li>
</ul>
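<p>A sketch of how the two stream APIs are typically used, with a hypothetical per-minute aggregation as the stream SQL:</p>
<pre><code class="C language-C">#include &lt;taos.h&gt;

void stream_cb(void *param, TAOS_RES *res, TAOS_ROW row) {
  // called once for each result row the periodic query produces
}

void stopped_cb(void *param) {
  // called when the continuous query stops automatically
}

// after taos_connect:
//   TAOS_STREAM *stream = taos_open_stream(taos,
//       "select count(*) from t1 interval(1m)",
//       stream_cb, 0 /* stime 0: start now */, NULL, stopped_cb);
//   if (stream != NULL) {
//     ...
//     taos_close_stream(stream);  // stop it when no longer needed
//   }</code></pre>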
<a class='anchor' id='C/C++-subscription-API'></a><h3>C/C++ subscription API</h3>
<p>For the time being, TDengine supports subscription on one table. It is implemented through periodic pulling from a TDengine server. </p>
<ul>
<li><p><code>TAOS_SUB *taos_subscribe(char *host, char *user, char *pass, char *db, char *table, long time, int mseconds)</code>
The API is used to start a subscription session and returns a handle. The parameters required are <em>host</em> (IP address of a TDengine server), <em>user</em> (username), <em>pass</em> (password), <em>db</em> (database to use), <em>table</em> (name of the table to subscribe to), <em>time</em> (start time of the subscription, 0 for now), and <em>mseconds</em> (pulling period). If it fails to open a subscription session, a <em>NULL</em> pointer is returned.</p></li>
<li><p><code>TAOS_ROW taos_consume(TAOS_SUB *tsub)</code>
The API used to get new data from a TDengine server. It should be put in an infinite loop. The parameter <em>tsub</em> is the handle returned by <em>taos_subscribe</em>. If new data has arrived, the API returns a row of the result; otherwise, it blocks until new data arrives. If a <em>NULL</em> pointer is returned, an error has occurred.</p></li>
<li><p><code>void taos_unsubscribe(TAOS_SUB *tsub)</code>
Stop a subscription session by the handle returned by <em>taos_subscribe</em>.</p></li>
<li><p><code>int taos_num_fields(TAOS_SUB *tsub)</code>
The API used to get the number of fields in a row.</p></li>
<li><p><code>TAOS_FIELD *taos_fetch_fields(TAOS_RES *res)</code>
The API used to get the description of each column.</p></li>
</ul>
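<p>A sketch of the pull-based subscription loop, reusing the hypothetical server, database, and table from the examples above and assuming a 1000 ms pulling period:</p>
<pre><code class="C language-C">#include &lt;taos.h&gt;

int main() {
  TAOS_SUB *tsub = taos_subscribe("192.168.0.1", "root", "taosdata",
                                  "demo", "t1", 0 /* from now */, 1000);
  if (tsub == NULL) return -1;
  TAOS_ROW row;
  while ((row = taos_consume(tsub)) != NULL) {  // blocks until new data
    int numFields = taos_num_fields(tsub);
    // process the newly arrived row with numFields columns
  }
  taos_unsubscribe(tsub);  // stop the subscription session
  return 0;
}</code></pre>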
<a class='anchor' id='Java-Connector'></a><h2>Java Connector</h2>
<a class='anchor' id='JDBC-Interface'></a><h3>JDBC Interface</h3>
<p>TDengine provides a JDBC driver <code>taos-jdbcdriver-x.x.x.jar</code> for Enterprise Java developers. TDengine's JDBC Driver is implemented as a subset of the standard JDBC 3.0 Specification and supports the most common Java development frameworks. The driver is currently not published to online dependency repositories such as the Maven Central Repository, so users should manually add the <code>.jar</code> file to their local dependency repository.</p>
<p>Please note the JDBC driver itself relies on a native library written in C. On a Linux OS, the driver relies on a <code>libtaos.so</code> native library, where .so stands for "Shared Object". After the successful installation of TDengine on Linux, <code>libtaos.so</code> should be automatically copied to <code>/usr/local/lib/taos</code> and added to the system's default search path. On a Windows OS, the driver relies on a <code>taos.dll</code> native library, where .dll stands for "Dynamic Link Library". After the successful installation of the TDengine client on Windows, the <code>taos-jdbcdriver.jar</code> file can be found in <code>C:/TDengine/driver/JDBC</code>; the <code>taos.dll</code> file can be found in <code>C:/TDengine/driver/C</code> and should have been automatically copied to the system's search path <code>C:/Windows/System32</code>.</p>
<p>Developers can refer to Oracle's official JDBC API documentation for detailed usage of classes and methods. However, there are some differences in connection configuration and supported methods between the TDengine driver implementation and traditional relational databases.</p>
<p>For database connections, TDengine's JDBC driver has the following configurable parameters in the JDBC URL. The standard format of a TDengine JDBC URL is:</p>
<p><code>jdbc:TAOS://{host_ip}:{port}/{database_name}?[user={user}|&amp;password={password}|&amp;charset={charset}|&amp;cfgdir={config_dir}|&amp;locale={locale}|&amp;timezone={timezone}]</code></p>
<p>where <code>{}</code> marks the required parameters and <code>[]</code> marks the optional. The usage of each parameter is pretty straightforward:</p>
<ul>
<li>user - login user name for TDengine; by default, it's <code>root</code></li>
<li>password - login password; by default, it's <code>taosdata</code></li>
<li>charset - the client-side charset; by default, it's the operating system's charset</li>
<li>cfgdir - the directory of the TDengine client configuration file; by default it's <code>/etc/taos</code> on Linux and <code>C:\TDengine\cfg</code> on Windows</li>
<li>locale - the language environment of the TDengine client; by default, it's the operating system's locale</li>
<li>timezone - the timezone of the TDengine client; by default, it's the operating system's timezone</li>
</ul>
<p>All parameters can be configured at the time when creating a connection using the java.sql.DriverManager class, for example:</p>
<pre><code class="java language-java">import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;
import com.taosdata.jdbc.TSDBDriver;
public Connection getConn() throws Exception {
    Class.forName("com.taosdata.jdbc.TSDBDriver");
    String jdbcUrl = "jdbc:TAOS://127.0.0.1:0/db?user=root&amp;password=taosdata";
    Properties connProps = new Properties();
    connProps.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root");
    connProps.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata");
    connProps.setProperty(TSDBDriver.PROPERTY_KEY_CONFIG_DIR, "/etc/taos");
    connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
    connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
    connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIMEZONE, "UTC-8");
    Connection conn = DriverManager.getConnection(jdbcUrl, connProps);
    return conn;
}</code></pre>
<p>Except for <code>cfgdir</code>, all the parameters listed above can also be configured in the configuration file. The properties specified when calling DriverManager.getConnection() have the highest priority among all configuration methods. The JDBC URL has the second-highest priority, and the configuration file has the lowest. A parameter explicitly configured in a higher-priority method always overrides the same parameter configured in a lower-priority one. For example, if <code>charset</code> is explicitly configured as "UTF-8" in the JDBC URL and "GBK" in the <code>taos.cfg</code> file, then "UTF-8" will be used.</p>
<p>Although the JDBC driver is implemented following the JDBC standard as much as possible, there are major differences between TDengine and traditional databases in terms of data models that lead to differences in the driver implementation. Here is a list of heads-ups for developers who have plenty of experience with traditional databases but little with TDengine:</p>
<ul>
<li>TDengine does NOT support updating or deleting a specific record, which leads to some unsupported methods in the JDBC driver</li>
<li>TDengine currently does not support <code>join</code> or <code>union</code> operations, and thus lacks support for the associated methods in the JDBC driver</li>
<li>TDengine supports batch insertions which are controlled at the level of SQL statement writing instead of API calls</li>
<li>TDengine doesn't support nested queries and neither does the JDBC driver. Thus for each established connection to TDengine, there should be only one open result set associated with it</li>
</ul>
<p>All the error codes and error messages can be found in <code>TSDBError.java</code> . For a more detailed coding example, please refer to the demo project <code>JDBCDemo</code> in TDengine's code examples. </p>
<a class='anchor' id='Python-Connector'></a><h2>Python Connector</h2>
<a class='anchor' id='Pre-requirement'></a><h3>Pre-requirement</h3>
<ul>
<li>TDengine installed (TDengine client installed if on Windows)</li>
<li>python 2.7 or >= 3.4</li>
<li>pip installed</li>
</ul>
<a class='anchor' id='Installation'></a><h3>Installation</h3>
<a class='anchor' id='Linux'></a><h4>Linux</h4>
<p>Users can find the python client packages in our source code directory <em>src/connector/python</em>. There are two directories, corresponding to the two supported python versions. Please choose the correct package to install, using the <em>pip</em> command:</p>
<pre><code class="cmd language-cmd">pip install src/connector/python/linux/python2/</code></pre>
<p>or</p>
<pre><code>pip install src/connector/python/linux/python3/</code></pre>
<a class='anchor' id='Windows'></a><h4>Windows</h4>
<p>Assuming the Windows TDengine client has been installed, copy the file "C:\TDengine\driver\taos.dll" to the folder "C:\windows\system32", and then enter the <em>cmd</em> Windows command interface:</p>
<pre><code>cd C:\TDengine\connector\python\windows</code></pre>
<pre><code>pip install python2\</code></pre>
<p>or</p>
<pre><code>cd C:\TDengine\connector\python\windows</code></pre>
<pre><code>pip install python3\</code></pre>
<p>* If the <em>pip</em> command is not available on the system, users can either install pip or simply copy the <em>taos</em> directory from the python client directory into the application directory.</p>
<a class='anchor' id='Usage'></a><h3>Usage</h3>
<a class='anchor' id='Examples'></a><h4>Examples</h4>
<p>First, import the TDengine module:</p>
<pre><code class="python language-python">import taos </code></pre>
<p>Get the connection:</p>
<pre><code>
conn = taos.connect(host="127.0.0.1", user="root", password="taosdata", config="/etc/taos")
c1 = conn.cursor()
</code></pre>
<p>* <em>host is the IP address of the TDengine server, and config is the directory where the TDengine client configuration file is located</em></p>
<p>Insert records into the database:</p>
<pre><code>
import datetime
# create a database
c1.execute('create database db')
c1.execute('use db')
# create a table
c1.execute('create table tb (ts timestamp, temperature int, humidity float)')
# insert a record
start_time = datetime.datetime(2019, 11, 1)
affected_rows = c1.execute('insert into tb values (\'%s\', 0, 0.0)' %start_time)
# insert multiple records in a batch
time_interval = datetime.timedelta(seconds=60)
sqlcmd = ['insert into tb values']
for irow in range(1,11):
start_time += time_interval
sqlcmd.append('(\'%s\', %d, %f)' %(start_time, irow, irow*1.2))
affected_rows = c1.execute(' '.join(sqlcmd))
</code></pre>
<p>Query the database:</p>
<pre><code class="python language-python">
c1.execute('select * from tb')
# fetch all returned results
data = c1.fetchall()
# data is a list of returned rows with each row being a tuple
numOfRows = c1.rowcount
numOfCols = len(c1.description)
for irow in range(numOfRows):
    print("Row%d: ts=%s, temperature=%d, humidity=%f" %(irow, data[irow][0], data[irow][1], data[irow][2]))
# use the cursor as an iterator to retrieve all returned results
c1.execute('select * from tb')
for data in c1:
    print("ts=%s, temperature=%d, humidity=%f" %(data[0], data[1], data[2]))
</code></pre>
<p>Close the connection:</p>
<pre><code class="python language-python">
c1.close()
conn.close()
</code></pre>
<a class='anchor' id='Help-information'></a><h4>Help information</h4>
<p>Users can get module information from the Python help interface or refer to our python code examples. We list the main classes and methods below:</p>
<ul>
<li><p><em>TaosConnection</em> class</p>
<p>Run <code>help(taos.TaosConnection)</code> in python terminal for details.</p></li>
<li><p><em>TaosCursor</em> class</p>
<p>Run <code>help(taos.TaosCursor)</code> in python terminal for details.</p></li>
<li><p>connect method</p>
<p>Open a connection. Run <code>help(taos.connect)</code> in python terminal for details.</p></li>
</ul>
<a class='anchor' id='RESTful-Connector'></a><h2>RESTful Connector</h2>
<p>TDengine also provides a RESTful API to support development on different platforms. Unlike other databases, the TDengine RESTful API applies operations to the database through SQL commands in the body of an HTTP POST request. All users need to provide is a URL.</p>
<p>For the time being, the TDengine RESTful API uses a <em>&lt;TOKEN&gt;</em> generated from the username and password for identification. Safer identification methods will be provided in the future.</p>
<a class='anchor' id='HTTP-URL-encoding'></a><h3>HTTP URL encoding</h3>
<p>To use TDengine RESTful API, the URL should have the following encoding format:</p>
<pre><code>http://&lt;ip&gt;:&lt;PORT&gt;/rest/sql</code></pre>
<ul>
<li><em>ip</em>: IP address of any node in a TDengine cluster</li>
<li><em>PORT</em>: TDengine HTTP service port. It is 6020 by default.</li>
</ul>
<p>For example, the URL <em>http://192.168.0.1:6020/rest/sql</em> is used to send an HTTP request to a TDengine server with IP address 192.168.0.1.</p>
<p>It is required to add a token in an HTTP request header for identification.</p>
<pre><code>Authorization: Basic &lt;TOKEN&gt;</code></pre>
<p>The HTTP request body contains the SQL command to run. If the SQL command contains a table name, it should also provide the database name it belongs to in the form of <code>&lt;db_name&gt;.&lt;tb_name&gt;</code>. Otherwise, an error code is returned.</p>
<p>For example, use the <em>curl</em> command to send an HTTP request:</p>
<pre><code>curl -H 'Authorization: Basic &lt;TOKEN&gt;' -d '&lt;SQL&gt;' &lt;ip&gt;:&lt;PORT&gt;/rest/sql</code></pre>
<p>or use</p>
<pre><code>curl -u username:password -d '&lt;SQL&gt;' &lt;ip&gt;:&lt;PORT&gt;/rest/sql</code></pre>
<p>where <code>TOKEN</code> is the Base64-encoded string of <code>{username}:{password}</code>; e.g. <code>root:taosdata</code> is encoded as <code>cm9vdDp0YW9zZGF0YQ==</code></p>
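<p>Since Base64 is an encoding rather than encryption, the token can be computed by any client. As a worked example, here is a minimal Base64 encoder sketch in C that reproduces the token above:</p>
<pre><code class="C language-C">#include &lt;stdio.h&gt;

/* A minimal Base64 encoder, enough to reproduce the token above. */
static const char b64[] =
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";

void base64_encode(const unsigned char *in, int len, char *out) {
  int i, j = 0;
  for (i = 0; i + 2 &lt; len; i += 3) {  // full 3-byte groups
    out[j++] = b64[in[i] &gt;&gt; 2];
    out[j++] = b64[((in[i] &amp; 0x03) &lt;&lt; 4) | (in[i + 1] &gt;&gt; 4)];
    out[j++] = b64[((in[i + 1] &amp; 0x0f) &lt;&lt; 2) | (in[i + 2] &gt;&gt; 6)];
    out[j++] = b64[in[i + 2] &amp; 0x3f];
  }
  if (i &lt; len) {  // 1 or 2 trailing bytes need '=' padding
    out[j++] = b64[in[i] &gt;&gt; 2];
    if (i + 1 &lt; len) {
      out[j++] = b64[((in[i] &amp; 0x03) &lt;&lt; 4) | (in[i + 1] &gt;&gt; 4)];
      out[j++] = b64[(in[i + 1] &amp; 0x0f) &lt;&lt; 2];
    } else {
      out[j++] = b64[(in[i] &amp; 0x03) &lt;&lt; 4];
      out[j++] = '=';
    }
    out[j++] = '=';
  }
  out[j] = '\0';
}

int main() {
  char token[64];
  base64_encode((const unsigned char *)"root:taosdata", 13, token);
  printf("%s\n", token);  // prints cm9vdDp0YW9zZGF0YQ==
  return 0;
}</code></pre>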
<a class='anchor' id='HTTP-response'></a><h3>HTTP response</h3>
<p>The HTTP response is in JSON format as below:</p>
<pre><code>{
"status": "succ",
"head": ["column1","column2", …],
"data": [
["2017-12-12 23:44:25.730", 1],
["2017-12-12 22:44:25.728", 4]
],
"rows": 2
} </code></pre>
<p>Specifically,</p>
<ul>
<li><em>status</em>: the result of the operation, success or failure</li>
<li><em>head</em>: description of returned result columns</li>
<li><em>data</em>: the returned data array. If no data is returned, only an <em>affected_rows</em> field is listed</li>
<li><em>rows</em>: the number of rows returned</li>
</ul>
<a class='anchor' id='Example'></a><h3>Example</h3>
<ul>
<li><p>Use <em>curl</em> command to query all the data in table <em>t1</em> of database <em>demo</em>:</p>
<p><code>curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.t1' 192.168.0.1:6020/rest/sql</code></p></li>
</ul>
<p>The return value is like:</p>
<pre><code>{
"status": "succ",
"head": ["column1","column2","column3"],
"data": [
["2017-12-12 23:44:25.730", 1, 2.3],
["2017-12-12 22:44:25.728", 4, 5.6]
],
"rows": 2
}</code></pre>
<ul>
<li><p>Use HTTP to create a database</p>
<p><code>curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'create database demo' 192.168.0.1:6020/rest/sql</code></p>
<p>The return value should be:</p></li>
</ul>
<pre><code>{
"status": "succ",
"head": ["affected_rows"],
"data": [[1]],
"rows": 1,
}</code></pre>
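<p>For C programs that prefer the RESTful interface over the native client, the requests above can also be sent with libcurl (a third-party library, assumed to be installed; host, port, and token follow the examples above). A minimal sketch:</p>
<pre><code class="C language-C">#include &lt;curl/curl.h&gt;

int main(void) {
  curl_global_init(CURL_GLOBAL_DEFAULT);
  CURL *curl = curl_easy_init();
  if (curl) {
    struct curl_slist *headers = NULL;
    headers = curl_slist_append(headers,
        "Authorization: Basic cm9vdDp0YW9zZGF0YQ==");
    curl_easy_setopt(curl, CURLOPT_URL, "http://192.168.0.1:6020/rest/sql");
    curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);
    // the SQL command goes in the POST body
    curl_easy_setopt(curl, CURLOPT_POSTFIELDS, "select * from demo.t1");
    // by default the JSON response is written to stdout
    if (curl_easy_perform(curl) != CURLE_OK) {
      // handle transport-level errors here
    }
    curl_slist_free_all(headers);
    curl_easy_cleanup(curl);
  }
  curl_global_cleanup();
  return 0;
}</code></pre>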
<a class='anchor' id='Go-Connector'></a><h2>Go Connector</h2>
<p>TDengine also provides a Go client package named <em>taosSql</em> for users to access TDengine with Go. The package is in <em>/usr/local/taos/connector/go/src/taosSql</em> by default after TDengine is installed. Users can copy the directory <em>/usr/local/taos/connector/go/src/taosSql</em> to the <em>src</em> directory of their project and import the package in the source code.</p>
<pre><code class="Go language-Go">import (
"database/sql"
_ "taosSql"
)</code></pre>
<p>The <em>taosSql</em> package is in <em>cgo</em> form and calls TDengine's C/C++ sync interfaces, so a connection can only be used by one thread at a time. Users can open multiple connections for multi-threaded operations.</p>
<p>Please refer to the demo code in the package for more information.</p>
<a class='anchor' id='Node.js-Connector'></a><h2>Node.js Connector</h2>
<p>TDengine also provides a node.js connector package that is installable through <a href="https://www.npmjs.com/">npm</a>. The package is also in our source code at <em>src/connector/nodejs/</em>. The following instructions are also available <a href="https://github.com/taosdata/tdengine/tree/master/src/connector/nodejs">here</a></p>
<p>To get started, just type in the following to install the connector through <a href="https://www.npmjs.com/">npm</a>.</p>
<pre><code class="cmd language-cmd">npm install td-connector</code></pre>
<p>It is highly suggested you use npm. If you don't have it installed, you can also just copy the nodejs folder from <em>src/connector/nodejs/</em> into your node project folder.</p>
<p>To interact with TDengine, we make use of the <a href="https://github.com/nodejs/node-gyp">node-gyp</a> library. To install it, you will need to install the following, depending on your platform (the instructions below are quoted from node-gyp):</p>
<a class='anchor' id='On-Unix'></a><h3>On Unix</h3>
<ul>
<li><code>python</code> (<code>v2.7</code> recommended, <code>v3.x.x</code> is <strong>not</strong> supported)</li>
<li><code>make</code></li>
<li>A proper C/C++ compiler toolchain, like <a href="https://gcc.gnu.org">GCC</a></li>
</ul>
<a class='anchor' id='On-macOS'></a><h3>On macOS</h3>
<ul>
<li><p><code>python</code> (<code>v2.7</code> recommended, <code>v3.x.x</code> is <strong>not</strong> supported) (already installed on macOS)</p></li>
<li><p>Xcode</p></li>
<li><p>You also need to install the <code>Command Line Tools</code> via Xcode. You can find this under the menu <code>Xcode -&gt; Preferences -&gt; Locations</code> (or by running <code>xcode-select --install</code> in your Terminal).</p>
<ul>
<li>This step will install <code>gcc</code> and the related toolchain containing <code>make</code></li></ul></li>
</ul>
<a class='anchor' id='On-Windows'></a><h3>On Windows</h3>
<h4>Option 1</h4>
<p>Install all the required tools and configurations using Microsoft's <a href="https://github.com/felixrieseberg/windows-build-tools">windows-build-tools</a> using <code>npm install --global --production windows-build-tools</code> from an elevated PowerShell or CMD.exe (run as Administrator).</p>
<h4>Option 2</h4>
<p>Install tools and configuration manually:</p>
<ul>
<li>Install Visual C++ Build Environment: <a href="https://visualstudio.microsoft.com/thank-you-downloading-visual-studio/?sku=BuildTools">Visual Studio Build Tools</a> (using "Visual C++ build tools" workload) or <a href="https://visualstudio.microsoft.com/pl/thank-you-downloading-visual-studio/?sku=Community">Visual Studio 2017 Community</a> (using the "Desktop development with C++" workload)</li>
<li>Install <a href="https://www.python.org/downloads/">Python 2.7</a> (<code>v3.x.x</code> is not supported), and run <code>npm config set python python2.7</code> (or see below for further instructions on specifying the proper Python version and path.)</li>
<li>Launch cmd, <code>npm config set msvs_version 2017</code></li>
</ul>
<p>If the above steps didn't work for you, please visit <a href="https://github.com/Microsoft/nodejs-guidelines/blob/master/windows-environment.md#compiling-native-addon-modules">Microsoft's Node.js Guidelines for Windows</a> for additional tips.</p>
<p>To target native ARM64 Node.js on Windows 10 on ARM, add the components "Visual C++ compilers and libraries for ARM64" and "Visual C++ ATL for ARM64".</p>
<a class='anchor' id='Usage'></a><h3>Usage</h3>
<p>The following is a short summary of the basic usage of the connector; the full API and documentation can be found <a href="http://docs.taosdata.com/node">here</a>.</p>
<h4>Connection</h4>
<p>To use the connector, first require the library <code>td-connector</code>. Running the function <code>taos.connect</code> with the connection options passed in as an object will return a TDengine connection object. The required connection option is <code>host</code>; other options, if not set, will take the default values shown below.</p>
<p>A cursor also needs to be initialized in order to interact with TDengine from Node.js.</p>
<pre><code class="javascript language-javascript">const taos = require('td-connector');
var conn = taos.connect({host:"127.0.0.1", user:"root", password:"taosdata", config:"/etc/taos",port:0})
var cursor = conn.cursor(); // Initializing a new cursor</code></pre>
<p>To close a connection, run</p>
<pre><code class="javascript language-javascript">conn.close();</code></pre>
<h4>Queries</h4>
<p>We can now start executing simple queries through the <code>cursor.query</code> function, which returns a TaosQuery object.</p>
<pre><code class="javascript language-javascript">var query = cursor.query('show databases;')</code></pre>
<p>We can get the results of the queries through the <code>query.execute()</code> function, which returns a promise that resolves with a TaosResult object, which contains the raw data and additional functionalities such as pretty printing the results.</p>
<pre><code class="javascript language-javascript">var promise = query.execute();
promise.then(function(result) {
result.pretty(); //logs the results to the console as if you were in the taos shell
});</code></pre>
<p>You can also bind parameters to a query by filling in the question marks in the query string, as shown below. The query will automatically parse what was bound and convert it to the proper format for use with TDengine.</p>
<pre><code class="javascript language-javascript">var query = cursor.query('select * from meterinfo.meters where ts &lt;= ? and areaid = ?;').bind(new Date(), 5);
query.execute().then(function(result) {
result.pretty();
})</code></pre>
<p>The TaosQuery object can also be immediately executed upon creation by passing true as the second argument, returning a promise instead of a TaosQuery.</p>
<pre><code class="javascript language-javascript">var promise = cursor.query('select * from meterinfo.meters where v1 = 30;', true)
promise.then(function(result) {
result.pretty();
})</code></pre>
<h4>Async functionality</h4>
<p>Async queries can be performed using the same functions such as <code>cursor.execute</code>, <code>cursor.query</code>, but now with <code>_a</code> appended to them.</p>
<p>Say you want to execute two async queries on two separate tables. Using <code>cursor.query_a</code>, you can get a TaosQuery object which, upon executing with the <code>execute_a</code> function, returns a promise that resolves with a TaosResult object.</p>
<pre><code class="javascript language-javascript">var promise1 = cursor.query_a('select count(*), avg(v1), avg(v2) from meter1;').execute_a()
var promise2 = cursor.query_a('select count(*), avg(v1), avg(v2) from meter2;').execute_a();
promise1.then(function(result) {
result.pretty();
})
promise2.then(function(result) {
result.pretty();
})</code></pre>
<h3>Example</h3>
<p>An example of using the NodeJS connector to create a table with weather data and create and execute queries can be found <a href="https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example.js">here</a> (The preferred method for using the connector)</p>
<p>An example of using the NodeJS connector to achieve the same things but without all the object wrappers that wrap around the data returned to achieve higher functionality can be found <a href="https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example-raw.js">here</a></p><a href='../index.html'>Back</a></section></main></div><?php include($s.'/footer.php'); ?><script>$('pre').addClass('prettyprint linenums');PR.prettyPrint()</script><script src='lib/docs/liner.js'></script></body></html>

View File

@ -1,20 +0,0 @@
<!DOCTYPE html><html lang='en'><head><title>Documentation | Taos Data</title><meta name='description' content='TDengine is an open-source big data platform for IoT. Along with a 10x faster time-series database, it provides caching, stream computing, message queuing, and other functionalities. It is designed and optimized for Internet of Things, Connected Cars, and Industrial IoT. Read the documentation for TDengine here to get started right away.'><meta name='keywords' content='TDengine, Big Data, Open Source, IoT, Connected Cars, Industrial IoT, time-series database, caching, stream computing, message queuing, IT infrastructure monitoring, application performance monitoring, Internet of Things,TAOS Data, Documentation, programming, coding, syntax, frequently asked questions, questions, faq'><meta name='title' content='Documentation | Taos Data'><meta property='og:site_name' content='Taos Data'/><meta property='og:title' content='Documentation | Taos Data'/><meta property='og:type' content='article'/><meta property='og:url' content='https://www.taosdata.com/en/documentation/contributor_license_agreement/index.php'/><meta property='og:description' content='TDengine is an open-source big data platform for IoT. Along with a 10x faster time-series database, it provides caching, stream computing, message queuing, and other functionalities. It is designed and optimized for Internet of Things, Connected Cars, and Industrial IoT. Read the documentation for TDengine here to get started right away.' /><link rel='canonical' href='https://www.taosdata.com/en/documentation/contributor_license_agreement/index.php'/><script src='../lib/jquery-3.4.1.min.js' type='application/javascript'></script><link href='../lib/bootstrap.min.css' rel='stylesheet'><link href='../styles/base.min.css' rel='stylesheet'><link rel='stylesheet' href='../lib/docs/taosdataprettify.css'><link rel='stylesheet' href='../lib/docs/docs.css'><script src='../lib/docs/prettify.js'></script><script src='../lib/docs/prettyprint-sql.js'></script></head><body><script>$('#documentation-href').addClass('active')</script><div class='container-fluid'><main class='content-wrapper'><section class='documentation'><a href='../index.html'>Back</a><h1>TaosData Contributor License Agreement</h1>
<p>This TaosData Contributor License Agreement (CLA) applies to any contribution you make to any TaosData projects. If you are representing your employing organization to sign this agreement, please warrant that you have the authority to grant the agreement.</p>
<a class='anchor' id='Terms'></a><h2>Terms</h2>
<p><strong>"TaosData"</strong>, <strong>"we"</strong>, <strong>"our"</strong> and <strong>"us"</strong> means TaosData, inc.</p>
<p><strong>"You"</strong> and <strong>"your"</strong> means you or the organization you are on behalf of to sign this agreement.</p>
<p><strong>"Contribution"</strong> means any original work you, or the organization you represent submit to TaosData for any project in any manner.</p>
<a class='anchor' id='Copyright-License'></a><h2>Copyright License</h2>
<p>All rights of your Contribution submitted to TaosData in any manner are granted to TaosData and recipients of software distributed by TaosData. You waive any rights that may affect our ownership of the copyright and grant to us a perpetual, worldwide, transferable, non-exclusive, no-charge, royalty-free, irrevocable, and sublicensable license to use, reproduce, prepare derivative works of, publicly display, publicly perform, sublicense, and distribute Contributions and any derivative work created based on a Contribution.</p>
<a class='anchor' id='Patent-License'></a><h2>Patent License</h2>
<p>With respect to any patents you own or that you can license without payment to any third party, you grant to us and to any recipient of software distributed by us, a perpetual, worldwide, transferable, non-exclusive, no-charge, royalty-free, irrevocable patent license to make, have made, use, sell, offer to sell, import, and otherwise transfer the Contribution in whole or in part, alone or included in any product under any patent you own, or license from a third party, that is necessarily infringed by the Contribution or by combination of the Contribution with any Work.</p>
<a class='anchor' id='Your-Representations-and-Warranties'></a><h2>Your Representations and Warranties</h2>
<p>You represent and warrant that:</p>
<ul>
<li><p>the Contribution you submit is an original work that you can legally grant the rights set out in this agreement.</p></li>
<li><p>the Contribution you submit and licenses you granted does not and will not, infringe the rights of any third party.</p></li>
<li><p>you are not aware of any pending or threatened claims, suits, actions, or charges pertaining to the contributions. You also warrant to notify TaosData immediately if you become aware of any such actual or potential claims, suits, actions, allegations or charges.</p></li>
</ul>
<a class='anchor' id='Support'></a><h2>Support</h2>
<p>You are not obligated to support your Contribution unless you volunteer to provide support. If you want, you may provide support for a fee.</p>
<p><strong>I agree and accept on behalf of myself and behalf of my organization:</strong></p><a href='../index.html'>Back</a></section></main></div><?php include($s.'/footer.php'); ?><script>$('pre').addClass('prettyprint linenums');PR.prettyPrint()</script><script src='lib/docs/liner.js'></script></body></html>

View File

@ -1,129 +0,0 @@
<!DOCTYPE html><html lang='en'><head><title>Documentation | Taos Data</title><meta name='description' content='TDengine is an open-source big data platform for IoT. Along with a 10x faster time-series database, it provides caching, stream computing, message queuing, and other functionalities. It is designed and optimized for Internet of Things, Connected Cars, and Industrial IoT. Read the documentation for TDengine here to get started right away.'><meta name='keywords' content='TDengine, Big Data, Open Source, IoT, Connected Cars, Industrial IoT, time-series database, caching, stream computing, message queuing, IT infrastructure monitoring, application performance monitoring, Internet of Things,TAOS Data, Documentation, programming, coding, syntax, frequently asked questions, questions, faq'><meta name='title' content='Documentation | Taos Data'><meta property='og:site_name' content='Taos Data'/><meta property='og:title' content='Documentation | Taos Data'/><meta property='og:type' content='article'/><meta property='og:url' content='https://www.taosdata.com/en/documentation/data-model-and-architecture/index.php'/><meta property='og:description' content='TDengine is an open-source big data platform for IoT. Along with a 10x faster time-series database, it provides caching, stream computing, message queuing, and other functionalities. It is designed and optimized for Internet of Things, Connected Cars, and Industrial IoT. Read the documentation for TDengine here to get started right away.' /><link rel='canonical' href='https://www.taosdata.com/en/documentation/data-model-and-architecture/index.php'/><script src='../lib/jquery-3.4.1.min.js' type='application/javascript'></script><link href='../lib/bootstrap.min.css' rel='stylesheet'><link href='../styles/base.min.css' rel='stylesheet'><link rel='stylesheet' href='../lib/docs/taosdataprettify.css'><link rel='stylesheet' href='../lib/docs/docs.css'><script src='../lib/docs/prettify.js'></script><script src='../lib/docs/prettyprint-sql.js'></script></head><body><script>$('#documentation-href').addClass('active')</script><div class='container-fluid'><main class='content-wrapper'><section class='documentation'><a href='../index.html'>Back</a><h1>Data Model and Architecture</h1>
<a class='anchor' id='Data-Model'></a><h2>Data Model</h2>
<a class='anchor' id='A-Typical-IoT-Scenario'></a><h3>A Typical IoT Scenario</h3>
<p>In a typical IoT scenario, there are many types of devices. Each device is collecting one or multiple metrics. For a specific type of device, the collected data looks like the table below: </p>
<figure><table>
<thead>
<tr>
<th style="text-align:center;">Device ID</th>
<th style="text-align:center;">Time Stamp</th>
<th style="text-align:center;">Value 1</th>
<th style="text-align:center;">Value 2</th>
<th style="text-align:center;">Value 3</th>
<th style="text-align:center;">Tag 1</th>
<th style="text-align:center;">Tag 2</th>
</tr>
</thead>
<tbody>
<tr>
<td style="text-align:center;">D1001</td>
<td style="text-align:center;">1538548685000</td>
<td style="text-align:center;">10.3</td>
<td style="text-align:center;">219</td>
<td style="text-align:center;">0.31</td>
<td style="text-align:center;">Red</td>
<td style="text-align:center;">Tesla</td>
</tr>
<tr>
<td style="text-align:center;">D1002</td>
<td style="text-align:center;">1538548684000</td>
<td style="text-align:center;">10.2</td>
<td style="text-align:center;">220</td>
<td style="text-align:center;">0.23</td>
<td style="text-align:center;">Blue</td>
<td style="text-align:center;">BMW</td>
</tr>
<tr>
<td style="text-align:center;">D1003</td>
<td style="text-align:center;">1538548686500</td>
<td style="text-align:center;">11.5</td>
<td style="text-align:center;">221</td>
<td style="text-align:center;">0.35</td>
<td style="text-align:center;">Black</td>
<td style="text-align:center;">Honda</td>
</tr>
<tr>
<td style="text-align:center;">D1004</td>
<td style="text-align:center;">1538548685500</td>
<td style="text-align:center;">13.4</td>
<td style="text-align:center;">223</td>
<td style="text-align:center;">0.29</td>
<td style="text-align:center;">Red</td>
<td style="text-align:center;">Volvo</td>
</tr>
<tr>
<td style="text-align:center;">D1001</td>
<td style="text-align:center;">1538548695000</td>
<td style="text-align:center;">12.6</td>
<td style="text-align:center;">218</td>
<td style="text-align:center;">0.33</td>
<td style="text-align:center;">Red</td>
<td style="text-align:center;">Tesla</td>
</tr>
<tr>
<td style="text-align:center;">D1004</td>
<td style="text-align:center;">1538548696600</td>
<td style="text-align:center;">11.8</td>
<td style="text-align:center;">221</td>
<td style="text-align:center;">0.28</td>
<td style="text-align:center;">Black</td>
<td style="text-align:center;">Honda</td>
</tr>
</tbody>
</table></figure>
<p>Each data record has a device ID, a timestamp, the collected metrics, and static tags associated with the device. Each device generates a data record on a pre-defined timer or when triggered by an event. It is a sequence of data points, like a stream.</p>
<a class='anchor' id='Data-Characteristics'></a><h3>Data Characteristics</h3>
<p>Being a series of data points over time, data points generated by devices, sensors, servers, or applications have strong common characteristics. </p>
<ol>
<li>metrics are always structured data;</li>
<li>there are rarely delete/update operations on collected data;</li>
<li>there is only a single data source for one device or sensor;</li>
<li>the read/write ratio is much lower than in typical Internet applications;</li>
<li>users pay attention to the trend of the data, not the specific value at a specific time;</li>
<li>there is always a data retention policy;</li>
<li>data queries are always executed over a given time range and a subset of devices;</li>
<li>real-time aggregation or analytics is mandatory;</li>
<li>traffic is predictable based on the number of devices and the sampling frequency;</li>
<li>data volume is huge; a system may generate 10 billion data points in a day.</li>
</ol>
<p>By utilizing the above characteristics, TDengine designs the storage and computing engine in a special and optimized way for time-series data. The system efficiency is improved significantly. </p>
<a class='anchor' id='Relational-Database-Model'></a><h3>Relational Database Model</h3>
<p>Since time-series data is more likely to be structured data, TDengine adopts the traditional relational database model to process it. You need to create a database, create tables with schema definitions, then insert data points and execute queries to explore the data. Standard SQL is used, so there is no learning curve.</p>
<a class='anchor' id='One-Table-for-One-Device'></a><h3>One Table for One Device</h3>
<p>Due to different network latencies, the data points from different devices may arrive at the server out of order. But for the same device, data points will arrive at the server in order if the system is designed well. To utilize this special feature, TDengine requires the user to create a table for each device (time-stream). For example, if there are over 10,000 smart meters, 10,000 tables shall be created. For the table above, 4 tables shall be created for devices D1001, D1002, D1003, and D1004 to store the data collected.</p>
<p>This strong requirement guarantees that the data points from a device are saved in a continuous memory/hard disk space block by block. If queries are applied to only one device in a time range, this design reduces read latency significantly, since a whole block is owned by one single device. Write latency is also significantly reduced: data points generated by the same device arrive in order, so a new data point is simply appended to a block. Cache block size and the number of rows of records in a file block can be configured to fit the scenarios.</p>
<a class='anchor' id='Best-Practices'></a><h3>Best Practices</h3>
<p><strong>Table</strong>: TDengine suggests using the device ID as the table name (like D1001 in the above diagram). Each device may collect one or more metrics (like value1, value2, value3 in the diagram). Each metric has a column in the table, and the metric name can be used as the column name. The data type for a column can be int, float, double, tinyint, bigint, bool or binary. Sometimes a device has multiple metric groups, each with a different sampling period; in that case you should create a separate table for each group of each device. The first column in the table must be a timestamp. TDengine uses the timestamp as the index and won't build an index on any of the stored metrics.</p>
<p><strong>Tags:</strong> to support aggregation over multiple tables efficiently, <a href="../super-table">STable(Super Table)</a> concept is introduced by TDengine. A STable is used to represent the same type of device. The schema is used to define the collected metrics(like value1, value2, value3 in the diagram), and tags are used to define the static attributes for each table or device(like tag1, tag2 in the diagram). A table is created via STable with a specific tag value. All or a subset of tables in a STable can be aggregated by filtering tag values. </p>
<p><strong>Database:</strong> different types of devices may generate data points in different patterns and shall be processed differently. For example, the sampling frequency, data retention policy, replication number, cache size, record size, and compression algorithm may differ. To make the system more efficient, TDengine suggests creating a different database with unique configurations for each scenario.</p>
<p><strong>Schemaless vs Schema:</strong> compared with NoSQL databases, since a table with a schema definition must be created before data points can be inserted, flexibility is limited, especially when the schema has to be changed. But in most IoT scenarios, the schema is well defined and rarely changes, so the loss of flexibility won't be a big pain for developers or administrators. When the schema does have to be changed, TDengine allows the application to change it in a second, even when there is a huge amount of historical data.</p>
<p>TDengine does not impose a limitation on the number of tables, <a href="../super-table">STables</a>, or databases. You can create any number of STable or databases to fit the scenarios. </p>
<a class='anchor' id='Architecture'></a><h2>Architecture</h2>
<p>There are two main modules in TDengine server as shown in Picture 1: <strong>Management Module (MGMT)</strong> and <strong>Data Module(DNODE)</strong>. The whole TDengine architecture also includes a <strong>TDengine Client Module</strong>.</p>
<p><center> <img src="../assets/structure.png"> </center>
<center> Picture 1 TDengine Architecture </center></p>
<a class='anchor' id='MGMT-Module'></a><h3>MGMT Module</h3>
<p>The MGMT module deals with the storage and querying of metadata, which includes information about users, databases, and tables. Applications connect to the MGMT module first when connecting to the TDengine server. When creating/dropping databases/tables, the request is sent to the MGMT module first to create/delete the metadata. Then the MGMT module sends requests to the data module to allocate/free the required resources. In the case of writing or querying, applications still need to visit the MGMT module to get the metadata, according to which they then access the DNODE module.</p>
<a class='anchor' id='DNODE-Module'></a><h3>DNODE Module</h3>
<p>The DNODE module is responsible for storing and querying data. For the sake of future scaling and highly efficient resource usage, TDengine applies virtualization to the resources it uses. TDengine introduces the concept of the virtual node (vnode), which is the unit of storage, resource allocation and data replication (enterprise edition). As shown in Picture 2, TDengine treats each data node as an aggregation of vnodes.</p>
<p>When a DB is created, the system allocates a vnode. Each vnode contains multiple tables, but a table belongs to only one vnode. Each DB has one or more vnodes, but one vnode belongs to only one DB. Each vnode contains all the data in a set of tables. Each vnode has its own cache and its own directory to store data. Resources of different vnodes are exclusive to each other, whether cache or file directory; however, resources within the same vnode are shared among all the tables in it. Through virtualization, TDengine can distribute resources reasonably to each vnode and improve resource usage and concurrency. The number of vnodes on a dnode is configurable according to its hardware resources.</p>
<p><center> <img src="../assets/vnode.png"> </center>
<center> Picture 2 TDengine Virtualization </center> </p>
<a class='anchor' id='Client-Module'></a><h3>Client Module</h3>
<p>TDengine client module accepts requests (mainly in SQL form) from applications and converts the requests to internal representations and sends to the server side. TDengine supports multiple interfaces, which are all built on top of TDengine client module. </p>
<p>For the communication between the client and the MGMT module, TCP/UDP is used; the port is set by the parameter mgmtShellPort in the system configuration file taos.cfg, and the default is 6030. For the communication between the client and the DNODE module, TCP/UDP is used; the port is set by the parameter vnodeShellPort in the system configuration file, and the default is 6035.</p>
<a class='anchor' id='Writing-Process'></a><h2>Writing Process</h2>
<p>Picture 3 shows the full writing process of TDengine. TDengine uses the <a href="WAL">Write Ahead Log</a> strategy to assure data security and integrity. Data received from the client is written to the commit log first. When TDengine recovers from a crash caused by power loss or other situations, the commit log is used to recover data. After writing to the commit log, data is written to the corresponding vnode cache, and then an acknowledgment is sent to the application. There are two mechanisms that can flush data in the cache to disk for persistent storage:</p>
<ol>
<li><strong>Flush driven by timer</strong>: There is a backend timer which flushes data in cache periodically to disks. The period is configurable via parameter commitTime in system configuration file taos.cfg.</li>
<li><strong>Flush driven by data</strong>: Data in the cache is also flushed to disks when the remaining buffer size falls below a threshold. A data-driven flush resets the timer of the timer-driven flush.</li>
</ol>
<p><center> <img src="../assets/write_process.png"> </center>
<center> Picture 3 TDengine Writing Process </center></p>
<p>A new commit log file is opened when the committing process begins. When the committing process finishes, the old commit log file is removed.</p>
<a class='anchor' id='Data-Storage'></a><h2>Data Storage</h2>
<p>TDengine data are saved in the <em>/var/lib/taos</em> directory by default. This can be changed to another directory by setting the parameter dataDir in the system configuration file taos.cfg.</p>
<p>TDengine's metadata includes the database, table, user, super table and tag information. To reduce the latency, metadata are all buffered in the cache.</p>
<p>Data records saved in tables are sharded according to the time range. Data of tables in the same vnode in a certain time range are saved in the same file group. This sharding strategy can effectively improve data search speed. By default, one group of files contains data for 10 days, which can be configured by <em>daysPerFile</em> in the configuration file or by the <em>DAYS</em> keyword in the <em>CREATE DATABASE</em> clause.</p>
<p>Data records are removed automatically once their lifetime is passed. The lifetime is configurable via parameter daysToKeep in the system configuration file. The default value is 3650 days. </p>
<p>Data in files are stored in blocks. A data block contains only one table's data. Records in the same data block are sorted by the primary timestamp. To improve the compression ratio, records are stored column by column, and different compression algorithms are applied based on each column's data type.</p><a href='../index.html'>Back</a></section></main></div><?php include($s.'/footer.php'); ?><script>$('pre').addClass('prettyprint linenums');PR.prettyPrint()</script><script src='lib/docs/liner.js'></script></body></html>

View File

@ -1,27 +0,0 @@
<!DOCTYPE html><html lang='en'><head><title>Documentation | Taos Data</title><meta name='description' content='TDengine is an open-source big data platform for IoT. Along with a 10x faster time-series database, it provides caching, stream computing, message queuing, and other functionalities. It is designed and optimized for Internet of Things, Connected Cars, and Industrial IoT. Read the documentation for TDengine here to get started right away.'><meta name='keywords' content='TDengine, Big Data, Open Source, IoT, Connected Cars, Industrial IoT, time-series database, caching, stream computing, message queuing, IT infrastructure monitoring, application performance monitoring, Internet of Things,TAOS Data, Documentation, programming, coding, syntax, frequently asked questions, questions, faq'><meta name='title' content='Documentation | Taos Data'><meta property='og:site_name' content='Taos Data'/><meta property='og:title' content='Documentation | Taos Data'/><meta property='og:type' content='article'/><meta property='og:url' content='https://www.taosdata.com/en/documentation/faq/index.php'/><meta property='og:description' content='TDengine is an open-source big data platform for IoT. Along with a 10x faster time-series database, it provides caching, stream computing, message queuing, and other functionalities. It is designed and optimized for Internet of Things, Connected Cars, and Industrial IoT. Read the documentation for TDengine here to get started right away.' /><link rel='canonical' href='https://www.taosdata.com/en/documentation/faq/index.php'/><script src='../lib/jquery-3.4.1.min.js' type='application/javascript'></script><link href='../lib/bootstrap.min.css' rel='stylesheet'><link href='../styles/base.min.css' rel='stylesheet'><link rel='stylesheet' href='../lib/docs/taosdataprettify.css'><link rel='stylesheet' href='../lib/docs/docs.css'><script src='../lib/docs/prettify.js'></script><script src='../lib/docs/prettyprint-sql.js'></script></head><body><script>$('#documentation-href').addClass('active')</script><div class='container-fluid'><main class='content-wrapper'><section class='documentation'><a href='../index.html'>Back</a><h1>FAQ</h1>
<h4>1. What can I do when I encounter the error "failed to connect to server"?</h4>
<p>The client may encounter connection errors. Please follow the steps below for troubleshooting:</p>
<ol>
<li>On the server side, execute <code>systemctl status taosd</code> to check the status of <em>taosd</em> service. If <em>taosd</em> is not running, start it and retry connecting.</li>
<li>Make sure you have used the correct server IP address to connect to.</li>
<li>Ping the server. If no response is received, check your network connection.</li>
<li>Check the firewall setting, make sure the TCP/UDP ports from 6030-6039 are enabled.</li>
<li>For JDBC, ODBC, Python, and Go connections on Linux, make sure the native library <em>libtaos.so</em> is located at /usr/local/lib/taos, and that /usr/local/lib/taos is in the <em>LD_LIBRARY_PATH</em>.</li>
<li>For JDBC, ODBC, Python, Go connections on Windows, make sure <em>driver/c/taos.dll</em> is in the system search path (or you can copy taos.dll into <em>C:\Windows\System32</em>)</li>
<li>If the above steps do not help, try the network diagnostic tool <em>nc</em> to check whether the TCP/UDP ports work:
check UDP port: <code>nc -vuz {hostIP} {port}</code>
check TCP port on server: <code>nc -l {port}</code>
check TCP port on client: <code>nc {hostIP} {port}</code></li>
</ol>
<h4>2. Why do I get an "Invalid SQL" error when a query is syntactically correct?</h4>
<p>If you are sure your query has correct syntax, please check the length of the SQL string; it must be less than 64KB.</p>
<h4>3. Why can't I delete a super table?</h4>
<p>Please make sure there are no tables under the super table. You cannot delete a super table that still has associated tables.</p>
<h4>4. Does TDengine support validation queries?</h4>
<p>For the time being, TDengine does not have a specific set of validation queries. However, TDengine comes with a system monitoring database named 'sys', which can usually be used as a validation query object. </p>
<h4>5. Can I delete or update a record that has been written into TDengine?</h4>
<p>The answer is NO. The design of TDengine is based on the assumption that records are generated by connected devices; you are not allowed to change them. But TDengine provides a retention policy: data records are removed once their lifetime has passed.</p>
<h4>6. How do I create a table with more than 250 columns?</h4>
<p>For a single table, the maximum number of columns is 250. If, for some reason, 250 columns are still not enough, our suggestion is to split the huge table into several smaller ones.</p>
<h4>7. What is the most efficient way to write data to TDengine?</h4>
<p>TDengine supports several different writing regimes. The most efficient way to write data to TDengine is to use batch inserting. For details on batch insertion syntax, please refer to <a href="../documentation/taos-sql">Taos SQL</a></p><a href='../index.html'>Back</a></section></main></div><?php include($s.'/footer.php'); ?><script>$('pre').addClass('prettyprint linenums');PR.prettyPrint()</script><script src='lib/docs/liner.js'></script></body></html>

View File

@ -1,88 +0,0 @@
<!DOCTYPE html><html lang='en'><head><title>Documentation | Taos Data</title><meta name='description' content='TDengine is an open-source big data platform for IoT. Along with a 10x faster time-series database, it provides caching, stream computing, message queuing, and other functionalities. It is designed and optimized for Internet of Things, Connected Cars, and Industrial IoT. Read the documentation for TDengine here to get started right away.'><meta name='keywords' content='TDengine, Big Data, Open Source, IoT, Connected Cars, Industrial IoT, time-series database, caching, stream computing, message queuing, IT infrastructure monitoring, application performance monitoring, Internet of Things,TAOS Data, Documentation, programming, coding, syntax, frequently asked questions, questions, faq'><meta name='title' content='Documentation | Taos Data'><meta property='og:site_name' content='Taos Data'/><meta property='og:title' content='Documentation | Taos Data'/><meta property='og:type' content='article'/><meta property='og:url' content='https://www.taosdata.com/en/documentation/getting-started/index.php'/><meta property='og:description' content='TDengine is an open-source big data platform for IoT. Along with a 10x faster time-series database, it provides caching, stream computing, message queuing, and other functionalities. It is designed and optimized for Internet of Things, Connected Cars, and Industrial IoT. Read the documentation for TDengine here to get started right away.' /><link rel='canonical' href='https://www.taosdata.com/en/documentation/getting-started/index.php'/><script src='../lib/jquery-3.4.1.min.js' type='application/javascript'></script><link href='../lib/bootstrap.min.css' rel='stylesheet'><link href='../styles/base.min.css' rel='stylesheet'><link rel='stylesheet' href='../lib/docs/taosdataprettify.css'><link rel='stylesheet' href='../lib/docs/docs.css'><script src='../lib/docs/prettify.js'></script><script src='../lib/docs/prettyprint-sql.js'></script></head><body><script>$('#documentation-href').addClass('active')</script><div class='container-fluid'><main class='content-wrapper'><section class='documentation'><a href='../index.html'>Back</a><h1>Getting Started</h1>
<a class='anchor' id='Quick-Start'></a><h2>Quick Start</h2>
<p>At the moment, TDengine only runs on Linux. You can set up and install it either from the <a href='#Install-from-Source'>source code</a> or the <a href='#Install-from-Package'>packages</a>. It takes only a few seconds to go from download to a successful run.</p>
<a class='anchor' id='Install-from-Source'></a><h3>Install from Source</h3>
<p>Please visit our <a href="https://github.com/taosdata/TDengine">github page</a> for instructions on installation from the source code.</p>
<a class='anchor' id='Install-from-Package'></a><h3>Install from Package</h3>
<p>Three different packages are provided; please pick the one you like.</p>
<ul id='packageList'>
<li><a id='tdengine-rpm' style='color:var(--b2)'>TDengine RPM package (1.5M)</a></li>
<li><a id='tdengine-deb' style='color:var(--b2)'>TDengine DEB package (1.7M)</a></li>
<li><a id='tdengine-tar' style='color:var(--b2)'>TDengine Tarball (3.0M)</a></li>
</ul>
<p>For the time being, TDengine only supports installation on Linux systems using <a href="https://en.wikipedia.org/wiki/Systemd"><code>systemd</code></a> as the service manager. To check if your system has <em>systemd</em>, use the <em>which</em> command.</p>
<pre><code class="cmd language-cmd">which systemd</code></pre>
<p>If the <code>systemd</code> command is not found, please <a href="#Install-from-Source">install from source code</a>. </p>
<a class='anchor' id='Running-TDengine'></a><h3>Running TDengine</h3>
<p>After installation, start the TDengine service by the <code>systemctl</code> command.</p>
<pre><code class="cmd language-cmd">systemctl start taosd</code></pre>
<p>Then check if the server is working now.</p>
<pre><code class="cmd language-cmd">systemctl status taosd</code></pre>
<p>If the service is running, you can interact with TDengine through its shell <code>taos</code>, the command-line interface tool located at /usr/local/bin/taos. </p>
<p><strong>Note: The <em>systemctl</em> command needs the root privilege. Use <em>sudo</em> if you are not the <em>root</em> user.</strong></p>
<a class='anchor' id='TDengine-Shell'></a><h2>TDengine Shell</h2>
<p>To launch TDengine shell, the command line interface, in a Linux terminal, type:</p>
<pre><code class="cmd language-cmd">taos</code></pre>
<p>The welcome message is printed if the shell connects to the TDengine server successfully; otherwise, an error message will be printed (refer to our <a href="../faq">FAQ</a> page for troubleshooting the connection error). The TDengine shell prompt is: </p>
<pre><code class="cmd language-cmd">taos&gt;</code></pre>
<p>In the TDengine shell, you can create databases, create tables and insert/query data with SQL. Each query command ends with a semicolon. It works like MySQL, for example:</p>
<pre><code class="mysql language-mysql">create database db;
use db;
create table t (ts timestamp, speed int);
insert into t values ('2019-07-15 10:00:00', 10);
insert into t values ('2019-07-15 10:01:05', 20);
select * from t;
ts | speed |
===================================
19-07-15 10:00:00.000| 10|
19-07-15 10:01:05.000| 20|
Query OK, 2 row(s) in set (0.001700s)</code></pre>
<p>Besides the SQL commands, the system administrator can check system status, add or delete accounts, and manage the servers.</p>
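<p>For instance, a few common administrative commands (a sketch; the available commands depend on the user's privileges):</p>
<pre><code class="mysql language-mysql">show databases;
show users;
show connections;</code></pre>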
<a class='anchor' id='Shell-Command-Line-Parameters'></a><h3>Shell Command Line Parameters</h3>
<p>You can run <code>taos</code> command with command line options to fit your needs. Some frequently used options are listed below:</p>
<ul>
<li>-c, --config-dir: set the configuration directory. It is <em>/etc/taos</em> by default</li>
<li>-h, --host: set the IP address of the server to connect to. Default is localhost</li>
<li>-s, --commands: set the command to run without entering the shell</li>
<li>-u, --user: user name to connect to the server. Default is root</li>
<li>-p, --password: password. Default is 'taosdata'</li>
<li>-?, --help: get a full list of supported options </li>
</ul>
<p>Examples:</p>
<pre><code class="cmd language-cmd">taos -h 192.168.0.1 -s "use db; show tables;"</code></pre>
<a class='anchor' id='Run-Batch-Commands'></a><h3>Run Batch Commands</h3>
<p>Inside TDengine shell, you can run batch commands in a file with <em>source</em> command.</p>
<pre><code>taos&gt; source &lt;filename&gt;;</code></pre>
<a class='anchor' id='Tips'></a><h3>Tips</h3>
<ul>
<li>Use the up/down arrow keys to browse the command history</li>
<li>To change the default password, use the "<code>alter user</code>" command (see the sketch after this list)</li>
<li>Ctrl+C interrupts a running query</li>
<li>To clean the cached schema of tables or STables, execute the command <code>RESET QUERY CACHE</code></li>
</ul>
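<p>For instance (a sketch; the user name and new password here are placeholders):</p>
<pre><code class="mysql language-mysql">alter user root pass 'new_password';
reset query cache;</code></pre>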
<a class='anchor' id='Major-Features'></a><h2>Major Features</h2>
<p>The core functionality of TDengine is the time-series database. To reduce the development and management complexity, and to improve the system efficiency further, TDengine also provides caching, pub/sub messaging system, and stream computing functionalities. It provides a full stack for IoT big data platform. The detailed features are listed below:</p>
<ul>
<li><p>SQL like query language used to insert or explore data</p></li>
<li><p>C/C++, Java(JDBC), Python, Go, RESTful, and Node.JS interfaces for development</p></li>
<li><p>Ad hoc queries/analysis via Python/R/Matlab or TDengine shell</p></li>
<li><p>Continuous queries to support sliding-window based stream computing</p></li>
<li><p>Super table to aggregate multiple time-streams efficiently with flexibility </p></li>
<li><p>Aggregation over a time window on one or multiple time-streams</p></li>
<li><p>Built-in messaging system to support publisher/subscriber model</p></li>
<li><p>Built-in cache for each time stream to make latest data available as fast as light speed</p></li>
<li><p>Transparent handling of historical data and real-time data </p></li>
<li><p>Integrating with Telegraf, Grafana and other tools seamlessly </p></li>
<li><p>A set of tools or configuration to manage TDengine </p></li>
</ul>
<p>For enterprise edition, TDengine provides more advanced features below:</p>
<ul>
<li><p>Linear scalability to deliver higher capacity/throughput </p></li>
<li><p>High availability to guarantee the carrier-grade service </p></li>
<li><p>Built-in replication between nodes which may span multiple geographical sites </p></li>
<li><p>Multi-tier storage to make historical data management simpler and cost-effective</p></li>
<li><p>Web-based management tools and other tools to make maintenance simpler</p></li>
</ul>
<p>TDengine is specially designed and optimized for time-series data processing in IoT, connected cars, Industrial IoT, IT infrastructure and application monitoring, and other scenarios. Compared with other solutions, it is 10x faster on insert/query speed. With a single-core machine, over 20K requests can be processed, millions of data points can be ingested, and over 10 million data points can be retrieved in a second. Via column-based storage and compression algorithms tuned for different data types, less than 1/10 of the storage space is required. </p>
<a class='anchor' id='Explore-More-on-TDengine'></a><h2>Explore More on TDengine</h2>
<p>Please read through the whole <a href='../documentation'>documentation</a> to learn more about TDengine.</p><a href='../index.html'>Back</a></section></main></div><?php include($s.'/footer.php'); ?><script>$('pre').addClass('prettyprint linenums');PR.prettyPrint()</script><script src='lib/docs/liner.js'></script></body></html>

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -1,269 +0,0 @@
.documentation strong {
font-weight:600;
}
.documentation {
overflow:hidden;
margin-bottom: 10rem;
}
.documentation a {
font-size:1em;
text-decoration: none;
}
.documentation > a > h2 {
cursor:pointer;
color:var(--sg1);
}
.documentation > a >h2:hover {
color:var(--b2);
}
.documentation a:hover {
text-decoration: none;
}
.documentation pre {
margin-top: 7px;
margin-bottom: 7px;
overflow: auto;
-ms-overflow-style: scrollbar;
}
pre * {
font-family:monospace !important
}
.documentation a {
color:var(--b2);
padding-bottom: 2px;
position: relative;
font-style: normal;
cursor: pointer;
}
.documentation a:hover,a:focus {
text-decoration: none;
color:var(--b2);
}
.documentation a::before {
content: "";
left: 0;
background-color: var(--b2);
width: 0%;
height: 1px;
top:-webkit-calc(1em + 8px);
top:calc(1em + 8px);
position: absolute;
z-index: 2;
-webkit-transition: background-color 0.2s, height 0.2s, top 0.2s, width 0.2s;
-o-transition: background-color 0.2s, height 0.2s, top 0.2s, width 0.2s;
transition: background-color 0.2s, height 0.2s, top 0.2s, width 0.2s;
}
.documentation a:hover::before, .documentation a:focus::before {
content: "";
left: 0;
background-color: var(--b2);
width: 100%;
height: 1px;
top:-webkit-calc(1em + 8px);
top:calc(1em + 8px);
position: absolute;
z-index: 2;
-webkit-transition: background-color 0.2s, height 0.2s, top 0.2s, width 0.2s;
-o-transition: background-color 0.2s, height 0.2s, top 0.2s, width 0.2s;
transition: background-color 0.2s, height 0.2s, top 0.2s, width 0.2s;
text-decoration: none;
}
.documentation img {
width:100%;
max-width:640px;
margin-left: 50%;
-webkit-transform: translate(-50%,0);
-ms-transform: translate(-50%,0);
transform: translate(-50%,0);
}
h1,
h2,
h3,
h4,
h5,
h6 {
position: relative;
margin-bottom: 0.5rem;
font-weight: 500;
line-height: 1.4;
cursor: text;
}
h1:hover a.anchor,
h2:hover a.anchor,
h3:hover a.anchor,
h4:hover a.anchor,
h5:hover a.anchor,
h6:hover a.anchor {
text-decoration: none;
}
h1 tt,
h1 code {
font-size: inherit;
}
h2 tt,
h2 code {
font-size: inherit;
}
h3 tt,
h3 code {
font-size: inherit;
}
h4 tt,
h4 code {
font-size: inherit;
}
h5 tt,
h5 code {
font-size: inherit;
}
h6 tt,
h6 code {
font-size: inherit;
}
h1 {
font-size: 2.5rem;
line-height: 1.8;
}
h2 {
font-size: 1.7rem;
line-height: 1.8;
padding-left: 0.5em;
}
.documentation h2::before {
content:"";
height:1em;
display: block;
width:3px;
margin-left: -0.5em;
margin-top: 0.4em;
position: absolute;
background-color: var(--b1);
}
h3 {
font-size: 1.4rem;
line-height: 1.43;
}
h4 {
font-size: 1.25rem;
}
h5 {
font-size: 1rem;
}
h6 {
font-size: 1rem;
color: #777;
}
p {
margin-bottom:0.5rem;
font-size:1em;
margin-top:0;
font-weight:300;
}
ol,ul,dl {
margin-top:0;
margin-bottom: 1rem;
}
li p {
margin-bottom: 0;
}
blockquote,
table{
margin: 0.8em 0;
width:100%;
}
figure table{
overflow: scroll;
}
hr {
height: 2px;
padding: 0;
margin: 16px 0;
background-color: #e7e7e7;
border: 0 none;
overflow: hidden;
-webkit-box-sizing: content-box;
box-sizing: content-box;
}
li p.first {
display: inline-block;
}
ul,
ol {
padding-left: 30px;
}
ul:first-child,
ol:first-child {
margin-top: 0;
}
ul:last-child,
ol:last-child {
margin-bottom: 0;
}
blockquote {
border-left: 4px solid #dfe2e5;
padding: 0 15px;
color: #777777;
}
blockquote blockquote {
padding-right: 0;
}
table {
padding: 0;
word-break: initial;
}
table tr {
border-top: 1px solid #dfe2e5;
margin: 0;
padding: 0;
}
table tr:nth-child(2n),
thead {
background-color: #f8f8f8;
}
table tr th {
font-weight: bold;
border: 1px solid #dfe2e5;
border-bottom: 0;
text-align: left;
margin: 0;
padding: 6px 13px;
}
table tr td {
border: 1px solid #dfe2e5;
text-align: left;
margin: 0;
padding: 6px 13px;
}
table tr th:first-child,
table tr td:first-child {
margin-top: 0;
}
table tr th:last-child,
table tr td:last-child {
margin-bottom: 0;
}
h1 code,h2 code, h3 code, h4 code, h5 code, h6 code,
p code, li code, td code,
tt {
border: 1px solid #e7eaed;
-webkit-border-radius: 3px;
border-radius: 3px;
font-size: 0.9em;
color:var(--sg1);
font-family:monospace;
background-color: #f3f4f4;
padding: 0 2px 0 2px;
}
/*Tell prettyprinted code not to follow above*/
.prettyprint code{
border:none;
background-color:transparent;
font-size:inherit;
padding:0 1px 0 0px;
}

View File

@ -1,19 +0,0 @@
/*JS to determine how many lines are used in a pre/code block and set the CSS gutter class accordingly. MUST be placed after elements with the prettyprint class are loaded.*/
$('.prettyprint').toArray().forEach(function(element){
// Each rendered line is 25.2px tall, so the block height gives the line count.
let linenums = element.clientHeight / 25.2;
if (linenums > 99) {
// Three-digit line numbers need the widest gutter.
$(element).addClass('threec');
}
else if (linenums > 9) {
// Two-digit line numbers need a slightly wider gutter.
$(element).addClass('twoc');
}
});

View File

@ -1,46 +0,0 @@
!function(){/*
Copyright (C) 2006 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
"undefined"!==typeof window&&(window.PR_SHOULD_USE_CONTINUATION=!0);
(function(){function T(a){function d(e){var a=e.charCodeAt(0);if(92!==a)return a;var c=e.charAt(1);return(a=w[c])?a:"0"<=c&&"7">=c?parseInt(e.substring(1),8):"u"===c||"x"===c?parseInt(e.substring(2),16):e.charCodeAt(1)}function f(e){if(32>e)return(16>e?"\\x0":"\\x")+e.toString(16);e=String.fromCharCode(e);return"\\"===e||"-"===e||"]"===e||"^"===e?"\\"+e:e}function c(e){var c=e.substring(1,e.length-1).match(RegExp("\\\\u[0-9A-Fa-f]{4}|\\\\x[0-9A-Fa-f]{2}|\\\\[0-3][0-7]{0,2}|\\\\[0-7]{1,2}|\\\\[\\s\\S]|-|[^-\\\\]","g"));
e=[];var a="^"===c[0],b=["["];a&&b.push("^");for(var a=a?1:0,g=c.length;a<g;++a){var h=c[a];if(/\\[bdsw]/i.test(h))b.push(h);else{var h=d(h),k;a+2<g&&"-"===c[a+1]?(k=d(c[a+2]),a+=2):k=h;e.push([h,k]);65>k||122<h||(65>k||90<h||e.push([Math.max(65,h)|32,Math.min(k,90)|32]),97>k||122<h||e.push([Math.max(97,h)&-33,Math.min(k,122)&-33]))}}e.sort(function(e,a){return e[0]-a[0]||a[1]-e[1]});c=[];g=[];for(a=0;a<e.length;++a)h=e[a],h[0]<=g[1]+1?g[1]=Math.max(g[1],h[1]):c.push(g=h);for(a=0;a<c.length;++a)h=
c[a],b.push(f(h[0])),h[1]>h[0]&&(h[1]+1>h[0]&&b.push("-"),b.push(f(h[1])));b.push("]");return b.join("")}function m(e){for(var a=e.source.match(RegExp("(?:\\[(?:[^\\x5C\\x5D]|\\\\[\\s\\S])*\\]|\\\\u[A-Fa-f0-9]{4}|\\\\x[A-Fa-f0-9]{2}|\\\\[0-9]+|\\\\[^ux0-9]|\\(\\?[:!=]|[\\(\\)\\^]|[^\\x5B\\x5C\\(\\)\\^]+)","g")),b=a.length,d=[],g=0,h=0;g<b;++g){var k=a[g];"("===k?++h:"\\"===k.charAt(0)&&(k=+k.substring(1))&&(k<=h?d[k]=-1:a[g]=f(k))}for(g=1;g<d.length;++g)-1===d[g]&&(d[g]=++E);for(h=g=0;g<b;++g)k=a[g],
"("===k?(++h,d[h]||(a[g]="(?:")):"\\"===k.charAt(0)&&(k=+k.substring(1))&&k<=h&&(a[g]="\\"+d[k]);for(g=0;g<b;++g)"^"===a[g]&&"^"!==a[g+1]&&(a[g]="");if(e.ignoreCase&&q)for(g=0;g<b;++g)k=a[g],e=k.charAt(0),2<=k.length&&"["===e?a[g]=c(k):"\\"!==e&&(a[g]=k.replace(/[a-zA-Z]/g,function(a){a=a.charCodeAt(0);return"["+String.fromCharCode(a&-33,a|32)+"]"}));return a.join("")}for(var E=0,q=!1,l=!1,n=0,b=a.length;n<b;++n){var p=a[n];if(p.ignoreCase)l=!0;else if(/[a-z]/i.test(p.source.replace(/\\u[0-9a-f]{4}|\\x[0-9a-f]{2}|\\[^ux]/gi,
""))){q=!0;l=!1;break}}for(var w={b:8,t:9,n:10,v:11,f:12,r:13},r=[],n=0,b=a.length;n<b;++n){p=a[n];if(p.global||p.multiline)throw Error(""+p);r.push("(?:"+m(p)+")")}return new RegExp(r.join("|"),l?"gi":"g")}function U(a,d){function f(a){var b=a.nodeType;if(1==b){if(!c.test(a.className)){for(b=a.firstChild;b;b=b.nextSibling)f(b);b=a.nodeName.toLowerCase();if("br"===b||"li"===b)m[l]="\n",q[l<<1]=E++,q[l++<<1|1]=a}}else if(3==b||4==b)b=a.nodeValue,b.length&&(b=d?b.replace(/\r\n?/g,"\n"):b.replace(/[ \t\r\n]+/g,
" "),m[l]=b,q[l<<1]=E,E+=b.length,q[l++<<1|1]=a)}var c=/(?:^|\s)nocode(?:\s|$)/,m=[],E=0,q=[],l=0;f(a);return{a:m.join("").replace(/\n$/,""),c:q}}function J(a,d,f,c,m){f&&(a={h:a,l:1,j:null,m:null,a:f,c:null,i:d,g:null},c(a),m.push.apply(m,a.g))}function V(a){for(var d=void 0,f=a.firstChild;f;f=f.nextSibling)var c=f.nodeType,d=1===c?d?a:f:3===c?W.test(f.nodeValue)?a:d:d;return d===a?void 0:d}function G(a,d){function f(a){for(var l=a.i,n=a.h,b=[l,"pln"],p=0,q=a.a.match(m)||[],r={},e=0,t=q.length;e<
t;++e){var z=q[e],v=r[z],g=void 0,h;if("string"===typeof v)h=!1;else{var k=c[z.charAt(0)];if(k)g=z.match(k[1]),v=k[0];else{for(h=0;h<E;++h)if(k=d[h],g=z.match(k[1])){v=k[0];break}g||(v="pln")}!(h=5<=v.length&&"lang-"===v.substring(0,5))||g&&"string"===typeof g[1]||(h=!1,v="src");h||(r[z]=v)}k=p;p+=z.length;if(h){h=g[1];var A=z.indexOf(h),C=A+h.length;g[2]&&(C=z.length-g[2].length,A=C-h.length);v=v.substring(5);J(n,l+k,z.substring(0,A),f,b);J(n,l+k+A,h,K(v,h),b);J(n,l+k+C,z.substring(C),f,b)}else b.push(l+
k,v)}a.g=b}var c={},m;(function(){for(var f=a.concat(d),l=[],n={},b=0,p=f.length;b<p;++b){var w=f[b],r=w[3];if(r)for(var e=r.length;0<=--e;)c[r.charAt(e)]=w;w=w[1];r=""+w;n.hasOwnProperty(r)||(l.push(w),n[r]=null)}l.push(/[\0-\uffff]/);m=T(l)})();var E=d.length;return f}function x(a){var d=[],f=[];a.tripleQuotedStrings?d.push(["str",/^(?:\'\'\'(?:[^\'\\]|\\[\s\S]|\'{1,2}(?=[^\']))*(?:\'\'\'|$)|\"\"\"(?:[^\"\\]|\\[\s\S]|\"{1,2}(?=[^\"]))*(?:\"\"\"|$)|\'(?:[^\\\']|\\[\s\S])*(?:\'|$)|\"(?:[^\\\"]|\\[\s\S])*(?:\"|$))/,
null,"'\""]):a.multiLineStrings?d.push(["str",/^(?:\'(?:[^\\\']|\\[\s\S])*(?:\'|$)|\"(?:[^\\\"]|\\[\s\S])*(?:\"|$)|\`(?:[^\\\`]|\\[\s\S])*(?:\`|$))/,null,"'\"`"]):d.push(["str",/^(?:\'(?:[^\\\'\r\n]|\\.)*(?:\'|$)|\"(?:[^\\\"\r\n]|\\.)*(?:\"|$))/,null,"\"'"]);a.verbatimStrings&&f.push(["str",/^@\"(?:[^\"]|\"\")*(?:\"|$)/,null]);var c=a.hashComments;c&&(a.cStyleComments?(1<c?d.push(["com",/^#(?:##(?:[^#]|#(?!##))*(?:###|$)|.*)/,null,"#"]):d.push(["com",/^#(?:(?:define|e(?:l|nd)if|else|error|ifn?def|include|line|pragma|undef|warning)\b|[^\r\n]*)/,
null,"#"]),f.push(["str",/^<(?:(?:(?:\.\.\/)*|\/?)(?:[\w-]+(?:\/[\w-]+)+)?[\w-]+\.h(?:h|pp|\+\+)?|[a-z]\w*)>/,null])):d.push(["com",/^#[^\r\n]*/,null,"#"]));a.cStyleComments&&(f.push(["com",/^\/\/[^\r\n]*/,null]),f.push(["com",/^\/\*[\s\S]*?(?:\*\/|$)/,null]));if(c=a.regexLiterals){var m=(c=1<c?"":"\n\r")?".":"[\\S\\s]";f.push(["lang-regex",RegExp("^(?:^^\\.?|[+-]|[!=]=?=?|\\#|%=?|&&?=?|\\(|\\*=?|[+\\-]=|->|\\/=?|::?|<<?=?|>>?>?=?|,|;|\\?|@|\\[|~|{|\\^\\^?=?|\\|\\|?=?|break|case|continue|delete|do|else|finally|instanceof|return|throw|try|typeof)\\s*("+
("/(?=[^/*"+c+"])(?:[^/\\x5B\\x5C"+c+"]|\\x5C"+m+"|\\x5B(?:[^\\x5C\\x5D"+c+"]|\\x5C"+m+")*(?:\\x5D|$))+/")+")")])}(c=a.types)&&f.push(["typ",c]);c=(""+a.keywords).replace(/^ | $/g,"");c.length&&f.push(["kwd",new RegExp("^(?:"+c.replace(/[\s,]+/g,"|")+")\\b"),null]);d.push(["pln",/^\s+/,null," \r\n\t\u00a0"]);c="^.[^\\s\\w.$@'\"`/\\\\]*";a.regexLiterals&&(c+="(?!s*/)");f.push(["lit",/^@[a-z_$][a-z_$@0-9]*/i,null],["typ",/^(?:[@_]?[A-Z]+[a-z][A-Za-z_$@0-9]*|\w+_t\b)/,null],["pln",/^[a-z_$][a-z_$@0-9]*/i,
null],["lit",/^(?:0x[a-f0-9]+|(?:\d(?:_\d+)*\d*(?:\.\d*)?|\.\d\+)(?:e[+\-]?\d+)?)[a-z]*/i,null,"0123456789"],["pln",/^\\[\s\S]?/,null],["pun",new RegExp(c),null]);return G(d,f)}function L(a,d,f){function c(a){var b=a.nodeType;if(1==b&&!t.test(a.className))if("br"===a.nodeName.toLowerCase())m(a),a.parentNode&&a.parentNode.removeChild(a);else for(a=a.firstChild;a;a=a.nextSibling)c(a);else if((3==b||4==b)&&f){var e=a.nodeValue,d=e.match(q);d&&(b=e.substring(0,d.index),a.nodeValue=b,(e=e.substring(d.index+
d[0].length))&&a.parentNode.insertBefore(l.createTextNode(e),a.nextSibling),m(a),b||a.parentNode.removeChild(a))}}function m(a){function c(a,b){var e=b?a.cloneNode(!1):a,k=a.parentNode;if(k){var k=c(k,1),d=a.nextSibling;k.appendChild(e);for(var f=d;f;f=d)d=f.nextSibling,k.appendChild(f)}return e}for(;!a.nextSibling;)if(a=a.parentNode,!a)return;a=c(a.nextSibling,0);for(var e;(e=a.parentNode)&&1===e.nodeType;)a=e;b.push(a)}for(var t=/(?:^|\s)nocode(?:\s|$)/,q=/\r\n?|\n/,l=a.ownerDocument,n=l.createElement("li");a.firstChild;)n.appendChild(a.firstChild);
for(var b=[n],p=0;p<b.length;++p)c(b[p]);d===(d|0)&&b[0].setAttribute("value",d);var w=l.createElement("ol");w.className="linenums";d=Math.max(0,d-1|0)||0;for(var p=0,r=b.length;p<r;++p)n=b[p],n.className="L"+(p+d)%10,n.firstChild||n.appendChild(l.createTextNode("\u00a0")),w.appendChild(n);a.appendChild(w)}function t(a,d){for(var f=d.length;0<=--f;){var c=d[f];I.hasOwnProperty(c)?D.console&&console.warn("cannot override language handler %s",c):I[c]=a}}function K(a,d){a&&I.hasOwnProperty(a)||(a=/^\s*</.test(d)?
"default-markup":"default-code");return I[a]}function M(a){var d=a.j;try{var f=U(a.h,a.l),c=f.a;a.a=c;a.c=f.c;a.i=0;K(d,c)(a);var m=/\bMSIE\s(\d+)/.exec(navigator.userAgent),m=m&&8>=+m[1],d=/\n/g,t=a.a,q=t.length,f=0,l=a.c,n=l.length,c=0,b=a.g,p=b.length,w=0;b[p]=q;var r,e;for(e=r=0;e<p;)b[e]!==b[e+2]?(b[r++]=b[e++],b[r++]=b[e++]):e+=2;p=r;for(e=r=0;e<p;){for(var x=b[e],z=b[e+1],v=e+2;v+2<=p&&b[v+1]===z;)v+=2;b[r++]=x;b[r++]=z;e=v}b.length=r;var g=a.h;a="";g&&(a=g.style.display,g.style.display="none");
try{for(;c<n;){var h=l[c+2]||q,k=b[w+2]||q,v=Math.min(h,k),A=l[c+1],C;if(1!==A.nodeType&&(C=t.substring(f,v))){m&&(C=C.replace(d,"\r"));A.nodeValue=C;var N=A.ownerDocument,u=N.createElement("span");u.className=b[w+1];var B=A.parentNode;B.replaceChild(u,A);u.appendChild(A);f<h&&(l[c+1]=A=N.createTextNode(t.substring(v,h)),B.insertBefore(A,u.nextSibling))}f=v;f>=h&&(c+=2);f>=k&&(w+=2)}}finally{g&&(g.style.display=a)}}catch(y){D.console&&console.log(y&&y.stack||y)}}var D="undefined"!==typeof window?
window:{},B=["break,continue,do,else,for,if,return,while"],F=[[B,"auto,case,char,const,default,double,enum,extern,float,goto,inline,int,long,register,restrict,short,signed,sizeof,static,struct,switch,typedef,union,unsigned,void,volatile"],"catch,class,delete,false,import,new,operator,private,protected,public,this,throw,true,try,typeof"],H=[F,"alignas,alignof,align_union,asm,axiom,bool,concept,concept_map,const_cast,constexpr,decltype,delegate,dynamic_cast,explicit,export,friend,generic,late_check,mutable,namespace,noexcept,noreturn,nullptr,property,reinterpret_cast,static_assert,static_cast,template,typeid,typename,using,virtual,where"],
O=[F,"abstract,assert,boolean,byte,extends,finally,final,implements,import,instanceof,interface,null,native,package,strictfp,super,synchronized,throws,transient"],P=[F,"abstract,add,alias,as,ascending,async,await,base,bool,by,byte,checked,decimal,delegate,descending,dynamic,event,finally,fixed,foreach,from,get,global,group,implicit,in,interface,internal,into,is,join,let,lock,null,object,out,override,orderby,params,partial,readonly,ref,remove,sbyte,sealed,select,set,stackalloc,string,select,uint,ulong,unchecked,unsafe,ushort,value,var,virtual,where,yield"],
F=[F,"abstract,async,await,constructor,debugger,enum,eval,export,from,function,get,import,implements,instanceof,interface,let,null,of,set,undefined,var,with,yield,Infinity,NaN"],Q=[B,"and,as,assert,class,def,del,elif,except,exec,finally,from,global,import,in,is,lambda,nonlocal,not,or,pass,print,raise,try,with,yield,False,True,None"],R=[B,"alias,and,begin,case,class,def,defined,elsif,end,ensure,false,in,module,next,nil,not,or,redo,rescue,retry,self,super,then,true,undef,unless,until,when,yield,BEGIN,END"],
B=[B,"case,done,elif,esac,eval,fi,function,in,local,set,then,until"],S=/^(DIR|FILE|array|vector|(de|priority_)?queue|(forward_)?list|stack|(const_)?(reverse_)?iterator|(unordered_)?(multi)?(set|map)|bitset|u?(int|float)\d*)\b/,W=/\S/,X=x({keywords:[H,P,O,F,"caller,delete,die,do,dump,elsif,eval,exit,foreach,for,goto,if,import,last,local,my,next,no,our,print,package,redo,require,sub,undef,unless,until,use,wantarray,while,BEGIN,END",Q,R,B],hashComments:!0,cStyleComments:!0,multiLineStrings:!0,regexLiterals:!0}),
I={};t(X,["default-code"]);t(G([],[["pln",/^[^<?]+/],["dec",/^<!\w[^>]*(?:>|$)/],["com",/^<\!--[\s\S]*?(?:-\->|$)/],["lang-",/^<\?([\s\S]+?)(?:\?>|$)/],["lang-",/^<%([\s\S]+?)(?:%>|$)/],["pun",/^(?:<[%?]|[%?]>)/],["lang-",/^<xmp\b[^>]*>([\s\S]+?)<\/xmp\b[^>]*>/i],["lang-js",/^<script\b[^>]*>([\s\S]*?)(<\/script\b[^>]*>)/i],["lang-css",/^<style\b[^>]*>([\s\S]*?)(<\/style\b[^>]*>)/i],["lang-in.tag",/^(<\/?[a-z][^<>]*>)/i]]),"default-markup htm html mxml xhtml xml xsl".split(" "));t(G([["pln",/^[\s]+/,
null," \t\r\n"],["atv",/^(?:\"[^\"]*\"?|\'[^\']*\'?)/,null,"\"'"]],[["tag",/^^<\/?[a-z](?:[\w.:-]*\w)?|\/?>$/i],["atn",/^(?!style[\s=]|on)[a-z](?:[\w:-]*\w)?/i],["lang-uq.val",/^=\s*([^>\'\"\s]*(?:[^>\'\"\s\/]|\/(?=\s)))/],["pun",/^[=<>\/]+/],["lang-js",/^on\w+\s*=\s*\"([^\"]+)\"/i],["lang-js",/^on\w+\s*=\s*\'([^\']+)\'/i],["lang-js",/^on\w+\s*=\s*([^\"\'>\s]+)/i],["lang-css",/^style\s*=\s*\"([^\"]+)\"/i],["lang-css",/^style\s*=\s*\'([^\']+)\'/i],["lang-css",/^style\s*=\s*([^\"\'>\s]+)/i]]),["in.tag"]);
t(G([],[["atv",/^[\s\S]+/]]),["uq.val"]);t(x({keywords:H,hashComments:!0,cStyleComments:!0,types:S}),"c cc cpp cxx cyc m".split(" "));t(x({keywords:"null,true,false"}),["json"]);t(x({keywords:P,hashComments:!0,cStyleComments:!0,verbatimStrings:!0,types:S}),["cs"]);t(x({keywords:O,cStyleComments:!0}),["java"]);t(x({keywords:B,hashComments:!0,multiLineStrings:!0}),["bash","bsh","csh","sh"]);t(x({keywords:Q,hashComments:!0,multiLineStrings:!0,tripleQuotedStrings:!0}),["cv","py","python"]);t(x({keywords:"caller,delete,die,do,dump,elsif,eval,exit,foreach,for,goto,if,import,last,local,my,next,no,our,print,package,redo,require,sub,undef,unless,until,use,wantarray,while,BEGIN,END",
hashComments:!0,multiLineStrings:!0,regexLiterals:2}),["perl","pl","pm"]);t(x({keywords:R,hashComments:!0,multiLineStrings:!0,regexLiterals:!0}),["rb","ruby"]);t(x({keywords:F,cStyleComments:!0,regexLiterals:!0}),["javascript","js","ts","typescript"]);t(x({keywords:"all,and,by,catch,class,else,extends,false,finally,for,if,in,is,isnt,loop,new,no,not,null,of,off,on,or,return,super,then,throw,true,try,unless,until,when,while,yes",hashComments:3,cStyleComments:!0,multilineStrings:!0,tripleQuotedStrings:!0,
regexLiterals:!0}),["coffee"]);t(G([],[["str",/^[\s\S]+/]]),["regex"]);var Y=D.PR={createSimpleLexer:G,registerLangHandler:t,sourceDecorator:x,PR_ATTRIB_NAME:"atn",PR_ATTRIB_VALUE:"atv",PR_COMMENT:"com",PR_DECLARATION:"dec",PR_KEYWORD:"kwd",PR_LITERAL:"lit",PR_NOCODE:"nocode",PR_PLAIN:"pln",PR_PUNCTUATION:"pun",PR_SOURCE:"src",PR_STRING:"str",PR_TAG:"tag",PR_TYPE:"typ",TAOSDATA_FUNCTION:"td-fun",TAOSDATA_DATATYPE:"td-dtp",TAOSDATA_TERMINAL:"tem",TAOSDATA_OPTION:"td-opt",prettyPrintOne:D.prettyPrintOne=function(a,d,f){f=f||!1;d=d||null;var c=document.createElement("div");c.innerHTML="<pre>"+a+"</pre>";
c=c.firstChild;f&&L(c,f,!0);M({j:d,m:f,h:c,l:1,a:null,i:null,c:null,g:null});return c.innerHTML},prettyPrint:D.prettyPrint=function(a,d){function f(){for(var c=D.PR_SHOULD_USE_CONTINUATION?b.now()+250:Infinity;p<x.length&&b.now()<c;p++){for(var d=x[p],l=g,n=d;n=n.previousSibling;){var m=n.nodeType,u=(7===m||8===m)&&n.nodeValue;if(u?!/^\??prettify\b/.test(u):3!==m||/\S/.test(n.nodeValue))break;if(u){l={};u.replace(/\b(\w+)=([\w:.%+-]+)/g,function(a,b,c){l[b]=c});break}}n=d.className;if((l!==g||r.test(n))&&
!e.test(n)){m=!1;for(u=d.parentNode;u;u=u.parentNode)if(v.test(u.tagName)&&u.className&&r.test(u.className)){m=!0;break}if(!m){d.className+=" prettyprinted";m=l.lang;if(!m){var m=n.match(w),q;!m&&(q=V(d))&&z.test(q.tagName)&&(m=q.className.match(w));m&&(m=m[1])}if(B.test(d.tagName))u=1;else var u=d.currentStyle,y=t.defaultView,u=(u=u?u.whiteSpace:y&&y.getComputedStyle?y.getComputedStyle(d,null).getPropertyValue("white-space"):0)&&"pre"===u.substring(0,3);y=l.linenums;(y="true"===y||+y)||(y=(y=n.match(/\blinenums\b(?::(\d+))?/))?
y[1]&&y[1].length?+y[1]:!0:!1);y&&L(d,y,u);M({j:m,h:d,m:y,l:u,a:null,i:null,c:null,g:null})}}}p<x.length?D.setTimeout(f,250):"function"===typeof a&&a()}for(var c=d||document.body,t=c.ownerDocument||document,c=[c.getElementsByTagName("pre"),c.getElementsByTagName("code"),c.getElementsByTagName("xmp")],x=[],q=0;q<c.length;++q)for(var l=0,n=c[q].length;l<n;++l)x.push(c[q][l]);var c=null,b=Date;b.now||(b={now:function(){return+new Date}});var p=0,w=/\blang(?:uage)?-([\w.]+)(?!\S)/,r=/\bprettyprint\b/,
e=/\bprettyprinted\b/,B=/pre|xmp/i,z=/^code$/i,v=/^(?:pre|code|xmp)$/i,g={};f()}},H=D.define;"function"===typeof H&&H.amd&&H("google-code-prettify",[],function(){return Y})})();}()

View File

@ -1,31 +0,0 @@
PR['registerLangHandler'](
PR['createSimpleLexer'](
[
// Whitespace
[PR['PR_PLAIN'], /^[\t\n\r \xA0]+/, null, '\t\n\r \xA0'],
// A double or single quoted, possibly multi-line, string.
[PR['PR_STRING'], /^(?:"(?:[^\"\\]|\\.)*"|'(?:[^\'\\]|\\.)*')/, null,
'"\'']
],
[
// A comment is either a line comment that starts with two dashes, or
// two dashes preceding a long bracketed block.
[PR['PR_COMMENT'], /^(?:--[^\r\n]*|\/\*[\s\S]*?(?:\*\/|$))/],
[PR['PR_KEYWORD'], /^(?:ADD|ALL|ALTER|AND|ANY|APPLY|AS|ASC|AUTHORIZATION|BACKUP|BEGIN|BETWEEN|BREAK|BROWSE|BULK|BY|CASCADE|CASE|CHECK|CHECKPOINT|CLOSE|CLUSTERED|COALESCE|COLLATE|COLUMN|COMMIT|COMPUTE|CONNECT|CONSTRAINT|CONTAINS|CONTAINSTABLE|CONTINUE|CONVERT|CREATE|CROSS|CURRENT|CURRENT_DATE|CURRENT_TIME|CURRENT_TIMESTAMP|CURRENT_USER|CURSOR|DATABASE|DBCC|DEALLOCATE|DECLARE|DEFAULT|DELETE|DENY|DESC|DISK|DISTINCT|DISTRIBUTED|DROP|DUMMY|DUMP|ELSE|END|ERRLVL|ESCAPE|EXCEPT|EXEC|EXECUTE|EXISTS|EXIT|FETCH|FILE|FILL|FILLFACTOR|FOLLOWING|FOR|FOREIGN|FREETEXT|FREETEXTTABLE|FROM|FULL|FUNCTION|GOTO|GRANT|GROUP|HAVING|HOLDLOCK|IDENTITY|IDENTITYCOL|IDENTITY_INSERT|IF|IN|INDEX|INNER|INSERT|INTERSECT|INTO|IS|JOIN|KEY|KILL|LEFT|LIKE|LINENO|LOAD|MATCH|MATCHED|MERGE|NATURAL|NATIONAL|NOCHECK|NONCLUSTERED|NOCYCLE|NOT|NULL|NULLIF|OF|OFF|OFFSETS|ON|OPEN|OPENDATASOURCE|OPENQUERY|OPENROWSET|OPENXML|OPTION|OR|ORDER|OUTER|OVER|PARTITION|PERCENT|PIVOT|PLAN|PRECEDING|PRIMARY|PRINT|PROC|PROCEDURE|PUBLIC|RAISERROR|READ|READTEXT|RECONFIGURE|REFERENCES|REPLICATION|RESTORE|RESTRICT|RETURN|REVOKE|RIGHT|ROLLBACK|ROWCOUNT|ROWGUIDCOL|RULE|SAVE|SCHEMA|SELECT|SESSION_USER|SET|SETUSER|SHUTDOWN|SLIDING|SOME|START|STATISTICS|SYSTEM_USER|TABLE|TAGS|TEXTSIZE|THEN|TO|TRAN|TRANSACTION|TRIGGER|TRUNCATE|TSEQUAL|UNBOUNDED|UNION|UNIQUE|UNPIVOT|UPDATE|UPDATETEXT|USE|USER|USING|VALUES|VARYING|VIEW|WAITFOR|WHEN|WHERE|WHILE|WITH|WITHIN|WRITETEXT|XML|ID|STRING|INTEGER|OR|AND|NOT|EQ|NE|ISNULL|NOTNULL|IS|LIKE|GLOB|BETWEEN|IN|GT|GE|LT|LE|BITAND|BITOR|LSHIFT|RSHIFT|PLUS|MINUS|DIVIDE|TIMES|STAR|SLASH|REM|CONCAT|UMINUS|UPLUS|BITNOT|SHOW|DATABASES|MNODES|USERS|MODULES|QUERIES|CONNECTIONS|STREAMS|CONFIGS|SCORES|GRANTS|DOT|TABLES|METRICS|VGROUPS|DROP|TABLE|DATABASE|IP|USER|USE|DESCRIBE|ALTER|PASS|PRIVILEGE|LOCAL|IF|EXISTS|REPLICA|DAYS|KEEP|ROWS|CACHE|ABLOCKS|TBLOCKS|CTIME|CLOG|COMP|LP|RP|TAGS|USING|AS|COMMA|SELECT|FROM|VARIABLE|INTERVAL|FILL|SLIDING|ORDER|BY|ASC|DESC|GROUP|LIMIT|OFFSET|WHERE|NOW|INSERT|INTO|VALUES|RESET|QUERY|ADD|COLUMN|TAG|CHANGE|SET|KILL|CONNECTION|STREAM|ABORT|AFTER|ATTACH|BEFORE|BEGIN|CASCADE|CLUSTER|CONFLICT|COPY|DEFERRED|DELIMITERS|DETACH|EACH|END|EXPLAIN|FAIL|FOR|IGNORE|IMMEDIATE|INITIALLY|INSTEAD|MATCH|KEY|OF|RAISE|REPLACE|RESTRICT|ROW|STATEMENT|TRIGGER|VIEW|ALL|SEMI|NONE|PREV|LINEAR|IMPORT|METRIC|TBNAME|JOIN|STABLE|STABLES|SLIMIT|SOFFSET|HAVING|PRECISION|STREAMS|NULL)(?=[^\w-]|$)/i, null],
// Aggregation/selection function names.
[PR['TAOSDATA_FUNCTION'], /^(?:APERCENTILE|AVG|BOTTOM|COUNT|DIFF|FIRST|HISTOGRAM|INTERP|LAST|LAST_ROW|LEASTSQUARES|MAX|MIN|PERCENTILE|SPREAD|STDDEV|SUM|TOP|WAVG)(?=[^\w-]|$)/i, null],
[PR['TAOSDATA_OPTION'],
/^(?:ABLOCKS|CACHE|CLOG|COMP|CTIME|DAYS|KEEP|PRECISION|REPLICA|ROWS|TABLES|TBLOCKS)(?=[^\w-]|$)/i,null],
[PR['TAOSDATA_DATATYPE'],
/^(?:BIGINT|BINARY|BOOL|DOUBLE|FLOAT|INT|NCHAR|SMALLINT|TINYINT)(?=[^\w-]|$)/i, null],
// A number is a hex integer literal, a decimal real literal, or in
// scientific notation.
[PR['PR_LITERAL'],
/^[+-]?(?:0x[\da-f]+|(?:(?:\.\d+|\d+(?:\.\d*)?)(?:e[+\-]?\d+)?))/i],
// An identifier
[PR['PR_PLAIN'], /^[a-z_][\w-]*/i],
// A run of punctuation
[PR['PR_PUNCTUATION'], /^[^\w\t\n\r \xA0\"\'][^\w\t\n\r \xA0+\-\"\']*/]
]
),
['sql','mysql']);

View File

@ -1,9 +0,0 @@
PR['registerLangHandler'](
PR['createSimpleLexer'](
[],
[
// Terminal output has no inner token structure; style the entire text as terminal content.
[PR['TAOSDATA_TERMINAL'], /^[\s\S]+/]
]
),
['terminal','term']);

View File

@ -1,231 +0,0 @@
pre.prettyprint ol {
list-style-type: none;
margin-left: 0;
}
pre.prettyprint ol > li {
counter-increment: customlistcounter;
}
pre.prettyprint ol > li:first-child:before {
border-top-left-radius: 0.25rem;
}
pre.prettyprint ol > li:last-child:before {
border-bottom-left-radius: 0.25rem;
}
pre.prettyprint ol > li:before {
content: counter(customlistcounter) " ";
font-weight: 300;
display: inline-block;
position: absolute;
transform:translateX(-38px);
width: 27px;
text-align: right;
background-color:var(--white);
padding-bottom: 0.1px;
}
pre.prettyprint ol > li:nth-last-child(1)::before {
padding-bottom: 0px !important;
}
pre.prettyprint.twoc ol > li:before {
transform:translateX(-45px);
width:34px;
}
pre.prettyprint.twoc {
padding-left:35px;
}
pre.prettyprint.threec ol > li:before {
transform:translateX(-53px);
width:42px;
}
pre.prettyprint.threec {
padding-left:43px;
}
ol:first-child {
counter-reset: customlistcounter;
}
pre.prettyprint ol {
padding-right: 4px;
}
pre .atn,
pre .kwd,
pre .tag {
font-weight: 400
}
pre * {
font-family:monospace;
}
pre.prettyprint li {
background-color:rgb(244,245,246);
}
pre.prettyprint {
display: block;
background-color:rgb(244,245,246);
border-radius:0.25rem;
padding-left: 27px;
/*each additional digit needs 8px*/
width:100%;
border:1px solid #e7eaed;
color:#d58936;
}
/* TAOSDATA Specific */
pre.lang-blank span {
color:var(--sg1);
}
pre.lang-blank {
}
pre.lang-term span{
color: var(--white) ;
}
pre.lang-term ol {
background-color: var(--sg1);
}
pre.lang-term ol.linenums {
border-left:1px solid var(--sg1);
}
pre.lang-term li {
background-color:var(--sg1);
}
/*Functions*/
pre .td-fun {
color:#f24352;
}
/*Options*/
pre .td-opt {
/*color:mediumpurple;*/
color:#5882bc;
}
/*Datatypes*/
pre .td-dtp {
color:darkcyan;
}
pre .nocode {
background-color: var(--white);
color: var(--sg1);
}
/*Strings*/
pre .str {
color: #690;
}
/*Keywords*/
pre .kwd {
color: #5882bc;
}
/*Comments*/
pre .com {
color: slategray;
}
/*Type*/
pre .typ {
color: #9c5fc6;
}
/*Literals*/
pre .lit {
color: #91001f;
}
/*Plain Text*/
pre .pln {
color: #d58936;
}
/*Punctuation*/
pre .pun {
color: rgb(51,66,78);
}
pre .tag {
color: khaki
}
pre .atn {
color: #bdb76b
}
pre .atv {
color: #ffa0a0
}
pre .dec {
color: #98fb98
}
ol.linenums {
margin-top: 0;
margin-bottom: 0;
color: #AEAEAE;
border-left:1px solid var(--b1);
padding-left: 0px;
}
pre li {
padding-left: 0.6rem;
}
li.L0,
li.L1,
li.L2,
li.L3,
li.L5,
li.L6,
li.L7,
li.L8 {
list-style-type: none
}
@media print {
pre.prettyprint {
background: none
}
code .str,
pre .str {
color: #690;
}
code .kwd,
pre .kwd {
color: #5882bc;
font-weight: 400
}
code .com,
pre .com {
color: #600;
font-style: italic
}
code .typ,
pre .typ {
color: #404;
font-weight: 400
}
code .lit,
pre .lit {
color: #044
}
code .pun,
pre .pun {
color: #440
}
code .pln,
pre .pln {
color: #000
}
code .tag,
pre .tag {
color: #006;
font-weight: 400
}
code .atn,
pre .atn {
color: #404
}
code .atv,
pre .atv {
color: #060
}
}

File diff suppressed because one or more lines are too long

View File

@ -1,120 +0,0 @@
<!DOCTYPE html><html lang='en'><head><title>Documentation | Taos Data</title><meta name='description' content='TDengine is an open-source big data platform for IoT. Along with a 10x faster time-series database, it provides caching, stream computing, message queuing, and other functionalities. It is designed and optimized for Internet of Things, Connected Cars, and Industrial IoT. Read the documentation for TDengine here to get started right away.'><meta name='keywords' content='TDengine, Big Data, Open Source, IoT, Connected Cars, Industrial IoT, time-series database, caching, stream computing, message queuing, IT infrastructure monitoring, application performance monitoring, Internet of Things,TAOS Data, Documentation, programming, coding, syntax, frequently asked questions, questions, faq'><meta name='title' content='Documentation | Taos Data'><meta property='og:site_name' content='Taos Data'/><meta property='og:title' content='Documentation | Taos Data'/><meta property='og:type' content='article'/><meta property='og:url' content='https://www.taosdata.com/en/documentation/more-on-system-architecture/index.php'/><meta property='og:description' content='TDengine is an open-source big data platform for IoT. Along with a 10x faster time-series database, it provides caching, stream computing, message queuing, and other functionalities. It is designed and optimized for Internet of Things, Connected Cars, and Industrial IoT. Read the documentation for TDengine here to get started right away.' /><link rel='canonical' href='https://www.taosdata.com/en/documentation/more-on-system-architecture/index.php'/><script src='../lib/jquery-3.4.1.min.js' type='application/javascript'></script><link href='../lib/bootstrap.min.css' rel='stylesheet'><link href='../styles/base.min.css' rel='stylesheet'><link rel='stylesheet' href='../lib/docs/taosdataprettify.css'><link rel='stylesheet' href='../lib/docs/docs.css'><script src='../lib/docs/prettify.js'></script><script src='../lib/docs/prettyprint-sql.js'></script></head><body><script>$('#documentation-href').addClass('active')</script><div class='container-fluid'><main class='content-wrapper'><section class='documentation'><a href='../index.html'>Back</a><h1>TDengine System Architecture</h1>
<a class='anchor' id='Storage-Design'></a><h2>Storage Design</h2>
<p>TDengine mainly stores two kinds of data, <strong>metadata</strong> and <strong>data</strong>, which we introduce in the following sections.</p>
<a class='anchor' id='Metadata-Storage'></a><h3>Metadata Storage</h3>
<p>Metadata include the information of databases, tables, etc. Metadata files are saved in <em>/var/lib/taos/mgmt/</em> directory by default. The directory tree is as below:</p>
<pre><code>/var/lib/taos/
+--mgmt/
+--db.db
+--meters.db
+--user.db
+--vgroups.db</code></pre>
<p>A metadata structure (database, table, etc.) is saved as a record in a metadata file. All metadata files are append-only; even a drop operation adds a deletion record at the end of the file.</p>
<a class='anchor' id='Data-storage'></a><h3>Data storage</h3>
<p>Data in TDengine are sharded according to the time range. Data of tables in the same vnode in a certain time range are saved in the same file group, such as files v0f1804*. This sharding strategy can effectively improve data searching speed. By default, a group of files contains data for 10 days, which can be configured by <em>daysPerFile</em> in the configuration file or by the <em>DAYS</em> keyword in the <em>CREATE DATABASE</em> clause, as sketched below. Data in files are organized in blocks. A data block only contains one table's data. Records in the same data block are sorted according to the primary timestamp, which helps to improve the compression rate and save storage. The compression algorithms used in TDengine include simple8B, delta-of-delta, RLE, LZ4, etc.</p>
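<p>A minimal sketch (the database name is illustrative; 10 days per file group matches the default):</p>
<pre><code class="mysql language-mysql">CREATE DATABASE db DAYS 10;</code></pre>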
<p>By default, TDengine data are saved in the <em>/var/lib/taos/data/</em> directory. The <em>/var/lib/taos/tsdb/</em> directory contains vnode information and data file links.</p>
<pre><code>/var/lib/taos/
+--tsdb/
| +--vnode0
| +--meterObj.v0
| +--db/
| +--v0f1804.head-&gt;/var/lib/taos/data/vnode0/v0f1804.head1
| +--v0f1804.data-&gt;/var/lib/taos/data/vnode0/v0f1804.data
| +--v0f1804.last-&gt;/var/lib/taos/data/vnode0/v0f1804.last1
| +--v0f1805.head-&gt;/var/lib/taos/data/vnode0/v0f1805.head1
| +--v0f1805.data-&gt;/var/lib/taos/data/vnode0/v0f1805.data
| +--v0f1805.last-&gt;/var/lib/taos/data/vnode0/v0f1805.last1
| :
+--data/
+--vnode0/
+--v0f1804.head1
+--v0f1804.data
+--v0f1804.last1
+--v0f1805.head1
+--v0f1805.data
+--v0f1805.last1
:</code></pre>
<h4>meterObj file</h4>
<p>There is only one meterObj file in a vnode. Information about the vnode, such as its creation time, configuration, and statistics, is saved in this file. It has a structure like below:</p>
<pre><code>&lt;start_of_file&gt;
[file_header]
[table_record1_offset&amp;length]
[table_record2_offset&amp;length]
...
[table_recordN_offset&amp;length]
[table_record1]
[table_record2]
...
[table_recordN]
&lt;end_of_file&gt;</code></pre>
<p>The file header takes 512 bytes, which mainly contains information about the vnode. Each table record is the representation of a table on disk.</p>
<h4>head file</h4>
<p>The <em>head</em> files contain the index of data blocks in the <em>data</em> file. The inner organization is as below:</p>
<pre><code>&lt;start_of_file&gt;
[file_header]
[table1_offset]
[table2_offset]
...
[tableN_offset]
[table1_index_block]
[table2_index_block]
...
[tableN_index_block]
&lt;end_of_file&gt;</code></pre>
<p>The table offset array in the <em>head</em> file saves the information about the offsets of each table index block. Indices on data blocks in the same table are saved continuously. This also makes it efficient to load data indices on the same table. The data index block has a structure like:</p>
<pre><code>[index_block_info]
[block1_index]
[block2_index]
...
[blockN_index]</code></pre>
<p>The index block info part contains information about the index block, such as the number of index blocks. Each block index corresponds to a real data block in the <em>data</em> file or <em>last</em> file. The location of the real data block, the primary timestamp range of the data block, etc. are all saved in the block index part. The block indices are sorted in ascending order of the primary timestamp, so algorithms such as binary search can be applied to efficiently find blocks by time.</p>
<h4>data file</h4>
<p>The <em>data</em> files store the real data blocks. They are append-only. The organization is as follows:</p>
<pre><code>&lt;start_of_file&gt;
[file_header]
[block1]
[block2]
...
[blockN]
&lt;end_of_file&gt;</code></pre>
<p>A data block in <em>data</em> files only belongs to a table in the vnode and the records in a data block are sorted in ascending order according to the primary timestamp key. Data blocks are column-oriented. Data in the same column are stored contiguously, which improves reading speed and compression rate because of their similarity. A data block has the following organization:</p>
<pre><code>[column1_info]
[column2_info]
...
[columnN_info]
[column1_data]
[column2_data]
...
[columnN_data]</code></pre>
<p>The column info part includes information about column types, the column compression algorithm, and the column data offset and length in the <em>data</em> file. Besides, pre-calculated results of the column data in the block are also kept in the column info part, which helps to improve reading speed by avoiding loading the data block unnecessarily.</p>
<h4>last file</h4>
<p>To avoid storage fragmentation and to improve query speed and compression rate, TDengine introduces an extra file, the <em>last</em> file. When the number of records in a data block is lower than a threshold, TDengine will flush the block to the <em>last</em> file for temporary storage. When new data comes, the data in the <em>last</em> file will be merged with the new data to form a larger data block, which is then written to the <em>data</em> file. The organization of the <em>last</em> file is similar to the <em>data</em> file.</p>
<a class='anchor' id='Summary'></a><h3>Summary</h3>
<p>The innovation in the architecture and storage design of TDengine improves resource usage. On the one hand, virtualization makes it easy to distribute resources between different vnodes and eases future scaling. On the other hand, sorted and column-oriented storage gives TDengine a great advantage in writing, querying, and compression.</p>
<a class='anchor' id='Query-Design'></a><h2>Query Design</h2>
<h4>Introduction</h4>
<p>TDengine provides a variety of query functions for both tables and super tables. In addition to regular aggregate queries, it also provides time-window-based queries and statistical aggregation for time-series data. TDengine's query processing requires the client app, management node, and data node to work together. The functions and modules each component contributes to query processing are as follows:</p>
<p>Client (Client App). The client development kit, embedded in a client application, consists of the TAOS SQL parser, the query executor, the second-stage aggregator (Result Merger), the continuous query manager, and other major functional modules. The SQL parser is responsible for parsing and verifying the SQL statement and converting it into an abstract syntax tree. The query executor is responsible for transforming the abstract syntax tree into the query execution logic and creating the metadata query according to the query conditions of the SQL statement. Since TAOS SQL does not currently include complex nested queries or a pipelined query processing mechanism, there is no need for query plan optimization or physical query plan conversion. The second-stage aggregator is responsible for aggregating, at the client side, the independent results returned by the data nodes involved in a query to generate the final results. The continuous query manager is dedicated to managing the continuous queries created by users, including issuing fixed-interval query requests and writing the results back to TDengine or returning them to the client application as needed. The client is also responsible for retrying after a query fails, canceling a query request, maintaining the connection heartbeat, and reporting the query status to the management node.</p>
<p>Management Node. The management node keeps the metadata of all the data of the entire cluster system, provides the metadata required for a query to the client, and divides a query request according to the load condition of the cluster. A super table contains information about all the tables created from it, so the query processor (Query Executor) of the management node is responsible for processing queries on the tags of tables and returns the table information satisfying the tag filter. Besides, the management node maintains the query status of the cluster in the Query Status Manager component, in which the metadata of all currently executing queries is temporarily stored in an in-memory buffer. When a client issues a <em>show queries</em> command to the management node, information about the currently running queries is returned to the client.</p>
<p>Data Node. The data node, responsible for storing all data of the database, consists of a query executor, a query processing scheduler, a query task queue, and other related components. Once query requests are received from the client, they are put into the query task queue to wait for processing by the query executor. The query executor extracts a query request from the queue and invokes the query optimizer to perform basic optimization of the query execution plan. It then scans the qualified data blocks in both cache and disk to obtain the qualified data and returns the calculated results. Besides, the data node also needs to respond to management information and commands from the management node. For example, after a <em>kill query</em> command is received from the management node, the query task is stopped immediately.</p>
<p><center> <img src="../assets/fig1.png"> </center>
<center>Fig 1. System query processing architecture diagram (only query related components)</center></p>
<h4>Query Process Design</h4>
<p>The client, the management node, and the data node cooperate to complete the entire query processing of TDengine. Let's take a concrete SQL query as an example to illustrate the whole query processing flow. The SQL statement is to query on super table <em>FOO_SUPER_TABLE</em> to get the total number of records generated on January 12, 2019, from the table, of which TAG_LOC equals to 'beijing'. The SQL statement is as follows:</p>
<pre><code class="sql language-sql">SELECT COUNT(*)
FROM FOO_SUPER_TABLE
WHERE TAG_LOC = 'beijing' AND TS &gt;= '2019-01-12 00:00:00' AND TS &lt; '2019-01-13 00:00:00'</code></pre>
<p>First, the client invokes the TAOS SQL parser to parse and validate the SQL statement, then generates a syntax tree, and extracts the object of the query - the super table <em>FOO_SUPER_TABLE</em>, and then the parser sends requests with filtering information (TAG_LOC='beijing') to management node to get the corresponding metadata about <em>FOO_SUPER_TABLE</em>.</p>
<p>Once the management node receives the request for metadata acquisition, it first finds the basic information of the super table <em>FOO_SUPER_TABLE</em>, and then applies the query condition (TAG_LOC='beijing') to filter all the related tables created from it. Finally, the query executor returns the metadata that satisfies the query request to the client.</p>
<p>After the client obtains the metadata information of <em>FOO_SUPER_TABLE</em>, the query executor initiates a query request with timestamp range filtering condition (TS &gt;= '2019- 01-12 00:00:00' AND TS &lt; '2019-01-13 00:00:00') to all nodes that hold the corresponding data according to the information about data distribution in metadata.</p>
<p>The data node receives the query sent from the client, converts it into an internal structure, and puts it into the query task queue to be executed by the query executor after the execution plan is optimized. When the query result is obtained, it is returned to the client. It should be noted that the data nodes perform the query process independently of each other, relying solely on their own data for processing.</p>
<p>When all data nodes involved in the query return results, the client aggregates the result sets from each data node. In this case, all results are accumulated to generate the final query result. The second stage of aggregation is not always required for all queries. For example, a column selection query does not require a second-stage aggregation at all.</p>
<h4>REST Query Process</h4>
<p>In addition to the C/C++, Python, and JDBC interfaces, TDengine also provides a REST interface based on the HTTP protocol, which differs from using the client application programming interface. When the user uses the REST interface, all query processing is completed on the server side, and the user's application is not involved in query processing at all. After the query processing is completed, the result is returned to the client as a JSON string over HTTP.</p>
<p><center> <img src="../assets/fig2.png"> </center>
<center>Fig. 2 REST query architecture</center></p>
<p>When a client uses the HTTP-based REST query interface, the client first establishes a connection with the HTTP connector at the data node and then uses a token to ensure the reliability of the request through the REST signature mechanism. On the data node, after the request is received, the HTTP connector invokes the embedded client program to initiate query processing; the embedded client then parses the SQL statement from the HTTP connector and requests metadata from the management node as needed. After that, the embedded client sends query requests to the same data node or other nodes in the cluster and aggregates the calculation results on demand. Finally, the embedded client converts the query result into a JSON string and returns it via an HTTP response. After the HTTP connector receives the SQL request, the subsequent processing is completely consistent with query processing using the client development kit.</p>
<p>It should be noted that during the entire processing, the client application is no longer involved; it is only responsible for sending SQL requests through the HTTP protocol and receiving results in JSON format. Besides, each data node is embedded with an HTTP connector and a client, so any data node in the cluster can receive requests from a client, initiate the query, and return the result to the client through the HTTP protocol, transferring sub-requests to other data nodes as needed.</p>
<h4>Technology</h4>
<p>Because TDengine stores data and tag values separately, the tag values are kept in the management node and directly associated with each table instead of with records, resulting in a great reduction of data storage. Therefore, the tag values can be managed in a fully in-memory structure, and filtering on the tag data can drastically reduce the data size involved in the second phase of a query. The query processing for the data itself is performed at the data node. TDengine takes advantage of the immutable nature of IoT data by pre-calculating the maximum, minimum, and other statistics for each saved data block, which effectively improves query processing performance. If a query involves all the data of an entire data block, the pre-computed result is used directly, and the content of the data block is not needed. Since the disk space required to store the pre-computed results is much smaller than the data itself, using them greatly reduces disk IO and speeds up query processing.</p>
<p>TDengine employs column-oriented data storage techniques. When the data block is involved to be loaded from the disk for calculation, only the required column is read according to the query condition, and the read overhead can be minimized. The data of one column is stored in a contiguous memory block and therefore can make full use of the CPU L2 cache to greatly speed up the data scanning. Besides, TDengine utilizes the eagerly responding mechanism and returns a partial result before the complete result is acquired. For example, when the first batch of results is obtained, the data node immediately returns it directly to the client in case of a column select query. </p><a href='../index.html'>Back</a></section></main></div><?php include($s.'/footer.php'); ?><script>$('pre').addClass('prettyprint linenums');PR.prettyPrint()</script><script src='lib/docs/liner.js'></script></body></html>

File diff suppressed because it is too large Load Diff

File diff suppressed because one or more lines are too long

View File

@ -1,95 +0,0 @@
<!DOCTYPE html><html lang='en'><head><title>Documentation | Taos Data</title><meta name='description' content='TDengine is an open-source big data platform for IoT. Along with a 10x faster time-series database, it provides caching, stream computing, message queuing, and other functionalities. It is designed and optimized for Internet of Things, Connected Cars, and Industrial IoT. Read the documentation for TDengine here to get started right away.'><meta name='keywords' content='TDengine, Big Data, Open Source, IoT, Connected Cars, Industrial IoT, time-series database, caching, stream computing, message queuing, IT infrastructure monitoring, application performance monitoring, Internet of Things,TAOS Data, Documentation, programming, coding, syntax, frequently asked questions, questions, faq'><meta name='title' content='Documentation | Taos Data'><meta property='og:site_name' content='Taos Data'/><meta property='og:title' content='Documentation | Taos Data'/><meta property='og:type' content='article'/><meta property='og:url' content='https://www.taosdata.com/en/documentation/super-table/index.php'/><meta property='og:description' content='TDengine is an open-source big data platform for IoT. Along with a 10x faster time-series database, it provides caching, stream computing, message queuing, and other functionalities. It is designed and optimized for Internet of Things, Connected Cars, and Industrial IoT. Read the documentation for TDengine here to get started right away.' /><link rel='canonical' href='https://www.taosdata.com/en/documentation/super-table/index.php'/><script src='../lib/jquery-3.4.1.min.js' type='application/javascript'></script><link href='../lib/bootstrap.min.css' rel='stylesheet'><link href='../styles/base.min.css' rel='stylesheet'><link rel='stylesheet' href='../lib/docs/taosdataprettify.css'><link rel='stylesheet' href='../lib/docs/docs.css'><script src='../lib/docs/prettify.js'></script><script src='../lib/docs/prettyprint-sql.js'></script></head><body><script>$('#documentation-href').addClass('active')</script><div class='container-fluid'><main class='content-wrapper'><section class='documentation'><a href='../index.html'>Back</a><h1>STable: Super Table</h1>
<p>"One Table for One Device" design can improve the insert/query performance significantly for a single device. But it has a side effect, the aggregation of multiple tables becomes hard. To reduce the complexity and improve the efficiency, TDengine introduced a new concept: STable (Super Table). </p>
<a class='anchor' id='What-is-a-Super-Table'></a><h2>What is a Super Table</h2>
<p>A STable is an abstraction and a template for a type of device. A STable contains a set of devices (tables) that have the same schema or data structure. Besides the shared schema, a STable has a set of tags, like the model, serial number, and so on. Tags are used to record the static attributes of the devices and to group a set of devices (tables) for aggregation. Tags are metadata of a table and can be added, deleted, or changed. </p>
<p>TDengine does not save tags as a part of the data points collected. Instead, tags are saved as metadata. Each table has a set of tags. To improve query performance, tags are all cached and indexed. One table can only belong to one STable, but one STable may contain many tables. </p>
<p>Like a table, you can create, show, delete and describe STables. Most query operations on tables can be applied to STables too, including the aggregation and selector functions. For queries on a STable, if no tag filter is given, the operations are applied to all the tables created via this STable. If there is a tag filter, the operations are applied only to the subset of tables that satisfy the tag filter conditions. This makes it very convenient to use tags to put devices into different groups for aggregation.</p>
<a class='anchor' id='Create-a-STable'></a><h2>Create a STable</h2>
<p>Similar to creating a standard table, the syntax is: </p>
<pre><code class="mysql language-mysql">CREATE TABLE &lt;stable_name&gt; (&lt;field_name&gt; TIMESTAMP, field_name1 field_type, ...) TAGS(tag_name tag_type, ...)</code></pre>
<p>New keyword "tags" is introduced, where tag_name is the tag name, and tag_type is the associated data type. </p>
<p>Note: </p>
<ol>
<li>The total size of all tags together shall be less than 512 bytes </li>
<li>A tag's data type cannot be timestamp or nchar</li>
<li>A tag name shall be different from any field name</li>
<li>A tag name shall not be a system keyword</li>
<li>The maximum number of tags is 6 </li>
</ol>
<p>For example:</p>
<pre><code class="mysql language-mysql">create table thermometer (ts timestamp, degree float)
tags (location binary(20), type int)</code></pre>
<p>The above statement creates a STable thermometer with two tags, "location" and "type".</p>
<a class='anchor' id='Create-a-Table-via-STable'></a><h2>Create a Table via STable</h2>
<p>To create a table for a device, you can use a STable as its template and assign the tag values. The syntax is:</p>
<pre><code class="mysql language-mysql">CREATE TABLE &lt;tb_name&gt; USING &lt;stb_name&gt; TAGS (tag_value1,...)</code></pre>
<p>You can create any number of tables via a STable, and each table may have different tag values. For example, the following creates five tables via the STable thermometer:</p>
<pre><code class="mysql language-mysql"> create table t1 using thermometer tags ('beijing', 10);
create table t2 using thermometer tags ('beijing', 20);
create table t3 using thermometer tags ('shanghai', 10);
create table t4 using thermometer tags ('shanghai', 20);
create table t5 using thermometer tags ('new york', 10);</code></pre>
<a class='anchor' id='Aggregate-Tables-via-STable'></a><h2>Aggregate Tables via STable</h2>
<p>You can group a set of tables together by specifying the tag filter condition, then apply aggregation operations. The result set can be grouped and ordered based on tag values. The syntax is:</p>
<pre><code class="mysql language-mysql">SELECT function&lt;field_name&gt;, ...
FROM &lt;stable_name&gt;
WHERE &lt;tag_name&gt; &lt;[=|&lt;=|&gt;=|&lt;&gt;] values..&gt; ([AND|OR] ...
INTERVAL (&lt;time range&gt;)
GROUP BY &lt;tag_name&gt;, &lt;tag_name&gt; ...
ORDER BY &lt;tag_name&gt; &lt;asc|desc&gt;
SLIMIT &lt;group_limit&gt;
SOFFSET &lt;group_offset&gt;
LIMIT &lt;record_limit&gt;
OFFSET &lt;record_offset&gt;</code></pre>
<p>For the time being, STable supports only the following aggregation/selection functions: <em>sum, count, avg, first, last, min, max, top, bottom</em>, and the projection operations, with the same syntax as a standard table. Arithmetic operations are not supported, and neither are embedded queries. </p>
<p><em>INTERVAL</em> is used for the aggregation over a time range.</p>
<p>If <em>GROUP BY</em> is not used, the aggregation is applied to all the selected tables, and the result set is output in ascending order of the timestamp, but you can use "<em>ORDER BY _c0 ASC|DESC</em>" to specify the order you like. </p>
<p>If <em>GROUP BY <tag_name></em> is used, the aggregation is applied to groups based on tags. Each group is aggregated independently. Result set is a group of aggregation results. The group order is decided by <em>ORDER BY <tag_name></em>. Inside each group, the result set is in the ascending order of the time stamp. </p>
<p><em>SLIMIT/SOFFSET</em> are used to limit the number of groups and starting group number.</p>
<p><em>LIMIT/OFFSET</em> are used to limit the number of records in a group and the starting rows.</p>
<a class='anchor' id='Example-1:'></a><h3>Example 1:</h3>
<p>Check the average, maximum, and minimum temperatures of Beijing and Shanghai, and group the result set by location and type. The SQL statement shall be:</p>
<pre><code class="mysql language-mysql">SELECT COUNT(*), AVG(degree), MAX(degree), MIN(degree)
FROM thermometer
WHERE location='beijing' or location='shanghai'
GROUP BY location, type </code></pre>
<a class='anchor' id='Example-2:'></a><h3>Example 2:</h3>
<p>List the number of records, average, maximum, and minimum temperature every 10 minutes for the past 24 hours for all the thermometers located in Beijing with type 10. The SQL statement shall be:</p>
<pre><code class="mysql language-mysql">SELECT COUNT(*), AVG(degree), MAX(degree), MIN(degree)
FROM thermometer
WHERE location='beijing' and type=10 and ts&gt;=now-1d
INTERVAL(10M)</code></pre>
<a class='anchor' id='Create-Table-Automatically'></a><h2>Create Table Automatically</h2>
<p>Insert operation will fail if the table is not created yet. But for STable, TDengine can create the table automatically if the application provides the STable name, table name and tags' value when inserting data points. The syntax is:</p>
<pre><code class="mysql language-mysql">INSERT INTO &lt;tb_name&gt; USING &lt;stb_name&gt; TAGS (&lt;tag1_value&gt;, ...) VALUES (field_value, ...) (field_value, ...) ... &lt;tb_name2&gt; USING &lt;stb_name2&gt; TAGS(&lt;tag1_value2&gt;, ...) VALUES (&lt;field1_value1&gt;, ...) ...;</code></pre>
<p>When inserting data points into table tb_name, the system will check whether table tb_name has been created. If it is already created, the data points will be inserted as usual. But if the table has not been created yet, the system will create the table tb_name using the STable stb_name as the template, with the given tags. Multiple tables can be specified in the SQL statement. </p>
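<p>For example, using the STable thermometer defined earlier (the table name t6 below is a hypothetical new table):</p>
<pre><code class="mysql language-mysql">INSERT INTO t6 USING thermometer TAGS ('beijing', 30) VALUES (now, 20.5)</code></pre>
<p>If t6 does not exist yet, it is created from thermometer with the given tag values before the record is inserted.</p>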
<a class='anchor' id='Management-of-STables'></a><h2>Management of STables</h2>
<p>After you create a STable, you can describe, delete, and change it. This section lists all the supported operations.</p>
<a class='anchor' id='Show-STables-in-current-DB'></a><h3>Show STables in current DB</h3>
<pre><code class="mysql language-mysql">show stables;</code></pre>
<p>It lists all STables in the current DB, including the name, creation time, number of fields, number of tags, and number of tables created via this STable. </p>
<a class='anchor' id='Describe-a-STable'></a><h3>Describe a STable</h3>
<pre><code class="mysql language-mysql">DESCRIBE &lt;stable_name&gt;</code></pre>
<p>It lists the STable's schema and tags</p>
<a class='anchor' id='Drop-a-STable'></a><h3>Drop a STable</h3>
<pre><code class="mysql language-mysql">DROP TABLE &lt;stable_name&gt;</code></pre>
<p>Deleting a STable also deletes all the tables created via it.</p>
<a class='anchor' id='List-the-Associated-Tables-of-a-STable'></a><h3>List the Associated Tables of a STable</h3>
<pre><code class="mysql language-mysql">SELECT TBNAME,[TAG_NAME, ...] FROM &lt;stable_name&gt; WHERE &lt;tag_name&gt; &lt;[=|=&lt;|&gt;=|&lt;&gt;] values..&gt; ([AND|OR] ...)</code></pre>
<p>It will list all the tables which satisfy the tag filter conditions. The tables are all created from this specific STable. TBNAME is a new keyword introduced, it is the table name associated with the STable. </p>
<pre><code class="mysql language-mysql">SELECT COUNT(TBNAME) FROM &lt;stable_name&gt; WHERE &lt;tag_name&gt; &lt;[=|=&lt;|&gt;=|&lt;&gt;] values..&gt; ([AND|OR] ...)</code></pre>
<p>The above SQL statement will list the number of tables in a STable, which satisfy the filter condition.</p>
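<p>For example, based on the STable thermometer created earlier, the following statements list the names and locations of all type-10 thermometers, and count them:</p>
<pre><code class="mysql language-mysql">SELECT TBNAME, location FROM thermometer WHERE type=10
SELECT COUNT(TBNAME) FROM thermometer WHERE type=10</code></pre>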
<a class='anchor' id='Management-of-Tags'></a><h2>Management of Tags</h2>
<p>You can add, delete and change the tags for a STable, and you can change the tag value of a table. The SQL commands are listed below. </p>
<a class='anchor' id='Add-a-Tag'></a><h3>Add a Tag</h3>
<pre><code class="mysql language-mysql">ALTER TABLE &lt;stable_name&gt; ADD TAG &lt;new_tag_name&gt; &lt;TYPE&gt;</code></pre>
<p>It adds a new tag to the STable with a data type. The maximum number of tags is 6. </p>
<a class='anchor' id='Drop-a-Tag'></a><h3>Drop a Tag</h3>
<pre><code class="mysql language-mysql">ALTER TABLE &lt;stable_name&gt; DROP TAG &lt;tag_name&gt;</code></pre>
<p>It drops a tag from a STable. The first tag cannot be deleted, and there must be at least one tag left.</p>
<a class='anchor' id='Change-a-Tag's-Name'></a><h3>Change a Tag's Name</h3>
<pre><code class="mysql language-mysql">ALTER TABLE &lt;stable_name&gt; CHANGE TAG &lt;old_tag_name&gt; &lt;new_tag_name&gt;</code></pre>
<p>It changes the name of a tag from old to new. </p>
<a class='anchor' id='Change-the-Tag's-Value'></a><h3>Change the Tag's Value</h3>
<pre><code class="mysql language-mysql">ALTER TABLE &lt;table_name&gt; SET TAG &lt;tag_name&gt;=&lt;new_tag_value&gt;</code></pre>
<p>It changes a table's tag value to a new one. </p><a href='../index.html'>Back</a></section></main></div><?php include($s.'/footer.php'); ?><script>$('pre').addClass('prettyprint linenums');PR.prettyPrint()</script><script src='lib/docs/liner.js'></script></body></html>

View File

@ -1,423 +0,0 @@
<!DOCTYPE html><html lang='en'><head><title>Documentation | Taos Data</title><meta name='description' content='TDengine is an open-source big data platform for IoT. Along with a 10x faster time-series database, it provides caching, stream computing, message queuing, and other functionalities. It is designed and optimized for Internet of Things, Connected Cars, and Industrial IoT. Read the documentation for TDengine here to get started right away.'><meta name='keywords' content='TDengine, Big Data, Open Source, IoT, Connected Cars, Industrial IoT, time-series database, caching, stream computing, message queuing, IT infrastructure monitoring, application performance monitoring, Internet of Things,TAOS Data, Documentation, programming, coding, syntax, frequently asked questions, questions, faq'><meta name='title' content='Documentation | Taos Data'><meta property='og:site_name' content='Taos Data'/><meta property='og:title' content='Documentation | Taos Data'/><meta property='og:type' content='article'/><meta property='og:url' content='https://www.taosdata.com/en/documentation/taos-sql/index.php'/><meta property='og:description' content='TDengine is an open-source big data platform for IoT. Along with a 10x faster time-series database, it provides caching, stream computing, message queuing, and other functionalities. It is designed and optimized for Internet of Things, Connected Cars, and Industrial IoT. Read the documentation for TDengine here to get started right away.' /><link rel='canonical' href='https://www.taosdata.com/en/documentation/taos-sql/index.php'/><script src='../lib/jquery-3.4.1.min.js' type='application/javascript'></script><link href='../lib/bootstrap.min.css' rel='stylesheet'><link href='../styles/base.min.css' rel='stylesheet'><link rel='stylesheet' href='../lib/docs/taosdataprettify.css'><link rel='stylesheet' href='../lib/docs/docs.css'><script src='../lib/docs/prettify.js'></script><script src='../lib/docs/prettyprint-sql.js'></script></head><body><script>$('#documentation-href').addClass('active')</script><div class='container-fluid'><main class='content-wrapper'><section class='documentation'><a href='../index.html'>Back</a><h1>TAOS SQL</h1>
<p>TDengine provides a SQL like query language to insert or query data. You can execute the SQL statements through TDengine Shell, or through C/C++, Java(JDBC), Python, Restful, Go APIs to interact with the <code>taosd</code> service.</p>
<p>Before reading through, please have a look at the conventions used for syntax descriptions here in this documentation.</p>
<ul>
<li>Squared brackets ("[]") indicate optional arguments or clauses</li>
<li>Curly braces ("{}") indicate that one member from a set of choices in the braces must be chosen</li>
<li>A single vertical line ("|") works as a separator for multiple optional arguments or clauses</li>
<li>Dots ("…") mean the preceding element can be repeated</li>
</ul>
<a class='anchor' id='Data-Types'></a><h2>Data Types</h2>
<a class='anchor' id='Timestamp'></a><h3>Timestamp</h3>
<p>The timestamp is the most important data type in TDengine. The first column of each table must be of <strong><code>TIMESTAMP</code></strong> type, but other columns can also be of <strong><code>TIMESTAMP</code></strong> type. The following rules apply to timestamps: </p>
<ul>
<li><p>String Format: <code>'YYYY-MM-DD HH:mm:ss.MS'</code>, which represents the year, month, day, hour, minute, second, and millisecond. For example,<code>'2017-08-12 18:52:58.128'</code> is a valid timestamp string. Note: a timestamp string must be quoted by either single quotes or double quotes. </p></li>
<li><p>Epoch Time: a timestamp value can also be a long integer representing milliseconds since the epoch. For example, the value in the above example can be represented as the epoch <code>1502535178128</code> in milliseconds. Please note that epoch time doesn't need any quotes.</p></li>
<li><p>Internal function <strong><code>NOW</code></strong>: the current time of the server</p></li>
<li><p>If the timestamp is 0 when inserting a record, the timestamp will be set to the current time of the server</p></li>
<li><p>Arithmetic operations can be applied to timestamps. For example: <code>now-2h</code> represents a timestamp 2 hours before the current server time. Units include <code>a</code> (milliseconds), <code>s</code> (seconds), <code>m</code> (minutes), <code>h</code> (hours), <code>d</code> (days), <code>w</code> (weeks), <code>n</code> (months), <code>y</code> (years). <strong><code>NOW</code></strong> can be used in either insertions or queries; see the example after this list. </p></li>
</ul>
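<p>For example, the following query (the table name tb_name is a placeholder) selects all records from the last two hours:</p>
<pre><code class="mysql language-mysql">SELECT * FROM tb_name WHERE ts &gt;= now - 2h</code></pre>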
<p>The default time precision is millisecond; you can change it to microsecond by setting the parameter enableMicrosecond in <a href="../administrator/#Configuration-on-Server">system configuration</a>. For epoch time, the long integer shall then be microseconds since the epoch. For the above string format, MS shall be six digits. </p>
<a class='anchor' id='Data-Types'></a><h3>Data Types</h3>
<p>The full list of data types is listed below. For string types of data, we will use <strong><em>M</em></strong> to indicate the maximum length of that type.</p>
<figure><table>
<thead>
<tr>
<th></th>
<th style="text-align:center;">Data Type</th>
<th style="text-align:center;">Bytes</th>
<th>Note</th>
</tr>
</thead>
<tbody>
<tr>
<td>1</td>
<td style="text-align:center;">TINYINT</td>
<td style="text-align:center;">1</td>
<td>A nullable integer type with a range of [-127, 127]</td>
</tr>
<tr>
<td>2</td>
<td style="text-align:center;">SMALLINT</td>
<td style="text-align:center;">2</td>
<td>A nullable integer type with a range of [-32767, 32767]</td>
</tr>
<tr>
<td>3</td>
<td style="text-align:center;">INT</td>
<td style="text-align:center;">4</td>
<td>A nullable integer type with a range of [-2^31+1, 2^31-1 ]</td>
</tr>
<tr>
<td>4</td>
<td style="text-align:center;">BIGINT</td>
<td style="text-align:center;">8</td>
<td>A nullable integer type with a range of [-2^59, 2^59 ]</td>
</tr>
<tr>
<td>5</td>
<td style="text-align:center;">FLOAT</td>
<td style="text-align:center;">4</td>
<td>A standard nullable float type with 6-7 significant digits and a range of [-3.4E38, 3.4E38]</td>
</tr>
<tr>
<td>6</td>
<td style="text-align:center;">DOUBLE</td>
<td style="text-align:center;">8</td>
<td>A standard nullable double float type with 15-16 significant digits and a range of [-1.7E308, 1.7E308]</td>
</tr>
<tr>
<td>7</td>
<td style="text-align:center;">BOOL</td>
<td style="text-align:center;">1</td>
<td>A nullable boolean type, [<strong><code>true</code></strong>, <strong><code>false</code></strong>]</td>
</tr>
<tr>
<td>8</td>
<td style="text-align:center;">TIMESTAMP</td>
<td style="text-align:center;">8</td>
<td>A nullable timestamp type with the same usage as the primary column timestamp</td>
</tr>
<tr>
<td>9</td>
<td style="text-align:center;">BINARY(<em>M</em>)</td>
<td style="text-align:center;"><em>M</em></td>
<td>A nullable string type whose length is <em>M</em>; any exceeding chars will be automatically truncated. This type of string only supports ASCII-encoded chars.</td>
</tr>
<tr>
<td>10</td>
<td style="text-align:center;">NCHAR(<em>M</em>)</td>
<td style="text-align:center;">4 * <em>M</em></td>
<td>A nullable string type whose length is <em>M</em>, any exceeded chars will be truncated. The <strong><code>NCHAR</code></strong> type supports Unicode encoded chars.</td>
</tr>
</tbody>
</table></figure>
<p>All the keywords in a SQL statement are case-insensitive, but string values are case-sensitive and must be quoted by a pair of <code>'</code> or <code>"</code>. To quote a <code>'</code> or a <code>"</code>, you can use the escape character <code>\</code>.</p>
<a class='anchor' id='Database-Management'></a><h2>Database Management</h2>
<ul>
<li><p><strong>Create a Database</strong></p>
<pre><code class="mysql language-mysql">CREATE DATABASE [IF NOT EXISTS] db_name [KEEP keep]</code></pre>
<p>Option: <code>KEEP</code> is used for the data retention policy. Data records will be removed once they are older than keep days; see the example after this list. There are more parameters related to DB storage, please check <a href="../administrator/#Configuration-on-Server">system configuration</a>.</p></li>
<li><p><strong>Use a Database</strong></p>
<pre><code class="mysql language-mysql">USE db_name</code></pre>
<p>Use or switch the current database.</p></li>
<li><p><strong>Drop a Database</strong></p>
<pre><code class="mysql language-mysql">DROP DATABASE [IF EXISTS] db_name</code></pre>
<p>Remove a database; all the tables inside the DB will be removed too, so be careful.</p></li>
<li><p><strong>List all Databases</strong></p>
<pre><code class="mysql language-mysql">SHOW DATABASES</code></pre></li>
</ul>
<a class='anchor' id='Table-Management'></a><h2>Table Management</h2>
<ul>
<li><p><strong>Create a Table</strong></p>
<pre><code class="mysql language-mysql">CREATE TABLE [IF NOT EXISTS] tb_name (timestamp_field_name TIMESTAMP, field1_name data_type1 [, field2_name data_type2 ...])</code></pre>
<p>Note: 1) the first column must be a timestamp, and the system will set it as the primary key; 2) the record size is limited to 4096 bytes; 3) for binary or nchar data types, the length shall be specified; for example, binary(20) means 20 bytes.</p></li>
<li><p><strong>Drop a Table</strong></p>
<pre><code class="mysql language-mysql">DROP TABLE [IF EXISTS] tb_name</code></pre></li>
<li><p><strong>List all Tables</strong></p>
<pre><code class="mysql language-mysql">SHOW TABLES [LIKE tb_name_wildcard]</code></pre>
<p>It shows all tables in the current DB. Note: wildcard characters can be used in the table name to filter tables. Wildcard characters: 1) % means 0 to any number of characters; 2) _ (underscore) means exactly one character.</p></li>
<li><p><strong>Print Table Schema</strong></p>
<pre><code class="mysql language-mysql">DESCRIBE tb_name</code></pre></li>
<li><p><strong>Add a Column</strong></p>
<pre><code class="mysql language-mysql">ALTER TABLE tb_name ADD COLUMN field_name data_type</code></pre></li>
<li><p><strong>Drop a Column</strong></p>
<pre><code class="mysql language-mysql">ALTER TABLE tb_name DROP COLUMN field_name </code></pre>
<p>If the table is created via a super table (STable), the schema can only be changed via the STable. But for tables not created from a STable, you can change their schema directly.</p></li>
</ul>
<p><strong>Tips</strong>: You can apply an operation to a table not in the current DB by concatenating the DB name with the character '.', then with the table name. For example, 'demo.tb1' means the operation is applied to table <code>tb1</code> in DB <code>demo</code>, even though <code>demo</code> is not the currently selected DB. </p>
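<p>For example, assuming a database demo exists but is not the current one:</p>
<pre><code class="mysql language-mysql">SELECT * FROM demo.tb1</code></pre>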
<a class='anchor' id='Inserting-Records'></a><h2>Inserting Records</h2>
<ul>
<li><p><strong>Insert a Record</strong></p>
<pre><code class="mysql language-mysql">INSERT INTO tb_name VALUES (field_value, ...);</code></pre>
<p>Insert a data record into table tb_name</p></li>
<li><p><strong>Insert a Record with Selected Columns</strong></p>
<pre><code class="mysql language-mysql">INSERT INTO tb_name (field1_name, ...) VALUES(field1_value, ...)</code></pre>
<p>Insert a data record into table tb_name, with data in selected columns. If a column is not selected, the system will put NULL there. The first column (timestamp) cannot be NULL; it must be provided.</p></li>
<li><p><strong>Insert a Batch of Records</strong></p>
<pre><code class="mysql language-mysql">INSERT INTO tb_name VALUES (field1_value1, ...) (field1_value2, ...)...;</code></pre>
<p>Insert multiple data records to the table</p></li>
<li><p><strong>Insert a Batch of Records with Selected Columns</strong></p>
<pre><code class="mysql language-mysql">INSERT INTO tb_name (field1_name, ...) VALUES(field1_value1, ...) (field1_value2, ...)</code></pre></li>
<li><p><strong>Insert Records into Multiple Tables</strong></p>
<pre><code class="mysql language-mysql">INSERT INTO tb1_name VALUES (field1_value1, ...)(field1_value2, ...)...
tb2_name VALUES (field1_value1, ...)(field1_value2, ...)...;</code></pre>
<p>Insert data records into table tb1_name and tb2_name</p></li>
<li><p><strong>Insert Records into Multiple Tables with Selected Columns</strong></p>
<pre><code class="mysql language-mysql">INSERT INTO tb1_name (tb1_field1_name, ...) VALUES (field1_value1, ...) (field1_value1, ...)
tb2_name (tb2_field1_name, ...) VALUES(field1_value1, ...) (field1_value2, ...)</code></pre></li>
</ul>
<p>Note: For a table, a new record must have a timestamp greater than that of the last existing record; otherwise, it will be discarded. If the timestamp is 0, it will be set to the system time on the server.</p>
<p><strong>IMPORT</strong>: If you do want to insert a historical data record into a table, use the IMPORT command instead of INSERT. IMPORT has the same syntax as INSERT. If you want to import a batch of historical records, the records shall be ordered by timestamp; otherwise, TDengine will not handle them correctly.</p>
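<p>For example, a sketch of importing two historical records in timestamp order (the table and values are hypothetical):</p>
<pre><code class="mysql language-mysql">IMPORT INTO tb_name VALUES ('2019-07-14 08:00:00.000', 10) ('2019-07-14 09:00:00.000', 20)</code></pre>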
<a class='anchor' id='Data-Query'></a><h2>Data Query</h2>
<a class='anchor' id='Query-Syntax:'></a><h3>Query Syntax:</h3>
<pre><code class="mysql language-mysql">SELECT {* | expr_list} FROM tb_name
[WHERE where_condition]
[ORDER BY _c0 { DESC | ASC }]
[LIMIT limit [, OFFSET offset]]
[&gt;&gt; export_file]
SELECT function_list FROM tb_name
[WHERE where_condition]
[LIMIT limit [, OFFSET offset]]
[&gt;&gt; export_file]</code></pre>
<ul>
<li>To query a table, use <code>*</code> to select all data from a table; or a specified list of expressions <code>expr_list</code> of columns. The SQL expression can contain alias and arithmetic operations between numeric typed columns.</li>
<li>For the <code>WHERE</code> conditions, use logical operations to filter the timestamp column and all numeric columns, and wild cards to filter the two string typed columns. </li>
<li>Sort the result set by the first column, timestamp <code>_c0</code> (or directly use the timestamp column name), in either descending or ascending (default) order. "Order by" cannot be applied to other columns.</li>
<li>Use <code>LIMIT</code> and <code>OFFSET</code> to control the number of rows returned and the starting position of the retrieved rows. LIMIT/OFFSET is applied after "order by" operations.</li>
<li>Export the retrieved result set into a CSV file using <code>&gt;&gt;</code>. The target file's full path should be explicitly specified in the statement.</li>
</ul>
<a class='anchor' id='Supported-Operations-of-Data-Filtering:'></a><h3>Supported Operations of Data Filtering:</h3>
<figure><table>
<thead>
<tr>
<th>Operation</th>
<th>Note</th>
<th>Applicable Data Types</th>
</tr>
</thead>
<tbody>
<tr>
<td>&gt;</td>
<td>larger than</td>
<td><strong><code>timestamp</code></strong> and all numeric types</td>
</tr>
<tr>
<td>&lt;</td>
<td>smaller than</td>
<td><strong><code>timestamp</code></strong> and all numeric types</td>
</tr>
<tr>
<td>&gt;=</td>
<td>larger than or equal to</td>
<td><strong><code>timestamp</code></strong> and all numeric types</td>
</tr>
<tr>
<td>&lt;=</td>
<td>smaller than or equal to</td>
<td><strong><code>timestamp</code></strong> and all numeric types</td>
</tr>
<tr>
<td>=</td>
<td>equal to</td>
<td>all types</td>
</tr>
<tr>
<td>&lt;&gt;</td>
<td>not equal to</td>
<td>all types</td>
</tr>
<tr>
<td>%</td>
<td>match with any char sequences</td>
<td><strong><code>binary</code></strong> <strong><code>nchar</code></strong></td>
</tr>
<tr>
<td>_</td>
<td>match with a single char</td>
<td><strong><code>binary</code></strong> <strong><code>nchar</code></strong></td>
</tr>
</tbody>
</table></figure>
<ol>
<li>For two or more conditions, only AND is supported, OR is not supported yet.</li>
<li>For filtering, only a single range is supported. For example, <code>value&gt;20 and value&lt;30</code> is a valid condition, but <code>value&lt;20 AND value&lt;&gt;5</code> is invalid</li>
</ol>
<a class='anchor' id='Some-Examples'></a><h3>Some Examples</h3>
<ul>
<li><p>For the examples below, table tb1 is created via the following statements</p>
<pre><code class="mysql language-mysql">CREATE TABLE tb1 (ts timestamp, col1 int, col2 float, col3 binary(50))</code></pre></li>
<li><p>Query all the records in tb1 in the last hour:</p>
<pre><code class="mysql language-mysql">SELECT * FROM tb1 WHERE ts &gt;= NOW - 1h</code></pre></li>
<li><p>Query all the records in tb1 between 2018-06-01 08:00:00.000 and 2018-06-02 08:00:00.000, and filter out only the records whose col3 value ends with 'nny', and sort the records by their timestamp in a descending order:</p>
<pre><code class="mysql language-mysql">SELECT * FROM tb1 WHERE ts &gt; '2018-06-01 08:00:00.000' AND ts &lt;= '2018-06-02 08:00:00.000' AND col3 LIKE '%nny' ORDER BY ts DESC</code></pre></li>
<li><p>Query the sum of col1 and col2 as alias 'complex_metric', and filter on the timestamp and col2 values. Limit the number of returned rows to 10, and offset the result by 5.</p>
<pre><code class="mysql language-mysql">SELECT (col1 + col2) AS 'complex_metric' FROM tb1 WHERE ts &gt; '2018-06-01 08:00:00.000' and col2 &gt; 1.2 LIMIT 10 OFFSET 5</code></pre></li>
<li><p>Query the number of records in tb1 in the last 10 minutes, whose col2 value is larger than 3.14, and export the result to file <code>/home/testoutput.csv</code>.</p>
<pre><code class="mysql language-mysql">SELECT COUNT(*) FROM tb1 WHERE ts &gt;= NOW - 10m AND col2 &gt; 3.14 &gt;&gt; /home/testoutput.csv</code></pre></li>
</ul>
<a class='anchor' id='SQL-Functions'></a><h2>SQL Functions</h2>
<a class='anchor' id='Aggregation-Functions'></a><h3>Aggregation Functions</h3>
<p>TDengine supports aggregations over numerical values, as listed below; a combined usage sketch follows the list:</p>
<ul>
<li><p><strong>COUNT</strong></p>
<pre><code class="mysql language-mysql">SELECT COUNT([*|field_name]) FROM tb_name [WHERE clause]</code></pre>
<p>Function: return the number of rows.<br />
Return Data Type: integer.<br />
Applicable Data Types: all.<br />
Applied to: table/STable.<br />
Note: 1) * can be used for all columns, as long as a column has non-NULL value, it will be counted; 2) If it is on a specific column, only rows with non-NULL value will be counted </p></li>
<li><p><strong>AVG</strong></p>
<pre><code class="mysql language-mysql">SELECT AVG(field_name) FROM tb_name [WHERE clause]</code></pre>
<p>Function: return the average value of a specific column.<br />
Return Data Type: double.<br />
Applicable Data Types: all types except timestamp, binary, nchar, bool.<br />
Applied to: table/STable. </p></li>
<li><p><strong>WAVG</strong></p>
<pre><code class="mysql language-mysql">SELECT WAVG(field_name) FROM tb_name WHERE clause</code></pre>
<p>Function: return the time-weighted average value of a specific column<br />
Return Data Type: double<br />
Applicable Data Types: all types except timestamp, binary, nchar, bool<br />
Applied to: table/STable</p></li>
<li><p><strong>SUM</strong></p>
<pre><code class="mysql language-mysql">SELECT SUM(field_name) FROM tb_name [WHERE clause]</code></pre>
<p>Function: return the sum of a specific column.<br />
Return Data Type: long integer or double.<br />
Applicable Data Types: all types except timestamp, binary, nchar, bool.<br />
Applied to: table/STable. </p></li>
<li><p><strong>STDDEV</strong></p>
<pre><code class="mysql language-mysql">SELECT STDDEV(field_name) FROM tb_name [WHERE clause]</code></pre>
<p>Function: return the standard deviation of a specific column.<br />
Return Data Type: double.<br />
Applicable Data Types: all types except timestamp, binary, nchar, bool.<br />
Applied to: table. </p></li>
<li><p><strong>LEASTSQUARES</strong></p>
<pre><code class="mysql language-mysql">SELECT LEASTSQUARES(field_name) FROM tb_name [WHERE clause]</code></pre>
<p>Function: performs a linear fit of the specified column to the primary timestamp.
Return Data Type: a string of the coefficient and the intercept of the fitted line.<br />
Applicable Data Types: all types except timestamp, binary, nchar, bool.<br />
Applied to: table.<br />
Note: The timestamp is taken as the independent variable while the specified column value is taken as the dependent variable.</p></li>
</ul>
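<p>A short combined sketch of the aggregation functions above, assuming a hypothetical table sensor (ts timestamp, val float):</p>
<pre><code class="mysql language-mysql">SELECT COUNT(*), AVG(val), SUM(val), STDDEV(val) FROM sensor WHERE ts &gt;= now - 1d</code></pre>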
<a class='anchor' id='Selector-Functions'></a><h3>Selector Functions</h3>
<ul>
<li><p><strong>MIN</strong></p>
<pre><code class="mysql language-mysql">SELECT MIN(field_name) FROM {tb_name | stb_name} [WHERE clause]</code></pre>
<p>Function: return the minimum value of a specific column.<br />
Return Data Type: the same data type.<br />
Applicable Data Types: all types except timestamp, binary, nchar, bool.<br />
Applied to: table/STable. </p></li>
<li><p><strong>MAX</strong></p>
<pre><code class="mysql language-mysql">SELECT MAX(field_name) FROM { tb_name | stb_name } [WHERE clause]</code></pre>
<p>Function: return the maximum value of a specific column.<br />
Return Data Type: the same data type.<br />
Applicable Data Types: all types except timestamp, binary, nchar, bool.<br />
Applied to: table/STable. </p></li>
<li><p><strong>FIRST</strong></p>
<pre><code class="mysql language-mysql">SELECT FIRST(field_name) FROM { tb_name | stb_name } [WHERE clause]</code></pre>
<p>Function: return the first non-NULL value.<br />
Return Data Type: the same data type.<br />
Applicable Data Types: all types.<br />
Applied to: table/STable.<br />
Note: To return all columns, use first(*). </p></li>
<li><p><strong>LAST</strong></p>
<pre><code class="mysql language-mysql">SELECT LAST(field_name) FROM { tb_name | stb_name } [WHERE clause]</code></pre>
<p>Function: return the last non-NULL value.<br />
Return Data Type: the same data type.<br />
Applicable Data Types: all types.<br />
Applied to: table/STable.<br />
Note: To return all columns, use last(*). </p></li>
<li><p><strong>TOP</strong></p>
<pre><code class="mysql language-mysql">SELECT TOP(field_name, K) FROM { tb_name | stb_name } [WHERE clause]</code></pre>
<p>Function: return the <code>k</code> largest values.<br />
Return Data Type: the same data type.<br />
Applicable Data Types: all types except timestamp, binary, nchar, bool.<br />
Applied to: table/STable.<br />
Note: 1) valid range of K: 1≤<em>k</em>≤100; 2) the associated timestamp will be returned too. </p></li>
<li><p><strong>BOTTOM</strong></p>
<pre><code class="mysql language-mysql">SELECT BOTTOM(field_name, K) FROM { tb_name | stb_name } [WHERE clause]</code></pre>
<p>Function: return the <code>k</code> smallest values.<br />
Return Data Type: the same data type.<br />
Applicable Data Types: all types except timestamp, binary, nchar, bool.<br />
Applied to: table/STable.<br />
Note: 1) valid range of K: 1≤<em>k</em>≤100; 2) the associated timestamp will be returned too. </p></li>
<li><p><strong>PERCENTILE</strong></p>
<pre><code class="mysql language-mysql">SELECT PERCENTILE(field_name, P) FROM { tb_name | stb_name } [WHERE clause]</code></pre>
<p>Function: the value of the specified column below which <code>P</code> percent of the data points fall.<br />
Return Data Type: the same data type.<br />
Applicable Data Types: all types except timestamp, binary, nchar, bool.<br />
Applied to: table/STable.<br />
Note: The range of <code>P</code> is <code>[0, 100]</code>. When <code>P=0</code> , <code>PERCENTILE</code> returns the equal value as <code>MIN</code>; when <code>P=100</code>, <code>PERCENTILE</code> returns the equal value as <code>MAX</code>. </p></li>
<li><p><strong>LAST_ROW</strong></p>
<pre><code class="mysql language-mysql">SELECT LAST_ROW(field_name) FROM { tb_name | stb_name } </code></pre>
<p>Function: return the last row.<br />
Return Data Type: the same data type.<br />
Applicable Data Types: all types.<br />
Applied to: table/STable.<br />
Note: unlike LAST, LAST_ROW returns the last row even if it contains NULL values. </p></li>
</ul>
<a class='anchor' id='Transformation-Functions'></a><h3>Transformation Functions</h3>
<ul>
<li><p><strong>DIFF</strong></p>
<pre><code class="mysql language-mysql">SELECT DIFF(field_name) FROM tb_name [WHERE clause]</code></pre>
<p>Function: return the difference between successive values of the specified column.<br />
Return Data Type: the same data type.<br />
Applicable Data Types: all types except timestamp, binary, nchar, bool.<br />
Applied to: table. </p></li>
<li><p><strong>SPREAD</strong></p>
<pre><code class="mysql language-mysql">SELECT SPREAD(field_name) FROM { tb_name | stb_name } [WHERE clause]</code></pre>
<p>Function: return the difference between the maximum and the minimum value.<br />
Return Data Type: the same data type.<br />
Applicable Data Types: all types except timestamp, binary, nchar, bool.<br />
Applied to: table/STable.<br />
Note: spread gives the range of data variation in a table/supertable; it is equivalent to <code>MAX()</code> - <code>MIN()</code></p></li>
<li><p><strong>Arithmetic Operations</strong></p>
<pre><code class="mysql language-mysql">SELECT field_name [+|-|*|/|%][Value|field_name] FROM { tb_name | stb_name } [WHERE clause]</code></pre>
<p>Function: arithmetic operations on the selected columns.<br />
Return Data Type: double.<br />
Applicable Data Types: all types except timestamp, binary, nchar, bool.<br />
Applied to: table/STable.<br />
Note: 1) brackets can be used to control operation precedence; 2) If a column has a NULL value, the result is NULL. </p></li>
</ul>
<a class='anchor' id='Downsampling'></a><h2>Downsampling</h2>
<p>Time-series data are usually sampled by sensors at a very high frequency, but often we are only interested in the downsampled, aggregated data of each timeline. TDengine provides a convenient way to downsample the high-frequency data points and to fill missing data with a variety of interpolation choices.</p>
<pre><code class="mysql language-mysql">SELECT function_list FROM tb_name
[WHERE where_condition]
INTERVAL (interval)
[FILL ({NONE | VALUE | PREV | NULL | LINEAR})]
SELECT function_list FROM stb_name
[WHERE where_condition]
[GROUP BY tags]
INTERVAL (interval)
[FILL ({ VALUE | PREV | NULL | LINEAR})]</code></pre>
<p>The downsampling time window is defined by <code>interval</code>, which is at least 10 milliseconds. The query returns a new series of downsampled data that has a series of fixed timestamps with an increment of <code>interval</code>. </p>
<p>For the time being, only the functions count, avg, sum, stddev, leastsquares, percentile, min, max, first, and last are supported. Functions that may return multiple rows are not supported. </p>
<p>You can also use <code>FILL</code> to interpolate the intervals that don't contain any data. <code>FILL</code> currently supports four different interpolation strategies which are listed below:</p>
<figure><table>
<thead>
<tr>
<th>Interpolation</th>
<th>Usage</th>
</tr>
</thead>
<tbody>
<tr>
<td><code>FILL(VALUE, val1 [, val2, ...])</code></td>
<td>Interpolate with specified constants</td>
</tr>
<tr>
<td><code>FILL(PREV)</code></td>
<td>Interpolate with the value at the previous timestamp</td>
</tr>
<tr>
<td><code>FILL(LINEAR)</code></td>
<td>Linear interpolation with the non-null values at the previous timestamp and at the next timestamp</td>
</tr>
<tr>
<td><code>FILL(NULL)</code></td>
<td>Interpolate with <strong><code>NULL</code></strong> value</td>
</tr>
</tbody>
</table></figure>
<p>A few downsampling examples:</p>
<ul>
<li><p>Find the number of data points, the maximum value of <code>col1</code> and the minimum value of <code>col2</code> in table tb1 for every 10 minutes in the last 5 hours:</p>
<pre><code class="mysql language-mysql">SELECT COUNT(*), MAX(col1), MIN(col2) FROM tb1 WHERE ts &gt; NOW - 5h INTERVAL (10m)</code></pre></li>
<li><p>Fill the above downsampling results using constant-value interpolation:</p>
<pre><code class="mysql language-mysql">SELECT COUNT(*), MAX(col1), MIN(col2) FROM tb1 WHERE ts &gt; NOW - 5h INTERVAL(10m) FILL(VALUE, 0, 1, -1)</code></pre>
<p>Note that the number of constant values in <code>FILL()</code> should be equal to or fewer than the number of functions in the <code>SELECT</code> clause; extra fill constants will be ignored.</p></li>
<li><p>Fill the above downsampling results using <code>PREV</code> interpolation:</p>
<pre><code class="mysql language-mysql">SELECT COUNT(*), MAX(col1), MIN(col2) FROM tb1 WHERE ts &gt; NOW - 5h INTERVAL(10m) FILL(PREV)</code></pre>
<p>This will interpolate missing data points with the value at the previous timestamp.</p></li>
<li><p>Fill the above downsampling results using <code>NULL</code> interpolation:</p>
<pre><code class="mysql language-mysql">SELECT COUNT(*), MAX(col1), MIN(col2) FROM tb1 WHERE ts &gt; NOW - 5h INTERVAL(10m) FILL(NULL)</code></pre>
<p>Fill <strong><code>NULL</code></strong> to the interpolated data points.</p></li>
</ul>
<p>Notes:</p>
<ol>
<li><code>FILL</code> can generate a large number of interpolated data points if the interval is small and the queried time range is large. So always remember to specify a time range when using interpolation. For each query with interpolation, the result set cannot exceed 10,000,000 records.</li>
<li>The result set will always be sorted by time in ascending order.</li>
<li>If the query object is a supertable, all the functions will be applied to all the tables that satisfy the <code>WHERE</code> conditions. If the <code>GROUP BY</code> clause is also applied, the result set will be sorted in ascending order of time within each group; otherwise, the result set will be sorted in ascending order of time as a whole.</li>
</ol><a href='../index.html'>Back</a></section></main></div><?php include($s.'/footer.php'); ?><script>$('pre').addClass('prettyprint linenums');PR.prettyPrint()</script><script src='lib/docs/liner.js'></script></body></html>

17 binary image files (6.8 KiB to 162 KiB) deleted; diffs not shown.

View File

@ -1,175 +0,0 @@
# Connect with Other Tools
## Telegraf
TDengine integrates quickly with [Telegraf](https://www.influxdata.com/time-series-platform/telegraf/), an open-source data collection system; the whole process requires no code development.
### Install Telegraf
At present, TDengine supports Telegraf 1.7.4 and above. Users can download the proper package for their operating system from the Telegraf website and install it. Download link: https://portal.influxdata.com/downloads
### Configure Telegraf
Modify the TDengine-related items in the Telegraf configuration file /etc/telegraf/telegraf.conf.
In the output plugins section, add an [[outputs.http]] item:
- url: http://ip:6020/telegraf/udb, where ip is the IP address of any server in the TDengine cluster, 6020 is the port of the TDengine RESTful API, telegraf is a fixed keyword, and udb is the name of the database used to store the collected data (it can be created beforehand).
- method: "POST"
- username: the username to log in to TDengine
- password: the password to log in to TDengine
- data_format: "json"
- json_timestamp_units: "1ms"
In the agent section:
- hostname: the machine name used to distinguish different collection devices; make sure it is unique.
- metric_batch_size: 30, the maximum number of records Telegraf is allowed to write per batch; increasing it can lower the request frequency of Telegraf, but for TDengine the value must not exceed 50.
For how to collect data with Telegraf and more information on using Telegraf, please refer to the official Telegraf [documentation](https://docs.influxdata.com/telegraf/v1.11/).
## Grafana
TDengine integrates quickly with [Grafana](https://www.grafana.com/), an open-source data visualization system, to build a data monitoring and alerting system; the whole process requires no code development, and the contents of TDengine data tables can be visualized on dashboards.
### Install Grafana
At present, TDengine supports Grafana 5.2.4 and above. Users can download the proper package for their operating system from the Grafana website and install it. Download link: https://grafana.com/grafana/download.
### Configure Grafana
The TDengine Grafana plugin is in the /usr/local/taos/connector/grafana directory of the installation package.
Taking CentOS 7.2 as an example, copy the tdengine directory to /var/lib/grafana/plugins and restart Grafana.
### Use Grafana
#### Configure a Data Source
Users can log in to the Grafana server at localhost:3000 (username/password: admin/admin) and add a data source through `Configuration -> Data Sources` on the left side, as shown below:
![img](../assets/add_datasource1.jpg)
Click `Add data source` to enter the page for adding a data source; type TDengine in the search box and select it, as shown below:
![img](../assets/add_datasource2.jpg)
On the data source configuration page, modify the settings according to the default prompts:
![img](../assets/add_datasource3.jpg)
* Host: the IP address of any server in the TDengine cluster together with the TDengine RESTful API port (6020); by default http://localhost:6020.
* User: the TDengine username.
* Password: the TDengine password.
Click `Save & Test` to test the connection; on success a prompt like the following appears:
![img](../assets/add_datasource4.jpg)
#### Create a Dashboard
Back on the main page, create a dashboard and click Add Query to enter the panel query page:
![img](../assets/create_dashboard1.jpg)
As shown above, select the `TDengine` data source in Query, and enter the SQL to run in the query box below. Details:
* INPUT SQL: the statement to query (its result set should be two columns and multiple rows), for example: `select avg(mem_system) from log.dn where ts >= $from and ts < $to interval($interval)`, where from, to and interval are built-in variables of the TDengine plugin, representing the query range and time interval obtained from the Grafana panel. Besides the built-in variables, custom template variables can also be used.
* ALIAS BY: sets an alias for the current query.
* GENERATE SQL: clicking this button automatically replaces the variables and generates the statement that will finally be executed.
Following the default prompts, querying the average system memory usage at the specified interval of the server where the current TDengine deployment resides looks like this:
![img](../assets/create_dashboard2.jpg)
> For how to build monitoring views with Grafana and more information on using Grafana, please refer to the official Grafana [documentation](https://grafana.com/docs/).
#### Import a Dashboard
An importable dashboard `tdengine-grafana.json` is provided in the Grafana plugin directory /usr/local/taos/connector/grafana/tdengine/dashboard/.
Click the `Import` button on the left side and upload the `tdengine-grafana.json` file:
![img](../assets/import_dashboard1.jpg)
After the import completes, the result looks like this:
![img](../assets/import_dashboard2.jpg)
## Matlab
MatLab can connect directly to TDengine via the JDBC driver provided in the installation package and fetch data into the local workspace.
### Adapt MatLab to the TDengine JDBC Interface
Adapting MatLab takes the following steps, illustrated with MatLab2017a on Windows 10:
- Copy the driver JDBCDriver-1.0.0-dist.jar from the TDengine package to ${matlab_root}\MATLAB\R2017a\java\jar\toolbox
- Copy the file taos.lib from the TDengine package to ${matlab_ root _dir}\MATLAB\R2017a\lib\win64
- Add the newly added driver jar to the MatLab classpath by appending the following line to the file ${matlab_ root _dir}\MATLAB\R2017a\toolbox\local\classpath.txt:
`$matlabroot/java/jar/toolbox/JDBCDriver-1.0.0-dist.jar`
- Create a file javalibrarypath.txt under ${user_home}\AppData\Roaming\MathWorks\MATLAB\R2017a\ and add the path of taos.dll to it. For example, if taos.dll was copied to C:\Windows\System32 during installation, add the following line to javalibrarypath.txt:
`C:\Windows\System32`
### Connect to TDengine in MatLab and Fetch Data
After the above configuration succeeds, open MatLab.
- Create a connection:
`conn = database('db', 'root', 'taosdata', 'com.taosdata.jdbc.TSDBDriver', 'jdbc:TSDB://127.0.0.1:0/')`
- Run a query:
`sql0 = ['select * from tb']`
`data = select(conn, sql0);`
- Insert a record:
`sql1 = ['insert into tb values (now, 1)']`
`exec(conn, sql1)`
For more examples and details, please refer to the file examples\Matlab\TDengineDemo.m in the installation package.
## R
R can connect to the TDengine database through the JDBC interface. First install the R JDBC package: start the R environment and run the following command:
```R
install.packages('RJDBC', repos='http://cran.us.r-project.org')
```
After the installation completes, load the _RJDBC_ package by running the `library('RJDBC')` command.
Then load the TDengine JDBC driver:
```R
drv<-JDBC("com.taosdata.jdbc.TSDBDriver","JDBCDriver-1.0.0-dist.jar", identifier.quote="\"")
```
If it succeeds, no error message appears. Then try to connect to the database with the following command:
```R
conn<-dbConnect(drv,"jdbc:TSDB://192.168.0.1:0/?user=root&password=taosdata","root","taosdata")
```
Note: replace the IP address in the command above with the correct one. If no error message is shown, the connection succeeded; otherwise, adjust the command according to the error prompt. TDengine supports the following functions in the _RJDBC_ package:
- dbWriteTable(conn, "test", iris, overwrite=FALSE, append=TRUE): write the data frame iris to the table test; overwrite must be set to FALSE, append must be TRUE, and the schema of the data frame iris must match the table test.
- dbGetQuery(conn, "select count(*) from test"): run a query statement
- dbSendUpdate(conn, "use db"): run any non-query SQL statement. For example, dbSendUpdate(conn, "use db"), or insert data with dbSendUpdate(conn, "insert into t1 values(now, 99)").
- dbReadTable(conn, "test"): read all the data in table test
- dbDisconnect(conn): close the connection
- dbRemoveTable(conn, "test"): drop the table test
The TDengine client does not yet support the following functions:
- dbExistsTable(conn, "test"): whether table test exists
- dbListTables(conn): list all tables in the connection

View File

@ -1,167 +0,0 @@
# Connect with other tools
## Telegraf
TDengine is easy to integrate with [Telegraf](https://www.influxdata.com/time-series-platform/telegraf/), an open-source server agent for collecting and sending metrics and events, without any code development.
### Install Telegraf
At present, TDengine supports Telegraf newer than version 1.7.4. Users can go to the [download link] and choose the proper package to install on your system.
### Configure Telegraf
Telegraf is configured by changing items in the configuration file */etc/telegraf/telegraf.conf*.
In the **output plugins** section, add an _[[outputs.http]]_ item (a combined sketch of the resulting configuration follows this list):
- _url_: http://ip:6020/telegraf/udb, in which _ip_ is the IP address of any node in the TDengine cluster. Port 6020 is the RESTful API port used by TDengine. _udb_ is the name of the database used to save the data, which needs to be created beforehand.
- _method_: "POST"
- _username_: username to login TDengine
- _password_: password to login TDengine
- _data_format_: "json"
- _json_timestamp_units_: "1ms"
In the **agent** section:
- hostname: used to distinguish different machines. It needs to be unique.
- metric_batch_size: 30, the maximum number of records Telegraf is allowed to write per batch. The larger the value is, the less frequently requests are sent. For TDengine, the value should not exceed 50.
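As a combined sketch, the relevant sections of */etc/telegraf/telegraf.conf* might look as follows; the hostname, server address and credentials are example values (root/taosdata are the TDengine defaults used elsewhere in this documentation):
```
[agent]
  hostname = "telegraf-host-01"   # must be unique per machine
  metric_batch_size = 30          # no more than 50 for TDengine

[[outputs.http]]
  url = "http://127.0.0.1:6020/telegraf/udb"
  method = "POST"
  username = "root"
  password = "taosdata"
  data_format = "json"
  json_timestamp_units = "1ms"
```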
Please refer to the [Telegraf docs](https://docs.influxdata.com/telegraf/v1.11/) for more information.
## Grafana
[Grafana] is an open-source system for time-series data display. It is easy to integrate TDengine and Grafana to build a monitor system. Data saved in TDengine can be fetched and shown on the Grafana dashboard.
### Install Grafana
For now, TDengine only supports Grafana newer than version 5.2.4. Users can go to the [Grafana download page] for the proper package to download.
### Configure Grafana
TDengine Grafana plugin is in the _/usr/local/taos/connector/grafana_ directory.
Taking Centos 7.2 as an example, just copy TDengine directory to _/var/lib/grafana/plugins_ directory and restart Grafana.
### Use Grafana
Users can log in to the Grafana server (username/password: admin/admin) through localhost:3000 to configure TDengine as the data source. As shown in the picture below, TDengine appears as a data source option in the box:
![img](../assets/clip_image001.png)
When choosing TDengine as the data source, the Host in the HTTP configuration should be set to the IP address of any node of a TDengine cluster, and the port should be set to 6020. For example, when TDengine and Grafana are on the same machine, it should be configured as http://localhost:6020.
Besides, users should also set the username and password used to log in to TDengine. Then click the _Save&Test_ button to save.
![img](../assets/clip_image001-2474914.png)
Then, TDengine as a data source should show in the Grafana data source list.
![img](../assets/clip_image001-2474939.png)
Then, users can create Dashboards in Grafana using TDengine as the data source:
![img](../assets/clip_image001-2474961.png)
Click the _Add Query_ button to add a query and input the SQL command you want to run in the _INPUT SQL_ text box. The SQL command should expect a two-column, multi-row result, such as _SELECT count(*) FROM sys.cpu WHERE ts>=from and ts<to interval(interval)_, in which _from_, _to_ and _interval_ are TDengine inner variables representing the query time range and time interval.
_ALIAS BY_ field is to set the query alias. Click _GENERATE SQL_ to send the command to TDengine:
![img](../assets/clip_image001-2474987.png)
Please refer to the [Grafana official document] for more information about Grafana.
## Matlab
Matlab can connect to and retrieve data from TDengine by TDengine JDBC Driver.
### MatLab and TDengine JDBC adaptation
Several steps are required to adapt Matlab to TDengine. Taking adapting Matlab2017a on Windows10 as an example:
1. Copy the file _JDBCDriver-1.0.0-dist.jar_ in TDengine package to the directory _${matlab_root}\MATLAB\R2017a\java\jar\toolbox_
2. Copy the file _taos.lib_ in TDengine package to _${matlab_ root _dir}\MATLAB\R2017a\lib\win64_
3. Add the .jar package just copied to the Matlab classpath. Append the line below as the end of the file of _${matlab_ root _dir}\MATLAB\R2017a\toolbox\local\classpath.txt_
`$matlabroot/java/jar/toolbox/JDBCDriver-1.0.0-dist.jar`
4. Create a file called _javalibrarypath.txt_ in the directory _${user_home}\AppData\Roaming\MathWorks\MATLAB\R2017a\_, and add the _taos.dll_ path in the file. For example, if the file _taos.dll_ is in the directory _C:\Windows\System32_, then add the following line in the file *javalibrarypath.txt*:
`C:\Windows\System32`
### TDengine operations in Matlab
After correct configuration, open Matlab:
- build a connection
`conn = database('db', 'root', 'taosdata', 'com.taosdata.jdbc.TSDBDriver', 'jdbc:TSDB://127.0.0.1:0/')`
- Query
`sql0 = ['select * from tb']`
`data = select(conn, sql0);`
- Insert a record:
`sql1 = ['insert into tb values (now, 1)']`
`exec(conn, sql1)`
Please refer to the file _examples\Matlab\TDengineDemo.m_ for more information.
## R
Users can use the R language to access the TDengine server through the JDBC interface. First, install the JDBC package in R:
```R
install.packages('RJDBC', repos='http://cran.us.r-project.org')
```
Then use _library_ function to load the package:
```R
library('RJDBC')
```
Then load the TDengine JDBC driver:
```R
drv<-JDBC("com.taosdata.jdbc.TSDBDriver","JDBCDriver-1.0.0-dist.jar", identifier.quote="\"")
```
If succeed, no error message will display. Then use the following command to try a database connection:
```R
conn<-dbConnect(drv,"jdbc:TSDB://192.168.0.1:0/?user=root&password=taosdata","root","taosdata")
```
Please replace the IP address in the command above with the correct one. If no error message is shown, then the connection is established successfully. TDengine supports the functions below in the _RJDBC_ package:
- _dbWriteTable(conn, "test", iris, overwrite=FALSE, append=TRUE)_: write the data in a data frame _iris_ to the table _test_ in the TDengine server. Parameter _overwrite_ must be _false_. _append_ must be _TRUE_ and the schema of the data frame _iris_ should be the same as the table _test_.
- _dbGetQuery(conn, "select count(*) from test")_: run a query command
- _dbSendUpdate(conn, "use db")_: run any non-query command.
- _dbReadTable(conn, "test")_: read all the data in table _test_
- _dbDisconnect(conn)_: close a connection
- _dbRemoveTable(conn, "test")_: remove table _test_
Below functions are **not supported** currently:
- _dbExistsTable(conn, "test")_: whether table _test_ exists
- _dbListTables(conn)_: list all tables in the connection
[Telegraf]: https://www.influxdata.com/time-series-platform/telegraf/
[download link]: https://portal.influxdata.com/downloads
[Telegraf document]: https://docs.influxdata.com/telegraf/v1.11/
[Grafana]: https://grafana.com
[Grafana download page]: https://grafana.com/grafana/download
[Grafana official document]: https://grafana.com/docs/

View File

@ -1,918 +0,0 @@
# TDengine connectors
TDengine provides many connectors for development, including C/C++, JAVA, Python, RESTful, Go, Node.JS, etc.
NOTE: All APIs which require a SQL string as a parameter, including but not limited to `taos_query`, `taos_query_a`, `taos_subscribe` in the C/C++ Connector and their counterparts in other connectors, can ONLY process one SQL statement at a time. If more than one SQL statement is provided, their behaviors are undefined.
## C/C++ API
C/C++ APIs are similar to the MySQL APIs. Applications should include the TDengine header file _taos.h_ to use the C/C++ APIs by adding the following line in code:
```C
#include <taos.h>
```
Make sure the TDengine library _libtaos.so_ is installed and use the _-ltaos_ option to link the library when compiling. In most cases, if the return value of an API is an integer, it returns _0_ on success and another value as an error code on failure; if the return value is a pointer, _NULL_ indicates failure.
### C/C++ sync API
Sync APIs are those APIs waiting for responses from the server after sending a request. TDengine has the following sync APIs:
- `TAOS *taos_connect(char *ip, char *user, char *pass, char *db, int port)`
Open a connection to a TDengine server. The parameters are _ip_ (IP address of the server), _user_ (username to login), _pass_ (password to login), _db_ (database to use after connection) and _port_ (port number to connect). The parameter _db_ can be NULL if no database is to be used after connecting; otherwise, the database should exist before connecting or a connection error is reported. The handle returned by this API should be kept for future use.
- `void taos_close(TAOS *taos)`
Close a connection to a TDengine server using the handle returned by _taos_connect_.
- `int taos_query(TAOS *taos, char *sqlstr)`
The API used to run a SQL command. The command can be DQL or DML. The parameter _taos_ is the handle returned by _taos_connect_. Return value _-1_ means failure.
- `TAOS_RES *taos_use_result(TAOS *taos)`
Use the result after running _taos_query_. The handle returned should be kept for future fetch.
- `TAOS_ROW taos_fetch_row(TAOS_RES *res)`
Fetch a row of return results through _res_, the handle returned by _taos_use_result_.
- `int taos_num_fields(TAOS_RES *res)`
Get the number of fields in the return result.
- `TAOS_FIELD *taos_fetch_fields(TAOS_RES *res)`
Fetch the description of each field. The description includes the property of data type, field name, and bytes. The API should be used with _taos_num_fields_ to fetch a row of data.
- `void taos_free_result(TAOS_RES *res)`
Free the resources used by a result set. Make sure to call this API after fetching results, or a memory leak will happen.
- `void taos_init()`
Initialize the environment variables used by the TDengine client. The API is not necessary since it is called in _taos_connect_ by default.
- `char *taos_errstr(TAOS *taos)`
Return the reason of the last API call failure. The return value is a string.
- `int *taos_errno(TAOS *taos)`
Return the error code of the last API call failure. The return value is an integer.
- `int taos_options(TSDB_OPTION option, const void * arg, ...)`
Set client options. The parameter _option_ supports values of _TSDB_OPTION_CONFIGDIR_ (configuration directory), _TSDB_OPTION_SHELL_ACTIVITY_TIMER_, _TSDB_OPTION_LOCALE_ (client locale) and _TSDB_OPTION_TIMEZONE_ (client timezone).
These 12 APIs are the most important and most frequently used ones. Users can check the _taos.h_ file for more API information.
**Note**: A connection to a TDengine server is not thread-safe, so a connection can only be used by one thread at a time.
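Below is a minimal end-to-end sketch of the sync APIs above. It is a sketch only, assuming a local server, the example credentials root/taosdata, and an existing database _demo_ with a table _tb1_:
```C
#include <stdio.h>
#include <taos.h>

int main() {
  // connect; server address, credentials and database are example values
  TAOS *taos = taos_connect("127.0.0.1", "root", "taosdata", "demo", 0);
  if (taos == NULL) {
    printf("failed to connect\n");
    return 1;
  }
  // run a query; a non-zero return value means failure
  if (taos_query(taos, "select * from tb1") != 0) {
    printf("query failed: %s\n", taos_errstr(taos));
    taos_close(taos);
    return 1;
  }
  TAOS_RES *res = taos_use_result(taos);       // keep the result handle
  int num_fields = taos_num_fields(res);       // number of columns
  TAOS_FIELD *fields = taos_fetch_fields(res); // per-column descriptions
  TAOS_ROW row;
  int rows = 0;
  while ((row = taos_fetch_row(res)) != NULL) {
    rows++;  // row[i] points to the value of the i-th field
  }
  printf("fetched %d rows of %d fields, first field '%s'\n", rows, num_fields, fields[0].name);
  taos_free_result(res);  // always free the result set
  taos_close(taos);
  return 0;
}
```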
### C/C++ parameter binding API
TDengine also provides parameter binding APIs, like MySQL; only the question mark `?` can be used to represent a parameter in these APIs.
- `TAOS_STMT* taos_stmt_init(TAOS *taos)`
Create a TAOS_STMT to represent the prepared statement for other APIs.
- `int taos_stmt_prepare(TAOS_STMT *stmt, const char *sql, unsigned long length)`
Parse the SQL statement _sql_ and bind the result to _stmt_. If _length_ is larger than 0, its value is used to determine the length of _sql_; otherwise the API detects the actual length of _sql_ automatically.
- `int taos_stmt_bind_param(TAOS_STMT *stmt, TAOS_BIND *bind)`
Bind values to parameters. _bind_ points to an array; the element count and sequence of the array must be identical to the parameters of the SQL statement. The usage of _TAOS_BIND_ is the same as _MYSQL_BIND_ in MySQL; its definition is as below:
```c
typedef struct TAOS_BIND {
int buffer_type;
void * buffer;
unsigned long buffer_length; // not used in TDengine
unsigned long *length;
int * is_null;
int is_unsigned; // not used in TDengine
int * error; // not used in TDengine
} TAOS_BIND;
```
- `int taos_stmt_add_batch(TAOS_STMT *stmt)`
Add the bound parameters to the batch; the client can call `taos_stmt_bind_param` again after calling this API. Note this API only supports _insert_ / _import_ statements; it returns an error in other cases.
- `int taos_stmt_execute(TAOS_STMT *stmt)`
Execute the prepared statement. This API can only be called once for a statement at present.
- `TAOS_RES* taos_stmt_use_result(TAOS_STMT *stmt)`
Acquire the result set of an executed statement. The result is used in the same way as with `taos_use_result`; `taos_free_result` must be called once you are done with the result set to release resources.
- `int taos_stmt_close(TAOS_STMT *stmt)`
Close the statement and release all its resources.
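To illustrate the flow, here is a sketch of a one-row prepared insert under these APIs. It assumes a table `tb (ts timestamp, temperature int)` already exists; the `TSDB_DATA_TYPE_*` constants used for `buffer_type` are taken from _taos.h_, and error handling is abbreviated:

```c
// A sketch of a one-row prepared insert; assumes table
// "tb (ts timestamp, temperature int)" exists.
#include <stdint.h>
#include <string.h>
#include <taos.h>

int insert_one_row(TAOS *taos) {
  TAOS_STMT *stmt = taos_stmt_init(stmt == NULL ? NULL : NULL), *s = NULL;
  (void)s;
  stmt = taos_stmt_init(taos);
  if (taos_stmt_prepare(stmt, "insert into tb values(?, ?)", 0) != 0) {
    return -1;  // length 0: let the API detect the SQL length itself
  }

  int64_t ts = 1538548685000;  // epoch milliseconds
  int32_t temperature = 23;
  unsigned long ts_len = sizeof(ts);
  unsigned long temp_len = sizeof(temperature);

  TAOS_BIND params[2];
  memset(params, 0, sizeof(params));  // leave is_null and unused fields at 0
  params[0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
  params[0].buffer = &ts;
  params[0].length = &ts_len;
  params[1].buffer_type = TSDB_DATA_TYPE_INT;
  params[1].buffer = &temperature;
  params[1].length = &temp_len;

  taos_stmt_bind_param(stmt, params);  // bind one row of values
  taos_stmt_add_batch(stmt);           // queue it; bind_param may be called again
  int code = taos_stmt_execute(stmt);  // run the whole batch at once
  taos_stmt_close(stmt);
  return code;
}
```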
### C/C++ async API
In addition to the sync APIs, TDengine also provides async APIs. Async APIs return right away without waiting for a response from the server, allowing the application to continue with other tasks without blocking. They are therefore more efficient, especially under poor network conditions.
All async APIs require callback functions. The callback functions have the format:
```C
void fp(void *param, TAOS_RES * res, TYPE param3)
```
The first two parameters of the callback function are the same for all async APIs. The third parameter is different for different APIs. Generally, the first parameter is the handle provided to the API for action. The second parameter is a result handle.
- `void taos_query_a(TAOS *taos, char *sqlstr, void (*fp)(void *param, TAOS_RES *, int code), void *param);`
The async query interface. _taos_ is the handle returned by the _taos_connect_ interface. _sqlstr_ is the SQL command to run. _fp_ is the callback function. _param_ is the parameter required by the callback function. The third parameter of the callback function, _code_, is _0_ on success or a negative number on failure (call taos_errstr to get the error as a string). Applications mainly work with the second parameter, the returned result set.
- `void taos_fetch_rows_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, int numOfRows), void *param);`
The async API to fetch a batch of rows, which should only be used with a _taos_query_a_ call. The parameter _res_ is the result handle returned by _taos_query_a_. _fp_ is the callback function. _param_ is a user-defined structure to pass to _fp_. The parameter _numOfRows_ is the number of result rows in the current fetch cycle. In the callback function, applications should call _taos_fetch_row_ to get records from the result handle. After getting a batch of results, applications should continue to call _taos_fetch_rows_a_ API to handle the next batch, until the _numOfRows_ is _0_ (for no more data to fetch) or _-1_ (for failure).
- `void taos_fetch_row_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), void *param);`
The async API to fetch a result row. _res_ is the result handle. _fp_ is the callback function. _param_ is a user-defined structure to pass to _fp_. The third parameter of the callback function is a single result row, which is different from that of _taos_fetch_rows_a_ API. With this API, it is not necessary to call _taos_fetch_row_ to retrieve each result row, which is handier than _taos_fetch_rows_a_ but less efficient.
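The typical callback chain looks like the following sketch, in which the query callback starts the first fetch and the fetch callback keeps requesting batches until _numOfRows_ reaches _0_ or _-1_; the table name `tb` is hypothetical, and the process must stay alive until the callbacks complete:

```c
// Sketch of the async query/fetch chain: query_cb starts the first fetch
// and fetch_cb keeps requesting batches until numOfRows is 0 or -1.
#include <stdio.h>
#include <taos.h>

void fetch_cb(void *param, TAOS_RES *res, int numOfRows) {
  if (numOfRows <= 0) return;               // no more data, or failure
  for (int i = 0; i < numOfRows; i++) {
    TAOS_ROW row = taos_fetch_row(res);     // consume one row of the batch
    (void)row;                              // process the row here
  }
  taos_fetch_rows_a(res, fetch_cb, param);  // ask for the next batch
}

void query_cb(void *param, TAOS_RES *res, int code) {
  if (code != 0) {
    printf("query failed: %s\n", taos_errstr((TAOS *)param));
    return;
  }
  taos_fetch_rows_a(res, fetch_cb, param);
}

void run_async_query(TAOS *taos) {
  // returns immediately; results arrive through the callbacks above
  taos_query_a(taos, "select * from tb", query_cb, taos);
}
```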
Applications may apply operations on multiple tables. However, **it is important to make sure the operations on the same table are serialized**. That means after sending an insert request in a table to the server, no operations on the table are allowed before a response is received.
### C/C++ continuous query interface
TDengine provides APIs for continuous query driven by time, which run queries periodically in the background. There are only two APIs:
- `TAOS_STREAM *taos_open_stream(TAOS *taos, char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), int64_t stime, void *param, void (*callback)(void *));`
The API is used to create a continuous query.
* _taos_: the connection handle returned by _taos_connect_.
* _sqlstr_: the SQL string to run. Only query commands are allowed.
* _fp_: the callback function to run after each query cycle
* _param_: a parameter passed to _fp_
* _stime_: the start time of the stream, in epoch milliseconds. If _0_ is given, the start time is set to the current time
* _callback_: a callback function to run when the continuous query stops automatically
The API returns a handle on success and a NULL pointer otherwise.
- `void taos_close_stream(TAOS_STREAM *tstr)`
Close the continuous query using the handle returned by _taos_open_stream_. Make sure to call this API when the continuous query is no longer needed.
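A sketch of how the two calls are typically combined is shown below; the table and column names are hypothetical, and the query period comes from the `interval(...)` clause of the SQL itself:

```c
// Sketch of a time-driven continuous query. stream_cb receives each
// result row; stopped_cb runs if the query stops automatically.
#include <stdio.h>
#include <taos.h>

void stream_cb(void *param, TAOS_RES *res, TAOS_ROW row) {
  printf("continuous query produced a result row\n");  // process row here
}

void stopped_cb(void *param) {
  printf("continuous query stopped\n");
}

TAOS_STREAM *start_stream(TAOS *taos) {
  // stime 0: start from the current time
  TAOS_STREAM *stream = taos_open_stream(
      taos, "select avg(temperature) from tb interval(1m)",
      stream_cb, 0, NULL, stopped_cb);
  if (stream == NULL) {
    printf("failed to open the stream\n");
  }
  return stream;  // later: taos_close_stream(stream) when no longer needed
}
```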
### C/C++ subscription API
For the time being, TDengine supports subscription on one or multiple tables. It is implemented through periodic pulling from a TDengine server.
* `TAOS_SUB *taos_subscribe(TAOS* taos, int restart, const char* topic, const char *sql, TAOS_SUBSCRIBE_CALLBACK fp, void *param, int interval)`
The API used to start a subscription session. It returns the subscription object on success and `NULL` in case of failure. The parameters are:
* **taos**: The database connection, which must be established already.
* **restart**: `Zero` to continue a subscription if it already exists; any other value starts consuming from the beginning.
* **topic**: The unique identifier of a subscription.
* **sql**: An SQL statement for the data query; it can only be a `select` statement, can only query raw data, and can only query data in ascending order of the timestamp field.
* **fp**: A callback function to receive query results, used only in asynchronous mode; it should be `NULL` in synchronous mode. See below for its prototype.
* **param**: A user-provided additional parameter for the callback function.
* **interval**: The pulling interval in milliseconds. In asynchronous mode, the API calls the callback function `fp` at this interval; system performance will suffer if the interval is too short. In synchronous mode, if the duration between two calls to `taos_consume` is less than this interval, the second call blocks until the interval has elapsed.
* `typedef void (*TAOS_SUBSCRIBE_CALLBACK)(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code)`
Prototype of the callback function, the parameters are:
* tsub: The subscription object.
* res: The query result.
* param: User provided additional parameter when calling `taos_subscribe`.
* code: Error code in case of failures.
* `TAOS_RES *taos_consume(TAOS_SUB *tsub)`
The API used to get new data from a TDengine server. It should be called in a loop. The parameter `tsub` is the handle returned by `taos_subscribe`. This API should only be called in synchronous mode. If the duration between two calls to `taos_consume` is less than the pulling interval, the second call blocks until the interval has elapsed. The API returns the new rows when new data arrives, an empty row set otherwise, and `NULL` when there is an error.
* `void taos_unsubscribe(TAOS_SUB *tsub, int keepProgress)`
Stop a subscription session using the handle returned by `taos_subscribe`. If `keepProgress` is **not** zero, the subscription progress information is kept and can be reused by a later call to `taos_subscribe`; otherwise the information is removed.
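A sketch of synchronous-mode consumption is shown below; passing `NULL` as the callback selects synchronous mode, and `meters` is a hypothetical super table:

```c
// Sketch of a synchronous-mode subscription that polls for new rows once
// a second. taos_consume blocks as needed to honor the 1000 ms interval.
#include <taos.h>

void poll_new_data(TAOS *taos) {
  TAOS_SUB *tsub = taos_subscribe(taos, 1 /*restart*/, "test",
                                  "select * from meters;", NULL, NULL, 1000);
  if (tsub == NULL) return;

  for (int i = 0; i < 60; i++) {  // poll for about one minute
    TAOS_RES *res = taos_consume(tsub);
    if (res == NULL) break;       // an error occurred
    TAOS_ROW row;
    while ((row = taos_fetch_row(res)) != NULL) {
      // handle each newly arrived row here
    }
  }
  taos_unsubscribe(tsub, 0 /*discard progress*/);
}
```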
## Java Connector
For Java developers, TDengine provides `taos-jdbcdriver`, which implements the JDBC (3.0) API. Users can find and download it from the [Sonatype Repository][1].
Since the native language of TDengine is C, the required native TDengine library must be present before taos-jdbcdriver can be used:
* libtaos.so (Linux)
After TDengine is installed successfully, the library `libtaos.so` is automatically copied to `/usr/lib/`, which is in the system's default search path.
* taos.dll (Windows)
After the TDengine client is installed, the library `taos.dll` is automatically copied to `C:/Windows/System32`, which is in the system's default search path.
> Note: Please make sure the [TDengine Windows client][14] has been installed if developing on Windows. Although the TDengine client is installed together with the TDengine server by default, it can also be installed [standalone][15].
Since TDengine is a time-series database, there are some differences from traditional databases when using the TDengine JDBC driver:
* TDengine does not allow deleting or modifying a single record, so the JDBC driver provides no such methods.
* No support for transactions.
* No support for unions between tables.
* No support for nested queries: there is at most one open ResultSet per Connection, so the TSDB JDBC driver closes the current ResultSet if it is still open when a new query begins.
## Version list of TAOS-JDBCDriver and required TDengine and JDK
| taos-jdbcdriver | TDengine | JDK |
| --- | --- | --- |
| 1.0.3 | 1.6.1.x or higher | 1.8.x |
| 1.0.2 | 1.6.1.x or higher | 1.8.x |
| 1.0.1 | 1.6.1.x or higher | 1.8.x |
## DataType in TDengine and Java
The data types in TDengine include timestamp, numeric, string, and boolean types, which map to Java types as follows:
| TDengine | Java |
| --- | --- |
| TIMESTAMP | java.sql.Timestamp |
| INT | java.lang.Integer |
| BIGINT | java.lang.Long |
| FLOAT | java.lang.Float |
| DOUBLE | java.lang.Double |
| SMALLINT, TINYINT |java.lang.Short |
| BOOL | java.lang.Boolean |
| BINARY, NCHAR | java.lang.String |
## How to get TAOS-JDBC Driver
### Maven repository
taos-jdbcdriver has been published to [Sonatype Repository][1]:
* [sonatype][8]
* [mvnrepository][9]
* [maven.aliyun][10]
Use the following dependency in the pom.xml of your Maven project:
```xml
<dependencies>
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>1.0.3</version>
</dependency>
</dependencies>
```
### JAR file from the source code
After downloading the [TDengine][3] source code, execute `mvn clean package` in the directory `src/connector/jdbc`, and the corresponding JAR file will be generated.
## Usage
### Get the connection
```java
Class.forName("com.taosdata.jdbc.TSDBDriver");
String jdbcUrl = "jdbc:TAOS://127.0.0.1:6030/log?user=root&password=taosdata";
Connection conn = DriverManager.getConnection(jdbcUrl);
```
> `6030` is the default port and `log` is the default database for the system monitor.
A normal JDBC URL looks as follows:
`jdbc:TAOS://{host_ip}:{port}/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]`
Values in `{}` are required while values in `[]` are optional. Each option in the URL above denotes:
* user: user name for login; defaults to root.
* password: password for login; defaults to taosdata.
* charset: charset for the client; defaults to the system charset.
* cfgdir: configuration directory for the client; defaults to _/etc/taos/_ on Linux and _C:/TDengine/cfg_ on Windows.
* locale: language for the client; defaults to the system locale.
* timezone: timezone for the client; defaults to the system timezone.
The options above can be configured in the following ways, ordered by priority:
1. JDBC URL
As explained above.
2. java.sql.DriverManager.getConnection(String jdbcUrl, Properties connProps)
```java
public Connection getConn() throws Exception{
Class.forName("com.taosdata.jdbc.TSDBDriver");
String jdbcUrl = "jdbc:TAOS://127.0.0.1:0/log?user=root&password=taosdata";
Properties connProps = new Properties();
connProps.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_CONFIG_DIR, "/etc/taos");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
Connection conn = DriverManager.getConnection(jdbcUrl, connProps);
return conn;
}
```
3. Configuration file (taos.cfg)
The default configuration file is _/etc/taos/taos.cfg_ on Linux and _C:\TDengine\cfg\taos.cfg_ on Windows.
```properties
# client default username
# defaultUser root
# client default password
# defaultPass taosdata
# default system charset
# charset UTF-8
# system locale
# locale en_US.UTF-8
```
> For more options, refer to [client configuration][13].
### Create databases and tables
```java
Statement stmt = conn.createStatement();
// create database
stmt.executeUpdate("create database if not exists db");
// use database
stmt.executeUpdate("use db");
// create table
stmt.executeUpdate("create table if not exists tb (ts timestamp, temperature int, humidity float)");
```
> Note: if `use db` is not executed first, the database name must be prefixed to the table name, as in _db.tb_, when operating on tables.
### Insert data
```java
// insert data
int affectedRows = stmt.executeUpdate("insert into tb values(now, 23, 10.3) (now + 1s, 20, 9.3)");
System.out.println("insert " + affectedRows + " rows.");
```
> _now_ is the server time.
> _now + 1s_ is 1 second later than the current server time. Supported time units are: _a_ (millisecond), _s_ (second), _m_ (minute), _h_ (hour), _d_ (day), _w_ (week), _n_ (month), _y_ (year).
### Query database
```java
// query data
ResultSet resultSet = stmt.executeQuery("select * from tb");
Timestamp ts = null;
int temperature = 0;
float humidity = 0;
while(resultSet.next()){
ts = resultSet.getTimestamp(1);
temperature = resultSet.getInt(2);
humidity = resultSet.getFloat("humidity");
System.out.printf("%s, %d, %s\n", ts, temperature, humidity);
}
```
> Querying is consistent with relational databases. Column subscripts start from 1 when retrieving results; using the column name to retrieve results is recommended.
### Close all
```java
resultSet.close();
stmt.close();
conn.close();
```
> Please make sure the connection is closed, to avoid errors such as connection leaks.
## Using connection pool
**HikariCP**
* dependence in pom.xml
```xml
<dependency>
<groupId>com.zaxxer</groupId>
<artifactId>HikariCP</artifactId>
<version>3.4.1</version>
</dependency>
```
* Examples
```java
public static void main(String[] args) throws SQLException {
HikariConfig config = new HikariConfig();
config.setJdbcUrl("jdbc:TAOS://127.0.0.1:6030/log");
config.setUsername("root");
config.setPassword("taosdata");
config.setMinimumIdle(3); //minimum number of idle connection
config.setMaximumPoolSize(10); //maximum number of connection in the pool
config.setConnectionTimeout(10000); //maximum wait milliseconds for get connection from pool
config.setIdleTimeout(60000); // max idle time for recycle idle connection
config.setConnectionTestQuery("describe log.dn"); //validation query
config.setValidationTimeout(3000); //validation query timeout
HikariDataSource ds = new HikariDataSource(config); //create datasource
Connection connection = ds.getConnection(); // get connection
Statement statement = connection.createStatement(); // get statement
//query or insert
// ...
connection.close(); // return the connection to the pool
}
```
> The close() method does not actually close the connection obtained from HikariDataSource.getConnection(); instead, the connection is returned to the connection pool.
> For more instructions, refer to the [User Guide][5].
**Druid**
* dependency in pom.xml
```xml
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>druid</artifactId>
<version>1.1.20</version>
</dependency>
```
* Examples
```java
public static void main(String[] args) throws Exception {
Properties properties = new Properties();
properties.put("driverClassName","com.taosdata.jdbc.TSDBDriver");
properties.put("url","jdbc:TAOS://127.0.0.1:6030/log");
properties.put("username","root");
properties.put("password","taosdata");
properties.put("maxActive","10"); //maximum number of connection in the pool
properties.put("initialSize","3");//initial number of connection
properties.put("maxWait","10000");//maximum wait milliseconds for get connection from pool
properties.put("minIdle","3");//minimum number of connection in the pool
properties.put("timeBetweenEvictionRunsMillis","3000");// the interval milliseconds to test connection
properties.put("minEvictableIdleTimeMillis","60000");//the minimum milliseconds to keep idle
properties.put("maxEvictableIdleTimeMillis","90000");//the maximum milliseconds to keep idle
properties.put("validationQuery","describe log.dn"); //validation query
properties.put("testWhileIdle","true"); // test connection while idle
properties.put("testOnBorrow","false"); // don't need while testWhileIdle is true
properties.put("testOnReturn","false"); // don't need while testWhileIdle is true
//create druid datasource
DataSource ds = DruidDataSourceFactory.createDataSource(properties);
Connection connection = ds.getConnection(); // get connection
Statement statement = connection.createStatement(); // get statement
//query or insert
// ...
connection.close(); // return the connection to the pool
}
```
> For more instructions, refer to the [User Guide][6].
**Notice**
* TDengine `v1.6.4.1` provides the function `select server_status()` as a heartbeat check; it is highly recommended as the validation query.
As shown below, `1` is returned if `select server_status()` executes successfully.
```shell
taos> select server_status();
server_status()|
================
1 |
Query OK, 1 row(s) in set (0.000141s)
```
## Integration with frameworks
* Please refer to [SpringJdbcTemplate][11] if using taos-jdbcdriver with Spring JdbcTemplate
* Please refer to [springbootdemo][12] if using taos-jdbcdriver with Spring Boot
## FAQ
* java.lang.UnsatisfiedLinkError: no taos in java.library.path
**Cause**: The application cannot find the native library _taos_.
**Answer**: Copy `C:\TDengine\driver\taos.dll` to `C:\Windows\System32\` on Windows, or create a soft link via `ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so` on Linux.
* java.lang.UnsatisfiedLinkError: taos.dll Can't load AMD 64 bit on a IA 32-bit platform
**Cause**: Currently TDengine only supports 64-bit JDKs.
**Answer**: Re-install a 64-bit JDK.
* For other questions, please refer to [Issues][7]
## Python Connector
### Pre-requirement
* TDengine installed; if on Windows, the TDengine client must also be installed [(Windows TDengine client installation)](https://www.taosdata.com/cn/documentation/connector/#Windows客户端及程序接口)
* python 2.7 or >= 3.4
* pip installed
### Installation
#### Linux
Users can find the python client packages in our source code directory _src/connector/python_. There are two directories, corresponding to the two python versions; please choose the correct package to install. Users can use the _pip_ command to install:
```cmd
pip install src/connector/python/linux/python3/
```
or
```
pip install src/connector/python/linux/python2/
```
#### Windows
Assuming the Windows TDengine client has been installed, copy the file "C:\TDengine\driver\taos.dll" to the folder "C:\windows\system32", and then open the Windows _cmd_ command-line interface:
```
cd C:\TDengine\connector\python\windows
pip install python3\
```
or
```
cd C:\TDengine\connector\python\windows
pip install python2\
```
* If the _pip_ command is not available on the system, users can either install pip or simply copy the _taos_ directory from the python client directory into the application directory and use it from there.
### Usage
#### Examples
* import TDengine module
```python
import taos
```
* get the connection
```python
conn = taos.connect(host="127.0.0.1", user="root", password="taosdata", config="/etc/taos")
c1 = conn.cursor()
```
* _host_ is the IP address of the TDengine server, and _config_ is the directory containing the TDengine client configuration file
* insert records into the database
```python
import datetime
# create a database
c1.execute('create database db')
c1.execute('use db')
# create a table
c1.execute('create table tb (ts timestamp, temperature int, humidity float)')
# insert a record
start_time = datetime.datetime(2019, 11, 1)
affected_rows = c1.execute('insert into tb values (\'%s\', 0, 0.0)' %start_time)
# insert multiple records in a batch
time_interval = datetime.timedelta(seconds=60)
sqlcmd = ['insert into tb values']
for irow in range(1,11):
    start_time += time_interval
    sqlcmd.append('(\'%s\', %d, %f)' %(start_time, irow, irow*1.2))
affected_rows = c1.execute(' '.join(sqlcmd))
```
* query the database
```python
c1.execute('select * from tb')
# fetch all returned results
data = c1.fetchall()
# data is a list of returned rows with each row being a tuple
numOfRows = c1.rowcount
numOfCols = len(c1.description)
for irow in range(numOfRows):
print("Row%d: ts=%s, temperature=%d, humidity=%f" %(irow, data[irow][0], data[irow][1],data[irow][2]))
# use the cursor as an iterator to retrieve all returned results
c1.execute('select * from tb')
for data in c1:
print("ts=%s, temperature=%d, humidity=%f" %(data[0], data[1],data[2])
```
* create a subscription
```python
# Create a subscription with topic 'test' and consumption interval 1000ms.
# The first argument is True means to restart the subscription;
# if the subscription with topic 'test' has already been created, then pass
# False to this argument means to continue the existing subscription.
sub = conn.subscribe(True, "test", "select * from meters;", 1000)
```
* consume a subscription
```python
data = sub.consume()
for d in data:
print(d)
```
* close the subscription
```python
sub.close()
```
* close the connection
```python
c1.close()
conn.close()
```
#### Help information
Users can get module information from the Python help interface or refer to our [python code example](). The main classes and methods are listed below:
- _TDengineConnection_ class
Run `help(taos.TDengineConnection)` in python terminal for details.
- _TDengineCursor_ class
Run `help(taos.TDengineCursor)` in python terminal for details.
- connect method
Open a connection. Run `help(taos.connect)` in python terminal for details.
## RESTful Connector
TDengine also provides a RESTful API to support development on different platforms. Unlike other databases, the TDengine RESTful API applies operations to the database through SQL commands carried in the body of an HTTP POST request; all users need to provide is a URL.
For the time being, the TDengine RESTful API uses a _\<TOKEN\>_ generated from the username and password for identification. Safer identification methods will be provided in the future.
### HTTP URL encoding
To use TDengine RESTful API, the URL should have the following encoding format:
```
http://<ip>:<PORT>/rest/sql
```
- _ip_: IP address of any node in a TDengine cluster
- _PORT_: TDengine HTTP service port. It is 6020 by default.
For example, _http://192.168.0.1:6020/rest/sql_ is the URL used to send an HTTP request to a TDengine server with IP address 192.168.0.1.
It is required to add a token in an HTTP request header for identification.
```
Authorization: Basic <TOKEN>
```
The HTTP request body contains the SQL command to run. If the SQL command contains a table name, it should also provide the database name it belongs to in the form of `<db_name>.<tb_name>`. Otherwise, an error code is returned.
For example, use the _curl_ command to send an HTTP request:
```
curl -H 'Authorization: Basic <TOKEN>' -d '<SQL>' <ip>:<PORT>/rest/sql
```
or use
```
curl -u username:password -d '<SQL>' <ip>:<PORT>/rest/sql
```
where `TOKEN` is the Base64-encoded string of `{username}:{password}`; e.g. `root:taosdata` is encoded as `cm9vdDp0YW9zZGF0YQ==`.
### HTTP response
The HTTP response is in JSON format, as below:
```
{
"status": "succ",
"head": ["column1","column2", …],
"data": [
["2017-12-12 23:44:25.730", 1],
["2017-12-12 22:44:25.728", 4]
],
"rows": 2
}
```
Specifically,
- _status_: the result of the operation, success or failure
- _head_: description of returned result columns
- _data_: the returned data array. If no data is returned, only an _affected_rows_ field is listed
- _rows_: the number of rows returned
### Example
- Use _curl_ command to query all the data in table _t1_ of database _demo_:
`curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.t1' 192.168.0.1:6020/rest/sql`
The return value is like:
```
{
"status": "succ",
"head": ["column1","column2","column3"],
"data": [
["2017-12-12 23:44:25.730", 1, 2.3],
["2017-12-12 22:44:25.728", 4, 5.6]
],
"rows": 2
}
```
- Use HTTP to create a database
`curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'create database demo' 192.168.0.1:6020/rest/sql`
The return value should be:
```
{
"status": "succ",
"head": ["affected_rows"],
"data": [[1]],
"rows": 1,
}
```
## Go Connector
TDengine also provides a Go client package named _taosSql_ for users to access TDengine with Go. The package is in _/usr/local/taos/connector/go/src/taosSql_ by default if you installed TDengine. Users can copy the directory _/usr/local/taos/connector/go/src/taosSql_ to the _src_ directory of your project and import the package in the source code for use.
```Go
import (
"database/sql"
_ "taosSql"
)
```
The _taosSql_ package is implemented with _cgo_ and calls the TDengine C/C++ sync interfaces, so a connection can only be used by one thread at a time. Applications can open multiple connections for multi-threaded operation.
Please refer to the demo code in the package for more information.
## Node.js Connector
TDengine also provides a node.js connector package that is installable through [npm](https://www.npmjs.com/). The package is also in our source code at *src/connector/nodejs/*. The following instructions are also available [here](https://github.com/taosdata/tdengine/tree/master/src/connector/nodejs)
To get started, just type in the following to install the connector through [npm](https://www.npmjs.com/).
```cmd
npm install td-connector
```
It is highly suggested you use npm. If you don't have it installed, you can also just copy the nodejs folder from *src/connector/nodejs/* into your node project folder.
To interact with TDengine, we make use of the [node-gyp](https://github.com/nodejs/node-gyp) library. To install it, you will need the following, depending on your platform (the instructions below are quoted from node-gyp):
### On Unix
- `python` (`v2.7` recommended, `v3.x.x` is **not** supported)
- `make`
- A proper C/C++ compiler toolchain, like [GCC](https://gcc.gnu.org)
### On macOS
- `python` (`v2.7` recommended, `v3.x.x` is **not** supported) (already installed on macOS)
- Xcode
- You also need to install the `Command Line Tools` via Xcode. You can find this under the menu `Xcode -> Preferences -> Locations` (or by running `xcode-select --install` in your Terminal)
- This step will install `gcc` and the related toolchain containing `make`
### On Windows
#### Option 1
Install all the required tools and configurations using Microsoft's [windows-build-tools](https://github.com/felixrieseberg/windows-build-tools) using `npm install --global --production windows-build-tools` from an elevated PowerShell or CMD.exe (run as Administrator).
#### Option 2
Install tools and configuration manually:
- Install Visual C++ Build Environment: [Visual Studio Build Tools](https://visualstudio.microsoft.com/thank-you-downloading-visual-studio/?sku=BuildTools) (using "Visual C++ build tools" workload) or [Visual Studio 2017 Community](https://visualstudio.microsoft.com/pl/thank-you-downloading-visual-studio/?sku=Community) (using the "Desktop development with C++" workload)
- Install [Python 2.7](https://www.python.org/downloads/) (`v3.x.x` is not supported), and run `npm config set python python2.7` (or see below for further instructions on specifying the proper Python version and path.)
- Launch cmd, `npm config set msvs_version 2017`
If the above steps didn't work for you, please visit [Microsoft's Node.js Guidelines for Windows](https://github.com/Microsoft/nodejs-guidelines/blob/master/windows-environment.md#compiling-native-addon-modules) for additional tips.
To target native ARM64 Node.js on Windows 10 on ARM, add the components "Visual C++ compilers and libraries for ARM64" and "Visual C++ ATL for ARM64".
### Usage
The following is a short summary of the basic usage of the connector, the full api and documentation can be found [here](http://docs.taosdata.com/node)
#### Connection
To use the connector, first require the library ```td-connector```. Running the function ```taos.connect``` with the connection options passed in as an object will return a TDengine connection object. The required connection option is ```host```; other options, if not set, take the default values shown below.
A cursor also needs to be initialized in order to interact with TDengine from Node.js.
```javascript
const taos = require('td-connector');
var conn = taos.connect({host:"127.0.0.1", user:"root", password:"taosdata", config:"/etc/taos",port:0})
var cursor = conn.cursor(); // Initializing a new cursor
```
To close a connection, run
```javascript
conn.close();
```
#### Queries
We can now start executing simple queries through the ```cursor.query``` function, which returns a TaosQuery object.
```javascript
var query = cursor.query('show databases;')
```
We can get the results of the queries through the ```query.execute()``` function, which returns a promise that resolves with a TaosResult object, which contains the raw data and additional functionalities such as pretty printing the results.
```javascript
var promise = query.execute();
promise.then(function(result) {
result.pretty(); //logs the results to the console as if you were in the taos shell
});
```
You can also run a query with bound parameters by filling in the question marks in a string, as shown below. The query automatically parses the bound values and converts them to the proper format for use with TDengine.
```javascript
var query = cursor.query('select * from meterinfo.meters where ts <= ? and areaid = ?;').bind(new Date(), 5);
query.execute().then(function(result) {
result.pretty();
})
```
The TaosQuery object can also be immediately executed upon creation by passing true as the second argument, returning a promise instead of a TaosQuery.
```javascript
var promise = cursor.query('select * from meterinfo.meters where v1 = 30;', true)
promise.then(function(result) {
result.pretty();
})
```
#### Async functionality
Async queries can be performed using the same functions, such as `cursor.execute` and `TaosQuery.execute`, but with `_a` appended to their names.
Say you want to execute two async queries on two separate tables. Using `cursor.query`, you can get a TaosQuery object for each; executing it with the `execute_a` function returns a promise that resolves with a TaosResult object.
```javascript
var promise1 = cursor.query('select count(*), avg(v1), avg(v2) from meter1;').execute_a()
var promise2 = cursor.query('select count(*), avg(v1), avg(v2) from meter2;').execute_a();
promise1.then(function(result) {
result.pretty();
})
promise2.then(function(result) {
result.pretty();
})
```
### Example
An example of using the NodeJS connector to create a table with weather data and to create and execute queries can be found [here](https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example.js) (this is the preferred way of using the connector).
An example of using the NodeJS connector to achieve the same result, but without the object wrappers around the returned data, can be found [here](https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example-raw.js).
[1]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver
[2]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver
[3]: https://github.com/taosdata/TDengine
[4]: https://www.taosdata.com/blog/2019/12/03/jdbcdriver%e6%89%be%e4%b8%8d%e5%88%b0%e5%8a%a8%e6%80%81%e9%93%be%e6%8e%a5%e5%ba%93/
[5]: https://github.com/brettwooldridge/HikariCP
[6]: https://github.com/alibaba/druid
[7]: https://github.com/taosdata/TDengine/issues
[8]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver
[9]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver
[10]: https://maven.aliyun.com/mvn/search
[11]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate
[12]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo
[13]: https://www.taosdata.com/cn/documentation/administrator/#%E5%AE%A2%E6%88%B7%E7%AB%AF%E9%85%8D%E7%BD%AE
[14]: https://www.taosdata.com/cn/documentation/connector/#Windows%E5%AE%A2%E6%88%B7%E7%AB%AF%E5%8F%8A%E7%A8%8B%E5%BA%8F%E6%8E%A5%E5%8F%A3
[15]: https://www.taosdata.com/cn/getting-started/#%E5%BF%AB%E9%80%9F%E4%B8%8A%E6%89%8B

View File

@ -1,35 +0,0 @@
# TaosData Contributor License Agreement
This TaosData Contributor License Agreement (CLA) applies to any contribution you make to any TaosData projects. If you are representing your employing organization to sign this agreement, please warrant that you have the authority to grant the agreement.
## Terms
**"TaosData"**, **"we"**, **"our"** and **"us"** means TaosData, inc.
**"You"** and **"your"** means you or the organization you are on behalf of to sign this agreement.
**"Contribution"** means any original work you, or the organization you represent submit to TaosData for any project in any manner.
## Copyright License
All rights to your Contribution submitted to TaosData in any manner are granted to TaosData and recipients of software distributed by TaosData. You waive any rights that may affect our ownership of the copyright and grant to us a perpetual, worldwide, transferable, non-exclusive, no-charge, royalty-free, irrevocable, and sublicensable license to use, reproduce, prepare derivative works of, publicly display, publicly perform, sublicense, and distribute Contributions and any derivative work created based on a Contribution.
## Patent License
With respect to any patents you own or that you can license without payment to any third party, you grant to us and to any recipient of software distributed by us a perpetual, worldwide, transferable, non-exclusive, no-charge, royalty-free, irrevocable patent license to make, have made, use, sell, offer to sell, import, and otherwise transfer the Contribution in whole or in part, alone or included in any product under any patent you own, or license from a third party, that is necessarily infringed by the Contribution or by the combination of the Contribution with any Work.
## Your Representations and Warranties
You represent and warrant that:
- the Contribution you submit is an original work that you can legally grant the rights set out in this agreement.
- the Contribution you submit and the licenses you grant do not and will not infringe the rights of any third party.
- you are not aware of any pending or threatened claims, suits, actions, or charges pertaining to the contributions. You also warrant to notify TaosData immediately if you become aware of any such actual or potential claims, suits, actions, allegations or charges.
## Support
You are not obligated to support your Contribution unless you volunteer to provide support. If you want, you may provide support for a fee.
**I agree and accept on behalf of myself and behalf of my organization:**

View File

@ -1,100 +0,0 @@
# Data Model and Design
## Data Model
### A Typical IoT Scenario
In typical IoT, connected-vehicle, and operations-monitoring scenarios, there are often many different types of data collection devices, each collecting one or more different metrics. For the same device type, there are usually many individual devices deployed at different locations. A big-data processing system gathers all of this collected data for computation and analysis. For one type of device, the collected data looks like the following table:
| Device ID | Time Stamp | Value 1 | Value 2 | Value 3 | Tag 1 | Tag 2 |
| :-------: | :-----------: | :-----: | :-----: | :-----: | :---: | :---: |
| D1001 | 1538548685000 | 10.3 | 219 | 0.31 | Red | Tesla |
| D1002 | 1538548684000 | 10.2 | 220 | 0.23 | Blue | BMW |
| D1003 | 1538548686500 | 11.5 | 221 | 0.35 | Black | Honda |
| D1004 | 1538548685500 | 13.4 | 223 | 0.29 | Red | Volvo |
| D1001 | 1538548695000 | 12.6 | 218 | 0.33 | Red | Tesla |
| D1004 | 1538548696600 | 11.8 | 221 | 0.28 | Black | Honda |
Each record carries the device ID, a timestamp, the collected metrics, and the static tags associated with the device. Each device collects data either when triggered by an external event or on a configured schedule. The collected data points are time-ordered and form a data stream.
### Data Characteristics
Beyond being time-series, a closer look shows that IoT, connected-vehicle, and operations-monitoring data has many other distinctive characteristics:
1. the data is structured;
2. updates and deletes are extremely rare;
3. traditional database transactions are not needed;
4. compared with Internet applications, there are many more writes than reads;
5. traffic is steady and, given the number of devices and the sampling frequency, predictable;
6. users care about trends over a period of time, not the value at one specific point in time;
7. data has a retention period;
8. queries and analytics are always over a time range and a geographic region;
9. besides storage and queries, various statistical and real-time computations are often required;
10. data volume is huge: a single day can produce more than 10 billion records.
By fully exploiting these characteristics, TDengine adopts a specially optimized storage and computing design for time-series data that raises the system's processing capacity dramatically.
### Relational Database Model
Since the collected data is generally structured, and to lower the learning curve, TDengine adopts the traditional relational database model. Users therefore create a database first, then create tables, and only then insert or query data.
### One Table per Device
To fully exploit the time-series nature and other characteristics of its data, TDengine requires **a separate table for each data collection point**. If there are ten million smart meters, ten million tables need to be created; in the table above, D1001, D1002, D1003, and D1004 each need their own table to store the data they collect. This design guarantees that the data of one collection point is stored contiguously, block by block, on the storage medium, which greatly reduces random reads and improves read and query speed by orders of magnitude. Furthermore, because different collection devices generate data completely independently, each device produces only its own data and each table has only one writer, so writes can be lock-free and write speed increases substantially. And since the data produced by one collection point arrives in time order, writes can be implemented as appends, raising ingestion speed even further.
### Data Modeling Best Practices
**Table**: TDengine recommends using the name of the data collection point (such as D1001 in the table above) as the table name. Each collection point may collect several metrics at once (such as value1, value2, value3 above); each metric maps to a column of the table, and the data type can be integer, floating point, string, and so on. In addition, the first column of the table must be a timestamp, i.e. of type timestamp. Some devices have multiple groups of metrics, each sampled at a different rate; in that case, create one table per group for the same device. TDengine automatically indexes the collected data by timestamp but builds no index on the metrics themselves. Data is stored in columnar form.
**Super Table (STable)**: For collection points of the same type, to keep the schema consistent and to ease aggregation, you can first define a super table (STable, see chapter 10) and then create the tables from it. Collection points usually also carry static tag information (such as Tag 1 and Tag 2 above), e.g. device model or color. This static information is not saved on the data nodes that store the collected data; it is saved on the metadata nodes through the super table. The static tags serve as filter conditions for aggregations across collection points.
**Database**: Different collection points often have different data characteristics, including sampling frequency, retention period, number of replicas, field sizes, and so on. So that TDengine works at maximum efficiency in every scenario, TDengine recommends putting tables with different data characteristics in different databases. When creating a database, in addition to the standard SQL options, an application can specify the retention period, the number of replicas, the cache size, the file block size, whether to compress, and many other parameters (see chapter 19).
**Schemaless vs Schema**: Compared with the various NoSQL engines, requiring applications to define a schema reduces the flexibility of data insertion. But in typical time-series scenarios such as IoT and finance, the schema rarely changes, so this loss of flexibility is not a problem. In return, processing structured data lets TDengine improve query and analytics performance by orders of magnitude.
TDengine places no limit on the number of databases, super tables, or tables, and their number has no impact on performance; applications can create as many as their scenarios require.
## Main Modules
As shown in the figure, the TDengine server consists of two major modules: the **management node module (MGMT)** and the **data node module (DNODE)**. The complete TDengine system also includes a **client module**.
<center> <img src="../assets/structure.png"> </center>
<center> Figure 1. TDengine architecture </center>
### Management Node Module
The management node module is mainly responsible for storing and querying metadata, including managing user information and creating, deleting, and querying database and table information. When an application connects to TDengine, it connects to a management node first. Requests to create or drop databases and tables are also sent to the management node module first; it creates or deletes the metadata and then asks the data node module to allocate or free the required resources. For writes and queries, the application likewise visits the management node module first to obtain the metadata, and then accesses the data node module according to that metadata.
### Data Node Module
The data node module is responsible for storing and querying the written data. To use resources more efficiently and to ease future horizontal scaling, TDengine virtualizes its data nodes internally, introducing the virtual node (vnode) as the unit of storage, resource allocation, and data replication. As shown in Figure 2, through virtualization a dnode can be viewed as a collection of virtual nodes.
When a database is created, the system allocates vnodes automatically. Each vnode stores the data of a certain number of tables, but a table lives in exactly one vnode and never spans vnodes. A vnode belongs to exactly one database, while a database can have one or more vnodes. Different vnodes share no resources: each has its own cache and its own storage directory on disk, while within one vnode both the cache and the disk storage are shared. Through virtualization, TDengine can allocate a dnode's limited physical resources sensibly across vnodes, greatly improving resource utilization and concurrency. The number of virtual nodes on one physical machine can be configured according to its hardware resources.
<center> <img src="../assets/vnode.png"> </center>
<center> Figure 2. TDengine virtualization </center>
### Client Module
The TDengine client module is mainly responsible for parsing the requests (SQL statements) sent by applications, converting them into internal structures, and sending them to the server side. All of TDengine's interfaces are built on top of the client module. The client communicates with the management module over TCP/UDP on the port set by the system parameter mgmtShellPort (default 6030), and with the data node module over TCP/UDP on the port set by the system parameter vnodeShellPort (default 6035). Both port numbers can be customized in the <a href="../administrator/#Configuration-on-Server">system configuration file taos.cfg</a>.
## Writing Process
The complete TDengine write path is shown in Figure 3. To guarantee the safety and integrity of written data, TDengine uses a write-ahead log. Data arriving from a client is validated and then written to the write-ahead log first, so that TDengine can recover the data from the log after a restart caused by power loss or similar events, avoiding data loss. After the write-ahead log, the data is written into the corresponding vnode's cache, and the server then sends an acknowledgment of the successful write to the client. Two mechanisms flush the cached data to disk for persistent storage:
<center> <img src="../assets/write_process.png"> </center>
<center> Figure 3. TDengine writing process </center>
1. **Timer-driven flush**: The TDengine service periodically writes the data in each vnode's cache to disk, by default once an hour. The flush interval can be configured with the commitTime parameter in taos.cfg.
2. **Data-driven flush**: When the data cached in a vnode reaches a certain volume, TDengine starts a flush thread to drain the cache so that subsequent writes are not blocked. A data-driven flush resets the timer of the timer-driven flush.
When flushing, TDengine opens a new write-ahead log file and deletes the old one after the flush completes, so that the log files cannot grow without bound. TDengine manages the cache on a first-in, first-out basis, which keeps the latest data of every table in the cache.
## Data Storage
TDengine stores all data under the /var/lib/taos/ directory; this can be customized with the dataDir configuration parameter.
TDengine's metadata covers the databases, tables, users, and so on in TDengine. The tag data of every super table and every table is also stored here. To speed up access, all metadata is cached.
Data written to TDengine is sharded on disk along the time dimension. The data of the tables in one vnode within the same time range is stored in the same file group. This sharding greatly simplifies queries along the time dimension and improves query speed. With the default configuration, each data file on disk holds 10 days of data; this can be customized with the daysPerFile configuration parameter.
Data in a table has a retention period; once the retention period (3650 days by default) is exceeded, the data is deleted automatically by the system. The period can be customized with the daysToKeep configuration parameter.
Data is stored in blocks inside the files. Each data block contains the data of exactly one table, sorted in ascending order by the timestamp primary key. Inside a block, data is stored column by column, so values of the same column sit together; different compression methods are applied to different data types, which greatly improves the compression ratio and saves storage space.
There are three kinds of data files: data files, which hold the actual data blocks and are append-only; head files, which hold the index of the data blocks in the corresponding data file; and last files, which store the most recently written data. During each flush, the data in the last file is merged with the in-memory data, and the result is written either to the data file or back to the last file.

View File

@ -1,101 +0,0 @@
# Data Model and Architecture
## Data Model
### A Typical IoT Scenario
In a typical IoT scenario, there are many types of devices. Each device is collecting one or multiple metrics. For a specific type of device, the collected data could look like the table below:
| Device ID | Time Stamp | Value 1 | Value 2 | Value 3 | Tag 1 | Tag 2 |
| :-------: | :-----------: | :-----: | :-----: | :-----: | :---: | :---: |
| D1001 | 1538548685000 | 10.3 | 219 | 0.31 | Red | Tesla |
| D1002 | 1538548684000 | 10.2 | 220 | 0.23 | Blue | BMW |
| D1003 | 1538548686500 | 11.5 | 221 | 0.35 | Black | Honda |
| D1004 | 1538548685500 | 13.4 | 223 | 0.29 | Red | Volvo |
| D1001 | 1538548695000 | 12.6 | 218 | 0.33 | Red | Tesla |
| D1004 | 1538548696600 | 11.8 | 221 | 0.28 | Black | Honda |
Each data record contains the device ID, timestamp, collected metrics, and static tags associated with the device. Each device generates a data record in a pre-defined timer or triggered by an event. It is a sequence of data points like a stream.
### Data Characteristics
As the data points are a series of data points over time, the data points generated by devices, sensors, servers, and/or applications have some strong common characteristics:
1. metrics are always structured data;
2. there are rarely delete/update operations on collected data;
3. there is only one single data source for one device or sensor;
4. ratio of read/write is much lower than typical Internet applications;
5. the user pays attention to the trend of data, not a specific value at a specific time;
6. there is always a data retention policy;
7. the data query is always executed in a given time range and a subset of devices;
8. real-time aggregation or analytics is mandatory;
9. traffic is predictable based on the number of devices and sampling frequency;
10. data volume is huge, a system may generate 10 billion data points in a day.
By utilizing the above characteristics, TDengine designs the storage and computing engine in a special and optimized way for time-series data, resulting in massive improvements in system efficiency.
### Relational Database Model
Since time-series data is most likely to be structured data, TDengine adopts the traditional relational database model to process them. You need to create a database, create tables with schema definitions, then insert data points and execute queries to explore the data. Standard SQL is used, making it easy for anyone to get started and eliminating any learning curve.
### One Table for One Device
Due to different network latencies, the data points from different devices may arrive to the server out of order. But for the same device, data points will arrive to the server in order if the system is designed well. To utilize this special feature, TDengine requires the user to create a table for each device (time-stream). For example, if there are over 10,000 smart meters, 10,000 tables shall be created. For the table above, 4 tables shall be created for device D1001, D1002, D1003, and D1004 to store the data collected.
This strong requirement can guarantee that all data points from a device can be saved in a continuous memory/hard disk space block by block. If queries are applied only on one device in a time range, this design will reduce the read latency significantly since a whole block is owned by one single device. Additionally, write latency can be significantly reduced too as the data points generated by the same device will arrive in order, the new data point will be simply appended to a block. Cache block size and the rows of records in a file block can be configured to fit different scenarios for optimal efficiency.
### Best Practices
**Table**: TDengine suggests to use device ID as the table name (like D1001 in the above diagram). Each device may collect one or more metrics (like value1, value2, value3 in the diagram). Each metric has a column in the table, the metric name can be used as the column name. The data type for a column can be int, float, double, tinyint, bigint, bool or binary. Sometimes, a device may have multiple metric groups, each group containing different sampling periods, so for best practice you should create a table for each group for each device. The first column in the table must be a time stamp. TDengine uses the time stamp as the index, and wont build the index on any metrics stored.
**Tags:** To support aggregation over multiple tables efficiently, the [STable(Super Table)](../super-table) concept is introduced by TDengine. A STable is used to represent the same type of device. The schema is used to define the collected metrics (like value1, value2, value3 in the diagram), and tags are used to define the static attributes for each table or device (like tag1, tag2 in the diagram). A table is created via STable with a specific tag value. All or a subset of tables in a STable can be aggregated by filtering tag values.
**Database:** Different types of devices may generate data points in different patterns and should be processed differently. For example, sampling frequency, data retention policy, replication number, cache size, record size, the compression algorithm may be different. To make the system more efficient, TDengine suggests creating a different database with unique configurations for different scenarios.
**Schemaless vs Schema:** Compared with NoSQL databases, since a table with schema definitions must be created before the data points can be inserted, flexibilities are not that good, especially when the schema is changed. But in most IoT scenarios, the schema is well defined and is rarely changed, the loss of flexibility won't pose any impact to developers or administrators. TDengine allows the application to change the schema in a second even there is a huge amount of historical data when schema has to be changed.
TDengine does not impose a limitation on the number of tables, [STables](../super-table), or databases. You can create any number of STable or databases to fit different scenarios.
## Architecture
There are two main modules in TDengine server as shown in Picture 1: **Management Module (MGMT)** and **Data Module(DNODE)**. The whole TDengine architecture also includes a **TDengine Client Module**.
<center> <img src="../assets/structure.png"> </center>
<center> Picture 1 TDengine Architecture </center>
### MGMT Module
The MGMT module deals with the storage and querying on metadata, which includes information about users, databases, and tables. Applications will connect to the MGMT module at first when connecting the TDengine server. When creating/dropping databases/tables, The request is sent to the MGMT module at first to create/delete metadata. Then the MGMT module will send requests to the data module to allocate/free resources required. In the case of writing or querying, applications still need to visit the MGMT module to get meta data, according to which, then access the DNODE module.
### DNODE Module
The DNODE module is responsible for storing and querying data. For the sake of future scaling and high-efficient resource usage, TDengine applies virtualization on resources it uses. TDengine introduces the concept of a virtual node (vnode), which is the unit of storage, resource allocation and data replication (enterprise edition). As is shown in Picture 2, TDengine treats each data node as an aggregation of vnodes.
When a DB is created, the system will allocate a vnode. Each vnode contains multiple tables, but a table belongs to only one vnode. Each DB has one or mode vnodes, but one vnode belongs to only one DB. Each vnode contains all the data in a set of tables. Vnodes have their own cache and directory to store data. Resources between different vnodes are exclusive with each other, no matter cache or file directory. However, resources in the same vnode are shared between all the tables in it. Through virtualization, TDengine can distribute resources reasonably to each vnode and improve resource usage and concurrency. The number of vnodes on a dnode is configurable according to its hardware resources.
<center> <img src="../assets/vnode.png"> </center>
<center> Picture 2 TDengine Virtualization </center>
### Client Module
TDengine client module accepts requests (mainly in SQL form) from applications and converts the requests to internal representations and sends to the server side. TDengine supports multiple interfaces, which are all built on top of TDengine client module.
For the communication between client and MGMT module, TCP/UDP is used, the port is set by the parameter `mgmtShellPort` in system configuration file `taos.cfg`, default is 6030. For communication between the client and the DNODE module, TCP/UDP is used, the port is set by the parameter `vnodeShellPort` in the system configuration file, default is 6035.
## Writing Process
Picture 3 shows the full writing process of TDengine. TDengine uses the [Writing Ahead Log] (http://en.wikipedia.org/wiki/Write-ahead_logging) strategy to assure data security and integrity. Data received from the client is written to the commit log at first. When TDengine recovers from crashes caused by power loss or other situations, the commit log is used to recover data. After writting to the commit log, data will be wrtten to the corresponding vnode cache, then an acknowledgment is sent to the application. There are two mechanisms that can flush data in cache to disk for persistent storage:
1. **Flush driven by timer**: There is a backend timer which flushes data in cache periodically to disks. The period is configurable via parameter commitTime in system configuration file taos.cfg.
2. **Flush driven by data**: Data in the cache is also flushed to disks when the left buffer size is below a threshold. Flush driven by data can reset the timer of flush driven by the timer.
<center> <img src="../assets/write_process.png"> </center>
<center> Picture 3 TDengine Writting Process </center>
New commit log files will be opened when the committing process begins. When the committing process finishes, the old commit file will be removed.
## Data Storage
TDengine data are saved in _/var/lib/taos_ directory by default. It can be changed to other directories by setting the parameter `dataDir` in system configuration file taos.cfg.
TDengine's metadata includes the database, table, user, super table and tag information. To reduce the latency, metadata are all buffered in the cache.
Data records saved in tables are sharded according to the time range. Data from tables in the same vnode in a certain time range are saved in the same file group. This sharding strategy can effectively improve data search speed. By default, one group of files contain data in 10 days, which can be configured by `daysPerFile` in the configuration file or by the *DAYS* keyword in *CREATE DATABASE* clause.
Data records are removed automatically once their lifetime is passed. The lifetime is configurable via parameter daysToKeep in the system configuration file. The default value is 3650 days.
Data in files are blockwise. A data block only contains one table's data. Records in the same data block are sorted according to the primary timestamp. To improve the compression ratio, records are stored column by column, and different compression algorithms are applied based on each column's data type.

View File

@ -1,248 +0,0 @@
# The Technical Design of TDengine
## Storage Design
TDengine's data storage consists of **metadata storage** and **written-data storage**. The following sections describe the storage structures TDengine uses for each kind of data.
### Metadata Storage
TDengine's metadata covers the databases, super tables, and so on in TDengine. By default, metadata is stored under the _/var/lib/taos/mgmt/_ directory, whose layout is as follows:
```
/var/lib/taos/
+--mgmt/
+--db.db
+--meters.db
+--user.db
+--vgroups.db
```
Metadata is laid out sequentially in the files. Each record in a file represents one metadata entity in TDengine (a database, a table, etc.). The metadata files are append-only; even deleting a metadata entity only appends a deletion record to the file.
### Written Data Storage
Data written to TDengine is sharded on disk along the time dimension. The data of the tables in one vnode within the same time range is stored in the same file group, such as the v0f1804* files in the listing below. This sharding greatly simplifies queries along the time dimension and improves query speed. With the default configuration, each file on disk holds 10 days of data; users can adjust this via the database's _daysPerFile_ option. Data inside a file is stored in blocks. Each data block contains the data of exactly one table, sorted in ascending order by the timestamp primary key. Inside a block, data is stored column by column, so values of the same type sit together, which greatly improves the compression ratio and saves storage space. TDengine applies a different compression algorithm to each data type to achieve the best result; the algorithms used include Simple8B, delta-of-delta, RLE, and LZ4.
TDengine's data files are stored under */var/lib/taos/data/* by default, while the */var/lib/taos/tsdb/* directory holds the vnode information, the information about the tables in each vnode, and links to the data files. The complete directory layout is as follows:
```
/var/lib/taos/
+--tsdb/
| +--vnode0
| +--meterObj.v0
| +--db/
| +--v0f1804.head->/var/lib/taos/data/vnode0/v0f1804.head1
| +--v0f1804.data->/var/lib/taos/data/vnode0/v0f1804.data
| +--v0f1804.last->/var/lib/taos/data/vnode0/v0f1804.last1
| +--v0f1805.head->/var/lib/taos/data/vnode0/v0f1805.head1
| +--v0f1805.data->/var/lib/taos/data/vnode0/v0f1805.data
| +--v0f1805.last->/var/lib/taos/data/vnode0/v0f1805.last1
| :
+--data/
+--vnode0/
+--v0f1804.head1
+--v0f1804.data
+--v0f1804.last1
+--v0f1805.head1
+--v0f1805.data
+--v0f1805.last1
:
```
#### The meterObj File
Each vnode has exactly one _meterObj_ file. It stores the vnode's basic information (creation time, configuration, statistics, and so on) as well as the information about the tables in that vnode. Its structure is as follows:
```
<beginning of file>
[file header]
[offset and length of table record 1]
[offset and length of table record 2]
...
[offset and length of table record N]
[table record 1]
[table record 2]
...
[table record N]
<end of file>
```
The file header is 512 bytes and mainly holds the vnode's basic information. Each table record is the on-disk representation of one table belonging to the vnode.
#### The head File
A head file stores the index of the data blocks in its corresponding data file. The file is organized as follows:
```
<beginning of file>
[file header]
[offset of table 1]
[offset of table 2]
...
[offset of table N]
[data index of table 1]
[data index of table 2]
...
[data index of table N]
<end of file>
```
The offset list at the beginning of the file gives, for each table, the offset in the file where that table's data index block starts. The index information of each table is stored contiguously in the head file, which lets TDengine read all the block indexes of a single table into memory in one pass, greatly improving read speed. A table's data index block is organized as follows:
```
[index block information]
[index of data block 1]
[index of data block 2]
...
[index of data block N]
```
The index block information records the number of data blocks and other descriptive data. Each data block index corresponds to a single data block in a data file or last file; it records which file the block is stored in, the offset of the block's start, the range of the primary timestamps of the data in the block, and so on. The block indexes inside an index block are ordered by time range, meaning the data in the block of index entry M is all later than that of index entry M-1. This pre-sorted layout lets TDengine use binary search when querying by timestamp, greatly improving query speed.
#### The data File
A data file holds the actual data blocks. It is append-only and organized as follows:
```
<beginning of file>
[file header]
[data block 1]
[data block 2]
...
[data block N]
<end of file>
```
Each data block belongs to exactly one table in the vnode, and the data inside it is ordered by the timestamp primary key. Data in a block is organized by column, so values of the same type sit together, which eases compression and reading. Each data block is organized as follows:
```
[column 1 information]
[column 2 information]
...
[column N information]
[column 1 data]
[column 2 data]
...
[column N data]
```
The column information includes the column's type, its compression algorithm, and the offset and length of the column's data in the file. It also includes the pre-computed results for that column's data in the block, so that filter queries can decide from the pre-computed results whether the data block needs to be read at all, which greatly improves read speed.
#### The last File
To avoid fragmenting data blocks and to improve query speed and compression ratio, TDengine introduces the last file. When a data block about to be flushed contains fewer rows than a threshold, TDengine first writes it into the last file for temporary storage. When new data needs to be flushed, the data in the last file is read back, merged with the new data into a new data block, and written to the data file. The last file is organized in the same way as the data file.
### Summary of TDengine Data Storage
Through its innovative architecture and storage structure, TDengine makes effective use of computing resources. On the one hand, virtualization makes horizontal scaling and replication straightforward. On the other hand, storing table data sorted by the timestamp primary key, in columnar form, gives TDengine major advantages in writing, querying, and compression.
## Query Processing
### Overview
TDengine offers a rich variety of query functions for tables and super tables: besides regular aggregation queries, it also provides window queries and statistical aggregation for time-series data. Query processing in TDengine requires the client, the management node, and the data nodes to cooperate. The query-related functions and modules of each component are as follows:
Client (Client App). The client consists of the TAOS SQL parser (SQL Parser), the query executor (Query Executor), the second-stage aggregator (Result Merger), the continuous query manager (Continuous Query Manager), and other functional modules. The SQL parser parses and validates SQL statements and converts them into an abstract syntax tree; the query executor converts the syntax tree into query execution logic and, based on the query conditions in the SQL statement, splits it into two levels: a metadata query against the management node and a data query against the data nodes. Since TAOS SQL currently provides no complex nested queries and no pipelined query mechanism, there is no need for query plan optimization or for converting logical query plans into physical ones. The second-stage aggregator merges the independent results returned by the data nodes into the final result. The continuous query manager manages the continuous queries created by users: it launches the queries on schedule and, as required, writes the results back into TDengine or returns them to the client application. The client is also responsible for retrying after query failures, canceling query requests, keeping connection heartbeats, and reporting query status to the management node.
Management node (Management Node). The management node holds the metadata of all the data in the cluster and provides clients with the metadata their queries need; it also splits query requests according to the cluster's load. Since a super table contains the information of all tables created through it, the management node's query executor (Query Executor) handles tag (TAG) queries and returns the set of tables matching the tag filter to the client. The management node also maintains the cluster's query status (Query Status Manager), keeping in memory a temporary record of all queries currently executing; when a client issues the *show queries* command, the information about the queries currently running in the system is returned to the client.
Data node (Data Node). A data node stores all the actual database content and schedules query execution through its query executor, query processing scheduler, and query task queue (Query Task Queue). Query requests received from clients are placed into the processing queue; the query executor takes requests off the queue and executes them. A query optimizer (Query Optimizer) applies basic optimizations, and the data node's query executor (Query Executor) scans the qualifying data units and returns the computed result. Data nodes also respond to management information and commands from the management node; for example, after a *kill query* command, the query task must be stopped immediately.
<center> <img src="../assets/fig1.png"> </center>
<center>Figure 1. Query processing architecture (query-related components only)</center>
### Regular Query Processing
The client, the management node, and the data nodes cooperate to complete the full TDengine query flow. Take a concrete SQL query as an example: against the super table *FOO_SUPER_TABLE*, retrieve the total number of records, over the whole day of January 12, 2019, in the tables whose tag TAG_LOC is 'beijing'. The SQL statement is:
```sql
SELECT COUNT(*)
FROM FOO_SUPER_TABLE
WHERE TAG_LOC = 'beijing' AND TS >= '2019-01-12 00:00:00' AND TS < '2019-01-13 00:00:00'
```
First, the client invokes the TAOS SQL parser to parse and validate the SQL statement, generate the syntax tree, and extract the query target from it, the super table *FOO_SUPER_TABLE*. The parser then requests the corresponding metadata from the management node (Management Node), sending the filter (TAG_LOC='beijing') along with the request.
The management node receives the metadata request, first finds the basic information of the super table *FOO_SUPER_TABLE*, then applies the query condition to filter all the tables created through that super table, and finally, through its query executor, returns to the client the metadata of the objects (tables or super table) that satisfy the condition (TAG_LOC='beijing', i.e. tables whose *TAG_LOC* tag column is 'beijing').
After obtaining the metadata of *FOO_SUPER_TABLE*, the client's query executor issues query requests to the nodes that hold the relevant data, according to the data distribution recorded in the metadata; the timestamp range filter (TS >= '2019-01-12 00:00:00' AND TS < '2019-01-13 00:00:00') must be sent to all of those data nodes.
A data node receiving the query converts it into an internal structure, optimizes it, places it into its task execution queue, and waits for the query executor to run it. When the query result is ready, it is returned to the client. Each data node executes its query completely independently, relying only on its own data and resources.
After all the data nodes involved have returned their results, the client aggregates the result sets once more (in this example, all the counts are summed), and that aggregate is the final result. Second-stage aggregation is not needed by every query; a plain column selection, for instance, needs no second-stage aggregation.
### REST Query Processing
Besides the C/C++, Python, and JDBC interfaces, TDengine also provides a REST interface based on HTTP. Unlike development with an application client, when the REST interface is used all query processing happens on the server side; the user's application service takes no part in the database's computation, and when the query completes, the result is returned to the user in JSON format over HTTP.
<center> <img src="../assets/fig2.png"> </center>
<center>Figure 2. REST query architecture</center>
When a user issues a query through the HTTP-based REST interface, the HTTP request first establishes a connection with the HTTP connector (Connector) on a data node, and the REST signature mechanism uses a token to ensure the reliability of the request. On receiving the request, the data node's HTTP connector invokes the embedded client program to initiate query processing: the embedded client parses the SQL statement passed in through the HTTP connector, requests metadata from the management node as needed, sends query requests to the local node or to other nodes in the cluster, and aggregates the computed results as required. Once the HTTP connector has received the SQL request, the subsequent processing is exactly the same as for a query issued through an application client library. Finally, the query result is converted into a JSON string and returned to the client in the HTTP response.
Note that throughout the HTTP flow the user application takes no part in query processing; it is only responsible for sending SQL requests over HTTP and receiving results in JSON format. Also note that every data node embeds an HTTP connector and a client program, so a request can be sent to any data node in the cluster, and that node can return the query result over HTTP.
### Technical Characteristics
Because TDengine stores data and tags separately, redundancy in tag storage is greatly reduced. Tag data is directly associated with each table and is managed and maintained in an all-in-memory structure, which supports fast query processing: tag queries over tens of millions of tags can return within milliseconds. First, filtering on tags effectively reduces the volume of data involved in the second stage of a query. Second, to improve query performance, TDengine exploits the immutability of IoT data by recording, on every stored data block, pre-computed statistics such as the maximum, minimum, and sum of the data in that block. If a query covers all the data of a block, the pre-computed results are used directly and the block's content is not read at all. Because the pre-computed results are far smaller than the actual data on disk, using them dramatically reduces read IO and accelerates queries whose bottleneck is disk IO.
TDengine stores data by column. When a data block is read from disk for computation, only the columns referenced by the query are read, with no unrelated data, minimizing the read volume. Moreover, because of the columnar layout, a data node scans data one column block at a time, making full use of the CPU L2 cache and greatly accelerating data scans. In addition, some queries do not wait for the complete result set before returning: for a column selection query, a data node returns the first batch of results to the client as soon as it is available. Also, as soon as a data node receives a query request it immediately acknowledges it to the client, launches query processing in parallel, and responds with the result only when execution completes.
## Cluster Design
### 1. The Cluster and Its Primary Logical Units
TDengine is designed on the assumption that hardware and software are unreliable and will fail, and that no single machine has enough capacity to process massive amounts of data. Therefore, from day one TDengine has been built as a distributed, highly reliable architecture that is fully decentralized and horizontally scalable, so that the failure of any one or several servers, or any software error, does not affect the service. Through node virtualization, complemented by automatic load balancing, TDengine makes the best possible use of the compute and storage resources in a heterogeneous cluster. Moreover, as long as the data replica count is greater than one, neither hardware or software upgrades nor IDC migration requires stopping the cluster service, which keeps the system running and greatly reduces the workload of administrators and operators.
The diagram below shows eight physical nodes, each logically divided into multiple virtual nodes. The basic concepts of the system are introduced below.
![assets/nodes.png](../assets/nodes.png)
**Physical node (dnode):** a physical server in the cluster, or a virtual machine on a cloud platform. For security and communication efficiency, a physical node may be configured with two network interfaces or two IP addresses: one interface for intra-cluster communication, whose IP address is the **privateIp**, and another for communication with applications outside the cluster, whose IP address is the **publicIp**. On some cloud platforms (such as Alibaba Cloud), the external IP address is mapped, so the publicIp also has a corresponding internal address, the **internalIp** (different from the privateIp). For a physical node with a single IP address, publicIp, privateIp, and internalIp are all the same address, with no distinction. Exactly one taosd instance runs on each dnode.
**Virtual data node (vnode):** an independently running basic logical unit on top of a physical node. The logic for time-series data writing, storage, and querying runs inside virtual nodes (V in the figure), and collected time-series data is stored on vnodes. A vnode contains a fixed number of tables; when a new table is created, the system checks whether a new vnode must be created. The number of vnodes that can be created on a physical node depends on its hardware resources. A vnode belongs to exactly one DB, but a DB may have multiple vnodes.
**Virtual data node group (vgroup):** vnodes located on different physical nodes can form a virtual data node group (vnode group; in the figure above, V0 on dnode0, V1 on dnode1, and V2 on dnode6 belong to the same group). The vnodes in a vgroup are managed in a master/slave fashion: writes can only be performed on the master, and the data is replicated to the slaves asynchronously, so that one copy of the data exists on multiple physical nodes. If the master goes down, the other nodes detect it and elect a new master for the vgroup, and the new master continues to serve data requests, keeping the system reliable. The number of vnodes in a vgroup is the data replica count: if a DB has N replicas, the system must have at least N physical nodes. The replica count is specified with the parameter replica when the DB is created, with a default of 1. With TDengine, data safety relies on replication, so expensive storage devices such as disk arrays are no longer necessary.
**Virtual management node (mnode):** responsible for monitoring and maintaining the running state of all nodes and for load balancing among them (M in the figure). It also stores and manages the metadata (users, databases, tables, static tags, etc.), so it is also called the Meta Node. A TDengine cluster may be configured with multiple mnodes (at most 5), which automatically form a management node cluster (M0, M1, M2 in the figure). Mnodes are managed in a master/slave fashion, with strongly consistent data synchronization. The mnode cluster is created automatically by the system, with no manual intervention. There is at most one mnode per dnode, and every dnode knows the IP addresses of all mnodes in the cluster.
**taosc:** a software module, the driver TDengine provides to applications; it is embedded in the JDBC and ODBC drivers and in the C connector library. Applications interact with the cluster through taosc rather than directly. This module obtains and caches metadata, forwards insert and query requests to the correct virtual nodes, and performs the final level of aggregation, sorting, and filtering before returning results to the application. For the JDBC, ODBC, and C/C++ interfaces, this module runs on the machine hosting the application and consumes very little resource. To support the fully distributed REST interface, a taosc instance runs on every dnode of the TDengine cluster.
**Service address:** a TDengine cluster can hold one, several, or even thousands of physical nodes. An application only needs to connect to the publicIp of any one physical node in the cluster. When starting the CLI application taos, the -h option takes exactly this publicIp.
**masterIp/secondIp:** every dnode must be configured with a masterIp. After starting, a dnode sends a join-cluster request to the configured masterIp. The masterIp is the privateIp of any node already in the cluster; for the first node of a cluster, it is the node's own privateIp. To make sure the connection succeeds, each dnode may also be configured with a secondIp, again the privateIp of any node in the existing cluster. If a node fails to connect to the masterIp, it tries the secondIp.
After a dnode starts, it obtains the list of mnode IPs in the cluster and reports its status to the mnodes periodically.
vnodes and mnodes are only logical divisions; they are just different threads within the executable taosd, so no separate software needs to be installed and no special configuration is required. The minimal system configuration is a single physical node on which vnode, mnode, and taosc all exist and run normally, but a single node cannot guarantee high reliability.
### 2. A Typical Operation Flow
To explain the relationship between vnode, mnode, taosc, and the application, and the role each plays, we walk through the typical flow of a data insert operation.
![Picture1](../assets/Picture2.png)
1. The application initiates an insert request through JDBC, ODBC, or another API.
2. taosc checks its cache for the table's meta data. If present, go straight to step 4. If not, taosc sends a get meta-data request to an mnode.
3. The mnode returns the table's meta-data to taosc. The meta-data contains the table's schema and its vgroup information: the vnode IDs and the IP addresses of the dnodes they reside on (with a replica count of N there are N vnodeID/IP pairs). If taosc gets no response from the mnode for a long time and multiple mnodes exist, taosc sends the request to the next mnode.
4. taosc sends the insert request to the master vnode.
5. After inserting the data, the vnode replies to taosc to confirm success. If taosc gets no response from the vnode for a long time, it treats that node as offline; in that case, if the target database has multiple replicas, taosc sends the insert request to the next vnode in the vgroup.
6. taosc notifies the application that the write succeeded.
For steps 2 and 3: when taosc starts it does not know any mnode IP, so it sends the request to the configured public service IP of the cluster. If the dnode receiving the request is not configured as an mnode, it replies with the list of mnode IPs (if there are multiple dnodes, there may be multiple mnode IPs), and taosc re-sends the get meta-data request to one of those mnode IPs.
For steps 4 and 5: without cached information, taosc does not know which member of the vgroup is the master, so it assumes the first vnodeID/IP pair is the master and sends the request there. If the receiving vnode is not the master, it replies with the identity of the master, and taosc re-sends the request to the suggested master vnode. Once an insert succeeds, taosc caches the master's information.
The flow above describes inserts; queries and computations follow exactly the same flow. taosc encapsulates and hides all of this complexity, so the application does not have to deal with redirection, metadata retrieval, or other details; everything is transparent.
Thanks to taosc's caching, an mnode only has to be contacted the first time a table is operated on, so the mnode does not become a system bottleneck. However, because the schema may change and the vgroup may change (for example when load balancing occurs), taosc refreshes its cache periodically.
### 3. Data Partitioning
vnodes (virtual data nodes) store the collected time-series data, and queries and computations run on them. To facilitate load balancing, data recovery, and support for heterogeneous environments, TDengine splits a physical node into multiple vnodes according to its compute and storage resources. These vnodes are managed automatically by TDengine, completely transparently to applications.
For a single data collection point, whatever its data volume, one vnode (or one vnode group, if the replica count is greater than 1) has enough compute and storage resources to handle it (if one 16-byte record is generated per second, the raw data produced in a year is still under 0.5 GB), so TDengine stores all the data of a table in one vnode and never spreads one collection point's data across two or more dnodes. A vnode can hold the data of multiple tables; the maximum number of tables per vnode is set by the configuration parameter tables (default 2000). By design, all tables in a vnode belong to the same DB, so the number of vnodes (or vgroups) a database needs equals the number of tables in the database divided by tables.
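As a quick check of the sizing claim above, one 16-byte record per second amounts to

$$16\ \text{B/s} \times 86{,}400\ \text{s/day} \times 365\ \text{days} = 504{,}576{,}000\ \text{B} \approx 0.47\ \text{GiB},$$

comfortably under 0.5 GB of raw data per collection point per year.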
When a DB is created, the system does not allocate resources immediately. When a table is created, the system checks whether an already-allocated vnode has a free slot; if so, the table is created in that vnode right away. If not, the system creates a new vnode on a dnode chosen according to the current load, and then creates the table. If the DB has multiple replicas, the system creates not a single vnode but a whole vgroup (virtual data node group). The system imposes no limit on the number of vnodes; it is bounded only by the compute and storage resources of the physical nodes.
The parameter tables should be set with the concrete scenario in mind, and it can be set individually per DB at creation time. It should be neither too large nor too small: too small (in the extreme, one vnode per collection point) produces too many data files, while too large forfeits the benefits of virtualization. Given the cluster's compute resources, the total number of vnodes in the system should be more than twice the number of CPU cores.
### 4. Load Balancing
Every dnode (physical node) periodically reports its status (disk space, memory, CPU, network, number of virtual nodes, etc.) to an mnode (virtual management node), so the mnode knows the state of the whole cluster. Based on this global state, when the mnode finds a dnode overloaded, it moves one or more vnodes from that dnode to other dnodes. Service continues during the move: data inserts, queries, and computations are unaffected, and after load balancing finishes the application does not need to restart; it connects to the new vnode automatically.
If an mnode receives no status report from a dnode for some time, it considers the dnode offline. If the offline period exceeds a threshold (set by the configuration parameter offlineThreshold), the mnode forcibly removes the dnode from the cluster. For the vnodes on that dnode, if the replica count is greater than one, the system automatically creates new replicas on other dnodes to restore the configured replica count.
**Note:** cluster features are currently limited to the enterprise edition.
@ -1,176 +0,0 @@
# TDengine System Architecture
## Storage Design
TDengine data mainly include **metadata** and **data**, which we introduce in the following sections.
### Metadata Storage
Metadata include the information of databases, tables, etc. Metadata files are saved in the _/var/lib/taos/mgmt/_ directory by default. The directory tree is as below:
```
/var/lib/taos/
+--mgmt/
+--db.db
+--meters.db
+--user.db
+--vgroups.db
```
A metadata structure (database, table, etc.) is saved as a record in a metadata file. All metadata files are append-only; even a drop operation appends a deletion record at the end of the file.
### Data storage
Data in TDengine are sharded according to the time range. Data of tables in the same vnode in a certain time range are saved in the same filegroup, such as files v0f1804*. This sharding strategy can effectively improve data searching speed. By default, a group of files contains data for 10 days, which can be configured by *daysPerFile* in the configuration file or by the *DAYS* keyword in the *CREATE DATABASE* clause. Data in files are organized in blocks, and a data block contains only one table's data. Records in the same data block are sorted according to the primary timestamp, which helps to improve the compression rate and save storage. The compression algorithms used in TDengine include simple8B, delta-of-delta, RLE, LZ4, etc.
By default, TDengine data are saved in the */var/lib/taos/data/* directory, while the _/var/lib/taos/tsdb/_ directory contains vnode information and data file links.
```
/var/lib/taos/
+--tsdb/
| +--vnode0
| +--meterObj.v0
| +--db/
| +--v0f1804.head->/var/lib/taos/data/vnode0/v0f1804.head1
| +--v0f1804.data->/var/lib/taos/data/vnode0/v0f1804.data
| +--v0f1804.last->/var/lib/taos/data/vnode0/v0f1804.last1
| +--v0f1805.head->/var/lib/taos/data/vnode0/v0f1805.head1
| +--v0f1805.data->/var/lib/taos/data/vnode0/v0f1805.data
| +--v0f1805.last->/var/lib/taos/data/vnode0/v0f1805.last1
| :
+--data/
+--vnode0/
+--v0f1804.head1
+--v0f1804.data
+--v0f1804.last1
+--v0f1805.head1
+--v0f1805.data
+--v0f1805.last1
:
```
#### meterObj file
There is only one meterObj file in a vnode. Information about the vnode, such as its creation time, configuration, and statistics, is saved in this file. It has the structure below:
```
<start_of_file>
[file_header]
[table_record1_offset&length]
[table_record2_offset&length]
...
[table_recordN_offset&length]
[table_record1]
[table_record2]
...
[table_recordN]
<end_of_file>
```
The file header takes 512 bytes and mainly contains information about the vnode. Each table record is the on-disk representation of a table.
#### head file
The _head_ files contain the index of data blocks in the _data_ file. The inner organization is as below:
```
<start_of_file>
[file_header]
[table1_offset]
[table2_offset]
...
[tableN_offset]
[table1_index_block]
[table2_index_block]
...
[tableN_index_block]
<end_of_file>
```
The table offset array in the _head_ file saves the offset of each table's index block. Indices on data blocks of the same table are saved contiguously, which makes it efficient to load the data indices of one table. A data index block has a structure like:
```
[index_block_info]
[block1_index]
[block2_index]
...
[blockN_index]
```
The index block info part contains information about the index block, such as the number of index blocks, etc. Each block index corresponds to a real data block in the _data_ file or _last_ file. Information about the location of the real data block, the primary timestamp range of the data block, etc. are all saved in the block index part. The block indices are sorted in ascending order of the primary timestamp, so algorithms such as binary search can be applied to efficiently locate blocks by time.
#### data file
The _data_ files store the real data blocks. They are append-only. The organization is as follows:
```
<start_of_file>
[file_header]
[block1]
[block2]
...
[blockN]
<end_of_file>
```
A data block in _data_ files only belongs to a table in the vnode and the records in a data block are sorted in ascending order according to the primary timestamp key. Data blocks are column-oriented. Data in the same column are stored contiguously, which improves reading speed and compression rate because of their similarity. A data block has the following organization:
```
[column1_info]
[column2_info]
...
[columnN_info]
[column1_data]
[column2_data]
...
[columnN_data]
```
The column info part includes the column type, the column compression algorithm, the column data's offset and length in the _data_ file, etc. Besides, pre-calculated results of the column data in the block are also kept in the column info part, which improves reading speed by avoiding loading the data block unnecessarily.
#### last file
To avoid storage fragmentation and to improve query speed and compression rate, TDengine introduces an extra file, the _last_ file. When the number of records in a data block is lower than a threshold, TDengine will flush the block to the _last_ file for temporary storage. When new data comes, the data in the _last_ file will be merged with the new data to form a larger data block, which is then written to the _data_ file. The organization of the _last_ file is similar to the _data_ file.
### Summary
The innovations in TDengine's architecture and storage design improve resource utilization. On the one hand, virtualization makes it easy to distribute resources between different vnodes and to scale in the future. On the other hand, sorted, column-oriented storage gives TDengine a great advantage in writing, querying, and compression.
## Query Design
#### Introduction
TDengine provides a variety of query functions for both tables and super tables. In addition to regular aggregate queries, it also provides time-window-based queries and statistical aggregation for time-series data. TDengine's query processing requires the client app, management node, and data nodes to work together. The query-related functions and modules of each component are as follows:
Client (Client App). The client development kit, embedded in a client application, consists of the TAOS SQL parser, the query executor, the second-stage aggregator (Result Merger), the continuous query manager, and other major functional modules. The SQL parser is responsible for parsing and verifying the SQL statement and converting it into an abstract syntax tree. The query executor transforms the abstract syntax tree into query execution logic and creates the metadata query according to the query conditions of the SQL statement. Since TAOS SQL does not currently include complex nested queries or a pipelined query processing mechanism, there is no need for query plan optimization or for logical-to-physical query plan conversion. The second-stage aggregator performs, on the client side, the aggregation of the independent results returned by the data nodes involved in a query, to generate the final result. The continuous query manager is dedicated to managing the continuous queries created by users, including issuing fixed-interval query requests and writing the results back to TDengine or returning them to the client application as needed. The client is also responsible for retrying after a query fails, canceling query requests, maintaining the connection heartbeat, and reporting query status to the management node.
Management Node. The management node keeps the metadata of all the data in the entire cluster, provides the client with the metadata required for a query, and divides query requests according to the load of the cluster. A super table contains information about all the tables created from it, so the query processor (Query Executor) of the management node handles queries on table tags and returns the table information satisfying the tag filter. The management node also maintains the query status of the cluster in the Query Status Manager component, which temporarily keeps the metadata of all currently executing queries in an in-memory buffer. When a client issues the *show queries* command to the management node, information about the currently running queries is returned to the client.
Data Node. The data node, responsible for storing all the data of the database, consists of the query executor, the query processing scheduler, the query task queue, and other related components. Once query requests from clients are received, they are put into the query task queue to await the query executor. The query executor extracts a query request from the queue and invokes the query optimizer to perform basic optimization of the execution plan; it then scans the qualifying data blocks in both cache and disk to obtain the qualifying data and returns the calculated results. The data node also needs to respond to management information and commands from the management node; for example, after a *kill query* command is received from the management node, the query task must be stopped immediately.
<center> <img src="../assets/fig1.png"> </center>
<center>Fig 1. System query processing architecture diagram (only query related components)</center>
#### Query Process Design
The client, the management node, and the data node cooperate to complete the entire query processing of TDengine. Let's take a concrete SQL query as an example to illustrate the whole query processing flow. The SQL statement is to query on super table *FOO_SUPER_TABLE* to get the total number of records generated on January 12, 2019, from the table, of which TAG_LOC equals to 'beijing'. The SQL statement is as follows:
```sql
SELECT COUNT(*)
FROM FOO_SUPER_TABLE
WHERE TAG_LOC = 'beijing' AND TS >= '2019-01-12 00:00:00' AND TS < '2019-01-13 00:00:00'
```
First, the client invokes the TAOS SQL parser to parse and validate the SQL statement, generates a syntax tree, and extracts the object of the query (the super table *FOO_SUPER_TABLE*); the parser then sends a request with the filtering information (TAG_LOC='beijing') to the management node to get the corresponding metadata about *FOO_SUPER_TABLE*.
Once the management node receives the metadata request, it first finds the basic information of the super table *FOO_SUPER_TABLE*, then applies the query condition (TAG_LOC='beijing') to filter all the related tables created from it. Finally, the query executor returns the metadata of the qualifying tables to the client.
After the client obtains the metadata of *FOO_SUPER_TABLE*, the query executor initiates query requests with the timestamp range filter (TS >= '2019-01-12 00:00:00' AND TS < '2019-01-13 00:00:00') to all nodes that hold the corresponding data, according to the data distribution recorded in the metadata.
The data node receives the query sent from the client, converts it into an internal structure, optimizes the execution plan, and puts it into the query task queue to be executed by the query executor. When the query result is obtained, it is returned to the client. It should be noted that the data nodes execute the query independently of each other, relying solely on their own data and state.
When all data nodes involved in the query have returned results, the client aggregates the result sets from the data nodes; in this case, all counts are accumulated to generate the final query result. The second stage of aggregation is not required by all queries; for example, a column selection query does not require second-stage aggregation at all.
#### REST Query Process
In addition to the C/C++, Python, and JDBC interfaces, TDengine also provides a REST interface based on the HTTP protocol, which differs from programming against the client library: when the user uses the REST interface, all query processing is completed on the server side, and the user's application is not involved in query processing at all. After query processing is completed, the result is returned to the client as a JSON string over HTTP.
<center> <img src="../assets/fig2.png"> </center>
<center>Fig. 2 REST query architecture</center>
When a client uses the HTTP-based REST query interface, it first establishes a connection with the HTTP connector at a data node, and then uses a token, via the REST signature mechanism, to ensure the reliability of the request. After receiving the request, the data node's HTTP connector invokes the embedded client program to initiate query processing; the embedded client parses the SQL statement passed in from the HTTP connector and requests metadata from the management node as needed. After that, the embedded client sends query requests to the same data node or to other nodes in the cluster and aggregates the calculated results on demand. Finally, the query result is converted into a JSON string and returned to the client via the HTTP response. After the HTTP connector receives the SQL request, the subsequent processing is completely consistent with query processing using the client development kit.
It should be noted that during the entire processing, the client application is no longer involved; it is only responsible for sending SQL requests through the HTTP protocol and receiving results in JSON format. Also, since every data node embeds an HTTP connector and a client, a client can send its request to any data node in the cluster, and that data node can initiate the query, forward sub-requests to other data nodes as needed, and return the result to the client through the HTTP protocol.
#### Technology
Because TDengine stores data and tag values separately, tag values are kept in the management node and directly associated with each table instead of with records, which greatly reduces storage. Tag values can therefore be managed in a fully in-memory structure. First, filtering on the tag data can drastically reduce the data size involved in the second phase of the query. Query processing on the data itself is performed at the data nodes. TDengine takes advantage of the immutability of IoT data by keeping, on each saved data block, pre-computed statistics such as the maximum and minimum of the data in that block, to effectively improve query performance. If a query involves all the data of an entire data block, the pre-computed result is used directly and the content of the data block is no longer needed. Since the disk space required to store the pre-computed results is much smaller than that of the actual data, they can greatly reduce disk IO and speed up query processing.
TDengine employs column-oriented data storage. When a data block must be loaded from disk for calculation, only the required columns are read according to the query conditions, minimizing the read overhead. The data of one column is stored in a contiguous block and can therefore make full use of the CPU L2 cache, greatly speeding up data scanning. Besides, TDengine uses an eager response mechanism and returns partial results before the complete result is acquired; for example, in a column selection query, the data node returns the first batch of results to the client as soon as it is obtained.
@ -1,225 +0,0 @@
# Super Table (STable): Multi-Table Aggregation
TDengine requires a separate table for each data collection point. Creating tables independently avoids write-time locking and thus greatly improves insert and query performance, but it also means that the number of tables is of the same order as the number of collection points. With many collection points the number of tables becomes huge, making table maintenance and cross-table aggregation and statistics hard for applications. To reduce the development burden, TDengine introduces the concept of the super table (Super Table, STable for short).
## What Is a Super Table
A super table is an abstraction over data collection points of the same type, a collection of instances of the same type, containing multiple child tables with identical schemas. Each STable defines a table schema and a set of tags for its child tables: the schema describes the data columns of a record and their data types, while the tag names and types are defined by the STable, and the tag values record the static information of each child table, used for grouping and filtering child tables. A child table is essentially an ordinary table, consisting of a timestamp primary key and several data columns, with each row recording actual data; queries on a child table work exactly as on an ordinary table. The difference is that a child table belongs to one super table and carries a set of tag values defined by that STable. One STable can be defined per type of collection device. The data model defines the type of each data column (such as temperature, pressure, voltage, current, or GPS position), while tag information belongs to the metadata (such as the serial number, model, or location of the device); tags are static and are metadata of the table. When creating a table (data collection point), the user specifies its STable (collection type) and may also specify the tag values, which can be added or modified later.
TDengine extends standard SQL syntax to define an STable, using the keyword tags to specify the tag schema. The syntax is:
```mysql
CREATE TABLE <stable_name> (<field_name> TIMESTAMP, field_name1 field_type,…) TAGS(tag_name tag_type, …)
```
Here tag_name is a tag name and tag_type its data type. Tags can use any data type supported by TDengine except timestamp; there may be at most 128 tags, and a tag name must not be the same as a system keyword or any other column name. For example:
```mysql
CREATE TABLE thermometer (ts timestamp, degree float)
TAGS (location binary(20), type int)
```
The SQL above creates an STable named thermometer with the tags location and type.
When creating a table for a collection point, you can specify its STable and the tag values. The syntax is:
```mysql
CREATE TABLE <tb_name> USING <stb_name> TAGS (tag_value1,...)
```
Continuing the thermometer example above, the statement below creates a table for a single thermometer from the super table thermometer:
```mysql
CREATE TABLE t1 USING thermometer TAGS ('beijing', 10)
```
The SQL above creates a table named t1 from the template thermometer. Its schema is that of thermometer, with the tag location set to 'beijing' and the tag type set to 10.
A user can create an unlimited number of tables with different tags from one STable; in this sense, an STable is a collection of tables with the same data model but different tags. As with ordinary tables, users can create, drop, and inspect STables, and most queries that apply to ordinary tables also apply to STables, including all kinds of aggregation and projection/selection functions. In addition, tag filter conditions can be set so that aggregation runs over only a subset of the tables in an STable, which greatly simplifies application development.
TDengine indexes a table's primary key (the timestamp) and does not currently index other collected quantities in the data model (such as temperature or pressure values). Each data collection point produces many data records, but its tags are only a single record, so tag storage has no redundancy and the overall data volume is limited. TDengine stores tag data completely separately from the collected dynamic data and builds a high-performance in-memory index structure on an STable's tags, providing fast operations on tags in all respects. Users can create, retrieve, update, and delete (CRUD) them as needed.
An STable belongs to a database: an STable belongs to exactly one database, but a database can have one or more STables, and an STable can have many child tables.
## STable Management
- Create an STable
```mysql
CREATE TABLE <stable_name> (<field_name> TIMESTAMP, field_name1 field_type,…) TAGS(tag_name tag_type, …)
```
The syntax is similar to creating a table, but the names and types of the TAGS fields must be specified.
Notes:
1. The total length of the TAGS columns cannot exceed 16 KB;
2. The data type of a TAGS column cannot be timestamp;
3. A TAGS column name cannot be the same as any other column name;
4. A TAGS column name cannot be a reserved keyword;
5. The maximum number of TAGS is 128.
- Show created STables
```mysql
show stables;
```
Lists all STables in the current database and related information, including each STable's name, creation time, number of columns, number of tags (TAG), and the number of tables created from it.
- Drop an STable
```mysql
DROP TABLE <stable_name>
```
Note: dropping an STable drops all tables created from it.
- List the tables of an STable that satisfy filter conditions
```mysql
SELECT TBNAME,[TAG_NAME,…] FROM <stable_name> WHERE <tag_name> <[=|<=|>=|<>] values..> ([AND|OR] …)
```
Lists the tables that belong to an STable and satisfy the filter conditions. Note: TBNAME is a keyword that displays the names of the child tables created from the STable; conditions on tags may be used in the query.
```mysql
SELECT COUNT(TBNAME) FROM <stable_name> WHERE <tag_name> <[=|<=|>=|<>] values..> ([AND|OR] …)
```
Counts the child tables that belong to an STable and satisfy the filter conditions.
## Auto-Creating Child Tables on Write
In some special scenarios, the writer does not know in advance whether a device's table exists. In that case the auto-create-table syntax can be used: when writing data, a missing child table is created automatically using the schema defined by the super table, and if the table already exists no new table is created. Note that this syntax can only auto-create child tables, not super tables, so the super table must have been defined beforehand. The syntax is very similar to insert/import; the only difference is that the statement carries the super table name and the tag values:
```mysql
INSERT INTO <tb_name> USING <stb_name> TAGS (<tag1_value>, ...) VALUES (field_value, ...) (field_value, ...) ...;
```
Inserts one or more records into table tb_name. If tb_name does not exist, it is created using the schema defined by the super table stb_name and the specified tag values (tag1_value, ...), and the given values are then written into it. If tb_name already exists, the table creation step is skipped; the system does not check whether tb_name's tags match the specified tag values, i.e., the tags of an existing table are not updated.
```mysql
INSERT INTO <tb1_name> USING <stb1_name> TAGS (<tag1_value1>, ...) VALUES (<field1_value1>, ...) (<field1_value2>, ...) ... <tb_name2> USING <stb_name2> TAGS(<tag1_value2>, ...) VALUES (<field1_value1>, ...) ...;
```
Inserts one or more records into multiple tables (tb1_name, tb2_name, ...), with the super table for auto-creation specified for each table.
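A minimal sketch using the thermometer STable from the usage example later in this document; the table name therm5 and its tag values are illustrative:
```mysql
-- therm5 does not exist yet: it is created from thermometer with the given tags,
-- then the row is inserted; on later runs the creation step is skipped.
INSERT INTO therm5 USING thermometer TAGS ('chengdu', 2) VALUES ('2018-01-01 00:00:00.000', 22);
```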
## Managing the TAGS of an STable
Except for updating a tag's value, which is done on a child table, all other tag operations (adding a tag, dropping a tag, etc.) can only be applied to the STable, not to individual child tables. After a tag is added to an STable, all tables created from it automatically gain the new tag; for numeric tags, the default value of the new tag is 0.
- Add a new tag
```mysql
ALTER TABLE <stable_name> ADD TAG <new_tag_name> <TYPE>
```
Adds a new tag to the STable and specifies its type. The total number of tags cannot exceed 128.
- Drop a tag
```mysql
ALTER TABLE <stable_name> DROP TAG <tag_name>
```
Drops a tag from the super table; after a tag is dropped from a super table, all its child tables automatically drop that tag as well.
Note: the first tag cannot be dropped; an STable must keep at least one tag.
- Rename a tag
```mysql
ALTER TABLE <stable_name> CHANGE TAG <old_tag_name> <new_tag_name>
```
Renames a tag of the super table; after a tag of a super table is renamed, all its child tables automatically pick up the new name.
- Change a child table's tag value
```mysql
ALTER TABLE <table_name> SET TAG <tag_name>=<new_tag_value>
```
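Sets the given tag of one child table to a new value. A minimal sketch using the child tables from the usage example later in this document; the new value is illustrative:
```mysql
-- Retag only child table therm4; other child tables are unaffected.
ALTER TABLE therm4 SET TAG location='chongqing';
```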
## Multi-Table Aggregation over an STable
Multi-table aggregation queries run over all child tables created from an STable. Filtering on all of the tag values is supported, and results can be grouped by the values in TAGS (fuzzy-match filtering on binary tags is not supported yet). The syntax is:
```mysql
SELECT function<field_name>,…
FROM <stable_name>
WHERE <tag_name> <[=|<=|>=|<>] values..> ([AND|OR] …)
INTERVAL (<time range>)
GROUP BY <tag_name>, <tag_name>
ORDER BY <tag_name> <asc|desc>
SLIMIT <group_limit>
SOFFSET <group_offset>
LIMIT <record_limit>
OFFSET <record_offset>
```
**Notes:**
For super table aggregation, TDengine currently supports the following aggregation/selection functions: sum, count, avg, first, last, min, max, top, bottom, plus projection on all or some columns, used the same way as in single-table queries. Other kinds of aggregate computation and arithmetic operations are not supported yet, and currently none of the functions or computations can be nested.
A query without GROUP BY aggregates over all tables under the super table that satisfy the filter conditions, and the output is ordered by timestamp in ascending order by default; ORDER BY _c0 ASC|DESC selects ascending or descending timestamp order. An aggregation query with GROUP BY <tag_name> groups the data by tags and aggregates each group separately; the output is one aggregate result per group, group order can be specified with ORDER BY <tag_name>, and within each group the time series increases monotonically.
SLIMIT/SOFFSET paginate over groups: they set the maximum number of groups in the result set and the starting group. LIMIT/OFFSET paginate within a group: they set the maximum number of records output per group and the starting record, as the sketch below shows.
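A minimal sketch combining both levels of pagination on the thermometer STable defined in the usage example below; the filter and limits are illustrative:
```mysql
-- At most 5 location groups (starting from the first group),
-- and at most 100 rows per group (starting from the first row).
SELECT AVG(degree)
FROM thermometer
WHERE type=1
GROUP BY location
SLIMIT 5 SOFFSET 0
LIMIT 100 OFFSET 0;
```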
## STable Usage Example
We use time-series data collected by temperature sensors to demonstrate STables. In this example, one table is created per thermometer, named after the thermometer's ID; the reading time is recorded as ts and the collected value as degree. Tags record each thermometer's region and type, which makes the queries below convenient. All thermometers collect the same quantities, so we define the schema with an STable.
### 1. Define the STable schema and create child tables from it
The statement to create the STable is:
```mysql
CREATE TABLE thermometer (ts timestamp, degree double)
TAGS(location binary(20), type int)
```
Suppose there are four thermometers across three regions, Beijing, Tianjin, and Shanghai, and three thermometer types; we can then create a table for each device as follows:
```mysql
CREATE TABLE therm1 USING thermometer TAGS ('beijing', 1);
CREATE TABLE therm2 USING thermometer TAGS ('beijing', 2);
CREATE TABLE therm3 USING thermometer TAGS ('tianjin', 1);
CREATE TABLE therm4 USING thermometer TAGS ('shanghai', 3);
```
Here therm1, therm2, therm3, and therm4 are four concrete child tables of the super table thermometer, i.e., ordinary tables. Taking therm1 as an example: it holds the data of device therm1, its schema is fully defined by thermometer, and its tags location="beijing", type=1 mean that therm1 is located in Beijing and is a type-1 thermometer.
### 2. Write data
Note that data cannot be written to an STable directly; writes go to each child table. We write one record into each of the four tables therm1, therm2, therm3, and therm4:
```mysql
INSERT INTO therm1 VALUES ('2018-01-01 00:00:00.000', 20);
INSERT INTO therm2 VALUES ('2018-01-01 00:00:00.000', 21);
INSERT INTO therm3 VALUES ('2018-01-01 00:00:00.000', 24);
INSERT INTO therm4 VALUES ('2018-01-01 00:00:00.000', 23);
```
### 3. Aggregate by tags
Query the sample count count(*), average temperature avg(degree), maximum temperature max(degree), and minimum temperature min(degree) of the temperature sensors located in Beijing (beijing) and Tianjin (tianjin), grouping the results by region (location) and sensor type (type):
```mysql
SELECT COUNT(*), AVG(degree), MAX(degree), MIN(degree)
FROM thermometer
WHERE location='beijing' or location='tianjin'
GROUP BY location, type
```
### 4. Aggregate by time window
For the temperature sensors located anywhere except Beijing, query the sample count count(*), average avg(degree), maximum max(degree), and minimum min(degree) over the last 24 hours (24h), aggregated in 10-minute windows, and group the results by region (location) and sensor type (type):
```mysql
SELECT COUNT(*), AVG(degree), MAX(degree), MIN(degree)
FROM thermometer
WHERE location<>'beijing' and ts>=now-1d
INTERVAL(10M)
GROUP BY location, type
```
@ -1,195 +0,0 @@
# STable: Super Table
"One Table for One Device" design can improve the insert/query performance significantly for a single device. But it has a side effect, the aggregation of multiple tables becomes hard. To reduce the complexity and improve the efficiency, TDengine introduced a new concept: STable (Super Table).
## What is a Super Table
STable is an abstraction of, and a template for, a type of device. A STable contains a set of devices (tables) that have the same schema or data structure. Besides the shared schema, a STable has a set of tags, such as the model, serial number, and so on. Tags record the static attributes of the devices and are used to group a set of devices (tables) for aggregation. Tags are metadata of a table and can be added, deleted, or changed.
TDengine does not save tags as part of the collected data points. Instead, tags are saved as metadata. Each table has a set of tags. To improve query performance, tags are all cached and indexed. One table can only belong to one STable, but one STable may contain many tables.
Like a table, you can create, show, delete, and describe STables. Most query operations on tables can be applied to STables too, including the aggregation and selector functions. For a query on a STable, if there is no tag filter, the operations are applied to all the tables created via this STable; if there is a tag filter, the operations are applied only to the subset of tables that satisfy the filter conditions. Tags make it very convenient to put devices into different groups for aggregation.
## Create a STable
Similar to creating a standard table, the syntax is:
```mysql
CREATE TABLE <stable_name> (<field_name> TIMESTAMP, field_name1 field_type,…) TAGS(tag_name tag_type, …)
```
A new keyword "tags" is introduced, where tag_name is the tag name and tag_type is the associated data type.
Note:
1. The total length of all tags shall be less than 16 KB
2. A tag's data type cannot be timestamp
3. A tag name shall be different from any field name
4. A tag name shall not be a system keyword
5. The maximum number of tags is 128
For example:
```mysql
create table thermometer (ts timestamp, degree float)
tags (location binary(20), type int)
```
The above statement creates a STable thermometer with two tags, "location" and "type".
## Create a Table via STable
To create a table for a device, you can use a STable as its template and assign the tag values. The syntax is:
```mysql
CREATE TABLE <tb_name> USING <stb_name> TAGS (tag_value1,...)
```
You can create any number of tables via a STable, and each table may have different tag values. For example, the statements below create five tables via the STable thermometer:
```mysql
create table t1 using thermometer tags ('beijing', 10);
create table t2 using thermometer tags ('beijing', 20);
create table t3 using thermometer tags ('shanghai', 10);
create table t4 using thermometer tags ('shanghai', 20);
create table t5 using thermometer tags ('new york', 10);
```
## Aggregate Tables via STable
You can group a set of tables together by specifying the tags filter condition, then apply the aggregation operations. The result set can be grouped and ordered based on tag value. Syntax is
```mysql
SELECT function<field_name>,…
FROM <stable_name>
WHERE <tag_name> <[=|<=|>=|<>] values..> ([AND|OR] …)
INTERVAL (<time range>)
GROUP BY <tag_name>, <tag_name>
ORDER BY <tag_name> <asc|desc>
SLIMIT <group_limit>
SOFFSET <group_offset>
LIMIT <record_limit>
OFFSET <record_offset>
```
For the time being, STable supports only the following aggregation/selection functions: *sum, count, avg, first, last, min, max, top, bottom*, and projection operations, with the same syntax as for a standard table. Arithmetic operations are not supported, nor are nested queries.
*INTERVAL* is used for the aggregation over a time range.
If *GROUP BY* is not used, the aggregation is applied to all the selected tables, and the result set is output in ascending order of the timestamp, but you can use "*ORDER BY _c0 ASC|DESC*" to specify the order you like.
If *GROUP BY <tag_name>* is used, the aggregation is applied to groups based on tags. Each group is aggregated independently. Result set is a group of aggregation results. The group order is decided by *ORDER BY <tag_name>*. Inside each group, the result set is in the ascending order of the time stamp.
*SLIMIT/SOFFSET* are used to limit the number of groups and starting group number.
*LIMIT/OFFSET* are used to limit the number of records in a group and the starting rows.
### Example 1:
Check the number of records and the average, maximum, and minimum temperatures of Beijing and Tianjin, and group the result set by location and type. The SQL statement shall be:
```mysql
SELECT COUNT(*), AVG(degree), MAX(degree), MIN(degree)
FROM thermometer
WHERE location='beijing' or location='tianjin'
GROUP BY location, type
```
### Example 2:
List the number of records, average, maximum, and minimum temperature every 10 minutes for the past 24 hours for all the thermometers located in Beijing with type 10. The SQL statement shall be:
```mysql
SELECT COUNT(*), AVG(degree), MAX(degree), MIN(degree)
FROM thermometer
WHERE location='beijing' and type=10 and ts>=now-1d
INTERVAL(10M)
```
## Create Table Automatically
An insert operation will fail if the table is not created yet. But for a STable, TDengine can create the table automatically if the application provides the STable name, table name, and tag values when inserting data points. The syntax is:
```mysql
INSERT INTO <tb_name> USING <stb_name> TAGS (<tag1_value>, ...) VALUES (field_value, ...) (field_value, ...) ... <tb_name2> USING <stb_name2> TAGS(<tag1_value2>, ...) VALUES (<field1_value1>, ...) ...;
```
When inserting data points into table tb_name, the system checks whether table tb_name exists. If it already exists, the data points are inserted as usual. If not, the system creates the table tb_name using the STable stb_name as the template, with the given tags. Multiple tables can be specified in one SQL statement, as in the sketch below.
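A minimal sketch reusing the thermometer STable from the examples above; the table name t6 and its tag values are illustrative:
```mysql
-- Creates t6 from thermometer on first use, then inserts the row.
INSERT INTO t6 USING thermometer TAGS ('tokyo', 20) VALUES ('2019-06-01 08:00:00.000', 18.5);
```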
## Management of STables
After you create a STable, you can describe, delete, and change it. This section lists all the supported operations.
### Show STables in current DB
```mysql
show stables;
```
It lists all STables in the current DB, including the name, creation time, number of fields, number of tags, and the number of tables created via each STable.
### Describe a STable
```mysql
DESCRIBE <stable_name>
```
It lists the STable's schema and tags
### Drop a STable
```mysql
DROP TABLE <stable_name>
```
When a STable is dropped, all the tables created via it are deleted as well.
### List the Associated Tables of a STable
```mysql
SELECT TBNAME,[TAG_NAME,…] FROM <stable_name> WHERE <tag_name> <[=|<=|>=|<>] values..> ([AND|OR] …)
```
It lists all the tables created from this specific STable that satisfy the tag filter conditions. TBNAME is a new keyword; it is the name of a table associated with the STable.
```mysql
SELECT COUNT(TBNAME) FROM <stable_name> WHERE <tag_name> <[=|<=|>=|<>] values..> ([AND|OR] …)
```
The above SQL statement counts the tables in a STable that satisfy the filter condition.
## Management of Tags
You can add, delete and change the tags for a STable, and you can change the tag value of a table. The SQL commands are listed below.
### Add a Tag
```mysql
ALTER TABLE <stable_name> ADD TAG <new_tag_name> <TYPE>
```
It adds a new tag to the STable with a data type. The maximum number of tags is 128.
### Drop a Tag
```mysql
ALTER TABLE <stable_name> DROP TAG <tag_name>
```
It drops a tag from a STable. The first tag cannot be deleted, and at least one tag must remain.
### Change a Tag's Name
```mysql
ALTER TABLE <stable_name> CHANGE TAG <old_tag_name> <new_tag_name>
```
It changes the name of a tag from old to new.
### Change the Tag's Value
```mysql
ALTER TABLE <table_name> SET TAG <tag_name>=<new_tag_value>
```
It changes a table's tag value to a new one.
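A minimal sketch using a table created earlier in this document; the new value is illustrative:
```mysql
-- Retag table t5 (created above with location 'new york'); only t5 is affected.
ALTER TABLE t5 SET TAG location='boston';
```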
@ -1,659 +0,0 @@
# TAOS SQL
This document describes the syntax rules of TAOS SQL, its main query features, the supported SQL functions, and common tips. Readers are assumed to have basic knowledge of SQL.
TAOS SQL is the main tool for writing data into and querying TDengine. To help users get started quickly, it follows, to a degree, the style and conventions of standard SQL; strictly speaking, however, TAOS SQL neither is, nor tries to be, an implementation of the SQL standard. In addition, since TDengine targets time-series structured data and provides no modification or update capability, TAOS SQL offers no data update or data deletion features.
The SQL syntax in this document follows these conventions:
- the content inside < > must be supplied by the user, without typing <> themselves
- [ ] marks optional content, without typing [] themselves
- | separates alternatives, of which one is chosen, without typing | itself
- … means the preceding item can be repeated
To better illustrate the syntax rules and their characteristics, this document assumes a data set modeled for two types of devices: temperature/humidity sensors and pressure/altitude sensors.
The temperature sensors have a super table (super table) named temp_stable. Its data model is as follows:
```
taos> describe temp_stable;
Field | Type | Length | Note |
=======================================================================================================
ts |TIMESTAMP | 8 | |
temperature |FLOAT | 4 | |
humidity |TINYINT | 1 | |
status |TINYINT | 1 | |
deviceid |BIGINT | 8 |tag |
location |BINARY | 20 |tag |
```
The data set contains data from 2 temperature sensors which, following TDengine's modeling rules, correspond to 2 child tables named temp_tb_1 and temp_tb_2.
The pressure (altitude) sensors have a super table (super table) named pressure_stable. Its data model is as follows:
The data set contains data from 2 pressure sensors, corresponding to 2 child tables, press_tb_1 and press_tb_2.
```text
taos> describe pressure_stable;
Field | Type | Length | Note |
=======================================================================================================
ts |TIMESTAMP | 8 | |
height |FLOAT | 4 | |
pressure |FLOAT | 4 | |
devstat |TINYINT | 1 | |
id |BIGINT | 8 |tag |
city |NCHAR | 20 |tag |
longitude |FLOAT | 4 |tag |
latitude |FLOAT | 4 |tag |
```
## Supported Data Types
The most important concept in TDengine is the timestamp: one must be specified when creating tables, inserting records, and querying historical records. Timestamps follow these rules:
- the time format is ```YYYY-MM-DD HH:mm:ss.MS```, with millisecond resolution by default, e.g. ```2017-08-12 18:25:58.128```
- the built-in function now returns the server's current time
- when inserting a record, a timestamp of 0 is replaced with the server's current time
- Epoch time: a timestamp can also be a long integer counting milliseconds since 1970-01-01 08:00:00.000 (UTC+8)
- times can be added and subtracted, e.g. now-2h means 2 hours before the query time (the last 2 hours). Time units after a number: a (milliseconds), s (seconds), m (minutes), h (hours), d (days), w (weeks), n (months), y (years). For example, select * from t1 where ts > now-2w and ts <= now-1w queries exactly the full week of data from two weeks ago
- TDengine does not yet support splitting time windows by calendar year or month. Time window units in a Where condition convert as follows: interval(1y) equals interval(365d), interval(1n) equals interval(30d), interval(1w) equals interval(7d)
TDengine's default timestamp precision is milliseconds; microsecond precision can be enabled with the configuration parameter enableMicrosecond.
The following 10 data types can be used in the data model of an ordinary TDengine table.
| | Type | Bytes | Description |
| ---- | :-------: | ------ | ------------------------------------------------------------ |
| 1 | TIMESTAMP | 8 | Timestamp, millisecond precision at the finest, counted from 1970-01-01 00:00:00.000 (UTC/GMT); earlier times are not allowed. |
| 2 | INT | 4 | Integer, range [-2^31+1, 2^31-1]; -2^31 is used for NULL |
| 3 | BIGINT | 8 | Long integer, range [-2^63+1, 2^63-1]; -2^63 is used for NULL |
| 4 | FLOAT | 4 | Float, 6-7 significant digits, range [-3.4E38, 3.4E38] |
| 5 | DOUBLE | 8 | Double, 15-16 significant digits, range [-1.7E308, 1.7E308] |
| 6 | BINARY | user-defined | Stores strings, in theory up to 16374 bytes; since a row is limited to 16K bytes, the practical limit is usually lower. binary accepts string input only, and strings must be enclosed in single quotes, otherwise all English letters are automatically converted to lower case. A size must be specified: binary(20) defines a string of at most 20 characters, each occupying 1 byte of storage; a longer string causes an error. A single quote inside a string is written with the escape sequence backslash plus single quote, i.e. **\'**. |
| 7 | SMALLINT | 2 | Short integer, range [-32767, 32767]; -32768 is used for NULL |
| 8 | TINYINT | 1 | Single-byte integer, range [-127, 127]; -128 is used for NULL |
| 9 | BOOL | 1 | Boolean, {true, false} |
| 10 | NCHAR | user-defined | Stores non-ASCII strings, such as Chinese characters. Each nchar character occupies 4 bytes of storage. Strings must be enclosed in single quotes, and a single quote inside a string is written with the escape sequence **\'**. A size must be specified: a column of type nchar(10) stores at most 10 nchar characters and always occupies 40 bytes. A string longer than the declared length causes an error. |
**Tips**: TDengine treats English characters in SQL statements as case-insensitive and converts them to lower case before execution. Case-sensitive strings and passwords must therefore be enclosed in single quotes.
## Database Management
- **Create a database**
```mysql
CREATE DATABASE [IF NOT EXISTS] db_name [KEEP keep]
```
Creates a database. `KEEP` is the number of days data is retained in this database; the default is 3650 days (10 years), and the database automatically deletes data older than the limit. A database has more storage-related configuration parameters; see [System Management](../administrator/#服务端配置).
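A minimal sketch; the database name and retention period are illustrative:
```mysql
-- Keep data for one year; skip creation if the database already exists.
CREATE DATABASE IF NOT EXISTS sensordb KEEP 365;
```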
- **Use a database**
```mysql
USE db_name
```
Uses/switches to a database
- **Drop a database**
```mysql
DROP DATABASE [IF EXISTS] db_name
```
Drops a database. All the tables it contains will be deleted; use with caution
- **Show all databases in the system**
```mysql
SHOW DATABASES
```
## Table Management
- **Create a table**
```mysql
CREATE TABLE [IF NOT EXISTS] tb_name (timestamp_field_name TIMESTAMP, field1_name data_type1 [, field2_name data_type2 ...])
```
Notes: 1) the first field of a table must be a TIMESTAMP, which the system automatically makes the primary key; 2) a table row cannot exceed 16K bytes; 3) the binary and nchar data types require a maximum byte length, e.g. binary(20) for 20 bytes.
- **Drop a table**
```mysql
DROP TABLE [IF EXISTS] tb_name
```
- **Show all tables in the current database**
```mysql
SHOW TABLES [LIKE tb_name_wildcard]
```
Shows information about all tables in the current database. Wildcards can be used in LIKE for name matching: 1) % (percent) matches 0 to any number of characters; 2) _ (underscore) matches exactly one character.
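A minimal sketch against the sample data set; the pattern is illustrative:
```mysql
-- Matches temp_tb_1 and temp_tb_2 from the sample data set.
SHOW TABLES LIKE 'temp_tb_%';
```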
- **Get a table's schema**
```mysql
DESCRIBE tb_name
```
- **Add a column to a table**
```mysql
ALTER TABLE tb_name ADD COLUMN field_name data_type
```
- **Drop a column from a table**
```mysql
ALTER TABLE tb_name DROP COLUMN field_name
```
If the table was created via a [super table](../super-table/), schema changes can only be performed on the super table, and a change to the super table's schema applies to all tables created from it. For tables not created via a super table, the schema can be modified directly
**Tips**: you do not need to specify the database of a table that is in the current database (selected with use db_name). To operate on a table in another database, use the form "db_name"."table_name"; e.g. demo.tb1 is table tb1 in database demo.
## Writing Data
- **Insert one record**
```mysql
INSERT INTO tb_name VALUES (field_value, ...);
```
Inserts one record into table tb_name
- **Insert one record into specified columns**
```mysql
INSERT INTO tb_name (field1_name, ...) VALUES(field1_value, ...)
```
Inserts one record into table tb_name, with values mapped to the specified columns. Columns not listed in the SQL statement are filled with NULL by the database. The primary key (the timestamp) cannot be NULL.
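A minimal sketch against the sample table temp_tb_1; the values are illustrative:
```mysql
-- humidity and status are not listed, so they are filled with NULL.
INSERT INTO temp_tb_1 (ts, temperature) VALUES ('2019-04-28 14:22:13.000', 21.8);
```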
- **Insert multiple records**
```mysql
INSERT INTO tb_name VALUES (field1_value1, ...) (field1_value2, ...)...;
```
Inserts multiple records into table tb_name
- **Insert multiple records into specified columns**
```mysql
INSERT INTO tb_name (field1_name, ...) VALUES(field1_value1, ...) (field1_value2, ...)
```
Inserts multiple records into the specified columns of table tb_name
- **Insert multiple records into multiple tables**
```mysql
INSERT INTO tb1_name VALUES (field1_value1, ...)(field1_value2, ...)...
tb2_name VALUES (field1_value1, ...)(field1_value2, ...)...;
```
Inserts multiple records into tables tb1_name and tb2_name at the same time
- **Insert multiple records into specified columns of multiple tables**
```mysql
INSERT INTO tb1_name (tb1_field1_name, ...) VALUES (field1_value1, ...) (field1_value1, ...)
tb2_name (tb2_field1_name, ...) VALUES(field1_value1, ...) (field1_value2, ...)
```
Inserts multiple records into the specified columns of tables tb1_name and tb2_name at the same time
Notes: 1) the timestamps of new records inserted into the same table must be increasing; otherwise the record is skipped. If the timestamp is 0, the system uses the server's current time as the record's timestamp.
2) the oldest allowed record timestamp is the current server time minus the configured keep value (the number of days data is retained), and the newest allowed record timestamp is the current server time plus the configured days value (the time span of a data file, in days). keep and days can both be specified when the database is created; the defaults are 3650 days and 10 days respectively.
**IMPORT**: to write records whose timestamps are smaller than that of the last stored record, use the IMPORT command in place of INSERT; the syntax of IMPORT is identical to that of INSERT.
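A minimal sketch against the sample table temp_tb_1; the values are illustrative:
```mysql
-- Back-fills a record older than the rows already stored in temp_tb_1.
IMPORT INTO temp_tb_1 VALUES ('2019-04-28 14:22:06.000', 19.5, 33, 1);
```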
## Querying Data
### Query Syntax:
```mysql
SELECT [DISTINCT] select_expr [, select_expr ...]
FROM {tb_name_list}
[WHERE where_condition]
[INTERVAL [interval_offset,] interval_val]
[FILL fill_val]
[SLIDING fill_val]
[GROUP BY col_list]
[ORDER BY col_list { DESC | ASC }]
[HAVING expr_list]
[SLIMIT limit_val [, SOFFSET offset_val]]
[LIMIT limit_val [, OFFSET offset_val]]
[>> export_file]
```
#### SELECT Clause
A select clause can be part of a union query (UNION) or a subquery (SUBQUERY) of another query.
##### Wildcard
The wildcard * denotes all columns. For ordinary tables, the result contains only the ordinary columns.
```
taos> select * from temp_tb_1;
ts | temperature |humidity|status|
============================================================
19-04-28 14:22:07.000| 20.00000 | 34 | 1 |
19-04-28 14:22:08.000| 21.50000 | 38 | 1 |
19-04-28 14:22:09.000| 21.30000 | 38 | 1 |
19-04-28 14:22:10.000| 21.20000 | 38 | 1 |
19-04-28 14:22:11.000| 21.30000 | 35 | 0 |
19-04-28 14:22:12.000| 22.00000 | 34 | 0 |
```
For super tables, the wildcard also includes the _tag columns_.
```
taos> select * from temp_stable;
ts | temperature |humidity|status| deviceid | location |
==============================================================================================
19-04-28 14:22:07.000| 21.00000 | 37 | 1 |54197 |beijing |
19-04-28 14:22:07.000| 20.00000 | 34 | 1 |91234 |beijing |
19-04-28 14:22:08.000| 21.50000 | 38 | 1 |91234 |beijing |
19-04-28 14:22:09.000| 21.30000 | 38 | 1 |91234 |beijing |
19-04-28 14:22:10.000| 21.20000 | 38 | 1 |91234 |beijing |
19-04-28 14:22:11.000| 21.30000 | 35 | 0 |91234 |beijing |
19-04-28 14:22:12.000| 22.00000 | 34 | 0 |91234 |beijing |
```
The wildcard supports a table-name prefix; the following two SQL statements both return all columns:
```
select * from temp_tb_1;
select temp_tb_1.* from temp_tb_1;
```
In a join query, a prefixed \* and an unprefixed \* return different results: \* returns all columns of all tables (without tags), while a prefixed wildcard returns only that table's columns.
```
taos> select * from temp_tb_1,temp_tb_2 where temp_tb_1.ts=temp_tb_2.ts;
ts | temperature |humidity|status| ts | temperature |humidity|status|
========================================================================================================================
19-04-28 14:22:07.000| 20.00000 | 34 | 1 | 19-04-28 14:22:07.000| 21.00000 | 37 | 1 |
```
```
taos> select temp_tb_1.* from temp_tb_1,temp_tb_2 where temp_tb_1.ts=temp_tb_2.ts;
ts | temperature |humidity|status|
============================================================
19-04-28 14:22:07.000| 20.00000 | 34 | 1 |
```
Some SQL functions support wildcards. The difference between them is that
the ```count(*)``` function returns only one column, while ```first```, ```last```, and ```last_row``` return all columns.
```
taos> select count(*) from temp_tb_1;
count(*) |
======================
1 |
```
```
taos> select first(*) from temp_tb_1;
first(ts) | first(temperature) |first(humidity)|first(status)|
==========================================================================
19-04-28 14:22:07.000| 20.00000 | 34 | 1 |
```
#### Result Set Column Names
In the ```SELECT``` clause, if the column names of the result set are not specified, they default to the expression names in the ```SELECT``` clause. ```AS``` can be used to rename columns of the result set. For example:
```
taos> select ts, ts as primary_key_ts from temp_tb_1;
ts | primary_key_ts |
==============================================
19-04-28 14:22:07.000| 19-04-28 14:22:07.000|
```
However, renaming individual columns is not supported for ```first(*)```, ```last(*)```, and ```last_row(*)```.
#### The DISTINCT Modifier*
It can only be applied to tag columns (TAGS) to deduplicate their values, not to ordinary columns, and after ```DISTINCT``` is applied only a single tag column can be output.
```count(distinct column_name)``` returns the approximate number of distinct values; the result is an approximation.
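A minimal sketch against the sample super table; the column choice is illustrative:
```mysql
-- Approximate number of distinct locations among temp_stable's child tables.
SELECT COUNT(DISTINCT location) FROM temp_stable;
```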
#### Implicit Result Columns
```Select_exprs``` can be column names of the table, or function expressions or computations based on columns, with an upper limit of 256. When an ```interval``` or ```group by tags``` clause is used, the final result always includes the timestamp column (the first column) and the tag columns of the group by clause. Later versions may support turning off the implicit output of the group by columns, leaving the output entirely to the select clause.
#### Table (Super Table) List
The FROM keyword may be followed by a list of tables (super tables) or by the result of a subquery.
If the user's current database is not specified, a table can be qualified with its database name to refer to a table in another database, e.g. ```sample.temp_tb_1```.
```
SELECT * FROM sample.temp_tb_1;
------------------------------
use sample;
SELECT * FROM temp_tb_1;
```
Aliases can be used for the tables in the FROM list to keep the SQL simpler.
```
SELECT t.ts FROM temp_tb_1 t ;
```
> Table aliases in the FROM clause are not yet supported
#### Special Features
Some special query functions can be executed without a FROM clause. Get the current database with database():
```
taos> SELECT database();
database() |
=================================
sample |
```
If no default database was specified at login and the ```use``` command has not been used to switch databases, NULL is returned.
```
taos> select database();
database() |
=================================
NULL |
```
Get the server and client version numbers:
```
SELECT client_version()
SELECT server_version()
```
Server status check statement. If the server is healthy, it returns a number (e.g. 1); if the server is abnormal, it returns an error code. This SQL syntax is compatible with connection pools checking TDengine's status and with third-party tools checking the status of the database server, and it avoids the connection-loss problem in connection pools caused by using a wrong heartbeat-check SQL statement.
```
SELECT server_status()
SELECT server_status() AS result
```
#### Special Keywords in TAOS SQL
> TBNAME: in a super table query, TBNAME can be treated as a special tag representing the names of the child tables involved in the query<br>
\_c0: denotes the first column of a table (super table)
#### Tips
Get all child table names of a super table together with the related tag information:
```
SELECT TBNAME, location FROM temp_stable
```
Count the child tables under a super table:
```
SELECT COUNT(TBNAME) FROM temp_stable
```
Both queries above support only tag (TAGS) filter conditions in the Where clause. For example:
```
taos> select count(tbname) from temp_stable;
count(tbname) |
======================
2 |
taos> select count(tbname) from temp_stable where deviceid > 60000;
count(tbname) |
======================
1 |
```
- * returns all columns, or specific column names can be listed. Arithmetic can be applied to numeric columns, and output columns can be given names
- the where clause supports various logical conditions to filter numeric values, and wildcards to filter strings
- output is sorted by the first column's timestamp in ascending order by default, but descending order can be specified (_c0 denotes the first column, the timestamp). Using ORDER BY on other fields is illegal
- LIMIT controls the number of output rows and OFFSET specifies the first row to output. LIMIT/OFFSET is applied to the result set after ORDER BY
- with ">>" the result can be exported to a specified file
### 支持的条件过滤操作
| Operation | Note | Applicable Data Types |
| --------- | ----------------------------- | ------------------------------------- |
| > | larger than | **`timestamp`** and all numeric types |
| < | smaller than | **`timestamp`** and all numeric types |
| >= | larger than or equal to | **`timestamp`** and all numeric types |
| <= | smaller than or equal to | **`timestamp`** and all numeric types |
| = | equal to | all types |
| <> | not equal to | all types |
| % | match with any char sequences | **`binary`** **`nchar`** |
| _ | match with a single char | **`binary`** **`nchar`** |
1. To filter ranges of several fields at once, join the conditions with the keyword AND; query conditions joined by OR are not supported yet.
2. For a single field, only a single-interval filter is supported: e.g. value>20 and value<30 is a legal filter condition, while value<20 AND value<>5 is not.
### Some Examples
- For the examples below, table tb1 is created with:
```mysql
CREATE TABLE tb1 (ts timestamp, col1 int, col2 float, col3 binary(50))
```
- Query all records of the last hour in tb1:
```mysql
SELECT * FROM tb1 WHERE ts >= NOW - 1h
```
- Query the records in tb1 from 2018-06-01 08:00:00.000 to 2018-06-02 08:00:00.000 whose col3 string ends with 'nny', ordered by timestamp descending:
```mysql
SELECT * FROM tb1 WHERE ts > '2018-06-01 08:00:00.000' AND ts <= '2018-06-02 08:00:00.000' AND col3 LIKE '%nny' ORDER BY ts DESC
```
- Query the sum of col1 and col2, named complex, for ts greater than 2018-06-01 08:00:00.000 and col2 greater than 1.2, outputting only 10 records starting from the 5th:
```mysql
SELECT (col1 + col2) AS 'complex' FROM tb1 WHERE ts > '2018-06-01 08:00:00.000' and col2 > 1.2 LIMIT 10 OFFSET 5
```
- Query the records of the past 10 minutes with col2 greater than 3.14, exporting the result to the file `/home/testoutpu.csv`:
```mysql
SELECT COUNT(*) FROM tb1 WHERE ts >= NOW - 10m AND col2 > 3.14 >> /home/testoutpu.csv
```
## SQL Functions
### Aggregate Functions
TDengine supports aggregate queries on data. The supported aggregate and selection functions are as follows:
- **COUNT**
```mysql
SELECT COUNT([*|field_name]) FROM tb_name [WHERE clause]
```
Function: counts the rows of a table/super table, or the non-NULL values of a column.
Return type: long integer INT64.
Applicable columns: all.
Applies to: tables, super tables.
Notes: 1) the asterisk (*) can replace a specific column and returns the total number of records; 2) for the same table, the result is identical for any column containing no NULLs; 3) if the target is a specific column, the number of non-NULL values in that column is returned.
- **AVG**
```mysql
SELECT AVG(field_name) FROM tb_name [WHERE clause]
```
Function: computes the average of a column of a table/super table.
Return type: double.
Applicable columns: all except timestamp, binary, nchar, bool.
Applies to: tables, super tables.
- **TWA**
```mysql
SELECT TWA(field_name) FROM tb_name WHERE clause
```
Function: time-weighted average; computes the time-weighted average of a column of a table/super table over a period of time.
Return type: double.
Applicable columns: all except timestamp, binary, nchar, bool.
Note: a time-weighted average (TWA) query must specify the _start time_ and _end time_ of the query period.
Applies to: tables, super tables.
- **SUM**
```mysql
SELECT SUM(field_name) FROM tb_name [WHERE clause]
```
Function: computes the sum of a column of a table/super table.
Return type: double or long integer INT64.
Applicable columns: all except timestamp, binary, nchar, bool.
Applies to: tables, super tables.
- **STDDEV**
```mysql
SELECT STDDEV(field_name) FROM tb_name [WHERE clause]
```
Function: computes the standard deviation of a column of a table.
Return type: double.
Applicable columns: all except timestamp, binary, nchar, bool.
Applies to: tables.
- **LEASTSQUARES**
```mysql
SELECT LEASTSQUARES(field_name, start_val, step_val) FROM tb_name [WHERE clause]
```
Function: fits a straight line of a column's values over the primary-key timestamps; start_val is the initial value of the independent variable and step_val its step.
Return type: a string expression (slope, intercept).
Applicable columns: all except timestamp, binary, nchar, bool.
Note: the independent variable is the timestamp, and the dependent variable is the column's values.
Applies to: tables.
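A minimal sketch against the sample table temp_tb_1; the start value and step are illustrative:
```mysql
-- Fit temperature over time, with the independent variable starting at 0 and stepping by 1.
SELECT LEASTSQUARES(temperature, 0, 1) FROM temp_tb_1;
```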
### Selection Functions
- **MIN**
```mysql
SELECT MIN(field_name) FROM {tb_name | stb_name} [WHERE clause]
```
Function: the minimum value of a column of a table/super table.
Return type: same as the applied column.
Applicable columns: all except timestamp, binary, nchar, bool.
- **MAX**
```mysql
SELECT MAX(field_name) FROM { tb_name | stb_name } [WHERE clause]
```
Function: the maximum value of a column of a table/super table.
Return type: same as the applied column.
Applicable columns: all except timestamp, binary, nchar, bool.
- **FIRST**
```mysql
SELECT FIRST(field_name) FROM { tb_name | stb_name } [WHERE clause]
```
Function: the first non-NULL value written to a column of a table/super table.
Return type: same as the applied column.
Applicable columns: all.
Notes: 1) to return the first (smallest-timestamp) non-NULL value of every column, use FIRST(*); 2) if a column of the result set is entirely NULL, its return value is also NULL; 3) if all columns of the result set are NULL, no result is returned.
- **LAST**
```mysql
SELECT LAST(field_name) FROM { tb_name | stb_name } [WHERE clause]
```
Function: the last non-NULL value written to a column of a table/super table.
Return type: same as the applied column.
Applicable columns: all.
Notes: 1) to return the last (largest-timestamp) non-NULL value of every column, use LAST(*); 2) if a column of the result set is entirely NULL, its return value is also NULL; if all columns of the result set are NULL, no result is returned.
- **TOP**
```mysql
SELECT TOP(field_name, K) FROM { tb_name | stb_name } [WHERE clause]
```
Function: the largest *k* non-NULL values of a column of a table/super table. If more than k column values tie for the largest, the ones with smaller timestamps are returned.
Return type: same as the applied column.
Applicable columns: all except timestamp, binary, nchar, bool.
Notes: 1) the range of *k* is 1≤*k*≤100; 2) the associated timestamp column is returned as well. See the sketch after this list of functions.
- **BOTTOM**
```mysql
SELECT BOTTOM(field_name, K) FROM { tb_name | stb_name } [WHERE clause]
```
Function: the smallest *k* non-NULL values of a column of a table/super table. If more than k column values tie for the smallest, the ones with smaller timestamps are returned.
Return type: same as the applied column.
Applicable columns: all except timestamp, binary, nchar, bool.
Notes: 1) the range of *k* is 1≤*k*≤100; 2) the associated timestamp column is returned as well.
- **PERCENTILE**
```mysql
SELECT PERCENTILE(field_name, P) FROM { tb_name | stb_name } [WHERE clause]
```
Function: the value of the P-th percentile of a column of a table.
Return type: double.
Applicable columns: all except timestamp, binary, nchar, bool.
Note: the range of *P* is 0≤*P*≤100; P=0 is equivalent to MIN and P=100 to MAX.
- **APERCENTILE**
```mysql
SELECT APERCENTILE(field_name, P) FROM { tb_name | stb_name } [WHERE clause]
```
Function: the approximate value of the P-th percentile of a column of a table; similar to PERCENTILE but returning an approximate result.
Return type: double.
Applicable columns: all except timestamp, binary, nchar, bool.
Note: the range of *P* is 0≤*P*≤100; P=0 is equivalent to MIN and P=100 to MAX. ```APERCENTILE``` is recommended, since it performs far better than ```PERCENTILE```
- **LAST_ROW**
```mysql
SELECT LAST_ROW(field_name) FROM { tb_name | stb_name }
```
Function: returns the last record of a table (super table).
Return type: same as the applied column.
Applicable columns: all.
Note: unlike last, last_row does not accept a time-range restriction; it always returns the last record.
### Computation Functions
- **DIFF**
```mysql
SELECT DIFF(field_name) FROM tb_name [WHERE clause]
```
Function: the difference between a column's value and the corresponding value of the previous row.
Return type: same as the applied column.
Applicable columns: all except timestamp, binary, nchar, bool.
Note: the number of output rows is the total number of rows in the range minus one; the first row has no output.
- **SPREAD**
```mysql
SELECT SPREAD(field_name) FROM { tb_name | stb_name } [WHERE clause]
```
Function: the difference between the maximum and the minimum of a column of a table/super table.
Return type: double.
Applicable columns: all except binary, nchar, bool.
Note: it may be applied to a TIMESTAMP column, in which case it represents the time span covered by the records.
- **Arithmetic Operations**
```mysql
SELECT field_name [+|-|*|/|%][Value|field_name] FROM { tb_name | stb_name } [WHERE clause]
```
Function: addition, subtraction, multiplication, division, and modulo over one or more columns of a table/super table.
Return type: double.
Applicable columns: all except timestamp, binary, nchar, bool.
Notes: 1) computations between two or more columns are supported, and parentheses control the precedence; 2) NULL does not take part in computation: if any participating value of a row is NULL, that row's result is NULL.
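A minimal sketch against the sample table temp_tb_1; the conversion is illustrative:
```mysql
-- Convert Celsius readings to Fahrenheit; parentheses control the precedence.
SELECT (temperature * 9 / 5 + 32) AS temp_f FROM temp_tb_1;
```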
## Time-Dimension Aggregation
TDengine supports aggregation by time window: data in a table can be split into time windows and then aggregated, e.g. a temperature sensor samples once per second but the average temperature over every 10 minutes is needed. This kind of aggregation is suitable for down-sampling. The syntax is:
```mysql
SELECT function_list FROM tb_name
[WHERE where_condition]
INTERVAL (interval)
[FILL ({NONE | VALUE | PREV | NULL | LINEAR})]
SELECT function_list FROM stb_name
[WHERE where_condition]
INTERVAL (interval)
[FILL ({ VALUE | PREV | NULL | LINEAR})]
[GROUP BY tags]
```
- The length of the aggregation window is specified by the INTERVAL keyword; the minimum interval is 10 milliseconds (10a). The aggregation and selection functions usable in an aggregation query are limited to single-output functions (count, avg, sum, stddev, leastsquares, percentile, min, max, first, last); functions with multi-row output (such as top, bottom, diff) and arithmetic expressions cannot be used
- The WHERE clause specifies the query's start and end time and other filter conditions
- The FILL clause specifies how to fill a time range with missing data. The fill modes are:
1. No fill: NONE (the default fill mode).
2. VALUE fill: fill with a fixed value, which must be specified, e.g. fill(value, 1.23).
3. NULL fill: fill the data with NULL, e.g. fill(null).
4. PREV fill: fill the data with the previous non-NULL value, e.g. fill(prev).
Notes:
1. A FILL clause may generate a large amount of filled output, so always specify the time range of the query. For each query, the system can return at most 10 million interpolated results.
2. In time-dimension aggregation, the returned time series is strictly monotonically increasing.
3. If the query target is a super table, the aggregate functions apply to the data of all tables under it that satisfy the value filter conditions. Without a group by clause, the returned result is strictly monotonically increasing along the time series; with a group by clause, the results within each group are not guaranteed to be strictly monotonically increasing along the time series.
**Example:** the temperature table is created with:
```mysql
create table sensor(ts timestamp, degree double, pm25 smallint)
```
Over the sensor data, compute the average, maximum, fitted trend line, and median (50th percentile) of the temperature for the past 24 hours, in 10-minute windows; windows with no computed value are filled with the previous non-NULL value:
```mysql
SELECT AVG(degree),MAX(degree),LEASTSQUARES(degree), PERCENTILE(degree, 50) FROM sensor
WHERE TS>=NOW-1d
INTERVAL(10m)
FILL(PREV);
```
Some files were not shown because too many files have changed in this diff